chore: move things to debug logging (#6610)

This commit is contained in:
Sarah Wooders
2025-12-11 18:11:51 -08:00
committed by Caren Thomas
parent fecf503ad9
commit c9ad2fd7c4
6 changed files with 19 additions and 5 deletions

View File

@@ -157,6 +157,9 @@ async def _prepare_in_context_messages_no_persist_async(
if input_messages[0].type == "approval":
# User is trying to send an approval response
if current_in_context_messages and current_in_context_messages[-1].role != "approval":
logger.warning(
f"Cannot process approval response: No tool call is currently awaiting approval. Last message: {current_in_context_messages[-1]}"
)
raise ValueError(
"Cannot process approval response: No tool call is currently awaiting approval. "
"Please send a regular message to interact with the agent."

View File

@@ -675,12 +675,18 @@ class LettaAgentV3(LettaAgentV2):
raise e
except Exception as e:
if isinstance(e, ContextWindowExceededError) and llm_request_attempt < summarizer_settings.max_summarizer_retries:
# Retry case
self.logger.info(
f"Context window exceeded (error {e}), trying to compact messages attempt {llm_request_attempt + 1} of {summarizer_settings.max_summarizer_retries + 1}"
)
# checkpoint summarized messages
# TODO: might want to delay this checkpoint in case of corrupted state
try:
summary_message, messages = await self.compact(
messages, trigger_threshold=self.agent_state.llm_config.context_window
)
self.logger.info("Summarization succeeded, continuing to retry LLM request")
continue
except SystemPromptTokenExceededError:
self.stop_reason = LettaStopReason(
stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value
@@ -783,6 +789,9 @@ class LettaAgentV3(LettaAgentV2):
# check compaction
if self.context_token_estimate is not None and self.context_token_estimate > self.agent_state.llm_config.context_window:
self.logger.info(
f"Context window exceeded (current: {self.context_token_estimate}, threshold: {self.agent_state.llm_config.context_window}), trying to compact messages"
)
summary_message, messages = await self.compact(messages, trigger_threshold=self.agent_state.llm_config.context_window)
# TODO: persist + return the summary message
# TODO: convert this to a SummaryMessage

View File

@@ -97,7 +97,7 @@ class EventLoopWatchdog:
pass
# ALWAYS log every check to prove watchdog is alive
logger.info(
logger.debug(
f"WATCHDOG_CHECK: heartbeat_age={time_since_heartbeat:.1f}s, consecutive_hangs={consecutive_hangs}, tasks={task_count}"
)

View File

@@ -721,6 +721,7 @@ def start_server(
timeout_keep_alive=settings.uvicorn_timeout_keep_alive,
ssl_keyfile="certs/localhost-key.pem",
ssl_certfile="certs/localhost.pem",
access_log=False,
)
else:
@@ -759,4 +760,5 @@ def start_server(
workers=settings.uvicorn_workers,
reload=reload or settings.uvicorn_reload,
timeout_keep_alive=settings.uvicorn_timeout_keep_alive,
access_log=False,
)

View File

@@ -109,7 +109,7 @@ class LoggingMiddleware(BaseHTTPMiddleware):
if context:
update_log_context(**context)
logger.info(
logger.debug(
f"Incoming request: {request.method} {request.url.path}",
extra={
"method": request.method,

View File

@@ -292,7 +292,7 @@ def create_token_counter(
if use_gemini:
client = LLMClient.create(provider_type=model_endpoint_type, actor=actor)
token_counter = GeminiTokenCounter(client, model)
logger.info(
logger.debug(
f"Using GeminiTokenCounter for agent_id={agent_id}, model={model}, "
f"model_endpoint_type={model_endpoint_type}, "
f"environment={settings.environment}"
@@ -301,14 +301,14 @@ def create_token_counter(
anthropic_client = LLMClient.create(provider_type=ProviderType.anthropic, actor=actor)
counter_model = model if model_endpoint_type == "anthropic" else None
token_counter = AnthropicTokenCounter(anthropic_client, counter_model)
logger.info(
logger.debug(
f"Using AnthropicTokenCounter for agent_id={agent_id}, model={counter_model}, "
f"model_endpoint_type={model_endpoint_type}, "
f"environment={settings.environment}"
)
else:
token_counter = ApproxTokenCounter()
logger.info(
logger.debug(
f"Using ApproxTokenCounter for agent_id={agent_id}, model={model}, "
f"model_endpoint_type={model_endpoint_type}, "
f"environment={settings.environment}"