handle llm error on request_async [LET-5403] (#5408)

Co-authored-by: Ari Webb <ari@letta.com>
This commit is contained in:
@@ -38,7 +38,11 @@ class SimpleLLMRequestAdapter(LettaLLMRequestAdapter):
         self.request_data = request_data

         # Make the blocking LLM request
-        self.response_data = await self.llm_client.request_async(request_data, self.llm_config)
+        try:
+            self.response_data = await self.llm_client.request_async(request_data, self.llm_config)
+        except Exception as e:
+            raise self.llm_client.handle_llm_error(e)
+
         self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns()

         # Convert response to chat completion format
Reference in New Issue
Block a user