From 2fc592e0b621d15dc8d2eed84071b0b98473b7b6 Mon Sep 17 00:00:00 2001 From: Charles Packer Date: Tue, 20 Jan 2026 18:45:45 -0800 Subject: [PATCH] feat(core): add image support in tool returns [LET-7140] (#8985) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(core): add image support in tool returns [LET-7140] Enable tool_return to support both string and ImageContent content parts, matching the pattern used for user message inputs. This allows tools executed client-side to return images back to the agent. Changes: - Add LettaToolReturnContentUnion type for text/image content parts - Update ToolReturn schema to accept Union[str, List[content parts]] - Update converters for each provider: - OpenAI Chat Completions: placeholder text for images - OpenAI Responses API: full image support - Anthropic: full image support with base64 - Google: placeholder text for images - Add resolve_tool_return_images() for URL-to-base64 conversion - Make create_approval_response_message_from_input() async 🐾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * fix(core): support images in Google tool returns as sibling parts Following the gemini-cli pattern: images in tool returns are sent as sibling inlineData parts alongside the functionResponse, rather than inside it. 🐾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * test(core): add integration tests for multi-modal tool returns [LET-7140] Tests verify that: - Models with image support (Anthropic, OpenAI Responses API) can see images in tool returns and identify the secret text - Models without image support (Chat Completions) get placeholder text and cannot see the actual image content - Tool returns with images persist correctly in the database Uses secret.png test image containing hidden text "FIREBRAWL" that models must identify to pass the test. Also fixes misleading comment about Anthropic only supporting base64 images - they support URLs too, we just pre-resolve for consistency. 🐾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * refactor: simplify tool return image support implementation Reduce code verbosity while maintaining all functionality: - Extract _resolve_url_to_base64() helper in message_helper.py (eliminates duplication) - Add _get_text_from_part() helper for text extraction - Add _get_base64_image_data() helper for image data extraction - Add _tool_return_to_google_parts() to simplify Google implementation - Add _image_dict_to_data_url() for OpenAI Responses format - Use walrus operator and list comprehensions where appropriate - Add integration_test_multi_modal_tool_returns.py to CI workflow Net change: -120 lines while preserving all features and test coverage. 👾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * fix(tests): improve prompt for multi-modal tool return tests Make prompts more direct to reduce LLM flakiness: - Simplify tool description: "Retrieves a secret image with hidden text. Call this function to get the image." - Change user prompt from verbose request to direct command: "Call the get_secret_image function now." - Apply to both test methods This reduces ambiguity and makes tool calling more reliable across different LLM models. 👾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * fix bugs * test(core): add google_ai/gemini-2.0-flash-exp to multi-modal tests Add Gemini model to test coverage for multi-modal tool returns. 
Google AI already supports images in tool returns via sibling inlineData parts. 👾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta * fix(ui): handle multi-modal tool_return type in frontend components Convert Union to string for display: - ViewRunDetails: Convert array to '[Image here]' placeholder - ToolCallMessageComponent: Convert array to '[Image here]' placeholder Fixes TypeScript errors in web, desktop-ui, and docker-ui type-checks. 👾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta --------- Co-authored-by: Letta Co-authored-by: Caren Thomas --- fern/openapi.json | 149 ++++++- letta/agents/helpers.py | 2 +- letta/helpers/message_helper.py | 58 +++ letta/schemas/letta_message.py | 8 +- letta/schemas/letta_message_content.py | 42 ++ letta/schemas/message.py | 206 ++++++++- letta/server/rest_api/app.py | 2 + letta/server/rest_api/utils.py | 29 +- letta/services/run_manager.py | 2 +- tests/data/secret.png | Bin 0 -> 32581 bytes ...tegration_test_multi_modal_tool_returns.py | 408 ++++++++++++++++++ 11 files changed, 864 insertions(+), 42 deletions(-) create mode 100644 tests/data/secret.png create mode 100644 tests/integration_test_multi_modal_tool_returns.py diff --git a/fern/openapi.json b/fern/openapi.json index 3ae41301..b3538ac0 100644 --- a/fern/openapi.json +++ b/fern/openapi.json @@ -37275,7 +37275,7 @@ "anyOf": [ { "items": { - "$ref": "#/components/schemas/letta__schemas__message__ToolReturn" + "$ref": "#/components/schemas/letta__schemas__message__ToolReturn-Output" }, "type": "array" }, @@ -37391,7 +37391,7 @@ "$ref": "#/components/schemas/ApprovalReturn" }, { - "$ref": "#/components/schemas/letta__schemas__message__ToolReturn" + "$ref": "#/components/schemas/letta__schemas__message__ToolReturn-Output" } ] }, @@ -46069,7 +46069,7 @@ "anyOf": [ { "items": { - "$ref": "#/components/schemas/letta__schemas__message__ToolReturn" + "$ref": "#/components/schemas/letta__schemas__message__ToolReturn-Input" }, "type": "array" }, @@ -46131,7 +46131,7 @@ "$ref": "#/components/schemas/ApprovalReturn" }, { - "$ref": "#/components/schemas/letta__schemas__message__ToolReturn" + "$ref": "#/components/schemas/letta__schemas__message__ToolReturn-Input" } ] }, @@ -46374,8 +46374,19 @@ "default": "tool" }, "tool_return": { - "type": "string", - "title": "Tool Return" + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/LettaToolReturnContentUnion" + }, + "type": "array" + }, + { + "type": "string" + } + ], + "title": "Tool Return", + "description": "The tool return value - either a string or list of content parts (text/image)" }, "status": { "type": "string", @@ -46783,7 +46794,7 @@ "title": "UpdateStreamableHTTPMCPServer", "description": "Update schema for Streamable HTTP MCP server - all fields optional" }, - "letta__schemas__message__ToolReturn": { + "letta__schemas__message__ToolReturn-Input": { "properties": { "tool_call_id": { "anyOf": [ @@ -46836,12 +46847,117 @@ { "type": "string" }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/TextContent" + }, + { + "$ref": "#/components/schemas/ImageContent" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContent", + "text": "#/components/schemas/TextContent" + } + } + }, + "type": "array" + }, { "type": "null" } ], "title": "Func Response", - "description": "The function response string" + "description": "The function response - either a string or list of content parts (text/image)" + } + }, + "type": "object", + 
"required": ["status"], + "title": "ToolReturn" + }, + "letta__schemas__message__ToolReturn-Output": { + "properties": { + "tool_call_id": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "title": "Tool Call Id", + "description": "The ID for the tool call" + }, + "status": { + "type": "string", + "enum": ["success", "error"], + "title": "Status", + "description": "The status of the tool call" + }, + "stdout": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Stdout", + "description": "Captured stdout (e.g. prints, logs) from the tool invocation" + }, + "stderr": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Stderr", + "description": "Captured stderr from the tool invocation" + }, + "func_response": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/TextContent" + }, + { + "$ref": "#/components/schemas/ImageContent" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContent", + "text": "#/components/schemas/TextContent" + } + } + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Func Response", + "description": "The function response - either a string or list of content parts (text/image)" } }, "type": "object", @@ -47330,6 +47446,23 @@ } } }, + "LettaToolReturnContentUnion": { + "oneOf": [ + { + "$ref": "#/components/schemas/TextContent" + }, + { + "$ref": "#/components/schemas/ImageContent" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/TextContent", + "image": "#/components/schemas/ImageContent" + } + } + }, "LettaUserMessageContentUnion": { "oneOf": [ { diff --git a/letta/agents/helpers.py b/letta/agents/helpers.py index b17f4ae9..dffe50ff 100644 --- a/letta/agents/helpers.py +++ b/letta/agents/helpers.py @@ -235,7 +235,7 @@ async def _prepare_in_context_messages_no_persist_async( "Please send a regular message to interact with the agent." 
) validate_approval_tool_call_ids(current_in_context_messages[-1], input_messages[0]) - new_in_context_messages = create_approval_response_message_from_input( + new_in_context_messages = await create_approval_response_message_from_input( agent_state=agent_state, input_message=input_messages[0], run_id=run_id ) if len(input_messages) > 1: diff --git a/letta/helpers/message_helper.py b/letta/helpers/message_helper.py index 6250bdb3..f4e142df 100644 --- a/letta/helpers/message_helper.py +++ b/letta/helpers/message_helper.py @@ -166,3 +166,61 @@ async def _convert_message_create_to_message( batch_item_id=message_create.batch_item_id, run_id=run_id, ) + + +async def _resolve_url_to_base64(url: str) -> tuple[str, str]: + """Resolve URL to base64 data and media type.""" + if url.startswith("file://"): + parsed = urlparse(url) + file_path = unquote(parsed.path) + image_bytes = await asyncio.to_thread(lambda: open(file_path, "rb").read()) + media_type, _ = mimetypes.guess_type(file_path) + media_type = media_type or "image/jpeg" + else: + image_bytes, media_type = await _fetch_image_from_url(url) + media_type = media_type or mimetypes.guess_type(url)[0] or "image/png" + + image_data = base64.standard_b64encode(image_bytes).decode("utf-8") + return image_data, media_type + + +async def resolve_tool_return_images(func_response: str | list) -> str | list: + """Resolve URL and LettaImage sources to base64 for tool returns.""" + if isinstance(func_response, str): + return func_response + + resolved = [] + for part in func_response: + if isinstance(part, ImageContent): + if part.source.type == ImageSourceType.url: + image_data, media_type = await _resolve_url_to_base64(part.source.url) + part.source = Base64Image(media_type=media_type, data=image_data) + elif part.source.type == ImageSourceType.letta and not part.source.data: + pass + resolved.append(part) + elif isinstance(part, TextContent): + resolved.append(part) + elif isinstance(part, dict): + if part.get("type") == "image" and part.get("source", {}).get("type") == "url": + url = part["source"].get("url") + if url: + image_data, media_type = await _resolve_url_to_base64(url) + resolved.append( + ImageContent( + source=Base64Image( + media_type=media_type, + data=image_data, + detail=part.get("source", {}).get("detail"), + ) + ) + ) + else: + resolved.append(part) + elif part.get("type") == "text": + resolved.append(TextContent(text=part.get("text", ""))) + else: + resolved.append(part) + else: + resolved.append(part) + + return resolved diff --git a/letta/schemas/letta_message.py b/letta/schemas/letta_message.py index a403460c..712071ae 100644 --- a/letta/schemas/letta_message.py +++ b/letta/schemas/letta_message.py @@ -7,8 +7,10 @@ from pydantic import BaseModel, Field, field_serializer, field_validator from letta.schemas.letta_message_content import ( LettaAssistantMessageContentUnion, + LettaToolReturnContentUnion, LettaUserMessageContentUnion, get_letta_assistant_message_content_union_str_json_schema, + get_letta_tool_return_content_union_str_json_schema, get_letta_user_message_content_union_str_json_schema, ) @@ -35,7 +37,11 @@ class ApprovalReturn(MessageReturn): class ToolReturn(MessageReturn): type: Literal[MessageReturnType.tool] = Field(default=MessageReturnType.tool, description="The message type to be created.") - tool_return: str + tool_return: Union[str, List[LettaToolReturnContentUnion]] = Field( + ..., + description="The tool return value - either a string or list of content parts (text/image)", + 
json_schema_extra=get_letta_tool_return_content_union_str_json_schema(), + ) status: Literal["success", "error"] tool_call_id: str stdout: Optional[List[str]] = None diff --git a/letta/schemas/letta_message_content.py b/letta/schemas/letta_message_content.py index 24265777..7c62ebd3 100644 --- a/letta/schemas/letta_message_content.py +++ b/letta/schemas/letta_message_content.py @@ -138,6 +138,48 @@ def get_letta_user_message_content_union_str_json_schema(): } +# ------------------------------- +# Tool Return Content Types +# ------------------------------- + + +LettaToolReturnContentUnion = Annotated[ + Union[TextContent, ImageContent], + Field(discriminator="type"), +] + + +def create_letta_tool_return_content_union_schema(): + return { + "oneOf": [ + {"$ref": "#/components/schemas/TextContent"}, + {"$ref": "#/components/schemas/ImageContent"}, + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/TextContent", + "image": "#/components/schemas/ImageContent", + }, + }, + } + + +def get_letta_tool_return_content_union_str_json_schema(): + """Schema that accepts either string or list of content parts for tool returns.""" + return { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/LettaToolReturnContentUnion", + }, + }, + {"type": "string"}, + ], + } + + # ------------------------------- # Assistant Content Types # ------------------------------- diff --git a/letta/schemas/message.py b/letta/schemas/message.py index 0af6d2f0..102b8824 100644 --- a/letta/schemas/message.py +++ b/letta/schemas/message.py @@ -50,6 +50,7 @@ from letta.schemas.letta_message_content import ( ImageContent, ImageSourceType, LettaMessageContentUnion, + LettaToolReturnContentUnion, OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, @@ -71,6 +72,34 @@ def truncate_tool_return(content: Optional[str], limit: Optional[int]) -> Option return content[:limit] + f"... 
[truncated {len(content) - limit} chars]" +def _get_text_from_part(part: Union[TextContent, ImageContent, dict]) -> Optional[str]: + """Extract text from a content part, returning None for images.""" + if isinstance(part, TextContent): + return part.text + elif isinstance(part, dict) and part.get("type") == "text": + return part.get("text", "") + return None + + +def tool_return_to_text(func_response: Optional[Union[str, List]]) -> Optional[str]: + """Convert tool return content to text, replacing images with placeholders.""" + if func_response is None: + return None + if isinstance(func_response, str): + return func_response + + text_parts = [text for part in func_response if (text := _get_text_from_part(part))] + image_count = sum( + 1 for part in func_response if isinstance(part, ImageContent) or (isinstance(part, dict) and part.get("type") == "image") + ) + + result = "\n".join(text_parts) + if image_count > 0: + placeholder = "[Image omitted]" if image_count == 1 else f"[{image_count} images omitted]" + result = (result + " " + placeholder) if result else placeholder + return result if result else None + + def add_inner_thoughts_to_tool_call( tool_call: OpenAIToolCall, inner_thoughts: str, @@ -786,8 +815,14 @@ class Message(BaseMessage): for tool_return in self.tool_returns: parsed_data = self._parse_tool_response(tool_return.func_response) + # Preserve multi-modal content (ToolReturn supports Union[str, List]) + if isinstance(tool_return.func_response, list): + tool_return_value = tool_return.func_response + else: + tool_return_value = parsed_data["message"] + tool_return_obj = LettaToolReturn( - tool_return=parsed_data["message"], + tool_return=tool_return_value, status=parsed_data["status"], tool_call_id=tool_return.tool_call_id, stdout=tool_return.stdout, @@ -801,11 +836,18 @@ class Message(BaseMessage): first_tool_return = all_tool_returns[0] + # Convert deprecated string-only field to text (preserve images in tool_returns list) + deprecated_tool_return_text = ( + tool_return_to_text(first_tool_return.tool_return) + if isinstance(first_tool_return.tool_return, list) + else first_tool_return.tool_return + ) + return ToolReturnMessage( id=self.id, date=self.created_at, # deprecated top-level fields populated from first tool return - tool_return=first_tool_return.tool_return, + tool_return=deprecated_tool_return_text, status=first_tool_return.status, tool_call_id=first_tool_return.tool_call_id, stdout=first_tool_return.stdout, @@ -840,11 +882,11 @@ class Message(BaseMessage): """Check if message has exactly one text content item.""" return self.content and len(self.content) == 1 and isinstance(self.content[0], TextContent) - def _parse_tool_response(self, response_text: str) -> dict: + def _parse_tool_response(self, response_text: Union[str, List]) -> dict: """Parse tool response JSON and extract message and status. 
Args: - response_text: Raw JSON response text + response_text: Raw JSON response text OR list of content parts (for multi-modal) Returns: Dictionary with 'message' and 'status' keys @@ -852,6 +894,14 @@ class Message(BaseMessage): Raises: ValueError: If JSON parsing fails """ + # Handle multi-modal content (list with text/images) + if isinstance(response_text, list): + text_representation = tool_return_to_text(response_text) or "[Multi-modal content]" + return { + "message": text_representation, + "status": "success", + } + try: function_return = parse_json(response_text) return { @@ -1301,7 +1351,9 @@ class Message(BaseMessage): tool_return = self.tool_returns[0] if not tool_return.tool_call_id: raise TypeError("OpenAI API requires tool_call_id to be set.") - func_response = truncate_tool_return(tool_return.func_response, tool_return_truncation_chars) + # Convert to text first (replaces images with placeholders), then truncate + func_response_text = tool_return_to_text(tool_return.func_response) + func_response = truncate_tool_return(func_response_text, tool_return_truncation_chars) openai_message = { "content": func_response, "role": self.role, @@ -1356,8 +1408,9 @@ class Message(BaseMessage): for tr in m.tool_returns: if not tr.tool_call_id: raise TypeError("ToolReturn came back without a tool_call_id.") - # Ensure explicit tool_returns are truncated for Chat Completions - func_response = truncate_tool_return(tr.func_response, tool_return_truncation_chars) + # Convert multi-modal to text (images → placeholders), then truncate + func_response_text = tool_return_to_text(tr.func_response) + func_response = truncate_tool_return(func_response_text, tool_return_truncation_chars) result.append( { "content": func_response, @@ -1456,17 +1509,17 @@ class Message(BaseMessage): ) elif self.role == "tool": - # Handle tool returns - similar pattern to Anthropic + # Handle tool returns - supports images via content arrays if self.tool_returns: for tool_return in self.tool_returns: if not tool_return.tool_call_id: raise TypeError("OpenAI Responses API requires tool_call_id to be set.") - func_response = truncate_tool_return(tool_return.func_response, tool_return_truncation_chars) + output = self._tool_return_to_responses_output(tool_return.func_response, tool_return_truncation_chars) message_dicts.append( { "type": "function_call_output", "call_id": tool_return.tool_call_id[:max_tool_id_length] if max_tool_id_length else tool_return.tool_call_id, - "output": func_response, + "output": output, } ) else: @@ -1534,6 +1587,50 @@ class Message(BaseMessage): return None + @staticmethod + def _image_dict_to_data_url(part: dict) -> Optional[str]: + """Convert image dict to data URL.""" + source = part.get("source", {}) + if source.get("type") == "base64" and source.get("data"): + media_type = source.get("media_type", "image/png") + return f"data:{media_type};base64,{source['data']}" + elif source.get("type") == "url": + return source.get("url") + return None + + @staticmethod + def _tool_return_to_responses_output( + func_response: Optional[Union[str, List]], + tool_return_truncation_chars: Optional[int] = None, + ) -> Union[str, List[dict]]: + """Convert tool return to OpenAI Responses API format.""" + if func_response is None: + return "" + if isinstance(func_response, str): + return truncate_tool_return(func_response, tool_return_truncation_chars) or "" + + output_parts: List[dict] = [] + for part in func_response: + if isinstance(part, TextContent): + text = truncate_tool_return(part.text, 
tool_return_truncation_chars) or "" + output_parts.append({"type": "input_text", "text": text}) + elif isinstance(part, ImageContent): + image_url = Message._image_source_to_data_url(part) + if image_url: + detail = getattr(part.source, "detail", None) or "auto" + output_parts.append({"type": "input_image", "image_url": image_url, "detail": detail}) + elif isinstance(part, dict): + if part.get("type") == "text": + text = truncate_tool_return(part.get("text", ""), tool_return_truncation_chars) or "" + output_parts.append({"type": "input_text", "text": text}) + elif part.get("type") == "image": + image_url = Message._image_dict_to_data_url(part) + if image_url: + detail = part.get("source", {}).get("detail", "auto") + output_parts.append({"type": "input_image", "image_url": image_url, "detail": detail}) + + return output_parts if output_parts else "" + @staticmethod def to_openai_responses_dicts_from_list( messages: List[Message], @@ -1550,6 +1647,68 @@ class Message(BaseMessage): ) return result + @staticmethod + def _get_base64_image_data(part: Union[ImageContent, dict]) -> Optional[tuple[str, str]]: + """Extract base64 data and media type from ImageContent or dict.""" + if isinstance(part, ImageContent): + source = part.source + if source.type == ImageSourceType.base64: + return source.data, source.media_type + elif source.type == ImageSourceType.letta and getattr(source, "data", None): + return source.data, getattr(source, "media_type", None) or "image/png" + elif isinstance(part, dict) and part.get("type") == "image": + source = part.get("source", {}) + if source.get("type") == "base64" and source.get("data"): + return source["data"], source.get("media_type", "image/png") + return None + + @staticmethod + def _tool_return_to_google_parts( + func_response: Optional[Union[str, List]], + tool_return_truncation_chars: Optional[int] = None, + ) -> tuple[str, List[dict]]: + """Extract text and image parts for Google API format.""" + if isinstance(func_response, str): + return truncate_tool_return(func_response, tool_return_truncation_chars) or "", [] + + text_parts = [] + image_parts = [] + for part in func_response: + if text := _get_text_from_part(part): + text_parts.append(text) + elif image_data := Message._get_base64_image_data(part): + data, media_type = image_data + image_parts.append({"inlineData": {"data": data, "mimeType": media_type}}) + + text = truncate_tool_return("\n".join(text_parts), tool_return_truncation_chars) or "" + if image_parts: + suffix = f"[{len(image_parts)} image(s) attached]" + text = f"{text}\n{suffix}" if text else suffix + + return text, image_parts + + @staticmethod + def _tool_return_to_anthropic_content( + func_response: Optional[Union[str, List]], + tool_return_truncation_chars: Optional[int] = None, + ) -> Union[str, List[dict]]: + """Convert tool return to Anthropic tool_result content format.""" + if func_response is None: + return "" + if isinstance(func_response, str): + return truncate_tool_return(func_response, tool_return_truncation_chars) or "" + + content: List[dict] = [] + for part in func_response: + if text := _get_text_from_part(part): + text = truncate_tool_return(text, tool_return_truncation_chars) or "" + content.append({"type": "text", "text": text}) + elif image_data := Message._get_base64_image_data(part): + data, media_type = image_data + content.append({"type": "image", "source": {"type": "base64", "data": data, "media_type": media_type}}) + + return content if content else "" + def to_anthropic_dict( self, current_model: str, @@ 
-1759,12 +1918,13 @@ class Message(BaseMessage): f"Message ID: {self.id}, Tool: {self.name or 'unknown'}, " f"Tool return index: {idx}/{len(self.tool_returns)}" ) - func_response = truncate_tool_return(tool_return.func_response, tool_return_truncation_chars) + # Convert to Anthropic format (supports images) + tool_result_content = self._tool_return_to_anthropic_content(tool_return.func_response, tool_return_truncation_chars) content.append( { "type": "tool_result", "tool_use_id": resolved_tool_call_id, - "content": func_response, + "content": tool_result_content, } ) if content: @@ -2003,7 +2163,7 @@ class Message(BaseMessage): elif self.role == "tool": # NOTE: Significantly different tool calling format, more similar to function calling format - # Handle tool returns - similar pattern to Anthropic + # Handle tool returns - Google supports images as sibling inlineData parts if self.tool_returns: parts = [] for tool_return in self.tool_returns: @@ -2013,26 +2173,24 @@ class Message(BaseMessage): # Use the function name if available, otherwise use tool_call_id function_name = self.name if self.name else tool_return.tool_call_id - # Truncate the tool return if needed - func_response = truncate_tool_return(tool_return.func_response, tool_return_truncation_chars) + text_content, image_parts = Message._tool_return_to_google_parts( + tool_return.func_response, tool_return_truncation_chars + ) - # NOTE: Google AI API wants the function response as JSON only, no string try: - function_response = parse_json(func_response) + function_response = parse_json(text_content) except: - function_response = {"function_response": func_response} + function_response = {"function_response": text_content} parts.append( { "functionResponse": { "name": function_name, - "response": { - "name": function_name, # NOTE: name twice... why? - "content": function_response, - }, + "response": {"name": function_name, "content": function_response}, } } ) + parts.extend(image_parts) google_ai_message = { "role": "function", @@ -2325,7 +2483,9 @@ class ToolReturn(BaseModel): status: Literal["success", "error"] = Field(..., description="The status of the tool call") stdout: Optional[List[str]] = Field(default=None, description="Captured stdout (e.g. 
prints, logs) from the tool invocation") stderr: Optional[List[str]] = Field(default=None, description="Captured stderr from the tool invocation") - func_response: Optional[str] = Field(None, description="The function response string") + func_response: Optional[Union[str, List[LettaToolReturnContentUnion]]] = Field( + None, description="The function response - either a string or list of content parts (text/image)" + ) class MessageSearchRequest(BaseModel): diff --git a/letta/server/rest_api/app.py b/letta/server/rest_api/app.py index d56fb4f2..e4ac3333 100644 --- a/letta/server/rest_api/app.py +++ b/letta/server/rest_api/app.py @@ -64,6 +64,7 @@ from letta.schemas.letta_message import create_letta_error_message_schema, creat from letta.schemas.letta_message_content import ( create_letta_assistant_message_content_union_schema, create_letta_message_content_union_schema, + create_letta_tool_return_content_union_schema, create_letta_user_message_content_union_schema, ) from letta.server.constants import REST_DEFAULT_PORT @@ -105,6 +106,7 @@ def generate_openapi_schema(app: FastAPI): letta_docs["components"]["schemas"]["LettaMessageUnion"] = create_letta_message_union_schema() letta_docs["components"]["schemas"]["LettaMessageContentUnion"] = create_letta_message_content_union_schema() letta_docs["components"]["schemas"]["LettaAssistantMessageContentUnion"] = create_letta_assistant_message_content_union_schema() + letta_docs["components"]["schemas"]["LettaToolReturnContentUnion"] = create_letta_tool_return_content_union_schema() letta_docs["components"]["schemas"]["LettaUserMessageContentUnion"] = create_letta_user_message_content_union_schema() letta_docs["components"]["schemas"]["LettaErrorMessage"] = create_letta_error_message_schema() diff --git a/letta/server/rest_api/utils.py b/letta/server/rest_api/utils.py index 25186f8e..66e15572 100644 --- a/letta/server/rest_api/utils.py +++ b/letta/server/rest_api/utils.py @@ -20,7 +20,7 @@ from letta.constants import ( ) from letta.errors import ContextWindowExceededError, RateLimitExceededError from letta.helpers.datetime_helpers import get_utc_time, get_utc_timestamp_ns, ns_to_ms -from letta.helpers.message_helper import convert_message_creates_to_messages +from letta.helpers.message_helper import convert_message_creates_to_messages, resolve_tool_return_images from letta.log import get_logger from letta.otel.context import get_ctx_attributes from letta.otel.metric_registry import MetricRegistry @@ -171,18 +171,26 @@ async def create_input_messages( return messages -def create_approval_response_message_from_input( +async def create_approval_response_message_from_input( agent_state: AgentState, input_message: ApprovalCreate, run_id: Optional[str] = None ) -> List[Message]: - def maybe_convert_tool_return_message(maybe_tool_return: LettaToolReturn): + async def maybe_convert_tool_return_message(maybe_tool_return: LettaToolReturn): if isinstance(maybe_tool_return, LettaToolReturn): - packaged_function_response = package_function_response( - maybe_tool_return.status == "success", maybe_tool_return.tool_return, agent_state.timezone - ) + tool_return_content = maybe_tool_return.tool_return + + # Handle tool_return content - can be string or list of content parts (text/image) + if isinstance(tool_return_content, str): + # String content - wrap with package_function_response as before + func_response = package_function_response(maybe_tool_return.status == "success", tool_return_content, agent_state.timezone) + else: + # List of content parts (text/image) - 
resolve URL images to base64 first + resolved_content = await resolve_tool_return_images(tool_return_content) + func_response = resolved_content + return ToolReturn( tool_call_id=maybe_tool_return.tool_call_id, status=maybe_tool_return.status, - func_response=packaged_function_response, + func_response=func_response, stdout=maybe_tool_return.stdout, stderr=maybe_tool_return.stderr, ) @@ -196,6 +204,11 @@ def create_approval_response_message_from_input( getattr(input_message, "approval_request_id", None), ) + # Process all tool returns concurrently (for async image resolution) + import asyncio + + converted_approvals = await asyncio.gather(*[maybe_convert_tool_return_message(approval) for approval in approvals_list]) + return [ Message( role=MessageRole.approval, @@ -204,7 +217,7 @@ def create_approval_response_message_from_input( approval_request_id=input_message.approval_request_id, approve=input_message.approve, denial_reason=input_message.reason, - approvals=[maybe_convert_tool_return_message(approval) for approval in approvals_list], + approvals=list(converted_approvals), run_id=run_id, group_id=input_message.group_id if input_message.group_id diff --git a/letta/services/run_manager.py b/letta/services/run_manager.py index be550734..4aedc99e 100644 --- a/letta/services/run_manager.py +++ b/letta/services/run_manager.py @@ -719,7 +719,7 @@ class RunManager: ) # Use the standard function to create properly formatted approval response messages - approval_response_messages = create_approval_response_message_from_input( + approval_response_messages = await create_approval_response_message_from_input( agent_state=agent_state, input_message=approval_input, run_id=run_id, diff --git a/tests/data/secret.png b/tests/data/secret.png new file mode 100644 index 0000000000000000000000000000000000000000..c75d088473e9e3d2f610827fb5a28134aff45b2c GIT binary patch literal 32581 zcmdRVby!s0+BcvG0uoY!q)1ChcZh;C(miyI4BagP0s<-}IUwEL4Z=w0Fm#R5T>}gQ z-}s#Kp67hucRl|9&UNkCd-lH9TK8J}UcWmhQbSGQ0Uk9T1_s6h#n&%2F);2)U|?Wk z;NC;u!3kg1!oa|5u$Pn5P?VEn&~SCKv43lgf$=&rF&RfAewG4qfXEXze8Hyq!s(qm z*6aJ(*czV*zdihd5khQ%hvjMIf(?5Yg_}ffLjI=m&G+yR7Di-#fKRVp8c^cvYxvOf zzF!PDSxWP=*-u|iKUi=ZZ97>^xy25)7)X5mvXUCZz@kZj??)U7_|uQhHZ0;`%=;Q4 z_mWxTE1x{+#;}OLIS}$E>X+ZG-_tJ(y1fCYX7GGE!U%psthPGnZGa2L_^~DtsZEH1 zXKq&gnwtuagFBh~Iuw`aqsyE~4rF^wljW;WCMj*d{LDiPiIl;_d`uZn`L&NUrqr4d zG%=NJUwO!wj6YO*>FNs6uL`~A`b=}b+wv@hJ*X)Dydjl+Ak`R@`i1qJ59A$`t@31T z9R-FQb#N)?el_mFqWbA>@BDP_?2{W(l3!fAe=T8lHU8PpxQ#fs<|iUHUwrsH*Paf( ze3P!Y?uL>}^mtj*!y>BlsBnm6Ky*C**(dCms?`GGnoXeMgyB@Ha`$4KS2iE{GPzgS zb~XwWAgZphyq@*d4MT-vMKtw%+&V8g5KoA4E>+7O7fM7tn+2S6|76=3n9K=H+~~~M z1~AyDLKAv9{owf5ck*2=5-uM$fnB*G*9CsohrS^rd>Lq$26U;jrLDX)k{=AwIePFoHq zhNYMH8l{gqeMcHGYFqoQjv$69tAc`px@zwW5SFt56@~>$d&rdz!yy2}SZY}7wHu2C z-0Q)e4>IIq!GydRES(Q$uoPDCBtm#_F&G(WUWVS~r9YGr`+=odiTezbQm3~EafJ4MHj=PdrZW4R~hhLhPgAy{UE3gpNFh-F7;2u~5Ug|>IHeI=MQ z3z(tF#N;=pputn%ZN!thE8MxU3OqDv#1DKxl#LPf{#(vRje0hg zs2Ds2=5LyBG^hBqpBEG8gcZLM%$a-B@W!MNbIMkLLiH8@SiPOPJFnQiVL6_cJaQGU z^sImT>67_bVt}hPaN?SuFYMd7a}3*KbT5ccbNr9LBziInO=H)!yb=* zq&mb<{xpdc7)cqa{^3CJ>6hpI83GYcpO!Hs#>al*j&D{3_fuN|`O?x~>VATKM#LNa zAnHG0CyNs8P5$!wv)1&RVs=WlQ8uu4A%`ai4SD;3Yob!(y+k7pC++*iHpS1Nm&KRG z*2NGVr$WL)lBpr+jDvBVwL_um`E*w;r%_{vQVMIqU^0ws*;ovgCZaKn&{YX zX_KPvBmt7BM<_Z}bW%HovT|0Pe2MN((zL>!{apJT)}F>>G*6QO6NXHU*3cJ{_w%7t^CvgmVp#+UPh zWP>_`K`X#d=9RN6rLrB2(6M<9yT;kW^i|Gqme5|}=L1xZA7z;&bIh2+#z8rRIV6_& z@JDbgxEQ>EFNS~4*1y54igNa#E@(Gvm3U2K zWo-9wWPI-PhW?f}Byx>xvv#4Vf2*t}rl!-U;N%Wv@!+#$D0dn6vhkvEUU^+P3X~7( 
z2hA_;^V<*Mt{bjfdwpyHBV7@b`(R09$)9u(iJQ~fv!PRo%}dw(UX`w)Hh#a>k8O>n zEp5%MR7?F!(tci-k1m}9iv2T@sTaoqh|BY;QhlmYxl-Z4^FYlG-;TJqMCY2G(b5Cb zKRRYQm}Hn3ShAk17-YGa8JelCn0-{~(71Q5n)4O0=g;2Tg+P$&_` z;FaM9(Am(kJOE~5kK(1&*-yvM+SK&cJnoSdVfEL#n8c0#5Jpn4Znp7qs3`s9$poXT zqLm++po8pGSK+E%$L861oaHl1vgc)0#Z`(`&?}fqx-l3HlZ8R?;ZP zo}RGB9x5T^#CagNoZOMJ%yalK0q;t)O;cG5knfY{sI;SwP~=gjC$0`7?{1*#qH5uj zvI)1{AEhDhjohw2H0U>A?WJB*aWSfw^#pkqe3#dJx@EXoBpt+D9Uvn zDAFB~xQna@w07*s{0ymgXxQEk^~Hg~It8OrZ{!psKy>HQ3)(=VmX=skJ=eQ5Z)kix z1THt*EVChoar|+xOkVM-woF670*2sEK?0$CVNXG30gp@q(b>7(S>vFA{i&Nt`2x;S zyG(>GiSE*_1p0yjJ(@3~1nfHSE`< zop<(A_wbovk4N&(Sx57=;qvO-xO2~*{G=)(Z8(Epe01r!$>XN}c2C($#H)NU zBcS2Lbf^sn5x?&xxgza-sd0L^my@o*6sITACBYt8d|tbovQ$&NTpC!i@JIS&)x)2$IG~xQkKvA)H|(V{8U~Pqh`l@BTGw=GZDc zoGsb#drUu*Cq9P4C-ZhJ3>^SQU+$Y)p5To11WQe#I3VJA&y$L;hHC?^FP)*F|o-q7H;trI2 zqOYpKAm`+2%^<|h%gy^l3Xg$-LBiF_Mojai{J)!{e@Q;Eb9Z+ZTbUvmp54|mBY zPks;dpU=PewDz|DdnAzCzo&(sAkXh7JkPm#dH&Nkx~atPyJ8yl-qvpoUfMgN*9<*| z6rZ4w#9#FP-;=*b{I8b!f46)tDEfaj{jW#=Z&MvNYgaiZNA#HPQh&d%e~X}twY@QS7je?bUd=&{_-rM6-Q|Zc4^D@C!^LZ|7+!0T-G8F^isTJh z*Rv0J@1ii7adA0pK6+I|ZK-&zr}J(+GdmBssna!@oNp7a5g**&uDGb2oSZzGx6jYS zJ1J|w+U^i}h=t1#jPVa&V;`{BnO4R>#L=b2{u2$lv1xEH4lC9_^zplM0bfikx|kc{ z>N|fJOr|r`4FmHovCKbwQMX}W#>ofa&i;Xhm|>+$^dF~#eh`D^RUt?E{q7&8gG+q4 zLi0~tpxFzN=aK2m8K-~Y`461_H3eL6XTpD8*e(StSZI{6NtTU(=Pb-u}M5wRSk zmt(CftQCsqU8oCr63xBc;A8sIuefK`?GIBc%#dl;DL80HO~cL$7d1IY7tOHfT!v$v zI_hvoy*x-)i&Q!v{i!`LB#g3(B9o%bZncQp!B^lzaKtxpW$wD#hNm-cU{f zvFlTpZ_Kg(fK(b^WK4RowS<>m~yN?Y{@dX-u{=p8|}=6bKJ?n3Xa$*^nXAPB?^X> zH+)q5f0A}NQZ!tS#p2F~{sBRl(QwF?@{#IKkePiS4ViWd@Av*R=PzhTOi<|~_=6CP zjon4VW-U|q`JdSCN5i5Li{-sPNWGoiZ|IyOG57lu+g4~8XO4e<=bzs)u7CiV=E2^F zO36R4tx=8EqF?&IV*cTMP^Y44!aZNk(EWjJHUqRK?YA2GN5Mxwc%X`=krRLNmgo;` zCp<%IQ!753e-tOQ#tmmd(`3_<)Z_et?f<=EGWxta_waF_D3!A0rCLKeL%xW^?{1G&@#liJn(#rb-!j*iqqFo0vMfd z@#4Rwqfible07~B7Lm*gf|E|}C&R+vG~`|b=oWQeR9ibaO0zDs?2j)UBXzx*6K9+( zC#IVkOM*L<7|h2IyANCDxhdsW_aa)5u6bLM>N$J{+R97z1RI6#%i`2?`Zo0%i@)M2 zO}(%FNML5K5k3-F97)>OR8nA~Gfd-u?MFUS;)CCCeazVyCw1Mq=&?bI-IC(P4fee< z0F{*0pHB)c0~_A?6#Kf()83xbOIyj-^Q(zB6)RpoV?Ok&8x)6Vwu~Cx-8NxJF991? 
z=4e6AUpbDp`cH#y5wr_)JEcZbo|3ladZv!Ad10=Grm@3-Qj_(0Hu8BVA`uRHV7OFxiS0rvKVSkO zh@ECL(pq3vPakyA)ppo@vY%1_c9ov5|Cv`jeLE}+K)#@-4?L7z+D(C<4NcX!wd~b< zS#mdLbw10?_K6NCbCw}REP5?B11mBqg0y*C2g_SpbaYLisMASx&~}0%Hn`WxbMBHJ z6eF0{?6ik~o&p1nOlKg7r2yC>D(E)K*8I`mJ1XQQdb_@Ob0t%wdb2hn>Se`eIWNSu zm@ew}1%bk$IqSi)a1-N5go6W;S`e8n(WVD)jyJTJ-3m9FY|7fpAtxSryC1 zHu|-iI*;dpXz?}3;+N_^9`z9J?XsF!#t1GlC!4HBkZX(hZKm`;X?EY+pYsx7@Bs@S z=0Vr#9KsYb1GoE?;upLJ_du&*6vYBk!0$#*ZCk!)A_A9%GXYndIYg*~52dNXy`{G} z_%m!XZUdiW`c$69O7}zPgLcKd#CY9~mAM~pOzT@dxHW)rb$nLc7}K9f$3y<8v!l%9)sqPjP;U%;GRx@t}#f<_y4q-10Dx87bM zVn$u5P$5pJD|62dG4ovN9K1~IO;B3XMi!R3rj@QI} zNOGG&h3i|U!LPzt@%hGksrvV)MBO%;>&p;a`u0mrDxEpOMbl0rG@h}@`3lUq=`uro z`;`L@t{N#Uy56^Xc@JXbcjsNDVJ)L1fkVucfS;iDt?=+T$XYPi7>#tmVD;VHkY8J$xJKR*z6j9>r@50c(Gwa z-W}BrH?4MIuh+0Nj-+}+%%D%y(H80L60O@5$$pHmDbGb=!WnRhiReIQ@8HW0D1DW} z$bH;Oa^h@K}n#eQ$cP4hSF#{wiQ^VY3(0p?}hD3+8_^w01Mj z&B}ku3h;;tLQWew8JHCsUHk|Oc)IfW6Yt;p@DrmM3ee8Znb^1O`a|}Y z;j=;6tfwKV_arC`tVq0iR2!~5pszY}ax=^jd}4vmJNdF|S)6 zq6OZp_$;mF*-Dkc9RhE%8WWv8|3Td{lZvGXq#~}jHZzo0nrY>YxAF0QM3@$IxrLZU z!z`|`2k9Nh=rf)qzzpZpBEX_ zs4gsVox99CcE~;S_x|}gVzG!7tX(;%QwBic?zcls%X=_sdXYBoHBKE4WK4cpl@J7+M)Y{$}s<1GzOUzgo?e7i-!pz8{8I&cD9z)!GST4y)Lc$ zZFTl6`AGiDFAefWD-aKFy!V>LagyhS;y#Cw9*z0Mlw}jUZEBu6~-5G~=tlmucgzp0PFVs+d9ujuAo1TmU|7*1Z?n6AYdq@o~&lXB7Zzk;51mF3Q0mA&ubj-bFv>{o^pmV)N#%#b}X^)WHPnUZ|KW# zxnjqzlW>TqqE9l?A-2s+)b)h3s2qnCwO?HUy<}RtI|H8YEH@hPdzC}g^oq-qz8{{9 zc%|uAzM4M`lN8ngN|uD@0=}Z|*U27V2@=#j>pAruF5VJ;d*QQR5wtj^KHX+yi`Lc$ zEHz{`asA&3{>$9t+7R} zD$rf3+nhPxu2uxe$2O078D50e@mG9NciF1q&6vZdmv)It3%a>9(f7J|+s~Zr+IB^= zUtdRfkJ8Bf`EAc@$CW@(hPqnExr5i>uyD9yJj`^O48lEq(>U!RgB0q&@Rp&ibgJwM zSKGS>feym$kHeuvFV#7z8c>8B8M;-fZtEj1+wO;bkIP5*yq$kZ-_91HR-n2@!(2@- z!!}^kKfn7Ow8O>K{m&AU@pYT#wo6(_n0OB1<>?|z^=DeJqbxO09TaxB$5FBuSaza< z{Cri;l`~TdNJE)GVl%FPI$*7Ih7#7terz~`m@-XfP|@4%yq$HBZaCFcX1xIi9#^%v zuHE$-T_n$-vMobqg%VXg^!97EP6n9nCA~r4Qd+st= zQTfiuIRra>-S4&yvCy6)tZ1=IML0FCHs=<- z6Vb}uFkf6aVkM|%SrV#5nGQIGrb;#r&uw+|uPYXu){zF46Qs5)(8JLtk8+Lv$U%Id zOo@rVsKn*>+qM1@8r@C>X6&H)ox?&VKh`~^g3-jL0X>*6}w@j&$^0}RDy;*7LBd-z}#EL~Sbk2Zy>+5?lSV;4P)Yl+a7LVLyu zOlMm_-1K$9h>1ybfZ>%#|BGQgiQ8+p8@e2~m$2eKYY&zdo5}qBL#8;-op~1xqct(l zRNalSSCk)J0R~9;uh(t9^OBaZ#p`bgTk+7$CMlYLab}S9VD2Sa^Ow?|aZ@Kg-HR3z zLrsEjepZk=?g-a(xbPpeL1NlYEeib1(%m!ZEr;XTA3XMzpCECR7{8Fatw_au zbmL$&T^9?gnPZF_{S``B`V}x(7#$pORVlLiz3b)%RpxQ%YnQAR2$)2qn0PJsAnxQD zvWpQVjOVI(j#lSuSEs(4I4`f8o+~d{9Mz!~t!1u2cBMIuxF!QU6}w#KK*)}kKnTIu zThM9YXKdO{2jf;r1IWdSTyta6bV7W287PB@W3xx;1hQ4jAAaR%IF)JAOp#z?1lHRo z5DDOhV8-oKwDgrPn}UY(ftO!~o5QEhQ5T$j6uZi;<&O1>BOc=YS#;DDPZ zzhN<2I;q{EcYDvQTh=_@uX;|e2eo+OG=}Z^0>0mH_9MmTM*zFaE3&MO$pGK+xVX_O z&4c7X%yWg|dmiGaybaw^oR_xm&s7n{;!YjV7In>8|nLRnuFffy43unJA=XiO~SUruE{21eQh^$K#Knr4(p>-5kXQT>-OBx^T3cxpBb$sapI}e3CQ!oMNUf znpuvE9xylhz>91_o+br3g~L**=}s8zA3HVrK}AU0#!>Xx|_7#TmjpA z&d02vilO&xq=MiwuUK~g@JV%P#kXsQJ>a>64=m@4YWAm_pZ8V71?BKj^Ej_JF#d>5rPZqe4D-A=6AGG$wQ2T(o6p*{~0}YXMe2jQf zwZd1~o>4j*dh(#xJLL`Sj{~|M-JW-#y$!yhhSz-0y9IoqtqSxWjefs=bq6a-s7fs4 z=+QSU2a<khy)O0d(drD79didIlEz=Zaye-z>S)&pS4!j!YTDH18{dt4B zI7-$V5K#t*oA*i2HiiLC*WJ0T^9sO$0}mw1gSr&9VYV}eB#M)y*z`A7EQ=~~Uv09i-yhUYj;uO% zmGFfF0)COhhLr3$^6oee3^g6D4Spf`^)6*E#zDwkGB*dgl%zFZ3!R>x>a{0VHTem( z(6XO8B4tJDv^k^<2gGe=l%A8Nc>#^&O;fE?zyZD7ZsmMr%C$KKoOyUZH#vCeo6xC2(jPcs93iP!zwg9h2Y|O+y0}RhA!O82&n_XyY z*v@(A3{X)b2OqL(|01n`MRp#orB>^DMU<4Zb|vl}d{SL2&b z#9U~$cI$YHisY|4GnE>ZzHLoZ@taCsSs3KYe^C}jBz4j0vEy3FQXqFaRBV zDZ9HFyPg> z^}Bt`yZ{|M@!jaF?53S^ag^2QjOf`%6s`^)mG?~y>dgsmoPJ+X{xYbg0~tUF&QnI_ zRb=ok-VY6Lr1e_p?|{1ItF{cFOB!HGLbr>1l|5>a<_f?so605DZ%hUT-}j5@(bcV_ 
zD4l`KVbOejiSDi4E?N)Dl1=@`ab(tpu5zx<*>N+szP{f%WGYEKUWU-S0gO&a=Rtbj zBL!&=n`Rqow&D@pzJMCd#JG8(qVS3QG)Egjsib3Yn6!&cDPJLrO6r>O$hI5Q%y(hB zz$gJRG zKS=k^7V@Co%jj*BwWgb59qLrYXPB=#G2gJ$lB#9BCiN08r7E?&c=Tir2=iy$tAvlB zV?Qu=Km7;wFhPM?YQFv_pNT+B})Wy*!pRG*@K9 z9w%s9mg%EU6DQ15+)ewa5jIKPNBzOZVNNuc2pIesZcE5b_=OHAU9>8h3?`jMbYGg13Tlb&ipWm z@|{Jq!hvm_jP?u*(Di!0E2rV!kK+v>3v^JtDx1*0!!euR{BS z0|3drFyBtf8Pl|hB@9nW>wa<#tU~JZk~;mJMYwTBXBHfMe#nIb3Rg*ZxUJ@XCI-e9 zs1gB0ty6E0D(LTRsgXdh-vyfOKyr#xN@|P0q(+PfFH(fnJ2R?ESj=XFVV@){g_0oamci*elebh_#H}d5L0B& zhz!UUTy;wm93=TtyjfE0|AS_NNYX$%dF|s}YvOdfyzxxG{n>Z3Wd4tAQ$&?g^A!-> ziYJQHh`6Brwb+{Vri2Mco{~r`!JQnNL|@MX-7$h-zU}Z?qE~q$Icf)RO<#jNKPZ90 zkN1}%9GBg*iWjr#HfL9mBQ+|2z;pyA;4J`Z#6Obndb7*KCmYS3*Vfh@0i`X_H z!Jv`8iF!8?8puo)wC2WJ5gsuT;_jF&qV*aK=5>~lB^8S>=!Z<}VrjCMTjynMh?GDp zg9Y!OpU*qBEJ+e^i9lj&AIAC_DJtZ-V{mdimOlK17;c5B7f3fR=@>n+q5vAx9O61U z0X)xOtz!_KH`@6 za0VTK0PHA)ytHO0DoS_fH`_w0EHn9D+5~nV>$ttH11C((i@Kh^0)?IlAA+r|Y0hhR z@)H;J64zoa;3xjl_;e9-(gUYbi%PeUqBf7yo0vsNVy$#G?ZqHhD9U%xj)#SOQcu2e z^0=Dsu*WBF6yelXkp`5tV5a4S>VB$7P4U_6!nyWFjkf_1G?%v6n^CjnR_0W9ToiA& z#kgR*c}qNU_wv?bLL+=Tz6knw&i*3(@^Sk@Wb34rOjvpn`tFwAkTLjE!t+v=m4wT( zp+gO<7NOB5k`2J8yItTBwkBGf7w{EX^qxnY=pl{Mq$F6>+xoZ~c3l(86$LvQ=J);X zY67COLrvrudy$WoW?}1o#?RG`B%kiwl)wXz@{*AoG4=cb;QRBLXQ_zN7$BZXZKoQ| z!jwmmVgI1PiM&H9*COkyX=-Um@Dq2ZH?UD|>nl$wrr=)`V$+E1f?UJ8u0 zgJDmx4~HIid}NHhM3V85#(*nx*p#1Ms7x#<_IsT(66L$ssEdqxb@ zHOx*1KydA=&eH#xF?acOph& z-zTNs@a>P?ui!Q@E7ym2ZR5<+`k zp$&fmn%KNS9=OSkUYBi^wE|fMFuy&7Z0n0z*@W+LoC)%fvPkK56CHZb7CZ7<{Irwa z<|_G|u;0M4)KF@s)mrN9onoEjqE%6DC(p)>S@fljiAzpnD?5VD**4uXiaz$1t>$JM zSfS1*xfZN|OZLlJ!+r_4E1`*(a(Gk?GjXvLGtw8Zl|V7xNBR9|XP^V8cDaM0rt0h7fT7JJJ%`Sm1f=!Css2*k zV`)-2{82s6&e;o~p;CYZJ2~=M>98AjdX8ARfoiqAxO>JkU{@%*l7OiRrw{hZL^23S zdiWuYQAhU67(!z+%Rsfp(F);}*O<~a$W4Es+z7#D==RD_^^fE1W4{4W0Y(GifvCxW z)B^%+wc||Iqf>K{1A}=e-w(NEBA6kmg5VyIrBtE9#hHN)Oc#6LaaB4K45`u0`ju1Y zQx-;P`rV5{Eemg=_Ch6~U0*$VAY*UGoo73q^>~UyxzVPYgQsE?J=C!d67Z1B(_i4YWM`U z$kgc(idpoZw`+wm4EJGksleh*Fylc){`wKJ$(WSW8up#bQc(Je)~)pRa~Xq7nl5?W z9c@F@+zvTpA|UoA7&px0!4P`Nt5iK|KQ`P{_;+3bwQ^{ zk#=+})?UfoNST#%7V+0^SYGhI z<#;XuuWz|VF%#CRR;fip3FbahHRx+uKE51Tf}aQq%qJzYCob(Dwtww>;q93ZtPQg+ zgzv}S1FtQsrSpym_mW9P13co4my}*{-Z8m_9_TcBiWvcfvWwVw73j*zB#3IDr=pbLoGl?_PxXl@e6O@*!^shg&H=Rh7FevtKGhzHKQaQKW0-&F#?a4PI+k zagQ?{N>wv*jbx5Di_TXUF358^x;m5M*+TS)03Yt;Kdu-}e@a#z3A7Er50|mmT@fm$ z_1E*^Zj>shT?TKUsfV88RY@mwWeeR@7E(Ur)}jGb{hQ?-_h6g-xWeN?;c%6>R+XtwqFOrnuXXQbCV{>7N}IPG+TnX+yI=ElH2DPwXf*6E8;eq- z)K6~Lt-H0o%D-dAwGw=6VxQm$DI8aS}JCY65=Ng z{Dq@Y**zQ%sN6er+Zv6_RglY%s%QP5lDGL+-fW}zxSwYEil!pcbvVa{p9P>p?9w6l9Q)$6R%|Pie`c%$}s~ra1~Wow~%!c&}qovOQ0q{ zCzOR>YFEqONxedD)Hi^K5K$iuG2R3^m)(Lf&6x|JW3<4gD73#CmHbEJ>qFFy)+yOGg*M> z=qvt~c-i-Q|0#?Kd4|>`1{};E2M24RPhRA!(tKqz9|vs&?XJbr>yK~9*-dkR2=kZ6 z6|P1C>r||b7u*N;0*_o+9Hy_ji9VwfP25;?M?$?_iKUK8aYlX_9;GLOvb;SU`BWxCs1DXBHGK<6DCzs(OK zgx=q!3FIOQj;_rcV-kyZKzTa})N-A2^F=mbLaE$nEf$0398dsVa^N(wSksh61+Ci1 z*qS%j2Ko3&JQkn}ikk!??YeQ>u;o2_T0YL+kkHfiup#>xBFwn9bI|@3orema{nBND z%Hs?@4xKbT{AUKQKi}c_T$Cy=R?*W2!Y&En8+Z7|mdF3{1?%ilQ+i!-WPJXW&>;L{ z(?$BIc(Ns=@$ze_$9H)5S=z`7=laqk9#(<~WRN$#Dg1n_r}5BGCEQxKlc^k21KMUb zk1mLjQ!BUA{KK0wfY+h+Y*k`ET8hQ+_;3Gtp74s{|YY z(V^a{BEKMf&-b=+`W5HOUHKv(=Dwm!Rm<4+2Gxu5@p}TpZ_G`7k9u>5QHKUgC3v#C zu>3MX^?IiXh`K~ss?qjroAMVb+Hz9WcrS3j@5o*=lCHeN+LyDpOGx@#s&ub0rQl+D z<@OwUq~-qR>5%IJaDYufVjV)&&9S;4VB82PxWix$c9(#L)R@8#RLt8e1GZqL2QKC* zsX{zv)al)I8Kxj_`c!)5Wul%9g9@L~Io>j?nr)gu6V4y;9x;T~fiV*(nJ}Npir%|8 z6DOMiRhV&;>f*DgjclCwSMHTrfwuVjoJA=e+!5M$FXQhA(iWnl5miVsoTn3YE63A3 zPPCJ~6ed$n_Bvq!YqqAow-X1Gm>&;)aGeVeqyzz!4kY<)3$I^p5e$IOPLk5_| 
zauR5#tU(cr!QAHvxFpp#$@}rY$}utH3^VU|P#uothoK9F%!pG-#^XLTo+zuPUCY!F z4nh&Kx|&;h)_okkdDC!u5g%Hgh%X@q8H%=L%J3H*ST2vDK9ha$2t(GzG(+1(r8z`Z za%`2*0pNA&><8rwp@d+_Uvc{~p(sA?7s!hvsb3Eug7*`eN*)d(mQYTJ6zlZ&WLN5u z{OoJzad4@J-4d&U`w1dE@ggdZJErv>#Cbm!C|z%xT5Y+)|DgP21G=I(3QoSSrH2{U zh;p;OR~UMDD|KTh3wX*{!!a@yrMv~%Gbli?CbU8?hz~X97vl3(PU4pwH%6LzBJylY zCJ~G|EK_|N9THq+p+0%;j){$KXT?`l_nGtI>kVtc5ZoOYe0jP?w-v)t-$SArJIDp|pfSF>&=B* zp4i1`iG2~?n0bpwZE7iNMy`#EBHQwzzLL_AX|b*7F`3L)4Iq6l*{o-k3p8>#kt}~y za8e2`90R{zSbpt11~e9LNXLw;B+SseB5c?&^_#z}&*AC(Hd>vkj(3s^=;1GzIxrG^ z;BALmr5Sf#2+CZFAZxFI<(|v3Bgr!3&}9T?b@!qTf);j{Hbgr}-QU&{Wea`C+%jM% zq=TsHhTFq!V^V$a!y2YO>WLM4)SxRaDs{U)sS3{zaaq#Vb~qyTG9yr)Chvc3qf>E5 zu8-vZQz3N`fUXWM{B=~e6AUrqjA_Y89Dv4&CH|Nu;zlv_Z@V!h@J`j1m4PD;_yY^y z)Bl>=8=I~Thg09%e*Y$eBwyWSPg zDMD|%Z@f3?dVS9faaw(QshF?MzY=nPf>iI&sY{>KKDG-;&edY$GDMppKNi$s1BKgj ziWRJ_dMj{|k_S1tLbXH(CU4xB{WH(RD(=uR!d2(_4lgVhC)FhlDh*t=hD26KaK4gV zY%lG7dM9;oV(w5YCIq~aWaKFtL_dyts+KJjg0Act zFhl(4xs61tJ(mZc8v&7x;oY@GTImk_x$#)dc)J{e@6nn2S0epN+$9-7*qkp|j_s#y zZhNLZgVLk=D~IsB?Wh<@3yb|^(`Qo%g~H52_%f#6M)9?d(EF6P?2JUQ5X(V^3~zR4 zmmBj{VXnWECRe9o&z8a*BH(Dl&2}T(Q-xjCuorU$T^$1}2ahDyGm||U!_mQ(IIb_5 zz{(+Uv9U0<9TRK=^rh4a8=VLy>+=hFWRSf~vp}6}`o{nb(8hMb-h-~W)*)ThoDrCw zp^tUYfrd1Z5*ncEX1U9ltZYi;&g#pI9JCeL3B`)X-d#n%ahT7yo^T(#TXL2!Wi`b* z6)V2CTT!)>4}h0fwv)Xm9!4};q_5dm@5|1`HckcTjGk8xuL>R*JeC^u)p33`{CUeK zJD25jDP3@oNbjs5VMV4j{lYL+^n}jqd@O1XJ2hV!wy%5V7lyP?b9jV&UtNNGW-V}X zAP8{Ae|^Qe5V6Czk`3gW`ue(}p!qDKJ*BqE08!*kZ#y!yFDG}>hhnqFp4CIw`U;;% z-A|n=Dtbw~d?B?aeQ~BPlEm?dTufUXKzVcu-LsV7`tQJy@n2r;xsLT*xJugCkg}?7 zU98||=Mcfftkd0^Q4ztwO7mR%Y&>Vr;+?TE&S8 zWGEW;6TCzAB~`Dr}EE<49wo)20=Le`Sr<{AI~N4{@n<=aNbZJr(rt&p!}e_~GP0b2#>4 zjIl0UCweE~W%-EYc$ zhRe^Qn+=>=-?+yMo8N4k%5}a%lAZIo>r;=mxYXF}8w1PL6!Wx*L<{A2{AxTm+Rdo?@b1m(4ZZCgq)T;i z-q-zLP3n3j)T&*CpRYnOwKX_`FFBzH zZD`B-yp)~5d$U!1W8oy;jfwVmneCe6YoddHr&sAS5JRKlkTvUH*dKbl$+0vXquui> z`HJGiv%m3#&hLqSBaAFjYL-vH?8p*~xXkohnM6oyjXH%NVPowly$xKCmZRgJRy55!09;{6X z@ItL357Hf40?e1=*4)wI(|sr1%~y)cBsF*o7eqPIjPb^ZB29;>X_l32Xhc4RV7ktr zfWMb2&J%$E4|K{n3aq#lJ6K{Aw6L5!(&I_-2|1qmim{jWiyoyv-p&BN)%$nK=CeM_ zvuhg`U+W{F`yYTCVMo3XcFH1MT4}=n{-aFb@?QxV{~A8Wo+onos;27-U(1_eW9tE5 zxhqbK`9dM;>2IaADG~7;@FQ+u;7jHmuc{FTs7mj{b4%T=8>DS>_O!d=YnLs3RB)@q zz*Dac+6H7h*?CpI{rQaL=+m6S1hjOpL5%X1*y{2gcJYInW+f2|{L?Dq<>lyt@h?); zYhOxS@+$+M+WVQJi!B;@V#S-(GFqM@3-_GZFo_(M(`U=*EgRe5-RWQ>g{ylvgGfwK3WFyEZDrU#Zt+%EfOb1bOW; zY3sKSUDz{ow22r3#BegNWN*o7-JzZq&2f7HfdhfXibXW_ZCqPWCL)CvlLa%K-wlR6`47#5mmsbRC!#T*jhXZ9BvxC+vD zYCA(2CEm0``K0Lua)uigCTwxTKl+)-EsD__SAKL}va^BSG`erCiC!b+TlF4>KP^*O{h3-Z6M>nUpCjB|XdrIRCae#n%w%_v zZ|8kZ3YMo5#8i^Te4M_2*IS(-T_Z)F=T?A_guO61wn*@=N}glwiN#AAwAm?ertxk3 z!s`4oaO5tNLoxBrdL^#6W&u)=%xrad45#S1TR&`9j+&%Ms9vfI9PaDveuf2i?O2i$ za5+Y1w+8&#a?q|;s0uth23LiT$=q!#;6DD)x+<$jPUaDhyWzqmaaeO@E4M@V$BWVS zArGf^GfwSvVc{$imIoh9M!ulmP$iK{Y&CFO9I_2l89oTz9d$1&f;X5QC=dwkv-j$H zj(sD1cFwXGCI)*neKrTjpSFF5`O?D<%_&LoM#r@{iMAsG@>QMmyIj2Rx!Y4NW3~34 z_xWEKqt+H;@z9McPV^j$;iznm5@DXo=F|2cG_;C`?&z`mL)SJ7&t-UTSI&mXH4(E< z9L;#wot2$8KU+B!GCVT3D!VaJA(i(u zz$$DVsS{T6!9~~owOLX?aOm3FyntiNS*?|nsrKP{`6ZhGa?a8wNP<&7)E$dGgHZp#b0ueHRw#FTt$(X>>Gu0-)g z%_qfJTJ97%js;mOl=nsOv4lovdTln8jz!%3SU(#DA9m;!Kd4ExH~(9+R`smZtTT#P z8GP*W(M!Wf2@Ap0_gP|ElXV)C+xC-YfvN+_LxtkqF^;M|-cz|ug{Sm`(gzA+?6kjy zq;@Xl*r+I_#F;QrUFnr-&tJgwyy0`!gA^NtWu7}GE?_X?;S$A1jw$Wz8VKnu?eeqt zteZ#Wmv0m8zt6o6$q}?Oio-sGqZ5af+FM@COo=^_fy=D5irOSH$DA(L}k zOtPvB5E?5Gg4&a5sMnRqX)5uOdIui5TBN*3>1}6hRqK#FJz*p##%Gy)IXmKEKe1=3 z-_U}^ZQ_1pnk+lHd+r5pky~(_fuy19_crK-KiXIJ*6@eY_M4ApOJb-Lj0jBu2@{y& zG~@TyPW9in*iT-3O75MT9`HYOi2l*;T>y)pK94x}X@rkXbFokRfM7{%lSlQ~pu#iD 
z7RK6ycpoUe8{nhNFZ{G7#`PYVta}|sm3Fj83Ehk+57#VBiXzjU%^# zA_sMX;&j4{+vUdNwbq z{z%8wWoa!DM4AGtys(wkPl^h*vw$7ZGtD-%C3G9QnMyUbRQ)NBA~k2|3l&$a9X@Np%bic^()OZ_f2yY*%`KN)`Gt(qNvYOz|^@B}G};2~ZY zMGi{0_^Y95oWQzU=83rNd{YP5Cksz$NIjY~e^4Z3lzIaW7XC^ed=7T#sewxpgCO!8 zd?_36dpp;zcTf4glD`sC2L5==ZGKTx&(6i@X~lP;ygq?}21ezq_ph?UvaFk6KXmH* zgs6vez-n2wr(*JY+?QVXJV9A^tj9qg-Ons>nBsNo&DDIVI->u1x>c(xdl4pD`m1nF za3}SBpWjnY&sw;`fNQBL(t%$9jgHiJHp0JW`RE7gUPYh8W#z@QxQ-02OJo^+E*zkn^HyP0MyEaA>ioH7HN1Uj zAXm?A$LbO;1Ea5!)>YcSoTG|@&*b<9rbP~Wu*&K&s%4+Kq|CC@`v_#)@l^jOLdxPg ze{oD$_wDbLqYs!NixIlkRy4ulSQ}yV7To4IKq1E{N1x0>pR#tciVJ&gcE?akqIm30 z0T^4eT1J0Oqg{nK)}=G;K1sKkW@IYoA8(prSn5dX@T0uII0Kf*W#lB zy|ix;qWN>qA!ju89+F~9C9HMj+H}5QWE^^yBb7CxCmdewEk%v3FX}LGYofQ* zlqER~?{_z4g(h8;O->6l4@UmXS`=6cV76O)@MAsoJ@28U1p{PO*WH>l6^pO)*!u%1 z*7t6>h!+^7rnzJd^GcbVg}o2WFn`mp;}t(07ee~>4M>{r&y4al2_V*_I+tvrOf3XY zXj=Pb8!uBhrB>rwUu_oKgq15xo@wc+sfEW#xR#+$D=kQ%Ac@jT+fXUdDaJ@i%LUub zmAT0jks0jT7gXJ9jc>_}TDYoc6uZ>p`meqVQ*Ze#wMlzeze_R>`y$L~WR3_TfablWG3i>RG`F*zE$?L4D6jf|e#TQ+Q%{v^Y@-M`q&-UlvyK9isAZ_^i z#lV$M-tab2I?pf4<(CV30LpYm)TNysga<#X8Fm307E2R+QZ z8)>BBTkJ0i&Sz7KahGu^G6=2@(E$#h#RQjC<(+PE8rPIjMZABt0aOS5EQ9GYg3YEg zbn>WXj2eJKQZLEyk8EumE}#$BG}%7hRxotmu51r3SB(_mOCPw^U75WE>yC+_iN5MF zT=0fGo(S3A9AWRb)zGK7k>C^Na&4sfcDn|}y(P%(bwP#R60|Oc9XQGF_zX4cIA)ew zfqk*SEwlBy@PopbMMoqk9jf7En$@Zy&vR4IByGWT^Jk=+k*=?Dv~){ZVtzx|^LyWJ zdQGhl%no`)qX+>ThJp+Iy2GN7@~|OD{i@~*+kP1&#pr@$aeINYQo>M-^-dq*EipaF zhW$+O$dG*`hFoRTSs3FnRPM+=3fD4%(%MX#Yuq8lLPpxoMIA3?wJGK`o6f6QNhFg$ z{;*I6usie!7Lr$51_^saAXMb%4J`)FtpveZf4WFl$s_0mlt z8x{vO)A>O2n!cK=SN`hY`|H(BKA;n;U18~Te`;sO&NhK%)I1iFuy!nY=p59OMYB;g z>daxvi{a9;HZ!wNNZ_y-cMqWXxepVl2Yo9TE;P{R-AvtY@fBd@nAmcc7l1-;e8<*% zRW8;X+v^KBZ95;Jx$1#^hv2}P(qsllB+yD}Cm>kZSciN4FZ}vHn&`VW0AU2WOx$zU zD+}6qtFP;&2P0EL=B7|kLcHlEps5Z=Vtjn)DYjPcOi4Nzn@W6o!CikxK7Qg$@B7={ zogOLrfGv&ytrI^Z^}f~U=_K-}>sBvb;W)*=wU}%_Q|r%?8G!Wo>)iHVTJr&=0xfB& z_B;`zX?9)%SsFHXyzqB0f(w7Fgxxwngj>|*4%Gp4UwAwN967^-cd^kS;jOV-UJE(o z^#LOnq~vO_pwl|cYzaGF0ID4|+9qm^eI}d0B~?#{4*7t^r`7ABka}`jwe>gCm=P|y zcd!0!z5ndt__k~5aWUL9|Lp1bC(rv&Vdw7y?aZ|W#j5{2j@$e2jdb9u14rv3?*H;k z#(97^TW{=?-%pS~t1Xju6ZUVJtwn!TU+#I8WC%d@JB=9EzKZ=Vzn|j?;0lX$wzhkF z=kt#N{X7|f`!e_za{$QwbnaVHz(spI zJI^0jF&{c~m1lQny)^0-XP{xshZAgVzxM8xT--b9n!5>wgjELbLz31*4e3L@}G~XdO@VlF?`Qf{bLwrU3&IjqfE>Bq>bbB3|FFG#t29smos09 zlDqAeuY`0!-sBgtzz=$L(v6Wf-xVVVfiB~IH`-OiW`}OJ$?{}Nlkw}2iHV7u7MPkf zv|hP5|NUH?_7TeCr!GtT9y0VsIlc;wAFo*_tMNR0GJiT->eBQL!l*$pW-b&eqX32Z)>j3 zbTB(K;c(C9=H@Oe>r~e>Wp~8hf$(~JaQVqr@0ePG{}aEzF(uAN`2dNT!(!=Uh3rn; zlFsO1pol@CP+h&fY7keZVsT24i#_{6!p>9Di7gfon@qkI+K*O!^JgigYwnurlA&k0 zC6=0>pYK}vPRmdBN)G4seGF(Is;x53rPgM5w_A;L1 z*4KVGs%$%Qfk-0JWa_2vzf_NNqz4b*Ev%(Z{;iL0mG%0yD3d0S*?Q>r^jr^8X>8qT zPz6lGhEt7ZFH-!@QD5?D4xK1TRbV$5j_J#8Vkys!TO-?FdwkZfh~oK2gI)Pk<``Hs zb`;cTZnn#thj=fgF^4xyZMSMn^6s;6RMgXC*6|J5 zx@9<&g%a=fYiNnKe+&&{+RAzPI(9)X*;I7Z~B4yLG~0pAjU+N@26bi@?6VNq!bEB)p1;1ChK zSwx97aLT$zHGV~uY|Q$K__`vZ;NWa~oCv@7^lNxp`Ir}Sbb{GvClac;UYMOn2NVX; zEcEBm4#4pIB<_4^L7?BBQkJFm+E$ZbA}thZ)0o=^p>`XoyM6NHNkygysBMAaXMVE9 zPet0`XWiG@6If8BIzk|Y#rmh%kUQ0&A`Gm`qANl77AIaSL$9l4wvjUq1FK zS3V-m+M6$=6*bV)dFF9+L&~A=b`w1f{5X%?ncs*Jb8h5r+q~vUXGI@)N$rbuL>u3CA)$~v7wcaz_xL4Uy zA0#JaV9S1ME=Bx;z(RchP4oef^q zc-Di|YuiK#>UjN=TS{}hdvK6xdltKMS9mgrwQ;FD_oP%EAt5c{D$eq?!ai&9oQe{} zNI@gchWC&(>R>)~$>V$XL<~|t^0c8xi)>@qbw29!L^4VIK8A?yaYHqu83l9hRij<4kemBr&P7+N9s8(Xht9HLALbeMBfIJ zr}di0K(c%~ryoh!S!q{1GoDMh#kwe0*M%xUqn({r#jq975x$byj@a`N36>PSRm6c84-5OJ0wQu`L4;E^jv@@=o$?#_xbSA7Zde zOSA^90rN)WZ*M1(kAW{ZC^J9|BW=gGdr3a00Y}4}RhOR5UbC{n*k~}e=_>Oxz34H@ z_cF7)zIQj(M4>E6h+0IbO?4j2fnDGLLV}*z_Dee;OZu%jUV}4(tglp 
z7lEgKBalYi@krG^SXC6<>Lk4UDI5VEpGm*tZDJNI(*C>FETMbP%8=DDJEfP@`W)K#8UUu1dB?TMj`AS zl-R@HnAr-tr3@K*Z;z_W?_v#L!PenE;u^2Lf93UUBs8a>w^J6FUG=9v;F_!pAo4mZ zYt^tA%tyOy@vSkMsjcxj70vuWfGd`cfO2xH8jw91{xqZSHR~?IU;IWm6-eapK8xCg zKXL^ADlkLC5ApGQ?G1hBR_?~xm*;DtlYh1)j&~i{RVpht{;765NTNK>$bDYRTJhtjG zt=Ln)3$D#wDe-W?j$H!~T9B$+-mA{J>zMfD<4#Io{0 zlaRLxx}#CtRxYp<4Nlp;7Lau6upgp1C-;PSkx9DeL0UZ0{N%)TfVJFtk z9~<^XaRZ}LMB^WoP6($VX4r#CN3FJq_z&D~T%VDNm}_%YSavD8a^8B8t!V@yKo_UM z*VP~gz#SLFaGw&yD*PJ8AI${1rBb_Ai&ci*klk5%#Zwk2yF4{Ry*Gxxr8zFnR%Wd+ zzMSR+yHkVz6y6hP;I7 zq0nZw(>X%6CV^I*SZ+t7WwgF52c?!I>V$hpsm!VL6|jm<(;k0t45Ty6(yEOLZ*Sbh zxM37u1dqyH2$PsZe&I6o&==;{S(z8im$>EZ;@Z!L&;gg{RtQo{*;6Y^ZvH`Dg~m@W zA?xPBP=oPXx}U}gj8vf7+SMi|xz~84QFUg*YghJIamw+2O*ZfNaD z?VzD15hNps2w!v2@WK@47t+?hjBX0yLedPkzT2%ino|@Is^Xm5IvK_Y|(gpcI z*;_9I1zgy~q^7}SPzQ1{-{)-uYls|TvgHk8TPjoNDOKH44hA9$!AMwRGWOzaD9AY-h}Lv~+t&*4+L3^|-zH zVaW@?i%+TqwJAZ@7!ws~TFjESm)nfuZ~>jud`tOT8yZ&8vOeq2Dkc<=5pY;2xarLX zqGyoRNBN?~lN^I;A)UPWkb>;-YRc-)8v2NAne?$SbGHDijC}uH}O-rvfTF79l_R_kyRTo z+`YqIHvs_PnV9j8G4c|7FKuHnzqq7l%THqjeu;5J1C?4xRBtI@ef^VGJZ7+Dt71y| zxB^n!ymy7#N1Y19INq#KYGqEyC*Ciz+ElVaSAd2p>q%a$m=OtqGilf5AY1-|{mb!U z*yCK`g1wW=K(>`=tGj(WHUdudlYeiv)dNHv5*v)*kEIT*+bD3R52lCB6;VUn*KG`$ zQCnU+tURkf`FyTy5kqCaa7&48Qp`8Og|A+6-I$ojNC}}^r5DXG#Kb7?K&!s@h021? zZg0e6<)wDM4D6&POC_NSO%P5IB~@1kDWO_LOrF)32D?(SjAcvOncYnzORAdtA%bP&H(=XU;C(}=OHTwHB+9dlJxRpVK(m+J#LeIB;Fi@kH9@e|$l zezJ=)CnY8sB(gmLZ}R1Y_$0E6I^WYp+qHjiV#8EZ77t&%WBcP>z$GqCZhE~0E3yy+ zkcce`pZG0)&GpCaZk+N-J)e_gA)bZq^H*USQymmj$~os1Y*5?o|@(y@5#YF z)r)nN9W=m7H8ocAx+~Xy7%&8)f9%c%oJbmK#qNC8Yut5VL?NFm&~*)PzQGA_&7ur? zW<=&ib(mRU-U$i{3LrdO6N{aifsnm6J`*tV`2!KXuOW3S@H;P!B-fr0J(d!M)uxe> zxX)eDFIjB{@cw-IFayubQ3|*&DSIpC&gCEzpN`UjbV_jT%F45~ZiU7k{z!k+d3?Xl zq-{t=yG~rk@|(n+oI00#wbC@oEBN%mwBx^~Za5yw*tN9R>D%$)MjEDmT*JAOJ`Im9 zD!!DH%9v)J>QP;$nRjo#{?cb<$}1cDVoG;UY&I_mnFNZ zb`5@-P30+?D_=#&W3XD zYQ^VGq>rZ&am1Y*#0po85EJEZj}qap^_ikIqNCqgNnX<*54{cacmPR}Sd)pf3>fBZ zaN3cZzC2f-q0DYrl<0T`y9&(j0RTS$nRR8KG>Wb8f4~hYo)|~<^_tMbU%k_3-Qz!Q zC;=IJceuCPSqp1L{M=YSIKlF%`B&3KH9XY8Ba;T@iM8-8&|P5zlBhuYfAjbxVMTawrI2W!_D@cc1Qj81cZfDKkAa(O`Q3 zd_qP>hBePC+~_hy`Z0|-k=XxIFLEqw(V}QYL?Qks#B53s*jP*{dzO9rJ)70EK}Lz~ zS8thRU`2EvU+i%Nz76UpfXpI&Y;Z7|+pI4Z6JN+f%~fVU*rR@bRBpVRznZ<49YZLz zKqdrW;K^+x9;-b-l#wDk%Mfm;2TKFmMAe)R@IMCDHwws!(iAe>&FHs3St#sUwOYd7 zA3WqoO=azec$W>0$Hl=^yL5>VZ?)7B_(xfF@?KM*Ot1h#%j}@XA zx1!sRbKvCMecs_{BVwQDcbT~)#a{#hNpqJgx)0AFQtyq&yYEO->~cl40vuYQyPUvR zL4A9+J!}4(4a#NMC7_hD5&5aRM9LR1jn)AiptRL50?IAtf0lsK-zNk3rM6URfYmxP zTKFfy(x<#9zx%D-i>JOGrEF?yG9ImRxl*UO3{aQH13{jaLHVUAyX2wnSAh6DrTO9X ze=>2uRqi)Kd5%Z-OjULD!ku*jAy6q+NP~AR=!z@^0+~ajGYMwTp54qbs;}Fw3u5$> zWyqS}YstSyRyxrh&ClKr@TJWL4gs>R5zRqw?1<($LJ(Aq$S&jCb&f{-#7_;8Jsi<* zd|T>2S>uL(!P<_6nYCkrY5|59i}a0P*jMg`RmG(2Y_3brU3Xa{%Cc_XJt>ypz0!J9 zKtSF74kgH+(Q6;0`Srt=k1q2<1a10rz8wYMn!9k}!VPbx?3}Cy*6fM6gT9mT$+saT ztSPPg02}d@)|4I(?R&Nl;qZIa)PYkAl~&_k;QtZ!NBU*1%eb%B!Us{Dl$;!waT(JE zR>1!v>_5W0v!~om1>^QoJbzm2hcl<#)g6Bb0RM-u{|H|J7glt!^8o8UZ>lPAIR`)oXa>!jiRiQNMY3c9$n_(c>8 zkyO3}_CvHsAA8BfZxN9E`E!8@0?$E(2d=u>A0$cceiU@VH=SMF0u9960^-Ho)TMAi zrlnR~%OgdaDxdEjurzohzmJZ790om{bXxlT^9`m$@`{e_D?eZ`-}T@F@bHJ<&|*jw zwXJelbr8wBeA4nwg9&jBjh?wyDC02yZ9LB)cP>_Re>nl}Ug-2Lr`DXyJISC09V)g! 
zZZoFG7k3cU|X*-Sd3JY0rT^83*E@f)t49w~@5|bV$C4tRAIAsRE zqC)4JLqSM~{Uaw!us2S$M$-@gOp|3IMh+o2ydo)_iFDP938NDj%} MfAmM*y(iEA4<>j=+yDRo literal 0 HcmV?d00001 diff --git a/tests/integration_test_multi_modal_tool_returns.py b/tests/integration_test_multi_modal_tool_returns.py new file mode 100644 index 00000000..831913e6 --- /dev/null +++ b/tests/integration_test_multi_modal_tool_returns.py @@ -0,0 +1,408 @@ +""" +Integration tests for multi-modal tool returns (images in tool responses). + +These tests verify that: +1. Models supporting images in tool returns can see and describe image content +2. Models NOT supporting images (e.g., Chat Completions API) receive placeholder text +3. The image data is properly passed through the approval flow + +The test uses a secret.png image containing hidden text that the model must identify. +""" + +import base64 +import os +import uuid + +import pytest +from letta_client import Letta +from letta_client.types.agents import ApprovalRequestMessage, AssistantMessage, ToolCallMessage + +# ------------------------------ +# Constants +# ------------------------------ + +# The secret text embedded in the test image +# This is the actual text visible in secret.png +SECRET_TEXT_IN_IMAGE = "FIREBRAWL" + +# Models that support images in tool returns (Responses API, Anthropic, or Google AI) +MODELS_WITH_IMAGE_SUPPORT = [ + "anthropic/claude-sonnet-4-5-20250929", + "openai/gpt-5", # Uses Responses API + "google_ai/gemini-2.5-flash", # Google AI with vision support +] + +# Models that do NOT support images in tool returns (Chat Completions only) +MODELS_WITHOUT_IMAGE_SUPPORT = [ + "openai/gpt-4o-mini", # Uses Chat Completions API, not Responses +] + + +def _load_secret_image() -> str: + """Loads the secret test image and returns it as base64.""" + image_path = os.path.join(os.path.dirname(__file__), "data/secret.png") + with open(image_path, "rb") as f: + return base64.standard_b64encode(f.read()).decode("utf-8") + + +SECRET_IMAGE_BASE64 = _load_secret_image() + + +def get_image_tool_schema(): + """Returns a client-side tool schema that returns an image.""" + return { + "name": "get_secret_image", + "description": "Retrieves a secret image with hidden text. Call this function to get the image.", + "parameters": { + "type": "object", + "properties": {}, + "required": [], + }, + } + + +# ------------------------------ +# Fixtures +# ------------------------------ + + +@pytest.fixture +def client(server_url: str) -> Letta: + """Create a Letta client.""" + return Letta(base_url=server_url) + + +# ------------------------------ +# Test Cases +# ------------------------------ + + +class TestMultiModalToolReturns: + """Test multi-modal (image) content in tool returns.""" + + @pytest.mark.parametrize("model", MODELS_WITH_IMAGE_SUPPORT) + def test_model_can_see_image_in_tool_return(self, client: Letta, model: str) -> None: + """ + Test that models supporting images can see and describe image content + returned from a tool. + + Flow: + 1. User asks agent to get the secret image and tell them what's in it + 2. Agent calls client-side tool, execution pauses + 3. Client provides tool return with image content + 4. Agent processes the image and describes what it sees + 5. 
Verify the agent mentions the secret text from the image + """ + # Create agent for this test + agent = client.agents.create( + name=f"multimodal_test_{uuid.uuid4().hex[:8]}", + model=model, + embedding="openai/text-embedding-3-small", + include_base_tools=False, + tool_ids=[], + include_base_tool_rules=False, + tool_rules=[], + ) + + try: + tool_schema = get_image_tool_schema() + print(f"\n=== Testing image support with model: {model} ===") + + # Step 1: User asks for the secret image + print("\nStep 1: Asking agent to call get_secret_image tool...") + response1 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "role": "user", + "content": "Call the get_secret_image function now.", + } + ], + client_tools=[tool_schema], + ) + + # Validate Step 1: Should pause with approval request + assert response1.stop_reason.stop_reason == "requires_approval", f"Expected requires_approval, got {response1.stop_reason}" + + # Find the approval request with tool call + approval_msg = None + for msg in response1.messages: + if isinstance(msg, ApprovalRequestMessage): + approval_msg = msg + break + + assert approval_msg is not None, f"Expected an ApprovalRequestMessage but got {[type(m).__name__ for m in response1.messages]}" + assert approval_msg.tool_call.name == "get_secret_image" + + print(f"Tool call ID: {approval_msg.tool_call.tool_call_id}") + + # Step 2: Provide tool return with image content + print("\nStep 2: Providing tool return with image...") + + # Build image content as list of content parts + image_content = [ + {"type": "text", "text": "Here is the secret image:"}, + { + "type": "image", + "source": { + "type": "base64", + "data": SECRET_IMAGE_BASE64, + "media_type": "image/png", + }, + }, + ] + + response2 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "type": "approval", + "approvals": [ + { + "type": "tool", + "tool_call_id": approval_msg.tool_call.tool_call_id, + "tool_return": image_content, + "status": "success", + }, + ], + }, + ], + ) + + # Validate Step 2: Agent should process the image and respond + print(f"Stop reason: {response2.stop_reason}") + print(f"Messages: {len(response2.messages)}") + + # Find the assistant message with the response + assistant_response = None + for msg in response2.messages: + if isinstance(msg, AssistantMessage): + assistant_response = msg.content + print(f"Assistant response: {assistant_response[:200]}...") + break + + assert assistant_response is not None, "Expected an AssistantMessage with the image description" + + # Verify the model saw the secret text in the image + # The model should mention the secret code if it can see the image + assert SECRET_TEXT_IN_IMAGE in assistant_response.upper() or SECRET_TEXT_IN_IMAGE.lower() in assistant_response.lower(), ( + f"Model should have seen the secret text '{SECRET_TEXT_IN_IMAGE}' in the image, but response was: {assistant_response}" + ) + + print("\nSUCCESS: Model correctly identified secret text in image!") + + finally: + # Cleanup + client.agents.delete(agent_id=agent.id) + + @pytest.mark.parametrize("model", MODELS_WITHOUT_IMAGE_SUPPORT) + def test_model_without_image_support_gets_placeholder(self, client: Letta, model: str) -> None: + """ + Test that models NOT supporting images receive placeholder text + and cannot see the actual image content. + + This verifies that Chat Completions API models (which don't support + images in tool results) get a graceful fallback. + + Flow: + 1. User asks agent to get the secret image + 2. 
Agent calls client-side tool, execution pauses + 3. Client provides tool return with image content + 4. Agent processes but CANNOT see the image (only placeholder text) + 5. Verify the agent does NOT mention the secret text + """ + # Create agent for this test + agent = client.agents.create( + name=f"no_image_test_{uuid.uuid4().hex[:8]}", + model=model, + embedding="openai/text-embedding-3-small", + include_base_tools=False, + tool_ids=[], + include_base_tool_rules=False, + tool_rules=[], + ) + + try: + tool_schema = get_image_tool_schema() + print(f"\n=== Testing placeholder for model without image support: {model} ===") + + # Step 1: User asks for the secret image + print("\nStep 1: Asking agent to call get_secret_image tool...") + response1 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "role": "user", + "content": "Call the get_secret_image function now.", + } + ], + client_tools=[tool_schema], + ) + + # Validate Step 1: Should pause with approval request + assert response1.stop_reason.stop_reason == "requires_approval", f"Expected requires_approval, got {response1.stop_reason}" + + # Find the approval request with tool call + approval_msg = None + for msg in response1.messages: + if isinstance(msg, ApprovalRequestMessage): + approval_msg = msg + break + + assert approval_msg is not None, f"Expected an ApprovalRequestMessage but got {[type(m).__name__ for m in response1.messages]}" + + # Step 2: Provide tool return with image content + print("\nStep 2: Providing tool return with image...") + + image_content = [ + {"type": "text", "text": "Here is the secret image:"}, + { + "type": "image", + "source": { + "type": "base64", + "data": SECRET_IMAGE_BASE64, + "media_type": "image/png", + }, + }, + ] + + response2 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "type": "approval", + "approvals": [ + { + "type": "tool", + "tool_call_id": approval_msg.tool_call.tool_call_id, + "tool_return": image_content, + "status": "success", + }, + ], + }, + ], + ) + + # Find the assistant message + assistant_response = None + for msg in response2.messages: + if isinstance(msg, AssistantMessage): + assistant_response = msg.content + print(f"Assistant response: {assistant_response[:200]}...") + break + + assert assistant_response is not None, "Expected an AssistantMessage" + + # Verify the model did NOT see the secret text (it got placeholder instead) + assert ( + SECRET_TEXT_IN_IMAGE not in assistant_response.upper() and SECRET_TEXT_IN_IMAGE.lower() not in assistant_response.lower() + ), ( + f"Model should NOT have seen the secret text '{SECRET_TEXT_IN_IMAGE}' (it doesn't support images), " + f"but response was: {assistant_response}" + ) + + # The model should mention something about image being omitted/not visible + response_lower = assistant_response.lower() + mentions_image_issue = any( + phrase in response_lower + for phrase in ["image", "omitted", "cannot see", "can't see", "unable to", "not able to", "no image"] + ) + + print("\nSUCCESS: Model correctly did not see the secret (image support not available)") + if mentions_image_issue: + print("Model acknowledged it cannot see the image content") + + finally: + # Cleanup + client.agents.delete(agent_id=agent.id) + + +class TestMultiModalToolReturnsSerialization: + """Test that multi-modal tool returns serialize/deserialize correctly.""" + + @pytest.mark.parametrize("model", MODELS_WITH_IMAGE_SUPPORT[:1]) # Just test one model + def test_tool_return_with_image_persists_in_db(self, client: Letta, model: 
str) -> None:
+        """
+        Test that tool returns with images are correctly persisted and
+        can be retrieved from the database.
+        """
+        agent = client.agents.create(
+            name=f"persist_test_{uuid.uuid4().hex[:8]}",
+            model=model,
+            embedding="openai/text-embedding-3-small",
+            include_base_tools=False,
+            tool_ids=[],
+            include_base_tool_rules=False,
+            tool_rules=[],
+        )
+
+        try:
+            tool_schema = get_image_tool_schema()
+
+            # Trigger tool call
+            response1 = client.agents.messages.create(
+                agent_id=agent.id,
+                messages=[{"role": "user", "content": "Call the get_secret_image tool."}],
+                client_tools=[tool_schema],
+            )
+
+            assert response1.stop_reason.stop_reason == "requires_approval"
+
+            approval_msg = None
+            for msg in response1.messages:
+                if isinstance(msg, ApprovalRequestMessage):
+                    approval_msg = msg
+                    break
+
+            assert approval_msg is not None
+
+            # Provide image tool return
+            image_content = [
+                {"type": "text", "text": "Image result"},
+                {
+                    "type": "image",
+                    "source": {
+                        "type": "base64",
+                        "data": SECRET_IMAGE_BASE64,
+                        "media_type": "image/png",
+                    },
+                },
+            ]
+
+            response2 = client.agents.messages.create(
+                agent_id=agent.id,
+                messages=[
+                    {
+                        "type": "approval",
+                        "approvals": [
+                            {
+                                "type": "tool",
+                                "tool_call_id": approval_msg.tool_call.tool_call_id,
+                                "tool_return": image_content,
+                                "status": "success",
+                            },
+                        ],
+                    },
+                ],
+            )
+
+            # Verify we got a response
+            assert response2.stop_reason is not None
+
+            # Retrieve messages from DB and verify they persisted
+            messages_from_db = client.agents.messages.list(agent_id=agent.id)
+
+            # Look for the tool return message in the persisted messages
+            found_tool_return = False
+            for msg in messages_from_db.items:
+                # Check if this is a tool return message that might contain our image
+                if hasattr(msg, "tool_returns") and msg.tool_returns:
+                    found_tool_return = True
+                    break
+
+            # The tool return should have been saved
+            print(f"Found {len(messages_from_db.items)} messages in DB")
+            assert found_tool_return, "Expected a persisted message with tool_returns containing the image tool result"
+
+        finally:
+            client.agents.delete(agent_id=agent.id)
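
For reference, a minimal client-side sketch of the round trip these tests exercise: trigger the client-side tool, then answer the resulting approval request with a tool_return made of content parts (text plus a base64 image). The helper name, the hard-coded prompt, and the file-path argument are illustrative only; the payload shape is copied from the tests above.

import base64

from letta_client import Letta
from letta_client.types.agents import ApprovalRequestMessage


def reply_with_image_tool_return(client: Letta, agent_id: str, image_path: str, mime_type: str = "image/png"):
    """Ask the agent to call a client-side tool, then answer the approval request with an image."""
    # Same client-side tool schema as get_image_tool_schema() in the tests.
    tool_schema = {
        "name": "get_secret_image",
        "description": "Retrieves a secret image with hidden text. Call this function to get the image.",
        "parameters": {"type": "object", "properties": {}, "required": []},
    }

    # Step 1: trigger the tool call; execution pauses with an approval request.
    first = client.agents.messages.create(
        agent_id=agent_id,
        messages=[{"role": "user", "content": "Call the get_secret_image function now."}],
        client_tools=[tool_schema],
    )
    approval = next(m for m in first.messages if isinstance(m, ApprovalRequestMessage))

    # Step 2: return text + image content parts as the tool result.
    with open(image_path, "rb") as f:
        image_b64 = base64.standard_b64encode(f.read()).decode("utf-8")
    tool_return = [
        {"type": "text", "text": "Here is the requested image:"},
        {"type": "image", "source": {"type": "base64", "data": image_b64, "media_type": mime_type}},
    ]
    return client.agents.messages.create(
        agent_id=agent_id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": approval.tool_call.tool_call_id,
                        "tool_return": tool_return,
                        "status": "success",
                    },
                ],
            },
        ],
    )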
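
The placeholder test above relies on the server substituting text for image parts when the target provider cannot accept images in tool results. A rough sketch of that kind of fallback, assuming a plain list of content-part dicts; the function name and placeholder wording are invented for illustration and are not the patch's actual converter code.

from typing import List, Union


def tool_return_to_plain_text(tool_return: Union[str, List[dict]]) -> str:
    """Collapse a multi-modal tool return into plain text for providers that only
    accept strings in tool results (e.g., Chat Completions-style APIs)."""
    if isinstance(tool_return, str):
        return tool_return
    pieces = []
    for part in tool_return:
        if part.get("type") == "text":
            pieces.append(part.get("text", ""))
        elif part.get("type") == "image":
            # Hypothetical placeholder wording; the real converter may differ.
            pieces.append("[Image content omitted: provider does not support images in tool results]")
    return "\n".join(pieces)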