merge this (#4759)
* wait I forgot to commit locally * cp the entire core directory and then rm the .git subdir
This commit is contained in:
0
tests/mcp_tests/__init__.py
Normal file
0
tests/mcp_tests/__init__.py
Normal file
1
tests/mcp_tests/mcp_config.json
Normal file
1
tests/mcp_tests/mcp_config.json
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
377
tests/mcp_tests/test_mcp.py
Normal file
377
tests/mcp_tests/test_mcp.py
Normal file
@@ -0,0 +1,377 @@
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
import venv
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from dotenv import load_dotenv
|
||||
from letta_client import Letta, McpTool, ToolCallMessage, ToolReturnMessage
|
||||
|
||||
from letta.functions.mcp_client.types import SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig
|
||||
from letta.schemas.embedding_config import EmbeddingConfig
|
||||
from letta.schemas.letta_message_content import TextContent
|
||||
from letta.schemas.llm_config import LLMConfig
|
||||
from letta.schemas.message import MessageCreate
|
||||
from tests.utils import wait_for_server
|
||||
|
||||
|
||||
def create_virtualenv_and_install_requirements(requirements_path: Path, name: str = "venv", force_recreate: bool = True) -> Path:
    """Create a virtualenv next to *requirements_path* and pip-install into it.

    Args:
        requirements_path: Path to a file that must be named ``requirements.txt``.
        name: Directory name for the virtualenv, created as a sibling of the
            requirements file.
        force_recreate: When True (default), any existing venv directory is
            removed first to avoid reusing a corrupted environment. Also acts
            as the one-shot retry guard on install failure.

    Returns:
        Path to the virtualenv directory.

    Raises:
        FileNotFoundError: If the requirements file (or the venv's pip) is missing.
        ValueError: If the file is not literally named ``requirements.txt``.
        RuntimeError: If ``pip install`` fails even after one clean retry.
    """
    requirements_path = requirements_path.resolve()

    if not requirements_path.exists():
        raise FileNotFoundError(f"Requirements file not found: {requirements_path}")
    if requirements_path.name != "requirements.txt":
        raise ValueError(f"Expected file named 'requirements.txt', got: {requirements_path.name}")

    venv_dir = requirements_path.parent / name

    # Always clean up existing venv if force_recreate is True (default).
    # This prevents corruption issues.
    if venv_dir.exists() and force_recreate:
        try:
            shutil.rmtree(venv_dir)
            print(f"Cleaned up existing venv at {venv_dir}")
        except Exception as e:
            print(f"Warning: Failed to remove existing venv: {e}")
            # Continue anyway, might still work

    # Create fresh venv
    if not venv_dir.exists():
        venv.EnvBuilder(with_pip=True, clear=True).create(venv_dir)

    pip_path = venv_dir / ("Scripts" if os.name == "nt" else "bin") / "pip"

    # Wait a moment for venv creation to complete
    for _ in range(10):
        if pip_path.exists():
            break
        time.sleep(0.1)

    if not pip_path.exists():
        raise FileNotFoundError(f"pip executable not found at: {pip_path}")

    try:
        # Upgrade pip first to avoid potential issues
        subprocess.check_call([str(pip_path), "install", "--upgrade", "pip"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Install requirements
        subprocess.check_call([str(pip_path), "install", "-r", str(requirements_path)])
    except subprocess.CalledProcessError as exc:
        # BUG FIX: the retry guard was inverted (`if not force_recreate`), so the
        # documented "clean recreation" retry never ran on the default path, and a
        # failing call made with force_recreate=False recursed without bound.
        # Retry exactly once: remove the (possibly corrupted) venv, then rebuild
        # with further retries disabled.
        if force_recreate:
            print("Initial pip install failed, attempting clean recreation...")
            shutil.rmtree(venv_dir, ignore_errors=True)
            return create_virtualenv_and_install_requirements(requirements_path, name, force_recreate=False)
        raise RuntimeError(f"pip install failed with exit code {exc.returncode}") from exc

    return venv_dir
|
||||
|
||||
|
||||
# --- Server Management --- #
|
||||
|
||||
|
||||
def _run_server():
    """Starts the Letta server in a background thread."""
    # Load .env before importing the app module so any import-time
    # configuration in letta picks up the environment variables.
    load_dotenv()
    from letta.server.rest_api.app import start_server

    # Blocks until the server exits; callers run this in a daemon thread.
    start_server(debug=True)
|
||||
|
||||
|
||||
@pytest.fixture
def empty_mcp_config():
    """Write an empty MCP config file next to this test module and return its path."""
    config_path = Path(__file__).parent / "mcp_config.json"
    config_path.write_text(json.dumps({}))  # writes "{}"
    return config_path
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def cleanup_test_venvs():
    """Fixture to clean up test virtual environments before and after tests."""
    stale_venv = Path(__file__).parent / "weather" / "venv"

    # Clean before test (in case of previous failure); removal errors are
    # deliberately ignored so cleanup can never fail a test.
    if stale_venv.exists():
        shutil.rmtree(stale_venv, ignore_errors=True)

    yield  # Run the test

    # Note: We don't clean after test to allow debugging if needed.
    # The next test run will clean it anyway.
|
||||
|
||||
|
||||
@pytest.fixture()
def server_url(empty_mcp_config):
    """Ensures a server is running and returns its base URL."""
    configured = os.getenv("LETTA_SERVER_URL")
    url = configured if configured is not None else "http://localhost:8283"

    # No externally managed server configured: boot one in a daemon thread
    # and block until it answers.
    if not configured:
        server_thread = threading.Thread(target=_run_server, daemon=True)
        server_thread.start()
        wait_for_server(url)

    return url
|
||||
|
||||
|
||||
@pytest.fixture()
def client(server_url):
    """Creates a REST client for testing."""
    return Letta(base_url=server_url)
|
||||
|
||||
|
||||
@pytest.fixture()
def agent_state(client):
    """Creates an agent and ensures cleanup after tests."""
    memory_blocks = [
        {"label": "human", "value": "Name: Matt"},
        {"label": "persona", "value": "Friendly agent"},
    ]
    state = client.agents.create(
        name=f"test_compl_{str(uuid.uuid4())[5:]}",
        include_base_tools=True,
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    yield state
    # Teardown: remove the agent created for this test.
    client.agents.delete(state.id)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_sse_mcp_server(client, agent_state):
    """End-to-end check of an SSE MCP server (deepwiki).

    Registers the server, lists its tools, attaches `ask_question` to an
    agent, and verifies the agent invokes it successfully. The server is
    always deregistered in the `finally` block, even on assertion failure.
    """
    try:
        mcp_server_name = "deepwiki"
        server_url = "https://mcp.deepwiki.com/sse"
        sse_mcp_config = SSEServerConfig(server_name=mcp_server_name, server_url=server_url)
        client.tools.add_mcp_server(request=sse_mcp_config)

        # Check that it's in the server mapping
        mcp_server_mapping = client.tools.list_mcp_servers()
        assert mcp_server_name in mcp_server_mapping

        # Check tools
        tools = client.tools.list_mcp_tools_by_server(mcp_server_name=mcp_server_name)
        assert len(tools) > 0
        assert isinstance(tools[0], McpTool)

        # Test with the ask_question tool which is one of the available deepwiki tools
        ask_question_tool = next((t for t in tools if t.name == "ask_question"), None)
        assert ask_question_tool is not None, f"ask_question tool not found. Available tools: {[t.name for t in tools]}"

        # Check that the tool is executable
        letta_tool = client.tools.add_mcp_tool(mcp_server_name=mcp_server_name, mcp_tool_name=ask_question_tool.name)

        tool_args = {"repoName": "facebook/react", "question": "What is React?"}

        # Add to agent, have agent invoke tool
        client.agents.tools.attach(agent_id=agent_state.id, tool_id=letta_tool.id)
        response = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[
                MessageCreate(
                    role="user",
                    content=[TextContent(text=f"Use the `{letta_tool.name}` tool with these arguments: {tool_args}.")],
                )
            ],
        )
        seq = response.messages
        calls = [m for m in seq if isinstance(m, ToolCallMessage)]
        assert calls, "Expected a ToolCallMessage"
        assert calls[0].tool_call.name == "ask_question"

        returns = [m for m in seq if isinstance(m, ToolReturnMessage)]
        assert returns, "Expected a ToolReturnMessage"
        tr = returns[0]
        # status field
        assert tr.status == "success", f"Bad status: {tr.status}"
        # Check that we got some content back
        assert len(tr.tool_return.strip()) > 0, f"Expected non-empty tool return, got: {tr.tool_return}"
    finally:
        # mcp_server_name is assigned on the first line of the try, so it is
        # always bound here.
        client.tools.delete_mcp_server(mcp_server_name=mcp_server_name)
        assert mcp_server_name not in client.tools.list_mcp_servers()
|
||||
|
||||
|
||||
def test_stdio_mcp_server(client, agent_state, server_url):
    """End-to-end check of a stdio MCP server.

    Builds a dedicated virtualenv for the local `weather` server, registers
    it over stdio, attaches its `get_alerts` tool to an agent, and verifies
    the agent invokes it successfully. The server is always deregistered in
    the `finally` block.
    """
    req_file = Path(__file__).parent / "weather" / "requirements.txt"
    create_virtualenv_and_install_requirements(req_file, name="venv")

    mcp_server_name = "weather"
    # Run the server with the venv's own interpreter so its pinned deps are
    # used. NOTE(review): POSIX-only path ("bin"); Windows would need "Scripts".
    command = str(Path(__file__).parent / "weather" / "venv" / "bin" / "python3")
    args = [str(Path(__file__).parent / "weather" / "weather.py")]

    stdio_config = StdioServerConfig(
        server_name=mcp_server_name,
        command=command,
        args=args,
    )

    try:
        client.tools.add_mcp_server(request=stdio_config)

        servers = client.tools.list_mcp_servers()
        assert mcp_server_name in servers

        tools = client.tools.list_mcp_tools_by_server(mcp_server_name=mcp_server_name)
        assert tools, "Expected at least one tool from the weather MCP server"
        assert any(t.name == "get_alerts" for t in tools), f"Got: {[t.name for t in tools]}"

        get_alerts = next(t for t in tools if t.name == "get_alerts")

        letta_tool = client.tools.add_mcp_tool(
            mcp_server_name=mcp_server_name,
            mcp_tool_name=get_alerts.name,
        )

        client.agents.tools.attach(agent_id=agent_state.id, tool_id=letta_tool.id)

        response = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[
                MessageCreate(
                    role="user",
                    content=[TextContent(text=(f"Use the `{letta_tool.name}` tool with these arguments: {{'state': 'CA'}}."))],
                )
            ],
        )

        calls = [m for m in response.messages if isinstance(m, ToolCallMessage) and m.tool_call.name == "get_alerts"]
        assert calls, "Expected a get_alerts ToolCallMessage"

        # Match the return message to the specific call via tool_call_id.
        returns = [m for m in response.messages if isinstance(m, ToolReturnMessage) and m.tool_call_id == calls[0].tool_call.tool_call_id]
        assert returns, "Expected a ToolReturnMessage for get_alerts"
        ret = returns[0]

        assert ret.status == "success", f"Unexpected status: {ret.status}"
        # make sure there's at least some payload
        assert len(ret.tool_return.strip()) >= 10, f"Expected at least 10 characters in tool_return, got {len(ret.tool_return.strip())}"
    finally:
        client.tools.delete_mcp_server(mcp_server_name=mcp_server_name)
        assert mcp_server_name not in client.tools.list_mcp_servers()
|
||||
|
||||
|
||||
# Optional OpenAI validation test for MCP-normalized schema
# Skips unless OPENAI_API_KEY is set to avoid network flakiness in CI
#
# Deliberately broken tool-parameter schema: "message" is an object whose
# additionalProperties is an empty (untyped) subschema, which OpenAI's
# strict function-calling validation rejects.
EXAMPLE_BAD_SCHEMA = {
    "type": "object",
    "properties": {
        "conversation_type": {
            "type": "string",
            "const": "Group",
            "description": "Specifies the type of conversation to be created. Must be 'Group' for this action.",
        },
        "message": {
            "type": "object",
            "additionalProperties": {},  # invalid for OpenAI: missing "type"
            "description": "Initial message payload",
        },
        "participant_ids": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Participant IDs",
        },
    },
    "required": ["conversation_type", "message", "participant_ids"],
    "additionalProperties": False,
    "$schema": "http://json-schema.org/draft-07/schema#",
}
|
||||
|
||||
|
||||
@pytest.mark.skipif(
    not os.getenv("OPENAI_API_KEY"),
    reason="Requires OPENAI_API_KEY to call OpenAI for schema validation",
)
def test_openai_rejects_untyped_additional_properties_and_accepts_normalized_schema():
    """Test written to check if our extra schema validation works.

    Some MCP servers will return faulty schemas that require correction, or they will brick the LLM client calls.
    """
    import copy

    try:
        from openai import OpenAI
    except Exception as e:  # pragma: no cover
        pytest.skip(f"openai package not available: {e}")

    client = OpenAI()

    def run_request_with_schema(schema: dict):
        # Minimal chat request whose only purpose is to have OpenAI validate
        # the tool's parameter schema under strict mode.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "TWITTER_CREATE_A_NEW_DM_CONVERSATION",
                    "description": "Create a DM conversation",
                    "parameters": schema,
                    "strict": True,
                },
            }
        ]

        return client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "hello"}],
            tools=tools,
        )

    # Bad schema should raise
    with pytest.raises(Exception):
        run_request_with_schema(EXAMPLE_BAD_SCHEMA)

    # Normalized should succeed: give "message" a concrete shape instead of
    # the free-form additionalProperties that OpenAI rejects.
    normalized = copy.deepcopy(EXAMPLE_BAD_SCHEMA)
    normalized["properties"]["message"]["additionalProperties"] = False
    normalized["properties"]["message"]["properties"] = {"text": {"type": "string"}}
    normalized["properties"]["message"]["required"] = ["text"]
    resp = run_request_with_schema(normalized)
    assert getattr(resp, "id", None)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_streamable_http_mcp_server_update_schema_no_docstring_required(client, agent_state, server_url):
    """
    Repro for schema-derivation-on-update error with MCP tools.

    Without the fix, calling add_mcp_tool a second time for the same MCP tool
    triggers a docstring-based schema derivation on a generated wrapper that has
    no docstring, causing a 500. With the fix in place, updates should succeed.
    """
    # Unique server name so repeated runs don't collide with stale registrations.
    mcp_server_name = f"deepwiki_http_{uuid.uuid4().hex[:6]}"
    mcp_url = "https://mcp.deepwiki.com/mcp"

    http_mcp_config = StreamableHTTPServerConfig(server_name=mcp_server_name, server_url=mcp_url)
    try:
        client.tools.add_mcp_server(request=http_mcp_config)

        # Ensure server is registered
        servers = client.tools.list_mcp_servers()
        assert mcp_server_name in servers

        # Fetch available tools from server
        tools = client.tools.list_mcp_tools_by_server(mcp_server_name=mcp_server_name)
        assert tools, "Expected at least one tool from deepwiki streamable-http MCP server"
        ask_question_tool = next((t for t in tools if t.name == "ask_question"), None)
        assert ask_question_tool is not None, f"ask_question tool not found. Available: {[t.name for t in tools]}"

        # Initial create
        letta_tool_1 = client.tools.add_mcp_tool(mcp_server_name=mcp_server_name, mcp_tool_name=ask_question_tool.name)
        assert letta_tool_1 is not None

        # Update path (re-register same tool); should not attempt Python docstring schema derivation
        letta_tool_2 = client.tools.add_mcp_tool(mcp_server_name=mcp_server_name, mcp_tool_name=ask_question_tool.name)
        assert letta_tool_2 is not None
    finally:
        client.tools.delete_mcp_server(mcp_server_name=mcp_server_name)
        assert mcp_server_name not in client.tools.list_mcp_servers()
|
||||
459
tests/mcp_tests/test_mcp_schema_validation.py
Normal file
459
tests/mcp_tests/test_mcp_schema_validation.py
Normal file
@@ -0,0 +1,459 @@
|
||||
"""
|
||||
Test MCP tool schema validation integration.
|
||||
"""
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from letta.functions.mcp_client.types import MCPTool, MCPToolHealth
|
||||
from letta.functions.schema_generator import generate_tool_schema_for_mcp
|
||||
from letta.functions.schema_validator import SchemaHealth, validate_complete_json_schema
|
||||
from letta.server.rest_api.dependencies import HeaderParams
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_mcp_tools_get_health_status():
    """Test that MCP tools receive health status when listed."""
    from letta.server.server import SyncServer

    # Create mock tools with different schema types
    mock_tools = [
        # Strict compliant tool
        MCPTool(
            name="strict_tool",
            inputSchema={"type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"], "additionalProperties": False},
        ),
        # Non-strict tool (free-form object)
        MCPTool(
            name="non_strict_tool",
            inputSchema={
                "type": "object",
                "properties": {"message": {"type": "object", "additionalProperties": {}}},  # Free-form object
                "required": ["message"],
                "additionalProperties": False,
            },
        ),
        # Invalid tool (missing type)
        MCPTool(name="invalid_tool", inputSchema={"properties": {"data": {"type": "string"}}, "required": ["data"]}),
    ]

    # Mock the server and client
    mock_client = AsyncMock()
    mock_client.list_tools = AsyncMock(return_value=mock_tools)

    # Call the method directly. __new__ bypasses SyncServer.__init__ (which
    # would start real services); only the mcp_clients mapping is needed.
    actual_server = SyncServer.__new__(SyncServer)
    actual_server.mcp_clients = {"test_server": mock_client}

    tools = await actual_server.get_tools_from_mcp_server("test_server")

    # Verify health status was added
    assert len(tools) == 3

    # Check strict tool
    strict_tool = tools[0]
    assert strict_tool.name == "strict_tool"
    assert strict_tool.health is not None
    assert strict_tool.health.status == SchemaHealth.STRICT_COMPLIANT.value
    assert strict_tool.health.reasons == []

    # Check non-strict tool
    non_strict_tool = tools[1]
    assert non_strict_tool.name == "non_strict_tool"
    assert non_strict_tool.health is not None
    assert non_strict_tool.health.status == SchemaHealth.NON_STRICT_ONLY.value
    assert len(non_strict_tool.health.reasons) > 0
    assert any("additionalProperties" in reason for reason in non_strict_tool.health.reasons)

    # Check invalid tool
    invalid_tool = tools[2]
    assert invalid_tool.name == "invalid_tool"
    assert invalid_tool.health is not None
    assert invalid_tool.health.status == SchemaHealth.INVALID.value
    assert len(invalid_tool.health.reasons) > 0
    assert any("type" in reason for reason in invalid_tool.health.reasons)
|
||||
|
||||
|
||||
def test_composio_like_schema_marked_non_strict():
    """Composio-style schemas with a free-form object property must be classified NON_STRICT_ONLY."""
    # Example schema from Composio with free-form message object
    schema_under_test = {
        "type": "object",
        "properties": {
            "message": {
                "type": "object",
                "additionalProperties": {},  # Free-form, missing "type"
                "description": "Message to send",
            },
        },
        "required": ["message"],
        "additionalProperties": False,
    }

    status, reasons = validate_complete_json_schema(schema_under_test)

    assert status == SchemaHealth.NON_STRICT_ONLY
    assert len(reasons) > 0
    additional_props_mentions = [r for r in reasons if "additionalProperties" in r]
    assert additional_props_mentions
|
||||
|
||||
|
||||
def test_empty_object_in_required_marked_invalid():
    """A required property whose schema only permits an empty object must be INVALID."""
    # "config" can only ever be the empty object, yet it is required.
    empty_object_schema = {"type": "object", "properties": {}, "required": [], "additionalProperties": False}
    schema = {
        "type": "object",
        "properties": {"config": empty_object_schema},
        "required": ["config"],  # Required but allows empty object
        "additionalProperties": False,
    }

    status, reasons = validate_complete_json_schema(schema)

    assert status == SchemaHealth.INVALID
    assert any("empty object" in reason for reason in reasons)
    assert any("config" in reason for reason in reasons)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_add_mcp_tool_accepts_non_strict_schemas():
    """Test that adding MCP tools with non-strict schemas is allowed."""
    from letta.server.rest_api.routers.v1.tools import add_mcp_tool
    from letta.settings import tool_settings

    # Mock a non-strict tool
    non_strict_tool = MCPTool(
        name="test_tool",
        inputSchema={
            "type": "object",
            "properties": {"message": {"type": "object"}},  # Missing additionalProperties: false
            "required": ["message"],
            "additionalProperties": False,
        },
    )
    # Pre-stamp the health the server would have computed for this schema.
    non_strict_tool.health = MCPToolHealth(status=SchemaHealth.NON_STRICT_ONLY.value, reasons=["Missing additionalProperties for message"])

    # Mock server response
    with patch("letta.server.rest_api.routers.v1.tools.get_letta_server") as mock_get_server:
        with patch.object(tool_settings, "mcp_read_from_config", True):  # Ensure we're using config path
            mock_server = AsyncMock()
            mock_server.get_tools_from_mcp_server = AsyncMock(return_value=[non_strict_tool])
            mock_server.user_manager.get_user_or_default = MagicMock()
            mock_server.tool_manager.create_mcp_tool_async = AsyncMock(return_value=non_strict_tool)
            mock_get_server.return_value = mock_server

            # Should accept non-strict schema without raising an exception
            headers = HeaderParams(actor_id="test_user")
            result = await add_mcp_tool(mcp_server_name="test_server", mcp_tool_name="test_tool", server=mock_server, headers=headers)

            # Verify the tool was added successfully
            assert result is not None

            # Verify create_mcp_tool_async was called with the right parameters
            mock_server.tool_manager.create_mcp_tool_async.assert_called_once()
            call_args = mock_server.tool_manager.create_mcp_tool_async.call_args
            assert call_args.kwargs["mcp_server_name"] == "test_server"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_add_mcp_tool_rejects_invalid_schemas():
    """Test that adding MCP tools with invalid schemas is rejected."""
    from fastapi import HTTPException

    from letta.server.rest_api.routers.v1.tools import add_mcp_tool
    from letta.settings import tool_settings

    # Mock an invalid tool
    invalid_tool = MCPTool(
        name="test_tool",
        inputSchema={
            "properties": {"data": {"type": "string"}},
            "required": ["data"],
            # Missing "type": "object"
        },
    )
    # Pre-stamp the health the server would have computed for this schema.
    invalid_tool.health = MCPToolHealth(status=SchemaHealth.INVALID.value, reasons=["Missing 'type' at root level"])

    # Mock server response
    with patch("letta.server.rest_api.routers.v1.tools.get_letta_server") as mock_get_server:
        with patch.object(tool_settings, "mcp_read_from_config", True):  # Ensure we're using config path
            mock_server = AsyncMock()
            mock_server.get_tools_from_mcp_server = AsyncMock(return_value=[invalid_tool])
            mock_server.user_manager.get_user_or_default = MagicMock()
            mock_get_server.return_value = mock_server

            # Should raise HTTPException for invalid schema
            headers = HeaderParams(actor_id="test_user")
            with pytest.raises(HTTPException) as exc_info:
                await add_mcp_tool(mcp_server_name="test_server", mcp_tool_name="test_tool", server=mock_server, headers=headers)

            # Expect a 400 with a structured detail payload describing the failure.
            assert exc_info.value.status_code == 400
            assert "invalid schema" in exc_info.value.detail["message"].lower()
            assert exc_info.value.detail["health_status"] == SchemaHealth.INVALID.value
|
||||
|
||||
|
||||
def test_mcp_schema_healing_for_optional_fields():
    """Test that optional fields in MCP schemas are healed only in strict mode."""
    # Create an MCP tool with optional field 'b'
    mcp_tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "integer", "description": "Required field"},
                "b": {"type": "integer", "description": "Optional field"},
            },
            "required": ["a"],  # Only 'a' is required
            "additionalProperties": False,
        },
    )

    # Generate schema without strict mode - should NOT heal optional fields
    non_strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=False)
    assert "a" in non_strict_schema["parameters"]["required"]
    assert "b" not in non_strict_schema["parameters"]["required"]  # Should remain optional
    assert non_strict_schema["parameters"]["properties"]["b"]["type"] == "integer"  # No null added

    # Validate non-strict schema - should still be STRICT_COMPLIANT because validator is relaxed
    status, _ = validate_complete_json_schema(non_strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT

    # Generate schema with strict mode - should heal optional fields:
    # optional properties become required but gain "null" in their type union.
    strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)
    assert strict_schema["strict"] is True
    assert "a" in strict_schema["parameters"]["required"]
    assert "b" in strict_schema["parameters"]["required"]  # Now required
    assert set(strict_schema["parameters"]["properties"]["b"]["type"]) == {"integer", "null"}  # Now accepts null

    # Validate strict schema
    status, _ = validate_complete_json_schema(strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT  # Should pass strict mode
|
||||
|
||||
|
||||
def test_mcp_schema_healing_with_anyof():
    """Test schema healing for fields with anyOf that include optional types."""
    mcp_tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "string", "description": "Required field"},
                "b": {
                    "anyOf": [{"type": "integer"}, {"type": "null"}],
                    "description": "Optional field with anyOf",
                },
            },
            "required": ["a"],  # Only 'a' is required
            "additionalProperties": False,
        },
    )

    # Generate strict schema
    strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)
    assert strict_schema["strict"] is True
    assert "a" in strict_schema["parameters"]["required"]
    assert "b" in strict_schema["parameters"]["required"]  # Now required
    # Type should be flattened array with deduplication
    assert set(strict_schema["parameters"]["properties"]["b"]["type"]) == {"integer", "null"}

    # Validate strict schema
    status, _ = validate_complete_json_schema(strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT
|
||||
|
||||
|
||||
def test_mcp_schema_type_deduplication():
    """Duplicate entries in an anyOf union must collapse to one entry per type."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "field": {
                    "anyOf": [
                        {"type": "string"},
                        {"type": "string"},  # Duplicate
                        {"type": "null"},
                    ],
                    "description": "Field with duplicate types",
                },
            },
            "required": [],
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)

    # After healing, each type appears exactly once.
    types_for_field = healed["parameters"]["properties"]["field"]["type"]
    assert sorted(types_for_field) == sorted(set(types_for_field))  # No duplicates
    assert set(types_for_field) == {"string", "null"}
|
||||
|
||||
|
||||
def test_mcp_schema_healing_preserves_existing_null():
    """Healing must not append a second "null" when the type list already contains one."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "field": {
                    "type": ["string", "null"],  # Already has null
                    "description": "Field that already accepts null",
                },
            },
            "required": [],  # Optional
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)

    # Exactly one "null" entry should remain after strict-mode healing.
    type_list = healed["parameters"]["properties"]["field"]["type"]
    assert type_list.count("null") == 1
|
||||
|
||||
|
||||
def test_mcp_schema_healing_all_fields_already_required():
    """Test that schema healing works correctly when all fields are already required."""
    mcp_tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "string", "description": "Field A"},
                "b": {"type": "integer", "description": "Field B"},
            },
            "required": ["a", "b"],  # All fields already required
            "additionalProperties": False,
        },
    )

    # Generate strict schema
    strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)

    # Check that fields remain as-is: no "null" union added, required unchanged.
    assert set(strict_schema["parameters"]["required"]) == {"a", "b"}
    assert strict_schema["parameters"]["properties"]["a"]["type"] == "string"
    assert strict_schema["parameters"]["properties"]["b"]["type"] == "integer"

    # Should be strict compliant
    status, _ = validate_complete_json_schema(strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT
|
||||
|
||||
|
||||
def test_mcp_schema_with_uuid_format():
    """Test handling of UUID format in anyOf schemas (root cause of duplicate string types)."""
    mcp_tool = MCPTool(
        name="test_tool",
        description="A test tool with UUID formatted field",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    # Two "string" variants (plain + uuid-formatted) plus null:
                    # flattening must not produce a duplicated "string" entry.
                    "anyOf": [{"type": "string"}, {"format": "uuid", "type": "string"}, {"type": "null"}],
                    "description": "Session ID that can be a string, UUID, or null",
                },
            },
            "required": [],
            "additionalProperties": False,
        },
    )

    # Generate strict schema
    strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)

    # Check that string type is not duplicated
    session_props = strict_schema["parameters"]["properties"]["session_id"]
    assert set(session_props["type"]) == {"string", "null"}  # No duplicate strings
    # Format should NOT be preserved because field is optional (has null type)
    assert "format" not in session_props

    # Should be in required array (healed)
    assert "session_id" in strict_schema["parameters"]["required"]

    # Should be strict compliant
    status, _ = validate_complete_json_schema(strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT
|
||||
|
||||
|
||||
def test_mcp_schema_healing_only_in_strict_mode():
    """Healing (null-widening + required-filling) must be applied only when strict=True."""
    tool = MCPTool(
        name="test_tool",
        description="Test that healing only happens in strict mode",
        inputSchema={
            "type": "object",
            "properties": {
                "required_field": {"type": "string", "description": "Already required"},
                "optional_field1": {"type": "integer", "description": "Optional 1"},
                "optional_field2": {"type": "boolean", "description": "Optional 2"},
            },
            "required": ["required_field"],
            "additionalProperties": False,
        },
    )

    # strict=False: the schema passes through untouched and no strict flag is set.
    relaxed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=False)
    relaxed_params = relaxed["parameters"]
    assert "strict" not in relaxed
    assert relaxed_params["required"] == ["required_field"]
    assert relaxed_params["properties"]["required_field"]["type"] == "string"
    assert relaxed_params["properties"]["optional_field1"]["type"] == "integer"
    assert relaxed_params["properties"]["optional_field2"]["type"] == "boolean"

    # strict=True: optional fields become required and gain "null" in their type lists.
    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    healed_params = healed["parameters"]
    assert healed["strict"] is True
    assert set(healed_params["required"]) == {"required_field", "optional_field1", "optional_field2"}
    assert healed_params["properties"]["required_field"]["type"] == "string"
    assert set(healed_params["properties"]["optional_field1"]["type"]) == {"integer", "null"}
    assert set(healed_params["properties"]["optional_field2"]["type"]) == {"boolean", "null"}

    # Both variants satisfy the (relaxed) strict-compliance validator.
    for generated in (relaxed, healed):
        status, _ = validate_complete_json_schema(generated["parameters"])
        assert status == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_with_uuid_format_required_field():
    """The uuid "format" must survive strict healing when the field is required (no null variant)."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool with required UUID formatted field",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "anyOf": [{"type": "string"}, {"format": "uuid", "type": "string"}],
                    "description": "Session ID that must be a string with UUID format",
                },
            },
            "required": ["session_id"],  # Required field
            "additionalProperties": False,
        },
    )

    schema = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    params = schema["parameters"]
    session = params["properties"]["session_id"]

    # Deduplicated to a single "string" entry with no "null" added.
    assert session["type"] == ["string"]
    # Format is preserved because the field is not optional.
    assert "format" in session
    assert session["format"] == "uuid"

    # Already-required field stays in the required list.
    assert "session_id" in params["required"]

    status, _ = validate_complete_json_schema(params)
    assert status == SchemaHealth.STRICT_COMPLIANT
345
tests/mcp_tests/test_schema_validator.py
Normal file
345
tests/mcp_tests/test_schema_validator.py
Normal file
@@ -0,0 +1,345 @@
|
||||
"""
|
||||
Unit tests for the JSON Schema validator for OpenAI strict mode compliance.
|
||||
"""
|
||||
|
||||
from letta.functions.schema_validator import SchemaHealth, validate_complete_json_schema
|
||||
|
||||
|
||||
class TestSchemaValidator:
    """Test cases for the schema validator.

    Each test hand-builds a JSON Schema dict and checks that
    validate_complete_json_schema classifies it as STRICT_COMPLIANT,
    NON_STRICT_ONLY, or INVALID, together with an explanatory reason list.
    """

    def test_valid_strict_compliant_schema(self):
        """Test a fully strict-compliant schema."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "The name of the user"},
                "age": {"type": "integer", "description": "The age of the user"},
                "address": {
                    "type": "object",
                    "properties": {"street": {"type": "string"}, "city": {"type": "string"}},
                    "required": ["street", "city"],
                    "additionalProperties": False,
                },
            },
            "required": ["name", "age", "address"],  # All properties must be required for strict mode
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_free_form_object_non_strict(self):
        """Test that free-form objects (like Composio message) are marked as NON_STRICT_ONLY."""
        schema = {
            "type": "object",
            "properties": {
                "message": {
                    "type": "object",
                    "description": "A message object",
                    # Missing additionalProperties: false makes this free-form
                }
            },
            "required": ["message"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.NON_STRICT_ONLY
        # The reported reason should point at the missing additionalProperties flag
        assert any("additionalProperties" in reason for reason in reasons)

    def test_empty_object_in_required_invalid(self):
        """Test that required properties allowing empty objects are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {
                "config": {"type": "object", "properties": {}, "required": [], "additionalProperties": False}  # Empty object schema
            },
            "required": ["config"],  # Required but allows empty object
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("empty object" in reason for reason in reasons)

    def test_missing_type_invalid(self):
        """Test that schemas missing type are marked INVALID."""
        schema = {
            # Missing "type": "object"
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("type" in reason.lower() for reason in reasons)

    def test_missing_items_in_array_invalid(self):
        """Test that arrays without items definition are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {
                "tags": {
                    "type": "array"
                    # Missing "items" definition
                }
            },
            "required": ["tags"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("items" in reason for reason in reasons)

    def test_required_property_not_in_properties_invalid(self):
        """Test that required properties not defined in properties are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name", "email"],  # "email" not in properties
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        # The reason should name the dangling required key
        assert any("email" in reason and "not found" in reason for reason in reasons)

    def test_nested_object_validation(self):
        """Test that nested objects are properly validated."""
        schema = {
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": {
                        "profile": {
                            "type": "object",
                            "properties": {"bio": {"type": "string"}},
                            # Missing additionalProperties and required
                        }
                    },
                    "required": ["profile"],
                    "additionalProperties": False,
                }
            },
            "required": ["user"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.NON_STRICT_ONLY
        # Should have warnings about nested profile object
        assert any("profile" in reason.lower() or "properties.profile" in reason for reason in reasons)

    def test_union_types_with_anyof(self):
        """Test schemas with anyOf union types."""
        schema = {
            "type": "object",
            "properties": {"value": {"anyOf": [{"type": "string"}, {"type": "number"}]}},
            "required": ["value"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_array_with_proper_items(self):
        """Test arrays with properly defined items."""
        schema = {
            "type": "object",
            "properties": {
                "items": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {"id": {"type": "string"}, "value": {"type": "number"}},
                        "required": ["id", "value"],
                        "additionalProperties": False,
                    },
                }
            },
            "required": ["items"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_empty_array_in_required_invalid(self):
        """Test that required properties allowing empty arrays are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {
                "tags": {
                    "type": "array",
                    "items": {"type": "string"},
                    # No minItems constraint, allows empty array
                }
            },
            "required": ["tags"],
            "additionalProperties": False,
        }

        # This should actually be STRICT_COMPLIANT since empty arrays with defined items are OK
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT

    def test_array_without_constraints_invalid(self):
        """Test that arrays without any constraints in required props are invalid."""
        schema = {
            "type": "object",
            "properties": {
                "data": {
                    "type": "array"
                    # No items defined at all - completely unconstrained
                }
            },
            "required": ["data"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("items" in reason for reason in reasons)

    def test_composio_like_schema(self):
        """Test a schema similar to Composio's free-form message structure."""
        schema = {
            "type": "object",
            "properties": {
                "message": {
                    "type": "object",
                    "description": "Message to send",
                    # No properties defined, no additionalProperties: false
                    # This is a free-form object
                }
            },
            "required": ["message"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.NON_STRICT_ONLY
        assert any("additionalProperties" in reason for reason in reasons)

    def test_non_dict_schema(self):
        """Test that non-dict schemas are marked INVALID."""
        schema = "not a dict"

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("dict" in reason for reason in reasons)

    def test_schema_with_defaults_non_strict(self):
        """Test that root-level schemas without required field are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {"name": {"type": "string"}, "optional": {"type": "string"}},
            # Missing "required" field at root level - validator now accepts this
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        # Validator is relaxed - schemas with optional fields are now STRICT_COMPLIANT
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_composio_schema_with_optional_root_properties_non_strict(self):
        """Test that Composio-like schemas with optional root properties are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "thinking": {"type": "string", "description": "Deep inner monologue"},
                "connected_account_id": {"type": "string", "description": "Specific connected account ID"},
                "toolkit": {"type": "string", "description": "Name of the toolkit"},
                "request_heartbeat": {"type": "boolean", "description": "Request immediate heartbeat"},
            },
            "required": ["thinking", "request_heartbeat"],  # Not all properties are required
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_root_level_without_required_non_strict(self):
        """Test that root-level objects without 'required' field are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "age": {"type": "integer"},
            },
            # No "required" field at root level - validator now accepts this
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        # Validator is relaxed - accepts schemas without required field
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_nested_object_without_required_non_strict(self):
        """Test that nested objects without 'required' are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": {
                        "preferences": {
                            "type": "object",
                            "properties": {"theme": {"type": "string"}, "language": {"type": "string"}},
                            # Missing "required" field in nested object
                            "additionalProperties": False,
                        },
                        "name": {"type": "string"},
                    },
                    "required": ["name"],  # Don't require preferences so it's not marked INVALID
                    "additionalProperties": False,
                }
            },
            "required": ["user"],
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_user_example_schema_non_strict(self):
        """Test the user's example schema with optional properties - now STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "a": {"title": "A", "type": "integer"},
                "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
            },
            "required": ["a"],  # Only 'a' is required, 'b' is not
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []

    def test_all_properties_required_strict_compliant(self):
        """Test that schemas with all properties required are STRICT_COMPLIANT."""
        schema = {
            "type": "object",
            "properties": {
                "a": {"title": "A", "type": "integer"},
                "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
            },
            "required": ["a", "b"],  # All properties are required
            "additionalProperties": False,
        }

        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
27
tests/mcp_tests/weather/requirements.txt
Normal file
27
tests/mcp_tests/weather/requirements.txt
Normal file
@@ -0,0 +1,27 @@
|
||||
annotated-types==0.7.0
|
||||
anyio==4.9.0
|
||||
certifi==2025.4.26
|
||||
click==8.1.8
|
||||
h11==0.16.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
httpx-sse==0.4.0
|
||||
idna==3.10
|
||||
markdown-it-py==3.0.0
|
||||
mcp==1.7.1
|
||||
mdurl==0.1.2
|
||||
pydantic==2.11.4
|
||||
pydantic-settings==2.9.1
|
||||
pydantic_core==2.33.2
|
||||
Pygments==2.19.1
|
||||
python-dotenv==1.1.0
|
||||
python-multipart==0.0.20
|
||||
rich==14.0.0
|
||||
shellingham==1.5.4
|
||||
sniffio==1.3.1
|
||||
sse-starlette==2.3.3
|
||||
starlette==0.46.2
|
||||
typer==0.15.3
|
||||
typing-inspection==0.4.0
|
||||
typing_extensions==4.13.2
|
||||
uvicorn==0.34.2
|
||||
97
tests/mcp_tests/weather/weather.py
Normal file
97
tests/mcp_tests/weather/weather.py
Normal file
@@ -0,0 +1,97 @@
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
|
||||
# Initialize FastMCP server
|
||||
mcp = FastMCP("weather")
|
||||
|
||||
# Constants
|
||||
NWS_API_BASE = "https://api.weather.gov"
|
||||
USER_AGENT = "weather-app/1.0"
|
||||
|
||||
|
||||
async def make_nws_request(url: str) -> dict[str, Any] | None:
    """Fetch *url* from the NWS API and return the parsed JSON body.

    Returns None on any failure (connection error, non-2xx status,
    or unparseable body) — callers treat None as "data unavailable".
    """
    request_headers = {"User-Agent": USER_AGENT, "Accept": "application/geo+json"}
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=request_headers, timeout=30.0)
            response.raise_for_status()
            payload = response.json()
        except Exception:
            # Best-effort helper: swallow all errors and signal failure via None.
            return None
    return payload
def format_alert(feature: dict) -> str:
    """Render a single NWS alert GeoJSON feature as a human-readable block of text.

    Missing properties fall back to placeholder strings rather than raising.
    """
    props = feature["properties"]
    field = props.get  # single lookup, reused for every line below
    return f"""
Event: {field("event", "Unknown")}
Area: {field("areaDesc", "Unknown")}
Severity: {field("severity", "Unknown")}
Description: {field("description", "No description available")}
Instructions: {field("instruction", "No specific instructions provided")}
"""
@mcp.tool()
async def get_alerts(state: str) -> str:
    """Get weather alerts for a US state.

    Args:
        state: Two-letter US state code (e.g. CA, NY)
    """
    # NOTE: the docstring above doubles as the MCP tool description, so it is kept verbatim.
    data = await make_nws_request(f"{NWS_API_BASE}/alerts/active/area/{state}")

    if not data or "features" not in data:
        return "Unable to fetch alerts or no alerts found."

    features = data["features"]
    if not features:
        return "No active alerts for this state."

    return "\n---\n".join(format_alert(feature) for feature in features)
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get weather forecast for a location.

    Args:
        latitude: Latitude of the location
        longitude: Longitude of the location
    """
    # NOTE: the docstring above doubles as the MCP tool description, so it is kept verbatim.
    # Step 1: resolve the lat/lon to an NWS gridpoint metadata document.
    points_data = await make_nws_request(f"{NWS_API_BASE}/points/{latitude},{longitude}")
    if not points_data:
        return "Unable to fetch forecast data for this location."

    # Step 2: follow the forecast URL embedded in the points response.
    forecast_data = await make_nws_request(points_data["properties"]["forecast"])
    if not forecast_data:
        return "Unable to fetch detailed forecast."

    # Step 3: render at most the next 5 forecast periods as readable text.
    rendered = [
        f"""
{period["name"]}:
Temperature: {period["temperature"]}°{period["temperatureUnit"]}
Wind: {period["windSpeed"]} {period["windDirection"]}
Forecast: {period["detailedForecast"]}
"""
        for period in forecast_data["properties"]["periods"][:5]
    ]
    return "\n---\n".join(rendered)
if __name__ == "__main__":
    # Initialize and run the server over stdio, so an MCP client can spawn
    # this script as a subprocess and speak the protocol on stdin/stdout.
    mcp.run(transport="stdio")
Reference in New Issue
Block a user