* feat: add MiniMax provider support

  Add MiniMax as a new LLM provider using their Anthropic-compatible API.

  Key implementation details:
  - Uses the standard messages API (not beta)
  - MiniMax supports thinking blocks natively
  - Base URL: https://api.minimax.io/anthropic
  - Models: MiniMax-M2.1, MiniMax-M2.1-lightning, MiniMax-M2 (all 200K context, 128K output)
  - Temperature clamped to the valid range (0.0, 1.0] — see the sketch after this message
  - All M2.x models treated as reasoning models (support interleaved thinking)

  Files added:
  - letta/schemas/providers/minimax.py - MiniMax provider schema
  - letta/llm_api/minimax_client.py - Client extending AnthropicClient
  - tests/test_minimax_client.py - Unit tests (13 tests)
  - tests/model_settings/minimax-m2.1.json - Integration test config

  🐾 Generated with [Letta Code](https://letta.com)

  Co-Authored-By: Letta <noreply@letta.com>

* chore: regenerate API spec with MiniMax provider

  🐾 Generated with [Letta Code](https://letta.com)

  Co-Authored-By: Letta <noreply@letta.com>

* chore: use MiniMax-M2.1-lightning for CI tests

  Switch to the faster, cheaper lightning model variant for integration tests.

  🐾 Generated with [Letta Code](https://letta.com)

  Co-Authored-By: Letta <noreply@letta.com>

* chore: add MINIMAX_API_KEY to deploy-core command

  Co-authored-by: Sarah Wooders <sarahwooders@users.noreply.github.com>

* chore: regenerate web openapi spec with MiniMax provider

  Co-authored-by: Sarah Wooders <sarahwooders@users.noreply.github.com>

  🐾 Generated with [Letta Code](https://letta.com)

---------

Co-authored-by: Letta <noreply@letta.com>
Co-authored-by: letta-code <248085862+letta-code@users.noreply.github.com>
Co-authored-by: Sarah Wooders <sarahwooders@users.noreply.github.com>
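To make the two behavioral notes above concrete — temperature clamping to (0.0, 1.0] and treating all M2.x models as reasoning models — here is a minimal, self-contained sketch. The helper names `clamp_temperature` and `is_reasoning_model` are illustrative assumptions, not the actual functions in letta/llm_api/minimax_client.py:

def clamp_temperature(temperature: float | None) -> float:
    """Clamp a requested temperature into MiniMax's valid range (0.0, 1.0].

    Zero is excluded from the range, so non-positive values are bumped to a
    small positive epsilon; values above 1.0 are capped at 1.0. Defaulting
    None to 1.0 is an assumption, not confirmed by the PR.
    """
    if temperature is None:
        return 1.0
    if temperature <= 0.0:
        return 1e-6
    return min(temperature, 1.0)


def is_reasoning_model(model: str) -> bool:
    """Treat all MiniMax M2.x models as reasoning models, per the PR notes.

    A prefix check is an assumed implementation; it matches MiniMax-M2,
    MiniMax-M2.1, and MiniMax-M2.1-lightning.
    """
    return model.startswith("MiniMax-M2")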
letta/schemas/providers/__init__.py (56 lines, 1.6 KiB, Python)
# Provider base classes and utilities
from .base import Provider, ProviderBase, ProviderCheck, ProviderCreate, ProviderUpdate

# Provider implementations
from .anthropic import AnthropicProvider
from .azure import AzureProvider
from .bedrock import BedrockProvider
from .cerebras import CerebrasProvider
from .chatgpt_oauth import ChatGPTOAuthProvider
from .deepseek import DeepSeekProvider
from .google_gemini import GoogleAIProvider
from .google_vertex import GoogleVertexProvider
from .groq import GroqProvider
from .letta import LettaProvider
from .lmstudio import LMStudioOpenAIProvider
from .minimax import MiniMaxProvider
from .mistral import MistralProvider
from .ollama import OllamaProvider
from .openai import OpenAIProvider
from .openrouter import OpenRouterProvider
from .sglang import SGLangProvider
from .together import TogetherProvider
from .vllm import VLLMProvider
from .xai import XAIProvider
from .zai import ZAIProvider

__all__ = [
    # Base classes
    "Provider",
    "ProviderBase",
    "ProviderCreate",
    "ProviderUpdate",
    "ProviderCheck",
    # Provider implementations
    "AnthropicProvider",
    "AzureProvider",
    "BedrockProvider",
    "CerebrasProvider",
    "ChatGPTOAuthProvider",
    "DeepSeekProvider",
    "GoogleAIProvider",
    "GoogleVertexProvider",
    "GroqProvider",
    "LettaProvider",
    "LMStudioOpenAIProvider",
    "MiniMaxProvider",
    "MistralProvider",
    "OllamaProvider",
    "OpenAIProvider",
    "TogetherProvider",
    "VLLMProvider",  # Replaces ChatCompletions and Completions
    "SGLangProvider",
    "XAIProvider",
    "ZAIProvider",
    "OpenRouterProvider",
]
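Because this package `__init__.py` re-exports every provider, the new schema is importable from the package root. A hedged usage sketch follows; the constructor fields shown are assumptions, not the confirmed schema (the actual signature lives in letta/schemas/providers/minimax.py), while the base URL and API key variable come from the PR description above:

from letta.schemas.providers import MiniMaxProvider

# Field names below are illustrative assumptions about the schema.
provider = MiniMaxProvider(
    name="minimax",
    api_key="sk-...",  # supplied via MINIMAX_API_KEY in deployment
    base_url="https://api.minimax.io/anthropic",
)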