fix(core): add check_api_key method to MiniMaxProvider (#9112)
The MiniMaxProvider class was missing a check_api_key() implementation, causing /v1/providers/check to return a 500 error when validating MiniMax API keys. The base Provider class raises NotImplementedError. This adds check_api_key() using the Anthropic client (since MiniMax uses an Anthropic-compatible API), following the same pattern as AnthropicProvider. 👾 Generated with [Letta Code](https://letta.com) Co-authored-by: Letta <noreply@letta.com>
This commit is contained in:
committed by
Caren Thomas
parent
8f0ac630ab
commit
e0d9238bb6
@@ -1,7 +1,9 @@
|
||||
from typing import Literal
|
||||
|
||||
import anthropic
|
||||
from pydantic import Field
|
||||
|
||||
from letta.errors import ErrorCode, LLMAuthenticationError, LLMError
|
||||
from letta.log import get_logger
|
||||
from letta.schemas.enums import ProviderCategory, ProviderType
|
||||
from letta.schemas.llm_config import LLMConfig
|
||||
@@ -48,6 +50,22 @@ class MiniMaxProvider(Provider):
|
||||
api_key: str | None = Field(None, description="API key for the MiniMax API.", deprecated=True)
|
||||
base_url: str = Field("https://api.minimax.io/anthropic", description="Base URL for the MiniMax Anthropic-compatible API.")
|
||||
|
||||
async def check_api_key(self):
    """Validate the configured MiniMax API key.

    Issues a lightweight ``messages.count_tokens`` request against
    MiniMax's Anthropic-compatible endpoint (``self.base_url``),
    following the same pattern as the Anthropic provider's key check.

    Raises:
        ValueError: If no API key is configured on this provider.
        LLMAuthenticationError: If MiniMax rejects the API key.
        LLMError: For any other failure while contacting the API.
    """
    api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None
    if not api_key:
        raise ValueError("No API key provided")

    try:
        # MiniMax exposes an Anthropic-compatible API, so the async
        # Anthropic client works when pointed at the MiniMax base URL.
        client = anthropic.AsyncAnthropic(api_key=api_key, base_url=self.base_url)
        # count_tokens is a cheap authenticated call — enough to prove
        # the key is accepted without generating any output.
        await client.messages.count_tokens(model=MODEL_LIST[-1]["name"], messages=[{"role": "user", "content": "a"}])
    except anthropic.AuthenticationError as e:
        # Chain the SDK error (`from e`) so the original cause and
        # traceback are preserved for debugging (PEP 3134).
        raise LLMAuthenticationError(message=f"Failed to authenticate with MiniMax: {e}", code=ErrorCode.UNAUTHENTICATED) from e
    except Exception as e:
        raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR) from e
|
||||
|
||||
def get_default_max_output_tokens(self, model_name: str) -> int:
|
||||
"""Get the default max output tokens for MiniMax models."""
|
||||
# All MiniMax models support 128K output tokens
|
||||
@@ -73,7 +91,7 @@ class MiniMaxProvider(Provider):
|
||||
configs.append(
|
||||
LLMConfig(
|
||||
model=model["name"],
|
||||
model_endpoint_type="anthropic",
|
||||
model_endpoint_type="minimax",
|
||||
model_endpoint=self.base_url,
|
||||
context_window=model["context_window"],
|
||||
handle=self.get_handle(model["name"]),
|
||||
|
||||
Reference in New Issue
Block a user