[ { "id": "sonnet", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6", "description": "Anthropic's new Sonnet model (high reasoning)", "isDefault": true, "isFeatured": true, "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "high", "enable_reasoner": true } }, { "id": "sonnet-1m", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6 1M", "description": "Claude Sonnet 4.6 with 1M token context window (high reasoning)", "updateArgs": { "context_window": 1000000, "max_output_tokens": 128000, "reasoning_effort": "high", "enable_reasoner": true } }, { "id": "sonnet-4.6-no-reasoning", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6", "description": "Sonnet 4.6 with no reasoning (faster)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "none", "enable_reasoner": false } }, { "id": "sonnet-4.6-low", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6", "description": "Sonnet 4.6 (low reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "low", "enable_reasoner": true, "max_reasoning_tokens": 4000 } }, { "id": "sonnet-4.6-medium", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6", "description": "Sonnet 4.6 (med reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "medium", "enable_reasoner": true, "max_reasoning_tokens": 12000 } }, { "id": "sonnet-4.6-xhigh", "handle": "anthropic/claude-sonnet-4-6", "label": "Sonnet 4.6", "description": "Sonnet 4.6 (max reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "xhigh", "enable_reasoner": true, "max_reasoning_tokens": 31999 } }, { "id": "sonnet-4.5", "handle": "anthropic/claude-sonnet-4-5-20250929", "label": "Sonnet 4.5", "description": "Previous default Sonnet model", "updateArgs": { "context_window": 180000, "max_output_tokens": 64000, 
"max_reasoning_tokens": 31999 } }, { "id": "sonnet-4.5-no-reasoning", "handle": "anthropic/claude-sonnet-4-5-20250929", "label": "Sonnet 4.5", "description": "Sonnet 4.5 with no reasoning (faster)", "updateArgs": { "enable_reasoner": false, "context_window": 180000, "max_output_tokens": 64000 } }, { "id": "opus", "handle": "anthropic/claude-opus-4-6", "label": "Opus 4.6", "description": "Anthropic's best model (high reasoning)", "isFeatured": true, "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "high", "enable_reasoner": true } }, { "id": "opus-4.6-no-reasoning", "handle": "anthropic/claude-opus-4-6", "label": "Opus 4.6", "description": "Opus 4.6 with no reasoning (faster)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "none", "enable_reasoner": false } }, { "id": "opus-4.6-low", "handle": "anthropic/claude-opus-4-6", "label": "Opus 4.6", "description": "Opus 4.6 (low reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "low", "enable_reasoner": true, "max_reasoning_tokens": 4000 } }, { "id": "opus-4.6-medium", "handle": "anthropic/claude-opus-4-6", "label": "Opus 4.6", "description": "Opus 4.6 (med reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "medium", "enable_reasoner": true, "max_reasoning_tokens": 12000 } }, { "id": "opus-4.6-xhigh", "handle": "anthropic/claude-opus-4-6", "label": "Opus 4.6", "description": "Opus 4.6 (max reasoning)", "updateArgs": { "context_window": 200000, "max_output_tokens": 128000, "reasoning_effort": "xhigh", "enable_reasoner": true, "max_reasoning_tokens": 31999 } }, { "id": "opus-4.5", "handle": "anthropic/claude-opus-4-5-20251101", "label": "Opus 4.5", "description": "Anthropic's (legacy) best model", "updateArgs": { "context_window": 180000, "max_output_tokens": 64000, "max_reasoning_tokens": 31999 } }, { "id": "bedrock-opus", "handle": 
"bedrock/us.anthropic.claude-opus-4-5-20251101-v1:0", "label": "Bedrock Opus 4.5", "shortLabel": "Opus 4.5 BR", "description": "Anthropic's best model (via AWS Bedrock)", "isFeatured": true, "updateArgs": { "context_window": 180000, "max_output_tokens": 64000, "max_reasoning_tokens": 31999 } }, { "id": "haiku", "handle": "anthropic/claude-haiku-4-5-20251001", "label": "Haiku 4.5", "description": "Anthropic's fastest model", "isFeatured": true, "updateArgs": { "context_window": 180000, "max_output_tokens": 64000 } }, { "id": "gpt-5.3-codex-plus-pro-none", "handle": "chatgpt-plus-pro/gpt-5.3-codex", "label": "GPT-5.3 Codex", "description": "GPT-5.3 Codex (no reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "none", "verbosity": "low", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.3-codex-plus-pro-low", "handle": "chatgpt-plus-pro/gpt-5.3-codex", "label": "GPT-5.3 Codex", "description": "GPT-5.3 Codex (low reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "low", "verbosity": "low", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.3-codex-plus-pro-medium", "handle": "chatgpt-plus-pro/gpt-5.3-codex", "label": "GPT-5.3 Codex", "description": "GPT-5.3 Codex (med reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "medium", "verbosity": "low", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.3-codex-plus-pro-high", "handle": "chatgpt-plus-pro/gpt-5.3-codex", "label": "GPT-5.3 Codex", "description": "GPT-5.3 Codex (high reasoning) via ChatGPT Plus/Pro", "isFeatured": true, "updateArgs": { "reasoning_effort": "high", "verbosity": "low", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.3-codex-plus-pro-xhigh", "handle": "chatgpt-plus-pro/gpt-5.3-codex", "label": "GPT-5.3 Codex", "description": "GPT-5.3 Codex (max reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "xhigh", "verbosity": "low", 
"context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-plus-pro-medium", "handle": "chatgpt-plus-pro/gpt-5.2-codex", "label": "GPT-5.2 Codex", "description": "GPT-5.2 Codex (med reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-plus-pro-high", "handle": "chatgpt-plus-pro/gpt-5.2-codex", "label": "GPT-5.2 Codex", "description": "GPT-5.2 Codex (high reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-plus-pro-medium", "handle": "chatgpt-plus-pro/gpt-5.2", "label": "GPT-5.2", "description": "GPT-5.2 (med reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-plus-pro-high", "handle": "chatgpt-plus-pro/gpt-5.2", "label": "GPT-5.2", "description": "GPT-5.2 (high reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-plus-pro-medium", "handle": "chatgpt-plus-pro/gpt-5.1-codex", "label": "GPT-5.1 Codex", "description": "GPT-5.1 Codex (med reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-plus-pro-high", "handle": "chatgpt-plus-pro/gpt-5.1-codex", "label": "GPT-5.1 Codex", "description": "GPT-5.1 Codex (high reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-plus-pro-medium", "handle": "chatgpt-plus-pro/gpt-5.1-codex-max", "label": "GPT-5.1 Codex Max", "description": "GPT-5.1 
Codex Max (med reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-plus-pro-high", "handle": "chatgpt-plus-pro/gpt-5.1-codex-max", "label": "GPT-5.1 Codex Max", "description": "GPT-5.1 Codex Max (high reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-plus-pro-xhigh", "handle": "chatgpt-plus-pro/gpt-5.1-codex-max", "label": "GPT-5.1 Codex Max", "description": "GPT-5.1 Codex Max (extra-high reasoning) via ChatGPT Plus/Pro", "updateArgs": { "reasoning_effort": "xhigh", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-codex", "handle": "openai/gpt-5-codex", "label": "GPT-5-Codex", "description": "GPT-5 variant (med reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-none", "handle": "openai/gpt-5.2", "label": "GPT-5.2", "description": "Latest general-purpose GPT (no reasoning)", "updateArgs": { "reasoning_effort": "none", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-low", "handle": "openai/gpt-5.2", "label": "GPT-5.2", "description": "Latest general-purpose GPT (low reasoning)", "updateArgs": { "reasoning_effort": "low", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-medium", "handle": "openai/gpt-5.2", "label": "GPT-5.2", "description": "Latest general-purpose GPT (med reasoning)", "isFeatured": true, "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-high", "handle": "openai/gpt-5.2", "label": "GPT-5.2", "description": "Latest 
general-purpose GPT (high reasoning)", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-xhigh", "handle": "openai/gpt-5.2", "label": "GPT-5.2", "description": "Latest general-purpose GPT (max reasoning)", "updateArgs": { "reasoning_effort": "xhigh", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-none", "handle": "openai/gpt-5.2-codex", "label": "GPT-5.2-Codex", "description": "GPT-5.2 variant (no reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "none", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-low", "handle": "openai/gpt-5.2-codex", "label": "GPT-5.2-Codex", "description": "GPT-5.2 variant (low reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "low", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-medium", "handle": "openai/gpt-5.2-codex", "label": "GPT-5.2-Codex", "description": "GPT-5.2 variant (med reasoning) optimized for coding", "isFeatured": true, "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-high", "handle": "openai/gpt-5.2-codex", "label": "GPT-5.2-Codex", "description": "GPT-5.2 variant (high reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.2-codex-xhigh", "handle": "openai/gpt-5.2-codex", "label": "GPT-5.2-Codex", "description": "GPT-5.2 variant (max reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "xhigh", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-none", "handle": "openai/gpt-5.1", "label": "GPT-5.1", "description": "Legacy GPT-5.1 (no 
"reasoning)", "updateArgs": { "reasoning_effort": "none", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-low", "handle": "openai/gpt-5.1", "label": "GPT-5.1", "description": "Legacy GPT-5.1 (low reasoning)", "updateArgs": { "reasoning_effort": "low", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-medium", "handle": "openai/gpt-5.1", "label": "GPT-5.1", "description": "Legacy GPT-5.1 (med reasoning)", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-high", "handle": "openai/gpt-5.1", "label": "GPT-5.1", "description": "Legacy GPT-5.1 (high reasoning)", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-none", "handle": "openai/gpt-5.1-codex", "label": "GPT-5.1-Codex", "description": "GPT-5.1 variant (no reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "none", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-medium", "handle": "openai/gpt-5.1-codex", "label": "GPT-5.1-Codex", "description": "GPT-5.1 variant (med reasoning) optimized for coding", "isFeatured": true, "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-high", "handle": "openai/gpt-5.1-codex", "label": "GPT-5.1-Codex", "description": "GPT-5.1 variant (high reasoning) optimized for coding", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-medium", "handle": "openai/gpt-5.1-codex-max", "label": "GPT-5.1-Codex-Max", "description": "GPT-5.1-Codex 'Max' variant (med reasoning)", "updateArgs": { "reasoning_effort": "medium", "verbosity": 
"medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-high", "handle": "openai/gpt-5.1-codex-max", "label": "GPT-5.1-Codex-Max", "description": "GPT-5.1-Codex 'Max' variant (high reasoning)", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5.1-codex-max-x-high", "handle": "openai/gpt-5.1-codex-max", "label": "GPT-5.1-Codex-Max", "description": "GPT-5.1-Codex 'Max' variant (extra-high reasoning)", "updateArgs": { "reasoning_effort": "xhigh", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-minimal", "handle": "openai/gpt-5", "label": "GPT-5", "description": "Legacy GPT-5 (minimal reasoning)", "updateArgs": { "reasoning_effort": "minimal", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-low", "handle": "openai/gpt-5", "label": "GPT-5", "description": "Legacy GPT-5 (low reasoning)", "updateArgs": { "reasoning_effort": "low", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-medium", "handle": "openai/gpt-5", "label": "GPT-5", "description": "Legacy GPT-5 (med reasoning)", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-high", "handle": "openai/gpt-5", "label": "GPT-5", "description": "Legacy GPT-5 (high reasoning)", "updateArgs": { "reasoning_effort": "high", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-mini-medium", "handle": "openai/gpt-5-mini-2025-08-07", "label": "GPT-5-Mini", "description": "GPT-5-Mini (medium reasoning)", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "gpt-5-nano-medium", "handle": "openai/gpt-5-nano-2025-08-07", "label": "GPT-5-Nano", 
"description": "GPT-5-Nano (medium reasoning)", "updateArgs": { "reasoning_effort": "medium", "verbosity": "medium", "context_window": 272000, "max_output_tokens": 128000 } }, { "id": "glm-5", "handle": "zai/glm-5", "label": "GLM-5", "description": "zAI's latest coding model", "isFeatured": true, "free": true, "updateArgs": { "context_window": 200000 } }, { "id": "glm-4.7", "handle": "zai/glm-4.7", "label": "GLM-4.7", "description": "zAI's previous coding model (legacy)", "isFeatured": true, "free": true, "updateArgs": { "context_window": 200000 } }, { "id": "minimax-m2.5", "handle": "minimax/MiniMax-M2.5", "label": "MiniMax 2.5", "description": "MiniMax's latest coding model", "isFeatured": true, "free": true, "updateArgs": { "context_window": 160000, "max_output_tokens": 64000 } }, { "id": "minimax-m2.1", "handle": "minimax/MiniMax-M2.1", "label": "MiniMax 2.1", "description": "MiniMax's previous coding model (legacy)", "isFeatured": true, "free": true, "updateArgs": { "context_window": 160000, "max_output_tokens": 64000 } }, { "id": "minimax-m2", "handle": "openrouter/minimax/minimax-m2", "label": "Minimax M2", "description": "Minimax's M2 model (legacy)", "updateArgs": { "context_window": 160000, "max_output_tokens": 64000 } }, { "id": "kimi-k2", "handle": "openrouter/moonshotai/kimi-k2-0905", "label": "Kimi K2", "description": "Kimi's K2 model", "updateArgs": { "context_window": 262144 } }, { "id": "kimi-k2-thinking", "handle": "openrouter/moonshotai/kimi-k2-thinking", "label": "Kimi K2 Thinking", "description": "Kimi's K2 model with advanced thinking capabilities", "updateArgs": { "context_window": 256000, "max_output_tokens": 16000, "temperature": 1.0 } }, { "id": "kimi-k2.5", "handle": "openrouter/moonshotai/kimi-k2.5", "label": "Kimi K2.5", "description": "Kimi's latest coding model", "isFeatured": true, "updateArgs": { "context_window": 262144 } }, { "id": "deepseek-chat-v3.1", "handle": "openrouter/deepseek/deepseek-chat-v3.1", "label": "DeepSeek Chat V3.1", 
"description": "DeepSeek V3.1 model", "updateArgs": { "context_window": 128000 } }, { "id": "gemini-3.1", "handle": "google_ai/gemini-3.1-pro-preview", "label": "Gemini 3.1 Pro", "description": "Google's latest and smartest model", "isFeatured": true, "updateArgs": { "context_window": 180000, "temperature": 1.0 } }, { "id": "gemini-3", "handle": "google_ai/gemini-3-pro-preview", "label": "Gemini 3 Pro", "description": "Google's smartest model", "isFeatured": true, "updateArgs": { "context_window": 180000, "temperature": 1.0 } }, { "id": "gemini-3-flash", "handle": "google_ai/gemini-3-flash-preview", "label": "Gemini 3 Flash", "description": "Google's fastest Gemini 3 model", "isFeatured": true, "updateArgs": { "context_window": 180000, "temperature": 1.0 } }, { "id": "gemini-flash", "handle": "google_ai/gemini-2.5-flash", "label": "Gemini 2.5 Flash", "description": "Google's fastest model", "updateArgs": { "context_window": 180000 } }, { "id": "gemini-pro", "handle": "google_ai/gemini-2.5-pro", "label": "Gemini 2.5 Pro", "description": "Google's last generation flagship model", "updateArgs": { "context_window": 180000 } }, { "id": "gpt-4.1", "handle": "openai/gpt-4.1", "label": "GPT-4.1", "description": "OpenAI's most recent non-reasoner model", "updateArgs": { "context_window": 1047576 } }, { "id": "gpt-4.1-mini", "handle": "openai/gpt-4.1-mini-2025-04-14", "label": "GPT-4.1-Mini", "description": "OpenAI's most recent non-reasoner model (mini version)", "updateArgs": { "context_window": 1047576 } }, { "id": "gpt-4.1-nano", "handle": "openai/gpt-4.1-nano-2025-04-14", "label": "GPT-4.1-Nano", "description": "OpenAI's most recent non-reasoner model (nano version)", "updateArgs": { "context_window": 1047576 } }, { "id": "o4-mini", "handle": "openai/o4-mini", "label": "o4-mini", "description": "OpenAI's latest o-series reasoning model", "updateArgs": { "context_window": 180000 } }, { "id": "gemini-3-vertex", "handle": "google_vertex/gemini-3-pro-preview", "label": 
"Gemini 3 Pro", "description": "Google's smartest Gemini 3 Pro model (via Vertex AI)", "updateArgs": { "context_window": 180000, "temperature": 1.0 } } ]