{
  "handle": "openai/o3",
  "model_settings": {
    "provider_type": "openai",
    "temperature": 1.0,
    "max_output_tokens": 4096,
    "parallel_tool_calls": false,
    "reasoning": {
      "reasoning_effort": "high"
    }
  }
}