{
  "handle": "lmstudio_openai/qwen3-4b",
  "model_settings": {
    "provider_type": "openai",
    "temperature": 0.7,
    "max_output_tokens": 4096,
    "parallel_tool_calls": true
  }
}