{
  "handle": "vllm/Qwen/Qwen3-32B-AWQ",
  "model_settings": {
    "provider_type": "openai",
    "temperature": 0.7,
    "max_output_tokens": 4096,
    "parallel_tool_calls": true
  }
}