# Anthropic model capability records. Each [[provider.anthropic]] appends
# one entry to the array; records are listed most-specific glob first with
# a bare "claude-*" catch-all last — presumably the consumer takes the
# first matching model_match (NOTE(review): confirm matcher is first-wins).
# Both bare ids ("claude-opus-*") and provider-prefixed ids
# ("anthropic/claude-opus-*") are covered with identical capabilities.
[[provider.anthropic]]
model_match = "claude-haiku-*"
# Minimum model version as [major, minor]; haiku needs 4.5, opus/sonnet 4.0.
version_min = [4, 5]
native_tools = true
defer_loading = true
# Search backends usable for discovering deferred-loaded tools.
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
[[provider.anthropic]]
model_match = "claude-opus-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
[[provider.anthropic]]
model_match = "claude-sonnet-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
# Provider-prefixed variants of the three entries above.
[[provider.anthropic]]
model_match = "anthropic/claude-haiku-*"
version_min = [4, 5]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
[[provider.anthropic]]
model_match = "anthropic/claude-opus-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
[[provider.anthropic]]
model_match = "anthropic/claude-sonnet-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
thinking = true
# Catch-all for any other claude model: native tools, prompt caching, and
# thinking only — no version gate, deferred loading, or tool search.
[[provider.anthropic]]
model_match = "claude-*"
native_tools = true
prompt_caching = true
thinking = true
# OpenAI model capability records. "gpt-*" appears twice on purpose: the
# first entry adds deferred tool loading with hosted/client tool search,
# gated on version 5.4+; the second is the ungated fallback with native
# tool calling only. The same pair is repeated for "openai/"-prefixed ids.
[[provider.openai]]
model_match = "gpt-*"
version_min = [5, 4]
native_tools = true
defer_loading = true
tool_search = ["hosted", "client"]
# Fallback for gpt models below the 5.4 floor.
[[provider.openai]]
model_match = "gpt-*"
native_tools = true
# o-series reasoning models: native tool calling only.
[[provider.openai]]
model_match = "o1*"
native_tools = true
[[provider.openai]]
model_match = "o3*"
native_tools = true
[[provider.openai]]
model_match = "o4*"
native_tools = true
# Provider-prefixed variants of the gated/fallback pair above.
[[provider.openai]]
model_match = "openai/gpt-*"
version_min = [5, 4]
native_tools = true
defer_loading = true
tool_search = ["hosted", "client"]
[[provider.openai]]
model_match = "openai/gpt-*"
native_tools = true
# Ollama-served Qwen models. Both records use the server-side
# "ollama_qwen3coder" parser, mark chat_template_kwargs as not honored,
# steer callers to the raw generate endpoint, and flag the text tool wire
# format as unsupported. The qwen3.6 entry additionally sets
# preserve_thinking; the broader qwen3* fallback deliberately does not.
[[provider.ollama]]
model_match = "qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "ollama_qwen3coder"
honors_chat_template_kwargs = false
recommended_endpoint = "/api/generate-raw"
text_tool_wire_format_supported = false
[[provider.ollama]]
model_match = "qwen3*"
native_tools = true
thinking = true
server_parser = "ollama_qwen3coder"
honors_chat_template_kwargs = false
recommended_endpoint = "/api/generate-raw"
text_tool_wire_format_supported = false
# llama.cpp, generic local, and MLX servers share one qwen profile: no
# server-side parser, chat_template_kwargs honored, OpenAI-style
# /v1/chat/completions endpoint, text tool wire format supported. For each
# provider the "*qwen3.6*" record adds preserve_thinking and the broader
# "*qwen3*" record is the fallback without it (matching the ollama split).
[[provider.llamacpp]]
model_match = "*qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.llamacpp]]
model_match = "*qwen3*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.local]]
model_match = "*qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.local]]
model_match = "*qwen3*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.mlx]]
model_match = "*qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.mlx]]
model_match = "*qwen3*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
# DashScope and Fireworks qwen records, same local-server profile as
# above (no parser, kwargs honored, chat-completions endpoint, text tool
# wire format supported). Note DashScope's fallback glob is "qwen*"
# (prefix match) while Fireworks uses "*qwen*" (substring match) —
# presumably reflecting each provider's model-id naming; TODO confirm.
[[provider.dashscope]]
model_match = "qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.dashscope]]
model_match = "qwen*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.fireworks]]
model_match = "*qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
# "qwen3p6" — presumably Fireworks spells the 3.6 version with "p" in its
# model ids; kept identical to the "*qwen3.6*" entry. TODO confirm.
[[provider.fireworks]]
model_match = "*qwen3p6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.fireworks]]
model_match = "*qwen*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
# Aggregator-hosted qwen records ("qwen/" namespaced ids) for OpenRouter,
# Hugging Face, and Together. Same specific/fallback split as elsewhere:
# "qwen/qwen3.6*" adds preserve_thinking, "qwen/*" is the fallback.
# OpenRouter alone sets honors_chat_template_kwargs = false — presumably
# its proxy drops template kwargs; the direct-ish hosts honor them.
[[provider.openrouter]]
model_match = "qwen/qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = false
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.openrouter]]
model_match = "qwen/*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = false
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.huggingface]]
model_match = "qwen/qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.huggingface]]
model_match = "qwen/*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.together]]
model_match = "qwen/qwen3.6*"
native_tools = true
thinking = true
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
[[provider.together]]
model_match = "qwen/*"
native_tools = true
thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
# Non-qwen records on the aggregators. DeepSeek v3 gets native tools,
# thinking, and prompt caching on Together, Hugging Face, and OpenRouter
# (note the namespace differs: "deepseek-ai/" vs OpenRouter's "deepseek/").
[[provider.together]]
model_match = "deepseek-ai/deepseek-v3*"
native_tools = true
thinking = true
prompt_caching = true
server_parser = "none"
honors_chat_template_kwargs = true
[[provider.huggingface]]
model_match = "deepseek-ai/deepseek-v3*"
native_tools = true
thinking = true
prompt_caching = true
server_parser = "none"
honors_chat_template_kwargs = true
[[provider.openrouter]]
model_match = "deepseek/deepseek-v3*"
native_tools = true
thinking = true
prompt_caching = true
server_parser = "none"
honors_chat_template_kwargs = false
# Google models via OpenRouter: Gemini 2.5 with thinking and caching,
# Gemma 4 with native tools only.
[[provider.openrouter]]
model_match = "google/gemini-2.5*"
native_tools = true
thinking = true
prompt_caching = true
[[provider.openrouter]]
model_match = "google/gemma-4*"
native_tools = true
# Moonshot models via Together: native tool calling only.
[[provider.together]]
model_match = "moonshotai/*"
native_tools = true
# Maps each provider name to the wire-protocol family used to talk to it.
# Every provider listed here speaks an OpenAI-compatible API; providers
# absent from this table (e.g. anthropic, ollama) presumably use their
# own native protocol — TODO confirm the consumer's default.
[provider_family]
openrouter = "openai"
together = "openai"
groq = "openai"
deepseek = "openai"
fireworks = "openai"
huggingface = "openai"
local = "openai"
dashscope = "openai"
llamacpp = "openai"
mlx = "openai"