# Anthropic capability entries keyed by bare model id (e.g. "claude-haiku-...").
# Each [[provider.anthropic]] element is one entry of an ordered TOML
# array-of-tables; entries for the same model_match are listed newest
# version_min first ([4, 7] before [4, 5]) — presumably the loader resolves
# first-match-wins on model_match + version_min; confirm in the consuming code.
# version_min is presumably [major, minor] — TODO confirm.
# NOTE(review): entries carry both `vision = true` and `vision_supported = true`;
# one of the two keys looks legacy — verify which one the loader reads.

# Claude Haiku >= 4.7: adaptive thinking, tool search, deferred tool loading.
[[provider.anthropic]]
model_match = "claude-haiku-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
vision_supported = true

# Claude Opus >= 4.7: as Haiku 4.7 plus interleaved thinking.
[[provider.anthropic]]
model_match = "claude-opus-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
interleaved_thinking_supported = true
vision_supported = true

# Claude Sonnet >= 4.7.
[[provider.anthropic]]
model_match = "claude-sonnet-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
vision_supported = true

# Claude Haiku >= 4.5: thinking is "enabled" rather than "adaptive".
[[provider.anthropic]]
model_match = "claude-haiku-*"
version_min = [4, 5]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true
# Claude Opus >= 4.6 (bare model id): "enabled" thinking + interleaved thinking.
# FIX: this entry declared its structured-output capability under the key
# `json_schema = "tool_use"`, while every other Anthropic entry in this file
# uses `structured_output = "tool_use"` for the same slot. A consumer reading
# `structured_output` would silently see no structured-output support for
# Opus 4.6; renamed the key for consistency with the sibling entries.
[[provider.anthropic]]
model_match = "claude-opus-*"
version_min = [4, 6]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
interleaved_thinking_supported = true
vision_supported = true
# Anthropic baseline 4.x entries (bare model ids). These sit after the
# higher version_min entries for the same patterns, consistent with a
# first-match-wins lookup — confirm in the consuming code.

# Claude Opus >= 4.0 fallback.
[[provider.anthropic]]
model_match = "claude-opus-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true

# Claude Sonnet >= 4.0 fallback.
[[provider.anthropic]]
model_match = "claude-sonnet-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true
# Router-style "anthropic/..."-prefixed ids. These mirror the bare-id
# entries earlier in the file key-for-key; keep the two groups in sync.

# anthropic/ Claude Haiku >= 4.7.
[[provider.anthropic]]
model_match = "anthropic/claude-haiku-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
vision_supported = true

# anthropic/ Claude Opus >= 4.7: adds interleaved thinking.
[[provider.anthropic]]
model_match = "anthropic/claude-opus-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
interleaved_thinking_supported = true
vision_supported = true

# anthropic/ Claude Sonnet >= 4.7.
[[provider.anthropic]]
model_match = "anthropic/claude-sonnet-*"
version_min = [4, 7]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["adaptive"]
vision_supported = true

# anthropic/ Claude Haiku >= 4.5: "enabled" thinking rather than "adaptive".
[[provider.anthropic]]
model_match = "anthropic/claude-haiku-*"
version_min = [4, 5]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true
# anthropic/ Claude Opus >= 4.6: "enabled" thinking + interleaved thinking.
# FIX: this entry declared its structured-output capability under the key
# `json_schema = "tool_use"`, while every other Anthropic entry in this file
# (including this entry's bare-id twin) uses `structured_output = "tool_use"`.
# Renamed the key for consistency so consumers reading `structured_output`
# do not silently see Opus 4.6 as lacking structured output.
[[provider.anthropic]]
model_match = "anthropic/claude-opus-*"
version_min = [4, 6]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
interleaved_thinking_supported = true
vision_supported = true
# anthropic/ Claude Opus >= 4.0 fallback (router-style prefixed id).
[[provider.anthropic]]
model_match = "anthropic/claude-opus-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true

# anthropic/ Claude Sonnet >= 4.0 fallback.
[[provider.anthropic]]
model_match = "anthropic/claude-sonnet-*"
version_min = [4, 0]
native_tools = true
defer_loading = true
tool_search = ["bm25", "regex"]
max_tools = 10000
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true

# Catch-all for any remaining Claude model: no version_min, no tool search /
# deferred loading. Listed last among the Anthropic entries — presumably the
# lowest-priority match; confirm the loader's precedence rule.
[[provider.anthropic]]
model_match = "claude-*"
native_tools = true
prompt_caching = true
vision = true
audio_supported = true
pdf_supported = true
files_api_supported = true
structured_output = "tool_use"
thinking_modes = ["enabled"]
vision_supported = true
# OpenAI entries keyed by bare model id. More specific patterns
# ("gpt-4o*", "gpt-4.1*") are listed before the generic "gpt-*" fallbacks,
# consistent with first-match-wins — confirm in the consuming code.

# GPT-4o family: vision + audio, native structured output.
[[provider.openai]]
model_match = "gpt-4o*"
native_tools = true
vision = true
audio_supported = true
structured_output = "native"
vision_supported = true

# GPT-4.1 family.
[[provider.openai]]
model_match = "gpt-4.1*"
native_tools = true
vision_supported = true
structured_output = "native"

# Generic GPT >= 5.4: adds deferred tool loading and hosted/client tool search.
# version_min is presumably [major, minor] — TODO confirm.
[[provider.openai]]
model_match = "gpt-*"
version_min = [5, 4]
native_tools = true
defer_loading = true
tool_search = ["hosted", "client"]
vision_supported = true
structured_output = "native"

# Generic GPT fallback (no version gate).
[[provider.openai]]
model_match = "gpt-*"
native_tools = true
structured_output = "native"

# o1 reasoning models: effort-based thinking; completion-token parameter required.
[[provider.openai]]
model_match = "o1*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
requires_completion_tokens = true
reasoning_effort_supported = true

# o3 reasoning models.
[[provider.openai]]
model_match = "o3*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
requires_completion_tokens = true
reasoning_effort_supported = true

# o4 reasoning models: as o3 plus vision.
[[provider.openai]]
model_match = "o4*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
vision_supported = true
requires_completion_tokens = true
reasoning_effort_supported = true
# Router-style "openai/..."-prefixed GPT ids; these mirror the bare-id
# entries key-for-key — keep both groups in sync.

# openai/ GPT-4o family.
[[provider.openai]]
model_match = "openai/gpt-4o*"
native_tools = true
vision = true
audio_supported = true
structured_output = "native"
vision_supported = true

# openai/ GPT-4.1 family.
[[provider.openai]]
model_match = "openai/gpt-4.1*"
native_tools = true
vision_supported = true
structured_output = "native"

# openai/ generic GPT >= 5.4: deferred tool loading + hosted/client tool search.
[[provider.openai]]
model_match = "openai/gpt-*"
version_min = [5, 4]
native_tools = true
defer_loading = true
tool_search = ["hosted", "client"]
vision_supported = true
structured_output = "native"

# openai/ generic GPT fallback (no version gate).
[[provider.openai]]
model_match = "openai/gpt-*"
native_tools = true
structured_output = "native"
# Router-style "openai/..."-prefixed o-series reasoning models.
# FIX: these three entries declared structured output under the key
# `json_schema = "native"`, while their bare-id twins ("o1*", "o3*", "o4*")
# use `structured_output = "native"`. Renamed for consistency so consumers
# reading `structured_output` treat the prefixed ids the same as the bare ids.

# openai/ o1 reasoning models: effort thinking; completion-token param required.
[[provider.openai]]
model_match = "openai/o1*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
requires_completion_tokens = true
reasoning_effort_supported = true

# openai/ o3 reasoning models.
[[provider.openai]]
model_match = "openai/o3*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
requires_completion_tokens = true
reasoning_effort_supported = true

# openai/ o4 reasoning models: as o3 plus vision.
[[provider.openai]]
model_match = "openai/o4*"
native_tools = true
structured_output = "native"
thinking_modes = ["effort"]
vision_supported = true
requires_completion_tokens = true
reasoning_effort_supported = true
# Ollama entries. Vision-only model families first, then Qwen3 text models.
# "qwen3.6*" is listed before the broader "qwen3*" so the more specific
# pattern is reached first — presumably first-match-wins; do not reorder.

# LLaVA vision models.
[[provider.ollama]]
model_match = "llava*"
vision_supported = true

# BakLLaVA vision models.
[[provider.ollama]]
model_match = "bakllava*"
vision_supported = true

# Llama 3.2 vision models.
[[provider.ollama]]
model_match = "llama3.2-vision*"
vision_supported = true

# Gemma 3 models.
[[provider.ollama]]
model_match = "gemma3*"
vision_supported = true

# Qwen 3.6: as generic qwen3 below but additionally preserves thinking blocks.
[[provider.ollama]]
model_match = "qwen3.6*"
native_tools = true
structured_output = "format_kw"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "ollama_qwen3coder"
honors_chat_template_kwargs = false
recommended_endpoint = "/api/generate-raw"
text_tool_wire_format_supported = false
thinking_disable_directive = "/no_think"

# Generic Qwen 3 fallback.
[[provider.ollama]]
model_match = "qwen3*"
native_tools = true
structured_output = "format_kw"
thinking_modes = ["enabled"]
server_parser = "ollama_qwen3coder"
honors_chat_template_kwargs = false
recommended_endpoint = "/api/generate-raw"
text_tool_wire_format_supported = false
thinking_disable_directive = "/no_think"
# llama.cpp entries for Qwen3. Patterns use a leading "*" to match ids with
# arbitrary prefixes (e.g. org/repo paths). "*qwen3.6*" precedes the broader
# "*qwen3*" — presumably first-match-wins; do not reorder.

# Qwen 3.6 on llama.cpp: preserves thinking blocks.
[[provider.llamacpp]]
model_match = "*qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Generic Qwen 3 fallback on llama.cpp.
[[provider.llamacpp]]
model_match = "*qwen3*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# "local" provider entries for Qwen3 (OpenAI-compatible local server; see
# [provider_family] at the end of the file). Same capability shape as the
# llamacpp entries. Specific "*qwen3.6*" before generic "*qwen3*".

# Qwen 3.6 locally: preserves thinking blocks.
[[provider.local]]
model_match = "*qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Generic Qwen 3 fallback locally.
[[provider.local]]
model_match = "*qwen3*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# MLX entries for Qwen3: as the local/llamacpp entries plus `vision = true`.
# Specific "*qwen3.6*" before generic "*qwen3*".

# Qwen 3.6 on MLX: preserves thinking blocks.
[[provider.mlx]]
model_match = "*qwen3.6*"
native_tools = true
vision = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Generic Qwen 3 fallback on MLX.
[[provider.mlx]]
model_match = "*qwen3*"
native_tools = true
vision = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# DashScope (Alibaba) Qwen entries. Note the fallback here is the very broad
# "qwen*" (not "qwen3*"), so it covers all Qwen generations on this provider.

# Qwen 3.6 on DashScope: preserves thinking blocks.
[[provider.dashscope]]
model_match = "qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Any other Qwen model on DashScope.
[[provider.dashscope]]
model_match = "qwen*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# Fireworks Qwen entries. Two spellings of the 3.6 version are matched:
# "*qwen3.6*" and "*qwen3p6*" (Fireworks model ids appear to use "p" in
# place of "." — presumably; verify against actual Fireworks model ids).
# Broad "*qwen*" fallback last.

# Qwen 3.6 (dotted id) on Fireworks: preserves thinking blocks.
[[provider.fireworks]]
model_match = "*qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Qwen 3.6 ("p"-separated id) on Fireworks: same capabilities.
[[provider.fireworks]]
model_match = "*qwen3p6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Any other Qwen model on Fireworks.
[[provider.fireworks]]
model_match = "*qwen*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# OpenRouter Qwen entries ("qwen/..." namespaced ids). Unlike the direct
# providers, thinking supports both "enabled" and "effort" modes here, and
# chat_template_kwargs are not honored (OpenRouter proxies the request).

# Qwen 3.6 via OpenRouter: preserves thinking blocks.
[[provider.openrouter]]
model_match = "qwen/qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled", "effort"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = false
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Any other Qwen model via OpenRouter.
[[provider.openrouter]]
model_match = "qwen/*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled", "effort"]
server_parser = "none"
honors_chat_template_kwargs = false
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# Hugging Face Qwen entries ("qwen/..." namespaced ids).
# Specific 3.6 pattern before the "qwen/*" fallback.

# Qwen 3.6 on Hugging Face: preserves thinking blocks.
[[provider.huggingface]]
model_match = "qwen/qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Any other Qwen model on Hugging Face.
[[provider.huggingface]]
model_match = "qwen/*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# Together AI Qwen entries ("qwen/..." namespaced ids).
# Specific 3.6 pattern before the "qwen/*" fallback.

# Qwen 3.6 on Together: preserves thinking blocks.
[[provider.together]]
model_match = "qwen/qwen3.6*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
preserve_thinking = true
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"

# Any other Qwen model on Together.
[[provider.together]]
model_match = "qwen/*"
native_tools = true
structured_output = "native"
thinking_modes = ["enabled"]
server_parser = "none"
honors_chat_template_kwargs = true
recommended_endpoint = "/v1/chat/completions"
text_tool_wire_format_supported = true
thinking_disable_directive = "/no_think"
# DeepSeek-V3 across three providers, then Gemini 2.5 via OpenRouter.
# The DeepSeek entries differ per provider in namespace ("deepseek-ai/" vs
# "deepseek/"), thinking modes, and chat_template_kwargs support.

# DeepSeek-V3 on Together.
[[provider.together]]
model_match = "deepseek-ai/deepseek-v3*"
native_tools = true
thinking_modes = ["enabled"]
prompt_caching = true
structured_output = "native"
server_parser = "none"
honors_chat_template_kwargs = true

# DeepSeek-V3 on Hugging Face.
[[provider.huggingface]]
model_match = "deepseek-ai/deepseek-v3*"
native_tools = true
thinking_modes = ["enabled"]
prompt_caching = true
structured_output = "native"
server_parser = "none"
honors_chat_template_kwargs = true

# DeepSeek-V3 via OpenRouter: adds "effort" thinking; kwargs not honored.
[[provider.openrouter]]
model_match = "deepseek/deepseek-v3*"
native_tools = true
thinking_modes = ["enabled", "effort"]
prompt_caching = true
structured_output = "native"
server_parser = "none"
honors_chat_template_kwargs = false

# Gemini 2.5 via OpenRouter: multimodal (vision/audio/PDF) with caching.
[[provider.openrouter]]
model_match = "google/gemini-2.5*"
native_tools = true
thinking_modes = ["enabled", "effort"]
prompt_caching = true
vision_supported = true
vision = true
audio_supported = true
pdf_supported = true
structured_output = "native"
# Native Gemini API entries: both bare "gemini-*" and the API's
# "models/gemini-*" resource-name form get the same multimodal capabilities.
# NOTE(review): unlike most entries these declare no native_tools /
# structured_output — verify that omission is intentional.
[[provider.gemini]]
model_match = "gemini-*"
vision_supported = true
audio_supported = true
pdf_supported = true
files_api_supported = true

# Same capabilities for the "models/..." resource-name spelling.
[[provider.gemini]]
model_match = "models/gemini-*"
vision_supported = true
audio_supported = true
pdf_supported = true
files_api_supported = true

# Gemma 4 via OpenRouter: tools + native structured output only.
[[provider.openrouter]]
model_match = "google/gemma-4*"
native_tools = true
structured_output = "native"

# Moonshot models on Together.
[[provider.together]]
model_match = "moonshotai/*"
native_tools = true
structured_output = "native"
# Cloud-hosted providers. recommended_endpoint values contain placeholders
# in braces ({model}, {deployment}, {project}, ...) — presumably substituted
# by the consuming code before the request is made; TOML itself performs no
# interpolation.

# AWS Bedrock: all models routed through the Converse API.
[[provider.bedrock]]
model_match = "*"
native_tools = true
recommended_endpoint = "/model/{model}/converse"
text_tool_wire_format_supported = true

# Azure OpenAI deployments: GPT and o-series all use the same
# deployment-scoped chat-completions endpoint.
[[provider.azure_openai]]
model_match = "gpt-*"
native_tools = true
recommended_endpoint = "/openai/deployments/{deployment}/chat/completions"
text_tool_wire_format_supported = true

[[provider.azure_openai]]
model_match = "o1*"
native_tools = true
recommended_endpoint = "/openai/deployments/{deployment}/chat/completions"
text_tool_wire_format_supported = true

[[provider.azure_openai]]
model_match = "o3*"
native_tools = true
recommended_endpoint = "/openai/deployments/{deployment}/chat/completions"
text_tool_wire_format_supported = true

[[provider.azure_openai]]
model_match = "o4*"
native_tools = true
recommended_endpoint = "/openai/deployments/{deployment}/chat/completions"
text_tool_wire_format_supported = true

# Google Vertex AI: Gemini via the generateContent publisher endpoint.
[[provider.vertex]]
model_match = "gemini-*"
native_tools = true
recommended_endpoint = "/projects/{project}/locations/{location}/publishers/google/models/{model}:generateContent"
text_tool_wire_format_supported = true
# Maps provider names to the API "family" whose wire format they share.
# Every value here is "openai", i.e. these providers expose an
# OpenAI-compatible chat-completions API — presumably used by the consumer
# to pick a request/response adapter; confirm in the consuming code.
# Providers absent from this table (anthropic, gemini, bedrock, vertex,
# azure_openai) presumably use their own native adapters.
[provider_family]
openrouter = "openai"
together = "openai"
groq = "openai"
deepseek = "openai"
fireworks = "openai"
huggingface = "openai"
local = "openai"
dashscope = "openai"
llamacpp = "openai"
mlx = "openai"