use serde_json::json;
use std::sync::Arc;
use vtcode_core::config::constants::models;
use vtcode_core::config::types::VerbosityLevel;
use vtcode_core::llm::{
provider::{LLMProvider, LLMRequest, Message, ToolCall, ToolChoice, ToolDefinition},
providers::{
AnthropicProvider, GeminiProvider, LmStudioProvider, OllamaProvider, OpenAIProvider,
OpenRouterProvider,
},
};
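// Tool-call request formatting and validation tests across the supported
// LLM providers.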
/// Builds an `LLMRequest` with every optional field cleared so each test
/// only spells out the fields it actually exercises. The defaults mirror
/// the values shared by all requests in this file.
fn base_request(model: String) -> LLMRequest {
    LLMRequest {
        messages: Vec::new(),
        system_prompt: None,
        tools: None,
        model,
        max_tokens: None,
        temperature: None,
        stream: false,
        tool_choice: None,
        parallel_tool_calls: None,
        parallel_tool_config: None,
        reasoning_effort: None,
        effort: None,
        output_format: None,
        verbosity: Some(VerbosityLevel::default()),
        do_sample: None,
        top_p: None,
        top_k: None,
        presence_penalty: None,
        frequency_penalty: None,
        stop_sequences: None,
        thinking_budget: None,
        betas: None,
        context_management: None,
        prefill: None,
        character_reinforcement: false,
        character_name: None,
        coding_agent_settings: None,
        metadata: None,
        prompt_cache_key: None,
        service_tier: None,
        previous_response_id: None,
        response_store: None,
        responses_include: None,
    }
}

/// A complete tool-call round trip (user prompt, assistant tool call, tool
/// response) should pass OpenAI request validation.
#[test]
fn test_openai_tool_call_format() {
    let provider = OpenAIProvider::new("test_key".to_string());
    // A single tool described by a JSON Schema parameter object.
    let tool = ToolDefinition::function(
        "get_weather".to_string(),
        "Get weather for a location".to_string(),
        json!({
            "type": "object",
            "properties": {
                "location": {"type": "string"}
            },
            "required": ["location"]
        }),
    );
    // The assistant invokes the tool, then the tool result is fed back.
    let assistant_msg = Message::assistant_with_tools(
        "I'll get the weather for you.".to_string(),
        vec![ToolCall::function(
            "call_123".to_string(),
            "get_weather".to_string(),
            json!({"location": "New York"}).to_string(),
        )],
    );
    let tool_msg = Message::tool_response("call_123".to_string(), "Sunny, 72°F".to_string());
    let request = LLMRequest {
        messages: vec![
            Message::user("What's the weather in New York?".to_string()),
            assistant_msg,
            tool_msg,
        ],
        system_prompt: Some(Arc::new("You are a helpful assistant.".to_string())),
        tools: Some(Arc::new(vec![tool])),
        temperature: Some(0.7),
        ..base_request(models::GPT_OSS_20B.to_string())
    };
    assert!(provider.validate_request(&request).is_ok());
}
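/// The same round trip should validate against Anthropic; the tool-call ID
/// here follows Anthropic's `toolu_` naming style.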
#[test]
fn test_anthropic_tool_call_format() {
let provider = AnthropicProvider::new("test_key".to_string());
let tool = ToolDefinition::function(
"get_weather".to_string(),
"Get weather for a location".to_string(),
json!({
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}),
);
let assistant_msg = Message::assistant_with_tools(
"I'll get the weather for you.".to_string(),
vec![ToolCall::function(
"toolu_123".to_string(),
"get_weather".to_string(),
json!({"location": "New York"}).to_string(),
)],
);
let tool_msg = Message::tool_response("toolu_123".to_string(), "Sunny, 72°F".to_string());
    let request = LLMRequest {
        messages: vec![
            Message::user("What's the weather in New York?".to_string()),
            assistant_msg,
            tool_msg,
        ],
        system_prompt: Some(Arc::new("You are a helpful assistant.".to_string())),
        tools: Some(Arc::new(vec![tool])),
        temperature: Some(0.7),
        ..base_request(models::CLAUDE_SONNET_4_6.to_string())
    };
assert!(provider.validate_request(&request).is_ok());
}
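/// The same round trip should validate against Gemini.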
#[test]
fn test_gemini_tool_call_format() {
let provider = GeminiProvider::new("test_key".to_string());
let tool = ToolDefinition::function(
"get_weather".to_string(),
"Get weather for a location".to_string(),
json!({
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}),
);
let assistant_msg = Message::assistant_with_tools(
"I'll get the weather for you.".to_string(),
vec![ToolCall::function(
"func_123".to_string(),
"get_weather".to_string(),
json!({"location": "New York"}).to_string(),
)],
);
let tool_msg = Message::tool_response("func_123".to_string(), "Sunny, 72°F".to_string());
    let request = LLMRequest {
        messages: vec![
            Message::user("What's the weather in New York?".to_string()),
            assistant_msg,
            tool_msg,
        ],
        system_prompt: Some(Arc::new("You are a helpful assistant.".to_string())),
        tools: Some(Arc::new(vec![tool])),
        temperature: Some(0.7),
        ..base_request("gemini-3-flash-preview".to_string())
    };
assert!(provider.validate_request(&request).is_ok());
}
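/// Every provider backend should accept a minimal tool-bearing request.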
#[test]
fn test_all_providers_tool_validation() {
let gemini = GeminiProvider::new("test_key".to_string());
let openai = OpenAIProvider::new("test_key".to_string());
let anthropic = AnthropicProvider::new("test_key".to_string());
let openrouter = OpenRouterProvider::new("test_key".to_string());
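    // Local providers are built from config with no overrides.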
let ollama = OllamaProvider::from_config(None, None, None, None, None, None, None);
let lmstudio = LmStudioProvider::from_config(None, None, None, None, None, None, None);
let tool = ToolDefinition::function(
"test_tool".to_string(),
"A test tool".to_string(),
json!({"type": "object"}),
);
    let gemini_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool.clone()])),
        temperature: Some(0.7),
        ..base_request("gemini-3-flash-preview".to_string())
    };
    let openai_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool.clone()])),
        tool_choice: Some(ToolChoice::auto()),
        ..base_request(models::GPT_OSS_20B.to_string())
    };
    let anthropic_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool.clone()])),
        ..base_request(models::CLAUDE_SONNET_4_6.to_string())
    };
    let openrouter_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool.clone()])),
        ..base_request(models::OPENROUTER_QWEN3_CODER.to_string())
    };
assert!(gemini.validate_request(&gemini_request).is_ok());
assert!(openai.validate_request(&openai_request).is_ok());
assert!(anthropic.validate_request(&anthropic_request).is_ok());
assert!(openrouter.validate_request(&openrouter_request).is_ok());
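    // LM Studio additionally exercises streaming with an explicit tool choice.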
    let lmstudio_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool.clone()])),
        temperature: Some(0.1),
        stream: true,
        tool_choice: Some(ToolChoice::auto()),
        ..base_request(models::lmstudio::DEFAULT_MODEL.to_string())
    };
assert!(lmstudio.validate_request(&lmstudio_request).is_ok());
    let ollama_request = LLMRequest {
        messages: vec![Message::user("test".to_string())],
        tools: Some(Arc::new(vec![tool])),
        ..base_request(models::ollama::DEFAULT_MODEL.to_string())
    };
assert!(
ollama.validate_request(&ollama_request).is_ok(),
"Ollama should accept tool-bearing requests"
);
}
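/// The round trip should also validate against OpenRouter.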
#[test]
fn test_openrouter_tool_call_format() {
let provider = OpenRouterProvider::new("test_key".to_string());
let tool = ToolDefinition::function(
"get_weather".to_string(),
"Get weather for a location".to_string(),
json!({
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}),
);
let assistant_msg = Message::assistant_with_tools(
"I'll get the weather for you.".to_string(),
vec![ToolCall::function(
"call_456".to_string(),
"get_weather".to_string(),
json!({"location": "Paris"}).to_string(),
)],
);
let tool_msg = Message::tool_response("call_456".to_string(), "Cloudy, 68°F".to_string());
    let request = LLMRequest {
        messages: vec![
            Message::user("What's the weather in Paris?".to_string()),
            assistant_msg,
            tool_msg,
        ],
        system_prompt: Some(Arc::new("You are a helpful assistant.".to_string())),
        tools: Some(Arc::new(vec![tool])),
        temperature: Some(0.7),
        ..base_request(models::OPENROUTER_QWEN3_CODER.to_string())
    };
assert!(provider.validate_request(&request).is_ok());
}
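/// Cross-checks each provider's advertised tool support against the model
/// lists in `models`, including models with tool calling disabled.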
#[test]
fn test_provider_tool_support_matrix() {
let gemini = GeminiProvider::new("test_key".to_string());
let openai = OpenAIProvider::new("test_key".to_string());
let anthropic = AnthropicProvider::new("test_key".to_string());
let openrouter = OpenRouterProvider::new("test_key".to_string());
let ollama = OllamaProvider::from_config(None, None, None, None, None, None, None);
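    // Every supported model should advertise tool calling unless it is
    // explicitly listed as tool-unavailable.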
for &model in models::google::SUPPORTED_MODELS {
assert!(
gemini.supports_tools(model),
"Gemini should advertise tool calling for {}",
model
);
}
for &model in models::openai::SUPPORTED_MODELS {
let supports = openai.supports_tools(model);
if models::openai::TOOL_UNAVAILABLE_MODELS.contains(&model) {
assert!(
!supports,
"OpenAI should disable tool calling for {}",
model
);
} else {
assert!(
supports,
"OpenAI should advertise tool calling for {}",
model
);
}
}
for &model in models::anthropic::SUPPORTED_MODELS {
assert!(
anthropic.supports_tools(model),
"Anthropic should advertise tool calling for {}",
model
);
}
for &model in models::openrouter::SUPPORTED_MODELS {
let supports = openrouter.supports_tools(model);
if models::openrouter::TOOL_UNAVAILABLE_MODELS.contains(&model) {
assert!(
!supports,
"OpenRouter should disable tool calling for {}",
model
);
} else {
assert!(
supports,
"OpenRouter should advertise tool calling for {}",
model
);
}
}
for &model in models::ollama::SUPPORTED_MODELS {
assert!(
ollama.supports_tools(model),
"Ollama should advertise tool calling for {}",
model
);
}
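    // Tool-unavailable models must report no support, regardless of whether
    // they also appear in the SUPPORTED_MODELS lists above.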
for &model in models::openai::TOOL_UNAVAILABLE_MODELS {
assert!(
!openai.supports_tools(model),
"OpenAI should disable tool calling for {}",
model
);
}
for &model in models::openrouter::TOOL_UNAVAILABLE_MODELS {
assert!(
!openrouter.supports_tools(model),
"OpenRouter should disable tool calling for {}",
model
);
}
}