//! vtcode_core/llm/rig_adapter.rs

1use crate::config::models::Provider;
2use crate::config::types::ReasoningEffortLevel;
3use anyhow::Result;
4use rig::client::CompletionClient;
5use rig::providers::gemini::completion::gemini_api_types::ThinkingConfig;
6use rig::providers::{anthropic, deepseek, gemini, openai, openrouter, xai};
7use serde_json::{Value, json};
8
9/// Result of validating a provider/model combination through rig-core.
10#[derive(Debug, Clone)]
11pub struct RigValidationSummary {
12    pub provider: Provider,
13    pub model: String,
14}
15
16/// Attempt to construct a rig-core client for the given provider and
17/// instantiate the requested model. This performs a lightweight validation
18/// without issuing a network request, ensuring that downstream calls can
19/// reuse the rig client configuration paths.
20pub fn verify_model_with_rig(
21    provider: Provider,
22    model: &str,
23    api_key: &str,
24) -> Result<RigValidationSummary> {
25    match provider {
26        Provider::Gemini => {
27            let client = gemini::Client::new(api_key);
28            let _ = client.completion_model(model);
29        }
30        Provider::OpenAI => {
31            let client = openai::Client::new(api_key);
32            let _ = client.completion_model(model);
33        }
34        Provider::Anthropic => {
35            let client = anthropic::Client::new(api_key);
36            let _ = client.completion_model(model);
37        }
38        Provider::DeepSeek => {
39            let client = deepseek::Client::new(api_key);
40            let _ = client.completion_model(model);
41        }
42        Provider::OpenRouter => {
43            let client = openrouter::Client::new(api_key);
44            let _ = client.completion_model(model);
45        }
46        Provider::XAI => {
47            let client = xai::Client::new(api_key);
48            let _ = client.completion_model(model);
49        }
50    }
51
52    Ok(RigValidationSummary {
53        provider,
54        model: model.to_string(),
55    })
56}
57
58/// Convert a vtcode reasoning effort level to provider-specific parameters
59/// using rig-core data structures. The resulting JSON payload can be merged
60/// into provider requests when supported.
61pub fn reasoning_parameters_for(provider: Provider, effort: ReasoningEffortLevel) -> Option<Value> {
62    match provider {
63        Provider::OpenAI => {
64            let mut reasoning = openai::responses_api::Reasoning::new();
65            let mapped = match effort {
66                ReasoningEffortLevel::Low => openai::responses_api::ReasoningEffort::Low,
67                ReasoningEffortLevel::Medium => openai::responses_api::ReasoningEffort::Medium,
68                ReasoningEffortLevel::High => openai::responses_api::ReasoningEffort::High,
69            };
70            reasoning = reasoning.with_effort(mapped);
71            serde_json::to_value(reasoning).ok()
72        }
73        Provider::Gemini => {
74            let include_thoughts = matches!(effort, ReasoningEffortLevel::High);
75            let budget = match effort {
76                ReasoningEffortLevel::Low => 64,
77                ReasoningEffortLevel::Medium => 128,
78                ReasoningEffortLevel::High => 256,
79            };
80            let config = ThinkingConfig {
81                thinking_budget: budget,
82                include_thoughts: Some(include_thoughts),
83            };
84            serde_json::to_value(config)
85                .ok()
86                .map(|value| json!({ "thinking_config": value }))
87        }
88        _ => None,
89    }
90}