//! vtcode_core/llm/rig_adapter.rs
//!
//! Adapter utilities bridging vtcode provider configuration to rig-core clients.

1use crate::config::models::Provider;
2use crate::config::types::ReasoningEffortLevel;
3use anyhow::Result;
4use rig::client::CompletionClient;
5use rig::providers::gemini::completion::gemini_api_types::ThinkingConfig;
6use rig::providers::{anthropic, deepseek, gemini, openai, openrouter, xai};
7use serde_json::{Value, json};
8
/// Result of validating a provider/model combination through rig-core.
///
/// Returned by [`verify_model_with_rig`] once a provider client has been
/// constructed (or intentionally skipped) for the requested model.
#[derive(Debug, Clone)]
pub struct RigValidationSummary {
    /// Provider whose rig client configuration was exercised.
    pub provider: Provider,
    /// Model identifier that was passed through validation, echoed back.
    pub model: String,
}
15
16/// Attempt to construct a rig-core client for the given provider and
17/// instantiate the requested model. This performs a lightweight validation
18/// without issuing a network request, ensuring that downstream calls can
19/// reuse the rig client configuration paths.
20pub fn verify_model_with_rig(
21    provider: Provider,
22    model: &str,
23    api_key: &str,
24) -> Result<RigValidationSummary> {
25    match provider {
26        Provider::Gemini => {
27            let client = gemini::Client::new(api_key);
28            let _ = client.completion_model(model);
29        }
30        Provider::OpenAI => {
31            let client = openai::Client::new(api_key);
32            let _ = client.completion_model(model);
33        }
34        Provider::Anthropic => {
35            let client = anthropic::Client::new(api_key);
36            let _ = client.completion_model(model);
37        }
38        Provider::DeepSeek => {
39            let client = deepseek::Client::new(api_key);
40            let _ = client.completion_model(model);
41        }
42        Provider::OpenRouter => {
43            let client = openrouter::Client::new(api_key);
44            let _ = client.completion_model(model);
45        }
46        Provider::XAI => {
47            let client = xai::Client::new(api_key);
48            let _ = client.completion_model(model);
49        }
50        Provider::Moonshot => {
51            // Moonshot does not have a rig client integration yet.
52        }
53        Provider::ZAI => {
54            // The rig crate does not yet expose a dedicated Z.AI client.
55            // Skip instantiation while still marking the provider as verified.
56        }
57    }
58
59    Ok(RigValidationSummary {
60        provider,
61        model: model.to_string(),
62    })
63}
64
65/// Convert a vtcode reasoning effort level to provider-specific parameters
66/// using rig-core data structures. The resulting JSON payload can be merged
67/// into provider requests when supported.
68pub fn reasoning_parameters_for(provider: Provider, effort: ReasoningEffortLevel) -> Option<Value> {
69    match provider {
70        Provider::OpenAI => {
71            let mut reasoning = openai::responses_api::Reasoning::new();
72            let mapped = match effort {
73                ReasoningEffortLevel::Low => openai::responses_api::ReasoningEffort::Low,
74                ReasoningEffortLevel::Medium => openai::responses_api::ReasoningEffort::Medium,
75                ReasoningEffortLevel::High => openai::responses_api::ReasoningEffort::High,
76            };
77            reasoning = reasoning.with_effort(mapped);
78            serde_json::to_value(reasoning).ok()
79        }
80        Provider::Gemini => {
81            let include_thoughts = matches!(effort, ReasoningEffortLevel::High);
82            let budget = match effort {
83                ReasoningEffortLevel::Low => 64,
84                ReasoningEffortLevel::Medium => 128,
85                ReasoningEffortLevel::High => 256,
86            };
87            let config = ThinkingConfig {
88                thinking_budget: budget,
89                include_thoughts: Some(include_thoughts),
90            };
91            serde_json::to_value(config)
92                .ok()
93                .map(|value| json!({ "thinking_config": value }))
94        }
95        Provider::ZAI => None,
96        _ => None,
97    }
98}