vtcode_core/llm/mod.rs
//! # LLM Integration Layer
//!
//! This module provides a unified, modular interface for integrating multiple LLM providers
//! with VTCode, supporting Gemini, OpenAI, Anthropic, xAI, DeepSeek, and Z.AI.
//!
//! ## Architecture Overview
//!
//! The LLM layer is designed around several key principles:
//!
//! - **Unified Interface**: A single `AnyClient` trait for all providers
//! - **Provider Agnostic**: Easy switching between providers (see the sketch below)
//! - **Configuration Driven**: TOML-based provider configuration
//! - **Error Handling**: Comprehensive error types and recovery
//! - **Async Support**: Full async/await support for all operations
//!
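//! For example, switching between two configured providers only changes the name
//! passed to `make_client`. This is a minimal sketch that reuses the calls shown in
//! the examples below; the `"openai"` key string is an assumption mirroring the
//! `"gemini"` key used there:
//!
//! ```rust,no_run
//! use vtcode_core::llm::make_client;
//! use vtcode_core::utils::dot_config::ProviderConfigs;
//!
//! fn pick_client(
//!     providers: &ProviderConfigs,
//!     prefer_openai: bool,
//! ) -> Result<(), Box<dyn std::error::Error>> {
//!     // The same call site serves every provider; only the name string differs.
//!     let provider_name = if prefer_openai { "openai" } else { "gemini" };
//!     let _client = make_client(providers, provider_name)?;
//!     Ok(())
//! }
//! ```
//!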
//! ## Supported Providers
//!
//! | Provider | Status | Models |
//! |----------|--------|---------|
//! | Gemini | ✓ | gemini-2.5-pro, gemini-2.5-flash-preview-05-20 |
//! | OpenAI | ✓ | gpt-5, gpt-4.1, gpt-5-mini |
//! | Anthropic | ✓ | claude-4.1-opus, claude-4-sonnet |
//! | xAI | ✓ | grok-2-latest, grok-2-mini |
//! | DeepSeek | ✓ | deepseek-chat, deepseek-reasoner |
//! | Z.AI | ✓ | glm-4.6 |
//!
//!
//! ## Basic Usage
//!
//! ```rust,no_run
//! use vtcode_core::llm::{AnyClient, make_client};
//! use vtcode_core::utils::dot_config::ProviderConfigs;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Configure providers
//!     let providers = ProviderConfigs {
//!         gemini: Some(vtcode_core::utils::dot_config::ProviderConfig {
//!             api_key: std::env::var("GEMINI_API_KEY")?,
//!             model: "gemini-2.5-flash".to_string(),
//!             ..Default::default()
//!         }),
//!         ..Default::default()
//!     };
//!
//!     // Create client
//!     let client = make_client(&providers, "gemini")?;
//!
//!     // Make a request
//!     let messages = vec![
//!         vtcode_core::llm::types::Message {
//!             role: "user".to_string(),
//!             content: "Hello, how can you help me with coding?".to_string(),
//!         }
//!     ];
//!
//!     let response = client.chat(&messages, None).await?;
//!     println!("Response: {}", response.content);
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Provider Configuration
//!
//! ```rust,no_run
//! use vtcode_core::utils::dot_config::{ProviderConfigs, ProviderConfig};
//!
//! let config = ProviderConfigs {
//!     gemini: Some(ProviderConfig {
//!         api_key: "your-api-key".to_string(),
//!         model: "gemini-2.5-flash".to_string(),
//!         temperature: Some(0.7),
//!         max_tokens: Some(4096),
//!         ..Default::default()
//!     }),
//!     openai: Some(ProviderConfig {
//!         api_key: "your-openai-key".to_string(),
//!         model: "gpt-5".to_string(),
//!         temperature: Some(0.3),
//!         max_tokens: Some(8192),
//!         ..Default::default()
//!     }),
//!     ..Default::default()
//! };
//! ```
//!
//! ## Advanced Features
//!
//! ### Streaming Responses
//! ```rust,no_run
//! use vtcode_core::llm::{AnyClient, make_client};
//! use futures::StreamExt;
//!
//! // `providers` and `messages` are built as in the basic usage example above.
//! let client = make_client(&providers, "gemini")?;
//!
//! let mut stream = client.chat_stream(&messages, None).await?;
//! while let Some(chunk) = stream.next().await {
//!     match chunk {
//!         Ok(response) => print!("{}", response.content),
//!         Err(e) => eprintln!("Error: {}", e),
//!     }
//! }
//! ```
//!
//! ### Function Calling
//! ```rust,no_run
//! use vtcode_core::llm::types::{FunctionDeclaration, FunctionCall};
//!
//! let functions = vec![
//!     FunctionDeclaration {
//!         name: "read_file".to_string(),
//!         description: "Read a file from the filesystem".to_string(),
//!         parameters: serde_json::json!({
//!             "type": "object",
//!             "properties": {
//!                 "path": {"type": "string", "description": "File path to read"}
//!             },
//!             "required": ["path"]
//!         }),
//!     }
//! ];
//!
//! // `client` and `messages` are built as in the basic usage example above.
//! let response = client.chat_with_functions(&messages, &functions, None).await?;
//!
//! if let Some(function_call) = response.function_call {
//!     match function_call.name.as_str() {
//!         "read_file" => {
//!             // Handle function call
//!         }
//!         _ => {}
//!     }
//! }
//! ```
//!
//! ## Error Handling
//!
//! The LLM layer provides comprehensive error handling:
//!
//! ```rust,no_run
//! use vtcode_core::llm::LLMError;
//!
//! // `client` and `messages` are built as in the basic usage example above.
//! match client.chat(&messages, None).await {
//!     Ok(response) => println!("Success: {}", response.content),
//!     Err(LLMError::Authentication) => eprintln!("Authentication failed"),
//!     Err(LLMError::RateLimit) => eprintln!("Rate limit exceeded"),
//!     Err(LLMError::Network(e)) => eprintln!("Network error: {}", e),
//!     Err(LLMError::Provider(e)) => eprintln!("Provider error: {}", e),
//!     Err(e) => eprintln!("Other error: {}", e),
//! }
//! ```
//!
//! ## Performance Considerations
//!
//! - **Connection Pooling**: Efficient connection reuse
//! - **Request Batching**: Where supported by providers
//! - **Caching**: Built-in prompt caching for repeated requests
//! - **Timeout Handling**: Configurable timeouts and retries (see the sketch below)
//! - **Rate Limiting**: Automatic rate limit handling
//!
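//! As a rough illustration of the timeout/retry point above, and not the built-in
//! mechanism itself, a caller can wrap `chat` in `tokio::time::timeout` with a small
//! retry loop; the 30-second budget, three attempts, and backoff values are arbitrary:
//!
//! ```rust,no_run
//! use std::time::Duration;
//! use tokio::time::{sleep, timeout};
//!
//! // `client` and `messages` are built as in the basic usage example above.
//! let mut last_error = None;
//! for attempt in 1..=3u64 {
//!     match timeout(Duration::from_secs(30), client.chat(&messages, None)).await {
//!         Ok(Ok(response)) => {
//!             println!("{}", response.content);
//!             last_error = None;
//!             break;
//!         }
//!         Ok(Err(e)) => last_error = Some(format!("provider error: {}", e)),
//!         Err(_) => last_error = Some("request timed out".to_string()),
//!     }
//!     if attempt < 3 {
//!         // Back off briefly before the next attempt.
//!         sleep(Duration::from_millis(500 * attempt)).await;
//!     }
//! }
//! if let Some(error) = last_error {
//!     eprintln!("All attempts failed: {}", error);
//! }
//! ```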
// `AnyClient` trait and `make_client` constructor (re-exported below).
pub mod client;
// Error display formatting helpers.
pub mod error_display;
// Provider factory (`create_provider_with_config`, `get_factory`).
pub mod factory;
// Provider abstractions and streaming types (`LLMStream`, `LLMStreamEvent`).
pub mod provider;
// Concrete provider implementations (Gemini, OpenAI, Anthropic, xAI, Z.AI).
pub mod providers;
// Adapter for the `rig` integration.
pub mod rig_adapter;
// Shared types (`BackendKind`, `LLMError`, `LLMResponse`).
pub mod types;

#[cfg(test)]
mod error_display_test;

// Re-export main types for backward compatibility
pub use client::{AnyClient, make_client};
pub use factory::{create_provider_with_config, get_factory};
pub use provider::{LLMStream, LLMStreamEvent};
pub use providers::{AnthropicProvider, GeminiProvider, OpenAIProvider, XAIProvider, ZAIProvider};
pub use types::{BackendKind, LLMError, LLMResponse};