//! # ai_client
//!
//! A Rust crate for interacting with AI language model APIs, supporting multiple providers
//! (Grok, Anthropic, OpenAI) through a unified `ChatCompletionClient` trait.
//!
//! ## Features
//! - Unified interface for chat completions across different LLM providers
//! - Response caching via an LRU cache
//! - Exponential backoff when retrying failed requests (see the sketch below)
//! - Metrics tracking for requests, successes, errors, and cache hits
//! - Environment-based configuration
//! - Robust error handling
//!
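//! As a rough illustration of the backoff strategy, the retry loop behaves like the
//! sketch below (a hypothetical helper, not the crate's actual API: it retries every
//! error and doubles the delay on each attempt, starting from an assumed 100 ms):
//!
//! ```rust,ignore
//! use std::time::Duration;
//!
//! /// Hypothetical helper: retries `op` up to `max_retries` times, doubling the
//! /// delay between attempts (100 ms, 200 ms, 400 ms, ...).
//! async fn retry_with_backoff<T, E, F, Fut>(mut op: F, max_retries: u32) -> Result<T, E>
//! where
//!     F: FnMut() -> Fut,
//!     Fut: std::future::Future<Output = Result<T, E>>,
//! {
//!     let mut delay = Duration::from_millis(100);
//!     for _ in 0..max_retries {
//!         match op().await {
//!             Ok(value) => return Ok(value),
//!             Err(_) => {
//!                 tokio::time::sleep(delay).await;
//!                 delay *= 2; // exponential growth of the wait time
//!             }
//!         }
//!     }
//!     op().await // final attempt; its error propagates to the caller
//! }
//! ```
//!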
//! ## Usage
//!
//! ```rust,no_run
//! use ai_client::clients::{ChatCompletionClient, GrokClient};
//! use ai_client::entities::Message;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = GrokClient::new()?;
//!     let messages = vec![
//!         Message {
//!             role: "system".to_string(),
//!             content: "You are a helpful assistant.".to_string(),
//!         },
//!         Message {
//!             role: "user".to_string(),
//!             content: "What is 101*3?".to_string(),
//!         },
//!     ];
//!     let response = client.send_chat_completion(messages, "low").await?;
//!     println!("Response: {:?}", response.choices[0].message.content);
//!     Ok(())
//! }
//! ```
//!
//! ## Environment Variables
//! - `GROK_API_KEY`: API key for Grok
//! - `GROK_API_ENDPOINT`: API endpoint (default: https://api.x.ai/v1/chat/completions)
//! - `GROK_MODEL`: Model name (default: grok-3-mini-fast-latest)
//! - `GROK_CACHE_SIZE`: Maximum number of responses held in the LRU cache (default: 100)
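//!
//! A client might resolve this configuration along the lines of the sketch below
//! (illustrative only; the struct and its field names are assumptions, but the
//! variable names and defaults match the list above):
//!
//! ```rust,ignore
//! use std::env;
//!
//! /// Illustrative configuration holder; not the crate's actual type.
//! struct GrokConfig {
//!     api_key: String,
//!     endpoint: String,
//!     model: String,
//!     cache_size: usize,
//! }
//!
//! impl GrokConfig {
//!     /// Reads the environment, applying the documented defaults.
//!     fn from_env() -> Result<Self, env::VarError> {
//!         Ok(Self {
//!             api_key: env::var("GROK_API_KEY")?, // required, no default
//!             endpoint: env::var("GROK_API_ENDPOINT")
//!                 .unwrap_or_else(|_| "https://api.x.ai/v1/chat/completions".to_string()),
//!             model: env::var("GROK_MODEL")
//!                 .unwrap_or_else(|_| "grok-3-mini-fast-latest".to_string()),
//!             cache_size: env::var("GROK_CACHE_SIZE")
//!                 .ok()
//!                 .and_then(|s| s.parse().ok())
//!                 .unwrap_or(100),
//!         })
//!     }
//! }
//! ```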
// Re-export key types for convenient access
pub use clients::{ChatCompletionClient, GrokClient};
pub use entities::Message;
pub use metrics::Metrics; // assumed module path; `Metrics` backs the metrics tracking above
pub use errors::LlmClientError; // assumed module path for the crate's error type
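// With these re-exports in place, downstream code can import the common types
// directly from the crate root, e.g.
// `use ai_client::{ChatCompletionClient, GrokClient, Message};`,
// rather than spelling out each submodule path.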