pub struct AiClient { /* private fields */ }
Unified AI client
Usage example:
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );
    // Note: Set GROQ_API_KEY environment variable for actual API calls
    // Optional: Set AI_PROXY_URL environment variable to use proxy server
    // let response = client.chat_completion(request).await?;
    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
    Ok(())
}
§Proxy Configuration
Configure proxy server by setting the AI_PROXY_URL environment variable:
export AI_PROXY_URL=http://proxy.example.com:8080
Supported proxy formats:
- HTTP proxy: http://proxy.example.com:8080
- HTTPS proxy: https://proxy.example.com:8080
- With authentication: http://user:pass@proxy.example.com:8080
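The same configuration can be applied from Rust instead of the shell. A minimal sketch (the proxy URL is a placeholder and Provider::Groq is only an example; any client created after the variable is set picks it up):
use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Route all ai-lib HTTP traffic through a proxy for this process only.
    std::env::set_var("AI_PROXY_URL", "http://proxy.example.com:8080");

    // Clients created afterwards automatically use the proxy.
    let client = AiClient::new(Provider::Groq)?;
    println!("provider: {:?}", client.current_provider());
    Ok(())
}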
Implementations
impl AiClient
pub fn new(provider: Provider) -> Result<Self, AiLibError>
Create a new AI client
§Arguments
provider - The AI model provider to use
§Returns
Result<Self, AiLibError> - Client instance on success, error on failure
§Example
use ai_lib::{AiClient, Provider};
let client = AiClient::new(Provider::Groq)?;
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Multimodal example: image + audio content in a message");

    let _client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "multimodal-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_image(
                Some("https://example.com/dog.jpg".into()),
                Some("image/jpeg".into()),
                Some("dog.jpg".into()),
            ),
            function_call: None,
        }],
    );

    println!(
        "Prepared multimodal request; image URL: {}",
        request.messages[0].content.as_text()
    );

    // Note: this example demonstrates the type usage only and does not call the API.
    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ensure COHERE_API_KEY env var is set if making real requests
    let client = AiClient::new(Provider::Cohere)?;

    let request = ChatCompletionRequest::new(
        "command-xlarge-nightly".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Write a haiku about rust programming".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(60);

    // List models
    match client.list_models().await {
        Ok(models) => println!("Models: {:?}", models),
        Err(e) => eprintln!("Failed to list models: {}", e),
    }

    // Streaming
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Basic Usage Example");
    println!("================================");

    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Get list of supported models
    let models = client.list_models().await?;
    println!("📋 Available models: {:?}", models);

    // Create chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("📤 Sending request to model: {}", request.model);

    // Send request
    let response = client.chat_completion(request).await?;

    println!("📥 Received response:");
    println!(" ID: {}", response.id);
    println!(" Model: {}", response.model);
    println!(
        " Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!(" Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Check environment variables
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ Please set GROQ_API_KEY environment variable");
        println!(" Example: export GROQ_API_KEY=your_api_key_here");
        println!(" Or set it in .env file");
        return Ok(());
    }

    println!("🔧 Creating Groq client using new Provider classification system...");

    // Create Groq client - using new provider classification system
    let client = AiClient::new(Provider::Groq)?;

    // Create chat request
    let request = ChatCompletionRequest::new(
        "llama-3.1-8b-instant".to_string(), // Available Groq model
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please respond with a simple greeting.".to_string()),
            function_call: None,
        }],
    );

    println!("🚀 Sending request to Groq...");
    println!("📝 Request: Hello! Please respond with a simple greeting.");
    println!();

    // Send the request and get the response
    let response = client.chat_completion(request).await?;

    println!("✅ Groq Response:");
    match &response.choices[0].message.content {
        Content::Text(text) => println!("{}", text),
        Content::Json(json) => println!("JSON: {:?}", json),
        Content::Image { url, mime, name } => println!("Image: url={:?}, mime={:?}, name={:?}", url, mime, name),
        Content::Audio { url, mime } => println!("Audio: url={:?}, mime={:?}", url, mime),
    }

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 Config-driven AI-lib Example");
    println!("================================");

    // Demonstrate the advantages of config-driven approach: easy provider switching
    let providers = vec![
        (Provider::Groq, "Groq"),
        (Provider::OpenAI, "OpenAI"),
        (Provider::DeepSeek, "DeepSeek"),
    ];

    for (provider, name) in providers {
        println!("\n📡 Testing Provider: {}", name);

        // Create client - just change the enum value
        let client = AiClient::new(provider)?;
        println!(
            "✅ Client created successfully: {:?}",
            client.current_provider()
        );

        // Get model list
        match client.list_models().await {
            Ok(models) => println!("📋 Available models: {:?}", models),
            Err(e) => println!("⚠️ Failed to get model list: {}", e),
        }

        // Create test request
        let request = ChatCompletionRequest::new(
            "test-model".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello from ai-lib!".to_string()),
                function_call: None,
            }],
        );

        println!("📤 Request prepared, model: {}", request.model);
        println!(" (Need to set corresponding API_KEY environment variable for actual calls)");
    }

    println!("\n🎯 Core advantages of config-driven approach:");
    println!(" • Zero-code switching: just change Provider enum value");
    println!(" • Unified interface: all providers use the same API");
    println!(" • Rapid expansion: add new compatible providers with just configuration");

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 AI-lib Proxy Server Support Example");
    println!("=====================================");

    // Check proxy configuration
    match std::env::var("AI_PROXY_URL") {
        Ok(proxy_url) => {
            println!("✅ Proxy configuration detected: {}", proxy_url);
            println!(" All HTTP requests will go through this proxy server");
        }
        Err(_) => {
            println!("ℹ️ AI_PROXY_URL environment variable not set");
            println!(" To use proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
        }
    }

    println!("\n🚀 Creating AI client...");
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Client created successfully, provider: {:?}",
        client.current_provider()
    );

    // Create test request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
            function_call: None,
        }],
    );

    println!("\n📤 Preparing to send request...");
    println!(" Model: {}", request.model);
    println!(" Message: {}", request.messages[0].content.as_text());

    // Get model list (this request will also go through proxy)
    match client.list_models().await {
        Ok(models) => {
            println!("\n📋 Model list obtained through proxy:");
            for model in models {
                println!(" • {}", model);
            }
        }
        Err(e) => {
            println!("\n⚠️ Failed to get model list: {}", e);
            println!(" This may be due to:");
            println!(" • GROQ_API_KEY environment variable not set");
            println!(" • Proxy server configuration error");
            println!(" • Network connection issue");
        }
    }

    println!("\n💡 Proxy Configuration Instructions:");
    println!(" • Set environment variable: AI_PROXY_URL=http://your-proxy:port");
    println!(" • Supports HTTP and HTTPS proxies");
    println!(" • Supports authenticated proxies: http://user:pass@proxy:port");
    println!(" • All AI providers will automatically use this proxy configuration");

    Ok(())
}
pub fn with_options(provider: Provider, opts: ConnectionOptions) -> Result<Self, AiLibError>
Create a client with minimal explicit options (base_url/proxy/timeout). Not all providers support these overrides; providers that don't will ignore the unsupported fields gracefully.
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Explicit configuration example");
    let opts = ConnectionOptions {
        base_url: None, // fallback to provider default
        proxy: Some("http://proxy.example.com:8080".into()), // or None to use AI_PROXY_URL
        api_key: None, // rely on environment for now
        timeout: Some(std::time::Duration::from_secs(40)),
        disable_proxy: false,
    };

    let client = AiClient::with_options(Provider::Groq, opts)?;

    let req = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Ping from explicit config".into()),
            function_call: None,
        }],
    );

    // This may fail if GROQ_API_KEY not set; we only show structure.
    match client.chat_completion(req).await {
        Ok(resp) => println!("Response model: {}", resp.model),
        Err(e) => println!(
            "Request failed (expected in example without API key): {}",
            e
        ),
    }
    Ok(())
}
pub fn connection_options(&self) -> Option<&ConnectionOptions>
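This accessor has no doc text here; judging from the signature it returns the explicit ConnectionOptions the client was created with, if any. A small sketch (field names follow the with_options example above; the crate-root re-export of ConnectionOptions is assumed):
use ai_lib::{AiClient, ConnectionOptions, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let opts = ConnectionOptions {
        base_url: None,
        proxy: None,
        api_key: None, // rely on the environment
        timeout: Some(std::time::Duration::from_secs(30)),
        disable_proxy: false,
    };
    let client = AiClient::with_options(Provider::Groq, opts)?;

    // Read back whichever options the client was constructed with.
    if let Some(opts) = client.connection_options() {
        println!("explicit timeout configured: {}", opts.timeout.is_some());
    }
    Ok(())
}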
pub fn builder(provider: Provider) -> AiClientBuilder
Create a new AI client builder
The builder pattern allows more flexible client configuration:
- Automatic environment variable detection
- Support for custom base_url and proxy
- Support for custom timeout and connection pool configuration
§Arguments
provider - The AI model provider to use
§Returns
AiClientBuilder - Builder instance
§Example
use ai_lib::{AiClient, Provider};

// Simplest usage - automatic environment variable detection
let client = AiClient::builder(Provider::Groq).build()?;

// Custom base_url and proxy
let client = AiClient::builder(Provider::Groq)
    .with_base_url("https://custom.groq.com")
    .with_proxy(Some("http://proxy.example.com:8080"))
    .build()?;
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Check environment variables
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ Please set GROQ_API_KEY environment variable");
        println!(" Example: export GROQ_API_KEY=your_api_key_here");
        return Ok(());
    }

    println!("🚀 Model Override Feature Demo");
    println!("==============================");
    println!();

    // 1. Basic usage - maintain original simplicity
    println!("📋 1. Basic Usage - Using Default Model");
    let reply = AiClient::quick_chat_text(Provider::Groq, "Hello!").await?;
    println!(" ✅ Response: {}", reply);
    println!();

    // 2. Explicitly specify model
    println!("📋 2. Explicitly Specify Model");
    let reply = AiClient::quick_chat_text_with_model(
        Provider::Groq,
        "Hello!",
        "llama-3.1-8b-instant"
    ).await?;
    println!(" ✅ Response: {}", reply);
    println!();

    // 3. Using ModelOptions
    println!("📋 3. Using ModelOptions");
    let client = AiClient::new(Provider::Groq)?;
    let mut request = client.build_simple_request("Hello!");
    request.model = "llama-3.1-70b-versatile".to_string();

    let response = client.chat_completion(request).await?;

    let reply = response.choices[0].message.content.as_text();
    println!(" ✅ Response: {}", reply);
    println!();

    // 4. AiClientBuilder custom default model
    println!("📋 4. AiClientBuilder Custom Default Model");
    let client = AiClient::builder(Provider::Groq)
        .with_default_chat_model("llama-3.1-8b-instant")
        .build()?;

    let request = client.build_simple_request("Hello!");
    println!(" Using model: {}", request.model);

    let response = client.chat_completion(request).await?;
    match &response.choices[0].message.content {
        Content::Text(text) => {
            println!(" ✅ Response: {}", text);
        }
        _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
    }
    println!();

    // 5. Explicitly specify model in build_simple_request
    println!("📋 5. Explicitly Specify Model in build_simple_request");
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request_with_model("Hello!", "llama-3.1-70b-versatile");

    println!(" Using model: {}", request.model);

    let response = client.chat_completion(request).await?;
    match &response.choices[0].message.content {
        Content::Text(text) => {
            println!(" ✅ Response: {}", text);
        }
        _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
    }
    println!();

    println!("🎉 Demo completed!");
    println!("==================");
    println!("✅ All model override features are working correctly");
    println!("✅ Backward compatibility is guaranteed");
    println!("✅ Flexible model specification methods are provided");

    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI Client Builder Pattern Example");
    println!("===================================");

    // Example 1: Simplest usage - automatic environment variable detection
    println!("\n📋 Example 1: Simplest usage");
    println!(" Automatically detect GROQ_BASE_URL and AI_PROXY_URL from environment variables");

    let client = AiClientBuilder::new(Provider::Groq).build()?;
    println!(
        "✅ Client created successfully, provider: {:?}",
        client.current_provider()
    );

    // Example 2: Custom base_url
    println!("\n📋 Example 2: Custom base_url");
    println!(" Use custom Groq server address");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .build()?;
    println!("✅ Client created successfully with custom base_url");

    // Example 3: Custom base_url and proxy
    println!("\n📋 Example 3: Custom base_url and proxy");
    println!(" Use custom server and proxy");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ Client created successfully with custom base_url and proxy");

    // Example 4: Full custom configuration
    println!("\n📋 Example 4: Full custom configuration");
    println!(" Custom timeout, connection pool and other advanced configurations");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .with_timeout(Duration::from_secs(60))
        .with_pool_config(32, Duration::from_secs(90))
        .build()?;
    println!("✅ Client created successfully with full custom configuration");

    // Example 5: Use convenient builder method
    println!("\n📋 Example 5: Use convenient builder method");
    println!(" Create builder through AiClient::builder()");

    let client = AiClient::builder(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ Client created successfully using convenient builder method");

    // Example 6: Environment variable priority demonstration
    println!("\n📋 Example 6: Environment variable priority demonstration");
    println!(" Set environment variables, then use builder");

    // Set environment variables
    std::env::set_var("GROQ_BASE_URL", "https://env.groq.com");
    std::env::set_var("AI_PROXY_URL", "http://env.proxy.com:8080");

    // Don't set any custom configuration, should use environment variables
    let client = AiClientBuilder::new(Provider::Groq).build()?;
    println!("✅ Client created successfully using environment variable configuration");

    // Explicit settings override environment variables
    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://explicit.groq.com")
        .with_proxy(Some("http://explicit.proxy.com:8080"))
        .build()?;
    println!(
        "✅ Client created successfully, explicit configuration overrides environment variables"
    );

    // Example 7: Different provider configurations
    println!("\n📋 Example 7: Different provider configurations");

    // Groq
    let groq_client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .build()?;
    println!("✅ Groq client created successfully");

    // DeepSeek
    let deepseek_client = AiClientBuilder::new(Provider::DeepSeek)
        .with_base_url("https://custom.deepseek.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ DeepSeek client created successfully");

    // Ollama (local deployment)
    let ollama_client = AiClientBuilder::new(Provider::Ollama)
        .with_base_url("http://localhost:11434")
        .build()?;
    println!("✅ Ollama client created successfully");

    // Example 8: Error handling
    println!("\n📋 Example 8: Error handling");
    println!(" Try to set custom configuration for unsupported provider");

    match AiClientBuilder::new(Provider::OpenAI)
        .with_base_url("https://custom.openai.com")
        .build()
    {
        Ok(_) => println!("❌ This should not succeed"),
        Err(e) => println!("✅ Correctly caught error: {}", e),
    }

    println!("\n🎉 All examples completed!");
    println!("\n💡 Advantages of builder pattern:");
    println!(" 1. Automatic environment variable detection, reducing configuration code");
    println!(" 2. Support for progressive custom configuration");
    println!(" 3. Method chaining for cleaner code");
    println!(" 4. Backward compatible, existing code requires no changes");
    println!(" 5. Support for advanced configuration (timeout, connection pool, etc.)");

    Ok(())
}
pub fn new_with_metrics(provider: Provider, metrics: Arc<dyn Metrics>) -> Result<Self, AiLibError>
Create AiClient with injected metrics implementation
pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self
Set metrics implementation on client
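A minimal sketch of attaching a metrics sink. The import path ai_lib::metrics::Metrics and the crate-root re-export of AiLibError are assumptions; the metrics value is supplied by the caller, so no trait implementation is shown:
use std::sync::Arc;
use ai_lib::{AiClient, AiLibError, Provider};
// Assumed path for the Metrics trait; adjust to where ai_lib actually exposes it.
use ai_lib::metrics::Metrics;

// Build a client and attach a caller-supplied metrics implementation.
fn instrumented_client(metrics: Arc<dyn Metrics>) -> Result<AiClient, AiLibError> {
    Ok(AiClient::new(Provider::Groq)?.with_metrics(metrics))
}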
pub async fn chat_completion(&self, request: ChatCompletionRequest) -> Result<ChatCompletionResponse, AiLibError>
Send chat completion request
§Arguments
request - Chat completion request
§Returns
Result<ChatCompletionResponse, AiLibError> - Response on success, error on failure
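§Example
A minimal end-to-end sketch; the model name is illustrative and the provider's API key (e.g. GROQ_API_KEY) must be set for a real call:
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );

    let response = client.chat_completion(request).await?;
    println!("{}", response.choices[0].message.content.as_text());
    Ok(())
}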
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔧 OpenAI Function Calling example (ai-lib)");

    // Ensure OPENAI_API_KEY is set in env before running
    let client = AiClient::new(Provider::OpenAI)?;

    // Build a simple user message
    let user_msg = Message {
        role: Role::User,
        content: Content::Text("Please call the ascii_horse tool with size=3".to_string()),
        function_call: None,
    };

    // Define a Tool (JSON Schema for parameters)
    let ascii_horse_tool = Tool {
        name: "ascii_horse".to_string(),
        description: Some("Draws an ASCII horse of given size".to_string()),
        parameters: Some(json!({
            "type": "object",
            "properties": {
                "size": { "type": "integer", "description": "Size of the horse" }
            },
            "required": ["size"]
        })),
    };

    let mut req = ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![user_msg]);
    req.functions = Some(vec![ascii_horse_tool]);
    req.function_call = Some(FunctionCallPolicy::Auto("auto".to_string()));
    req = req.with_max_tokens(200).with_temperature(0.0);

    println!("📤 Sending request to OpenAI (model={})", req.model);

    let resp = client.chat_completion(req).await?;

    // Handle a possible function call from the model: execute locally and send the result back
    for choice in resp.choices {
        let msg = choice.message;
        if let Some(fc) = msg.function_call {
            println!("🛠️ Model invoked function: {}", fc.name);
            let args = fc.arguments.unwrap_or(serde_json::json!(null));
            println!(" arguments: {}", args);

            // Simple local tool: ascii_horse
            if fc.name == "ascii_horse" {
                // Parse size param
                let size = args.get("size").and_then(|v| v.as_i64()).unwrap_or(3) as usize;
                let horse = generate_ascii_horse(size);
                println!("⚙️ Executed ascii_horse locally, output:\n{}", horse);

                // Send follow-up message with tool result as assistant message
                let tool_msg = Message {
                    role: Role::Assistant,
                    content: Content::Text(horse.clone()),
                    function_call: None,
                };

                let mut followup =
                    ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![tool_msg]);
                followup = followup.with_max_tokens(200).with_temperature(0.0);
                let follow_resp = client.chat_completion(followup).await?;
                for fc_choice in follow_resp.choices {
                    println!(
                        "🗨️ Final model response: {}",
                        fc_choice.message.content.as_text()
                    );
                }
            }
        } else {
            println!("💬 Model message: {}", msg.content.as_text());
        }
    }

    Ok(())
}
pub async fn chat_completion_stream(&self, request: ChatCompletionRequest) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>
Streaming chat completion request
§Arguments
request - Chat completion request
§Returns
Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError> - Stream of response chunks on success, error on failure
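§Example
A minimal sketch of consuming the stream; it assumes the futures crate for StreamExt, as the repository examples below do, and uses an illustrative model name:
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Stream a short reply".to_string()),
            function_call: None,
        }],
    );

    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            // Each chunk carries incremental delta content.
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }
    Ok(())
}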
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Streaming response test");
    println!("================");

    // Check the Groq API key
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY is not set");
        return Ok(());
    }

    // Create a Groq client
    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    // Create a streaming request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Please write a short poem about AI in exactly 4 lines.".to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(100)
    .with_temperature(0.7);

    println!("\n📤 Sending streaming request...");
    println!(" Model: {}", request.model);
    println!(" Message: {}", request.messages[0].content.as_text());

    // Get the streaming response
    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("\n🌊 Receiving streaming response:");
            println!("{}", "─".repeat(50));

            let mut full_content = String::new();
            let mut chunk_count = 0;

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        chunk_count += 1;

                        if let Some(choice) = chunk.choices.first() {
                            if let Some(content) = &choice.delta.content {
                                print!("{}", content);
                                full_content.push_str(content);

                                // Flush output
                                use std::io::{self, Write};
                                io::stdout().flush().unwrap();
                            }

                            // Check whether the stream is finished
                            if choice.finish_reason.is_some() {
                                println!("\n{}", "─".repeat(50));
                                println!("✅ Streaming response complete!");
                                println!(" Finish reason: {:?}", choice.finish_reason);
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming response error: {}", e);
                        break;
                    }
                }
            }

            println!("\n📊 Streaming response statistics:");
            println!(" Number of chunks: {}", chunk_count);
            println!(" Total content length: {} characters", full_content.len());
            println!(" Full content: \"{}\"", full_content.trim());
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    println!("\n💡 Advantages of streaming responses:");
    println!(" • Display generated content in real time");
    println!(" • Better user experience");
    println!(" • Generation can be stopped early");
    println!(" • Well suited to long text generation");

    Ok(())
}
pub async fn chat_completion_stream_with_cancel(&self, request: ChatCompletionRequest) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>
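This method has no doc text here; judging from the signature it behaves like chat_completion_stream but additionally returns a CancelHandle for stopping the stream early. A sketch that consumes the stream and simply keeps the handle in scope (the handle's own methods are not documented on this page, so none are called; the futures crate is assumed for StreamExt):
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Stream with cancellation support".to_string()),
            function_call: None,
        }],
    );

    // Keep the handle alive for as long as cancellation may be needed.
    let (mut stream, _cancel_handle) = client.chat_completion_stream_with_cancel(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }
    Ok(())
}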
pub async fn chat_completion_batch(&self, requests: Vec<ChatCompletionRequest>, concurrency_limit: Option<usize>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
Batch chat completion requests
§Arguments
requests - List of chat completion requests
concurrency_limit - Maximum concurrent request count (None means unlimited)
§Returns
Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];

    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
    Ok(())
}
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Batch Processing Example");
    println!("==================================");

    // Create client
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Prepare multiple requests
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is the capital of France?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.7)
        .with_max_tokens(50),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is 2 + 2?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.1)
        .with_max_tokens(20),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Tell me a short joke.".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.9)
        .with_max_tokens(100),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text(
                    "What is the largest planet in our solar system?".to_string(),
                ),
                function_call: None,
            }],
        )
        .with_temperature(0.5)
        .with_max_tokens(60),
    ];

    println!(
        "📤 Prepared {} requests for batch processing",
        requests.len()
    );

    // Method 1: Batch processing with concurrency limit
    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
    let start_time = std::time::Instant::now();

    let responses = client
        .chat_completion_batch(requests.clone(), Some(2))
        .await?;

    let duration = start_time.elapsed();
    println!("⏱️ Batch processing completed in {:?}", duration);

    // Process responses
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    // Method 2: Smart batch processing (auto-select strategy)
    println!("\n🧠 Method 2: Smart batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch_smart(requests.clone()).await?;

    let duration = start_time.elapsed();
    println!("⏱️ Smart batch processing completed in {:?}", duration);

    // Count successes and failures
    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
    let failed: Vec<_> = responses
        .iter()
        .enumerate()
        .filter_map(|(i, r)| r.as_ref().err().map(|e| (i, e)))
        .collect();

    println!("📊 Results:");
    println!(" ✅ Successful: {}/{}", successful.len(), responses.len());
    println!(" ❌ Failed: {}/{}", failed.len(), responses.len());
    println!(
        " 📈 Success rate: {:.1}%",
        (successful.len() as f64 / responses.len() as f64) * 100.0
    );

    // Method 3: Unlimited concurrent batch processing
    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch(requests, None).await?;

    let duration = start_time.elapsed();
    println!(
        "⏱️ Unlimited concurrent processing completed in {:?}",
        duration
    );

    // Display all responses
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    println!("\n🎉 Batch processing example completed successfully!");
    Ok(())
}
pub async fn chat_completion_batch_smart(&self, requests: Vec<ChatCompletionRequest>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
Smart batch processing: automatically choose processing strategy based on request count
§Arguments
requests - List of chat completion requests
§Returns
Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
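§Example
A brief sketch; apart from dropping the explicit concurrency limit it is used exactly like chat_completion_batch (the model name is illustrative):
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let requests: Vec<ChatCompletionRequest> = ["Hello", "How are you?"]
        .iter()
        .map(|prompt| {
            ChatCompletionRequest::new(
                "llama3-8b-8192".to_string(),
                vec![Message {
                    role: Role::User,
                    content: Content::Text(prompt.to_string()),
                    function_call: None,
                }],
            )
        })
        .collect();

    // The processing strategy is chosen automatically from the request count.
    let responses = client.chat_completion_batch_smart(requests).await?;
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
    Ok(())
}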
pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>
Get list of supported models
§Returns
Result<Vec<String>, AiLibError> - Returns model list on success, error on failure
pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>
Switch AI model provider
§Arguments
provider - New provider
§Returns
Result<(), AiLibError> - Returns () on success, error on failure
§Example
use ai_lib::{AiClient, Provider};
let mut client = AiClient::new(Provider::Groq)?;
// Switch from Groq to Groq (demonstrating switch functionality)
client.switch_provider(Provider::Groq)?;
pub fn current_provider(&self) -> Provider
Get current provider
9 println!("===================================");
10
11 // Example 1: Simplest usage - automatic environment variable detection
12 println!("\n📋 Example 1: Simplest usage");
13 println!(" Automatically detect GROQ_BASE_URL and AI_PROXY_URL from environment variables");
14
15 let client = AiClientBuilder::new(Provider::Groq).build()?;
16 println!(
17 "✅ Client created successfully, provider: {:?}",
18 client.current_provider()
19 );
20
21 // Example 2: Custom base_url
22 println!("\n📋 Example 2: Custom base_url");
23 println!(" Use custom Groq server address");
24
25 let client = AiClientBuilder::new(Provider::Groq)
26 .with_base_url("https://custom.groq.com")
27 .build()?;
28 println!("✅ Client created successfully with custom base_url");
29
30 // Example 3: Custom base_url and proxy
31 println!("\n📋 Example 3: Custom base_url and proxy");
32 println!(" Use custom server and proxy");
33
34 let client = AiClientBuilder::new(Provider::Groq)
35 .with_base_url("https://custom.groq.com")
36 .with_proxy(Some("http://proxy.example.com:8080"))
37 .build()?;
38 println!("✅ Client created successfully with custom base_url and proxy");
39
40 // Example 4: Full custom configuration
41 println!("\n📋 Example 4: Full custom configuration");
42 println!(" Custom timeout, connection pool and other advanced configurations");
43
44 let client = AiClientBuilder::new(Provider::Groq)
45 .with_base_url("https://custom.groq.com")
46 .with_proxy(Some("http://proxy.example.com:8080"))
47 .with_timeout(Duration::from_secs(60))
48 .with_pool_config(32, Duration::from_secs(90))
49 .build()?;
50 println!("✅ Client created successfully with full custom configuration");
51
52 // Example 5: Use convenient builder method
53 println!("\n📋 Example 5: Use convenient builder method");
54 println!(" Create builder through AiClient::builder()");
55
56 let client = AiClient::builder(Provider::Groq)
57 .with_base_url("https://custom.groq.com")
58 .with_proxy(Some("http://proxy.example.com:8080"))
59 .build()?;
60 println!("✅ Client created successfully using convenient builder method");
61
62 // Example 6: Environment variable priority demonstration
63 println!("\n📋 Example 6: Environment variable priority demonstration");
64 println!(" Set environment variables, then use builder");
65
66 // Set environment variables
67 std::env::set_var("GROQ_BASE_URL", "https://env.groq.com");
68 std::env::set_var("AI_PROXY_URL", "http://env.proxy.com:8080");
69
70 // Don't set any custom configuration, should use environment variables
71 let client = AiClientBuilder::new(Provider::Groq).build()?;
72 println!("✅ Client created successfully using environment variable configuration");
73
74 // Explicit settings override environment variables
75 let client = AiClientBuilder::new(Provider::Groq)
76 .with_base_url("https://explicit.groq.com")
77 .with_proxy(Some("http://explicit.proxy.com:8080"))
78 .build()?;
79 println!(
80 "✅ Client created successfully, explicit configuration overrides environment variables"
81 );
82
83 // Example 7: Different provider configurations
84 println!("\n📋 Example 7: Different provider configurations");
85
86 // Groq
87 let groq_client = AiClientBuilder::new(Provider::Groq)
88 .with_base_url("https://custom.groq.com")
89 .build()?;
90 println!("✅ Groq client created successfully");
91
92 // DeepSeek
93 let deepseek_client = AiClientBuilder::new(Provider::DeepSeek)
94 .with_base_url("https://custom.deepseek.com")
95 .with_proxy(Some("http://proxy.example.com:8080"))
96 .build()?;
97 println!("✅ DeepSeek client created successfully");
98
99 // Ollama (local deployment)
100 let ollama_client = AiClientBuilder::new(Provider::Ollama)
101 .with_base_url("http://localhost:11434")
102 .build()?;
103 println!("✅ Ollama client created successfully");
104
105 // Example 8: Error handling
106 println!("\n📋 Example 8: Error handling");
107    println!("   Try to set a custom configuration for an unsupported provider");
108
109 match AiClientBuilder::new(Provider::OpenAI)
110 .with_base_url("https://custom.openai.com")
111 .build()
112 {
113 Ok(_) => println!("❌ This should not succeed"),
114 Err(e) => println!("✅ Correctly caught error: {}", e),
115 }
116
117 println!("\n🎉 All examples completed!");
118 println!("\n💡 Advantages of builder pattern:");
119 println!(" 1. Automatic environment variable detection, reducing configuration code");
120 println!(" 2. Support for progressive custom configuration");
121 println!(" 3. Method chaining for cleaner code");
122 println!(" 4. Backward compatible, existing code requires no changes");
123 println!(" 5. Support for advanced configuration (timeout, connection pool, etc.)");
124
125 Ok(())
126}
Source
pub fn build_simple_request<S: Into<String>>(&self, prompt: S) -> ChatCompletionRequest
Convenience helper: construct a request with the provider's default chat model. This does NOT send the request. Uses the custom default model if one was set via AiClientBuilder; otherwise falls back to the provider default.
Examples found in repository
7async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 // Check environment variables
9 if std::env::var("GROQ_API_KEY").is_err() {
10 println!("❌ Please set GROQ_API_KEY environment variable");
11 println!(" Example: export GROQ_API_KEY=your_api_key_here");
12 return Ok(());
13 }
14
15 println!("🚀 Model Override Feature Demo");
16 println!("==============================");
17 println!();
18
19 // 1. Basic usage - maintain original simplicity
20 println!("📋 1. Basic Usage - Using Default Model");
21 let reply = AiClient::quick_chat_text(Provider::Groq, "Hello!").await?;
22 println!(" ✅ Response: {}", reply);
23 println!();
24
25 // 2. Explicitly specify model
26 println!("📋 2. Explicitly Specify Model");
27 let reply = AiClient::quick_chat_text_with_model(
28 Provider::Groq,
29 "Hello!",
30 "llama-3.1-8b-instant"
31 ).await?;
32 println!(" ✅ Response: {}", reply);
33 println!();
34
35    // 3. Override the model on a prepared request
36    println!("📋 3. Override the Model on a Prepared Request");
37 let client = AiClient::new(Provider::Groq)?;
38 let mut request = client.build_simple_request("Hello!");
39 request.model = "llama-3.1-70b-versatile".to_string();
40
41 let response = client.chat_completion(request).await?;
42
43 let reply = response.choices[0].message.content.as_text();
44 println!(" ✅ Response: {}", reply);
45 println!();
46
47 // 4. AiClientBuilder custom default model
48 println!("📋 4. AiClientBuilder Custom Default Model");
49 let client = AiClient::builder(Provider::Groq)
50 .with_default_chat_model("llama-3.1-8b-instant")
51 .build()?;
52
53 let request = client.build_simple_request("Hello!");
54 println!(" Using model: {}", request.model);
55
56 let response = client.chat_completion(request).await?;
57 match &response.choices[0].message.content {
58 Content::Text(text) => {
59 println!(" ✅ Response: {}", text);
60 }
61 _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
62 }
63 println!();
64
65 // 5. Explicitly specify model in build_simple_request
66 println!("📋 5. Explicitly Specify Model in build_simple_request");
67 let client = AiClient::new(Provider::Groq)?;
68 let request = client.build_simple_request_with_model("Hello!", "llama-3.1-70b-versatile");
69
70 println!(" Using model: {}", request.model);
71
72 let response = client.chat_completion(request).await?;
73 match &response.choices[0].message.content {
74 Content::Text(text) => {
75 println!(" ✅ Response: {}", text);
76 }
77 _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
78 }
79 println!();
80
81 println!("🎉 Demo completed!");
82 println!("==================");
83 println!("✅ All model override features are working correctly");
84 println!("✅ Backward compatibility is guaranteed");
85 println!("✅ Flexible model specification methods are provided");
86
87 Ok(())
88}
Source
pub fn build_simple_request_with_model<S: Into<String>>(&self, prompt: S, model: S) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified chat model. This does NOT send the request.
Examples found in repository: the model override example shown under build_simple_request above also exercises this method.
Source
pub fn build_multimodal_request<S: Into<String>>(&self, prompt: S) -> Result<ChatCompletionRequest, AiLibError>
Convenience helper: construct a request with the provider's default multimodal model. This does NOT send the request. Uses the custom default model if one was set via AiClientBuilder; otherwise falls back to the provider default.
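For illustration, a minimal sketch of pairing this helper with chat_completion. Provider::Groq and the commented-out send are placeholders; whether a default multimodal model exists for a given provider depends on its configuration.
use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    // Returns an error if the provider has no default multimodal model configured.
    let request = client.build_multimodal_request("Describe the attached image")?;
    println!("Prepared multimodal request for model: {}", request.model);
    // Uncomment to actually send (requires the provider's API key):
    // let response = client.chat_completion(request).await?;
    Ok(())
}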
Source
pub fn build_multimodal_request_with_model<S: Into<String>>(&self, prompt: S, model: S) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified multimodal model. This does NOT send the request.
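A minimal sketch; the model name below is a placeholder, not a model this crate guarantees to exist.
use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    // "your-multimodal-model" is a placeholder; substitute a model your provider serves.
    let request = client
        .build_multimodal_request_with_model("Describe the attached image", "your-multimodal-model");
    println!("Prepared request for model: {}", request.model);
    // let response = client.chat_completion(request).await?;
    Ok(())
}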
Source
pub async fn quick_chat_text<P: Into<String>>(provider: Provider, prompt: P) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using the default chat model, and return plain text content (first choice).
Examples found in repository: the model override example shown under build_simple_request above also exercises this method.
Source
pub async fn quick_chat_text_with_model<P: Into<String>, M: Into<String>>(provider: Provider, prompt: P, model: M) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using an explicitly specified chat model, and return plain text content (first choice).
Examples found in repository: the model override example shown under build_simple_request above also exercises this method.
Source
pub async fn quick_multimodal_text<P: Into<String>>(provider: Provider, prompt: P) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using the default multimodal model, and return plain text content (first choice).
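A minimal sketch, assuming the provider's API key (e.g. GROQ_API_KEY) is set and the provider has a default multimodal model configured.
use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Performs one full round-trip: client creation, request, and text extraction.
    let text = AiClient::quick_multimodal_text(Provider::Groq, "Summarize this prompt in one line").await?;
    println!("{}", text);
    Ok(())
}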
Source
pub async fn quick_multimodal_text_with_model<P: Into<String>, M: Into<String>>(provider: Provider, prompt: P, model: M) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using an explicitly specified multimodal model, and return plain text content (first choice).
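A minimal sketch; the model name is a placeholder, and the call requires the provider's API key.
use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // "your-multimodal-model" is a placeholder; substitute a model your provider serves.
    let text = AiClient::quick_multimodal_text_with_model(
        Provider::Groq,
        "Describe a sunset in one sentence",
        "your-multimodal-model",
    )
    .await?;
    println!("{}", text);
    Ok(())
}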
Source
pub async fn quick_chat_text_with_options<P: Into<String>>(provider: Provider, prompt: P, options: ModelOptions) -> Result<String, AiLibError>
One-shot helper with model options: create a client for provider, send a single user prompt using specified model options, and return plain text content (first choice).
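A minimal sketch, assuming ModelOptions is importable from the crate root and implements Default; consult the ModelOptions documentation for its actual path, constructors, and fields.
use ai_lib::{AiClient, ModelOptions, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumption: ModelOptions::default() exists; replace with its real constructor if it differs.
    let options = ModelOptions::default();
    let text = AiClient::quick_chat_text_with_options(Provider::Groq, "Hello!", options).await?;
    println!("{}", text);
    Ok(())
}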