Struct AiClient

pub struct AiClient { /* private fields */ }

Unified AI client

Usage example:

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
     
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );
     
    // Note: Set GROQ_API_KEY environment variable for actual API calls
    // Optional: Set AI_PROXY_URL environment variable to use proxy server
    // let response = client.chat_completion(request).await?;
     
    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
     
    Ok(())
}

§Proxy Configuration

Configure a proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:

  • HTTP proxy: http://proxy.example.com:8080
  • HTTPS proxy: https://proxy.example.com:8080
  • With authentication: http://user:pass@proxy.example.com:8080
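
The same settings can also be passed programmatically through ConnectionOptions (see with_options below). A minimal sketch, assuming ConnectionOptions is importable from the crate root as in examples/explicit_config.rs:

use ai_lib::{AiClient, ConnectionOptions, Provider};

fn proxied_client() -> Result<AiClient, Box<dyn std::error::Error>> {
    let opts = ConnectionOptions {
        base_url: None, // keep the provider's default endpoint
        // An explicit proxy here takes the place of AI_PROXY_URL; use None to fall back to it.
        proxy: Some("http://user:pass@proxy.example.com:8080".into()),
        api_key: None, // still resolved from the provider's API key environment variable
        timeout: Some(std::time::Duration::from_secs(30)),
        disable_proxy: false,
    };
    Ok(AiClient::with_options(Provider::Groq, opts)?)
}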

Implementations§

impl AiClient

pub fn new(provider: Provider) -> Result<Self, AiLibError>

Create a new AI client

§Arguments
  • provider - The AI model provider to use
§Returns
  • Result<Self, AiLibError> - Client instance on success, error on failure
§Example
use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
Examples found in repository
examples/multimodal_example.rs (line 8)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Multimodal example: image + audio content in a message");

    let _client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "multimodal-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_image(
                Some("https://example.com/dog.jpg".into()),
                Some("image/jpeg".into()),
                Some("dog.jpg".into()),
            ),
            function_call: None,
        }],
    );

    println!(
        "Prepared multimodal request; image URL: {}",
        request.messages[0].content.as_text()
    );

    // Note: this example demonstrates the type usage only and does not call the API.
    Ok(())
}
More examples
examples/list_models_smoke.rs (line 24)
async fn main() {
    let providers = vec![
        Provider::Groq,
        Provider::XaiGrok,
        Provider::Ollama,
        Provider::DeepSeek,
        Provider::Anthropic,
        Provider::AzureOpenAI,
        Provider::HuggingFace,
        Provider::TogetherAI,
        Provider::Qwen,
        Provider::OpenAI,
        Provider::Gemini,
        Provider::Mistral,
        Provider::Cohere,
        // Provider::Bedrock, // Removed: Bedrock deferred implementation/not in public API
    ];

    for p in providers {
        println!("--- Provider: {:?} ---", p);
        match AiClient::new(p) {
            Ok(client) => match client.list_models().await {
                Ok(models) => {
                    println!("Found {} models (showing up to 5):", models.len());
                    for m in models.into_iter().take(5) {
                        println!(" - {}", m);
                    }
                }
                Err(e) => println!("list_models error: {:?}", e),
            },
            Err(e) => println!("client init error: {:?}", e),
        }
    }
}
examples/cohere_stream.rs (line 8)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ensure COHERE_API_KEY env var is set if making real requests
    let client = AiClient::new(Provider::Cohere)?;

    let request = ChatCompletionRequest::new(
        "command-xlarge-nightly".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Write a haiku about rust programming".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(60);

    // List models
    match client.list_models().await {
        Ok(models) => println!("Models: {:?}", models),
        Err(e) => eprintln!("Failed to list models: {}", e),
    }

    // Streaming
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }

    Ok(())
}
examples/basic_usage.rs (line 11)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Basic Usage Example");
    println!("================================");

    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Get list of supported models
    let models = client.list_models().await?;
    println!("📋 Available models: {:?}", models);

    // Create chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("📤 Sending request to model: {}", request.model);

    // Send request
    let response = client.chat_completion(request).await?;

    println!("📥 Received response:");
    println!("   ID: {}", response.id);
    println!("   Model: {}", response.model);
    println!(
        "   Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!("   Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
examples/config_driven_example.rs (line 21)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 Config-driven AI-lib Example");
    println!("================================");

    // Demonstrate the advantages of config-driven approach: easy provider switching
    let providers = vec![
        (Provider::Groq, "Groq"),
        (Provider::OpenAI, "OpenAI"),
        (Provider::DeepSeek, "DeepSeek"),
    ];

    for (provider, name) in providers {
        println!("\n📡 Testing Provider: {}", name);

        // Create client - just change the enum value
        let client = AiClient::new(provider)?;
        println!(
            "✅ Client created successfully: {:?}",
            client.current_provider()
        );

        // Get model list
        match client.list_models().await {
            Ok(models) => println!("📋 Available models: {:?}", models),
            Err(e) => println!("⚠️  Failed to get model list: {}", e),
        }

        // Create test request
        let request = ChatCompletionRequest::new(
            "test-model".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello from ai-lib!".to_string()),
                function_call: None,
            }],
        );

        println!("📤 Request prepared, model: {}", request.model);
        println!("   (Need to set corresponding API_KEY environment variable for actual calls)");
    }

    println!("\n🎯 Core advantages of config-driven approach:");
    println!("   • Zero-code switching: just change Provider enum value");
    println!("   • Unified interface: all providers use the same API");
    println!("   • Rapid expansion: add new compatible providers with just configuration");

    Ok(())
}
examples/debug_request.rs (line 33)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Debug Request Format");
    println!("======================");

    // Create test request
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Original Request:");
    println!("   Model: {}", request.model);
    println!("   Message count: {}", request.messages.len());
    println!(
        "   Message[0]: {:?} - {}",
        request.messages[0].role,
        request.messages[0].content.as_text()
    );
    println!("   max_tokens: {:?}", request.max_tokens);

    // Test OpenAI
    println!("\n🤖 Testing OpenAI...");
    match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            match client.chat_completion(request.clone()).await {
                Ok(response) => {
                    println!("✅ Success!");
                    println!(
                        "   Response: {}",
                        response.choices[0].message.content.as_text()
                    );
                }
                Err(e) => {
                    println!("❌ Failed: {}", e);

                    // If it's a 400 error, it indicates request format issues
                    if e.to_string().contains("400") {
                        println!("   This usually indicates incorrect request format");
                        println!("   Let's check if the request contains necessary fields...");
                    }
                }
            }
        }
        Err(e) => println!("❌ Client creation failed: {}", e),
    }

    Ok(())
}

pub fn with_options(provider: Provider, opts: ConnectionOptions) -> Result<Self, AiLibError>

Create a client with minimal explicit options (base_url, proxy, timeout). Not all providers support every override; any option a provider does not support is ignored gracefully.

Examples found in repository
examples/explicit_config.rs (line 16)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Explicit configuration example");
    let opts = ConnectionOptions {
        base_url: None,                                      // fallback to provider default
        proxy: Some("http://proxy.example.com:8080".into()), // or None to use AI_PROXY_URL
        api_key: None,                                       // rely on environment for now
        timeout: Some(std::time::Duration::from_secs(40)),
        disable_proxy: false,
    };

    let client = AiClient::with_options(Provider::Groq, opts)?;

    let req = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Ping from explicit config".into()),
            function_call: None,
        }],
    );

    // This may fail if GROQ_API_KEY not set; we only show structure.
    match client.chat_completion(req).await {
        Ok(resp) => println!("Response model: {}", resp.model),
        Err(e) => println!(
            "Request failed (expected in example without API key): {}",
            e
        ),
    }
    Ok(())
}
pub fn connection_options(&self) -> Option<&ConnectionOptions>

Returns the explicit connection options this client was created with, if any.
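
§Example
A trivial inspection sketch (field names as shown in examples/explicit_config.rs):

if let Some(opts) = client.connection_options() {
    println!("explicit timeout: {:?}", opts.timeout);
}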


pub fn builder(provider: Provider) -> AiClientBuilder

Create a new AI client builder

The builder pattern allows more flexible client configuration:

  • Automatic environment variable detection
  • Support for custom base_url and proxy
  • Support for custom timeout and connection pool configuration
§Arguments
  • provider - The AI model provider to use
§Returns
  • AiClientBuilder - Builder instance
§Example
use ai_lib::{AiClient, Provider};

// Simplest usage - automatic environment variable detection
let client = AiClient::builder(Provider::Groq).build()?;

// Custom base_url and proxy
let client = AiClient::builder(Provider::Groq)
    .with_base_url("https://custom.groq.com")
    .with_proxy(Some("http://proxy.example.com:8080"))
    .build()?;
Examples found in repository
examples/builder_pattern.rs (line 56)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI Client Builder Pattern Example");
    println!("===================================");

    // Example 1: Simplest usage - automatic environment variable detection
    println!("\n📋 Example 1: Simplest usage");
    println!("   Automatically detect GROQ_BASE_URL and AI_PROXY_URL from environment variables");

    let client = AiClientBuilder::new(Provider::Groq).build()?;
    println!(
        "✅ Client created successfully, provider: {:?}",
        client.current_provider()
    );

    // Example 2: Custom base_url
    println!("\n📋 Example 2: Custom base_url");
    println!("   Use custom Groq server address");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .build()?;
    println!("✅ Client created successfully with custom base_url");

    // Example 3: Custom base_url and proxy
    println!("\n📋 Example 3: Custom base_url and proxy");
    println!("   Use custom server and proxy");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ Client created successfully with custom base_url and proxy");

    // Example 4: Full custom configuration
    println!("\n📋 Example 4: Full custom configuration");
    println!("   Custom timeout, connection pool and other advanced configurations");

    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .with_timeout(Duration::from_secs(60))
        .with_pool_config(32, Duration::from_secs(90))
        .build()?;
    println!("✅ Client created successfully with full custom configuration");

    // Example 5: Use convenient builder method
    println!("\n📋 Example 5: Use convenient builder method");
    println!("   Create builder through AiClient::builder()");

    let client = AiClient::builder(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ Client created successfully using convenient builder method");

    // Example 6: Environment variable priority demonstration
    println!("\n📋 Example 6: Environment variable priority demonstration");
    println!("   Set environment variables, then use builder");

    // Set environment variables
    std::env::set_var("GROQ_BASE_URL", "https://env.groq.com");
    std::env::set_var("AI_PROXY_URL", "http://env.proxy.com:8080");

    // Don't set any custom configuration, should use environment variables
    let client = AiClientBuilder::new(Provider::Groq).build()?;
    println!("✅ Client created successfully using environment variable configuration");

    // Explicit settings override environment variables
    let client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://explicit.groq.com")
        .with_proxy(Some("http://explicit.proxy.com:8080"))
        .build()?;
    println!(
        "✅ Client created successfully, explicit configuration overrides environment variables"
    );

    // Example 7: Different provider configurations
    println!("\n📋 Example 7: Different provider configurations");

    // Groq
    let groq_client = AiClientBuilder::new(Provider::Groq)
        .with_base_url("https://custom.groq.com")
        .build()?;
    println!("✅ Groq client created successfully");

    // DeepSeek
    let deepseek_client = AiClientBuilder::new(Provider::DeepSeek)
        .with_base_url("https://custom.deepseek.com")
        .with_proxy(Some("http://proxy.example.com:8080"))
        .build()?;
    println!("✅ DeepSeek client created successfully");

    // Ollama (local deployment)
    let ollama_client = AiClientBuilder::new(Provider::Ollama)
        .with_base_url("http://localhost:11434")
        .build()?;
    println!("✅ Ollama client created successfully");

    // Example 8: Error handling
    println!("\n📋 Example 8: Error handling");
    println!("   Try to set custom configuration for unsupported provider");

    match AiClientBuilder::new(Provider::OpenAI)
        .with_base_url("https://custom.openai.com")
        .build()
    {
        Ok(_) => println!("❌ This should not succeed"),
        Err(e) => println!("✅ Correctly caught error: {}", e),
    }

    println!("\n🎉 All examples completed!");
    println!("\n💡 Advantages of builder pattern:");
    println!("   1. Automatic environment variable detection, reducing configuration code");
    println!("   2. Support for progressive custom configuration");
    println!("   3. Method chaining for cleaner code");
    println!("   4. Backward compatible, existing code requires no changes");
    println!("   5. Support for advanced configuration (timeout, connection pool, etc.)");

    Ok(())
}

pub fn new_with_metrics(provider: Provider, metrics: Arc<dyn Metrics>) -> Result<Self, AiLibError>

Create an AiClient with an injected metrics implementation


pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self

Set the metrics implementation on the client
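
§Example
The Metrics trait's methods are not documented on this page, so a concrete implementation is elided; a minimal wiring sketch, assuming a metrics value of type Arc<dyn Metrics> built from your own implementation:

// Assuming `metrics: Arc<dyn Metrics>` was constructed elsewhere:
let client = AiClient::new_with_metrics(Provider::Groq, metrics)?;
// or, equivalently, attach it to an existing client:
let client = AiClient::new(Provider::Groq)?.with_metrics(metrics);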


pub async fn chat_completion(&self, request: ChatCompletionRequest) -> Result<ChatCompletionResponse, AiLibError>

Send chat completion request

§Arguments
  • request - Chat completion request
§Returns
  • Result<ChatCompletionResponse, AiLibError> - Response on success, error on failure
Examples found in repository
examples/explicit_config.rs (line 28)
(Identical to the listing shown above under with_options.)
More examples
examples/basic_usage.rs (line 36)
(Identical to the listing shown above under new.)
examples/debug_request.rs (line 35)
(Identical to the listing shown above under new.)
examples/test_without_proxy.rs (line 30)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 Testing connection without a proxy");
    println!("======================");

    // Temporarily remove the proxy setting
    std::env::remove_var("AI_PROXY_URL");

    println!("ℹ️  Temporarily removed the AI_PROXY_URL setting");

    // Test DeepSeek (directly reachable from mainland China)
    println!("\n🔍 Testing DeepSeek (direct connection):");
    match AiClient::new(Provider::DeepSeek) {
        Ok(client) => {
            let request = ChatCompletionRequest::new(
                "deepseek-chat".to_string(),
                vec![Message {
                    role: Role::User,
                    content: Content::Text(
                        "Hello! Please respond with just 'Hi' to test.".to_string(),
                    ),
                    function_call: None,
                }],
            )
            .with_max_tokens(5);

            match client.chat_completion(request).await {
                Ok(response) => {
                    println!("✅ DeepSeek direct connection succeeded!");
                    println!("   Response: {}", response.choices[0].message.content.as_text());
                    println!("   Tokens used: {}", response.usage.total_tokens);
                }
                Err(e) => {
                    println!("❌ DeepSeek request failed: {}", e);
                    if e.to_string().contains("402") {
                        println!("   (This is an insufficient-balance error, so the connection itself works)");
                    }
                }
            }
        }
        Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
    }

    println!("\n💡 Conclusions:");
    println!("   • DeepSeek is reachable directly; no proxy needed");
    println!("   • OpenAI and Groq require a proxy");
    println!("   • A proxy may rewrite request bodies, causing format errors");
    println!("   • Check the proxy server's configuration if requests fail");

    Ok(())
}
examples/openai_test.rs (line 53)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI Provider Test");
    println!("======================");

    // Check API key
    match std::env::var("OPENAI_API_KEY") {
        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
        Err(_) => {
            println!("❌ OPENAI_API_KEY environment variable not set");
            println!("   Please set: export OPENAI_API_KEY=your_api_key");
            return Ok(());
        }
    }

    // Create OpenAI client
    let client = AiClient::new(Provider::OpenAI)?;
    println!("✅ OpenAI client created successfully");

    // Get model list
    println!("\n📋 Getting OpenAI model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Successfully got {} models", models.len());
            println!("   Common models:");
            for model in models.iter().filter(|m| m.contains("gpt")) {
                println!("   • {}", model);
            }
        }
        Err(e) => println!("❌ Failed to get model list: {}", e),
    }

    // Test chat completion
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
                    .to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.7);

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion successful!");
            println!("   Model: {}", response.model);
            println!(
                "   Response: {}",
                response.choices[0].message.content.as_text()
            );
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
        }
        Err(e) => println!("❌ Chat completion failed: {}", e),
    }

    println!("\n🎯 OpenAI config-driven test completed!");
    println!("   This demonstrates the power of config-driven architecture:");
    println!("   • No need to write OpenAI-specific code");
    println!("   • Just add configuration in ProviderConfigs");
    println!("   • Automatically supports all OpenAI-compatible features");

    Ok(())
}
examples/test_all_providers.rs (line 54)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🧪 Test All AI Providers");
    println!("=======================");

    // Check proxy configuration
    if let Ok(proxy_url) = std::env::var("AI_PROXY_URL") {
        println!("🌐 Using proxy: {}", proxy_url);
    }

    let providers = vec![
        (Provider::Groq, "Groq", "llama3-8b-8192"),
        (Provider::OpenAI, "OpenAI", "gpt-3.5-turbo"),
        (Provider::DeepSeek, "DeepSeek", "deepseek-chat"),
    ];

    for (provider, name, model) in providers {
        println!("\n🔍 Testing Provider: {}", name);
        println!("{}", "─".repeat(30));

        match AiClient::new(provider) {
            Ok(client) => {
                println!("✅ Client created successfully");

                // Test model list
                match client.list_models().await {
                    Ok(models) => {
                        println!("📋 Available models count: {}", models.len());
                        if !models.is_empty() {
                            println!("   First 3 models: {:?}", &models[..models.len().min(3)]);
                        }
                    }
                    Err(e) => println!("⚠️  Failed to get model list: {}", e),
                }

                // Test chat completion
                let request = ChatCompletionRequest::new(
                    model.to_string(),
                    vec![Message {
                        role: Role::User,
                        content: Content::Text(
                            "Hello! Please respond with just 'Hi' to test the API.".to_string(),
                        ),
                        function_call: None,
                    }],
                )
                .with_max_tokens(10);

                println!("📤 Sending test request to model: {}", model);
                match client.chat_completion(request).await {
                    Ok(response) => {
                        println!("✅ Request successful!");
                        println!("   Response ID: {}", response.id);
                        println!(
                            "   Content: {}",
                            response.choices[0].message.content.as_text()
                        );
                        println!("   Tokens used: {}", response.usage.total_tokens);
                    }
                    Err(e) => println!("❌ Request failed: {}", e),
                }
            }
            Err(e) => {
                println!("❌ Client creation failed: {}", e);
            }
        }
    }

    println!("\n💡 Tips:");
    println!("   • Make sure to set corresponding API key environment variables");
    println!("   • GROQ_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY");
    println!("   • Optionally set AI_PROXY_URL to use proxy server");

    Ok(())
}

pub async fn chat_completion_stream(&self, request: ChatCompletionRequest) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>

Streaming chat completion request

§Arguments
  • request - Chat completion request
§Returns
  • Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError> - Boxed stream of response chunks on success
Examples found in repository
examples/cohere_stream.rs (line 28)
(Identical to the listing shown above under new.)
More examples
examples/test_streaming.rs (line 39)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Streaming response test");
    println!("================");

    // Check for the Groq API key
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY is not set");
        return Ok(());
    }

    // Create the Groq client
    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    // Build the streaming request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Please write a short poem about AI in exactly 4 lines.".to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(100)
    .with_temperature(0.7);

    println!("\n📤 Sending streaming request...");
    println!("   Model: {}", request.model);
    println!("   Message: {}", request.messages[0].content.as_text());

    // Consume the streaming response
    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("\n🌊 Receiving streaming response:");
            println!("{}", "─".repeat(50));

            let mut full_content = String::new();
            let mut chunk_count = 0;

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        chunk_count += 1;

                        if let Some(choice) = chunk.choices.first() {
                            if let Some(content) = &choice.delta.content {
                                print!("{}", content);
                                full_content.push_str(content);

                                // Flush stdout
                                use std::io::{self, Write};
                                io::stdout().flush().unwrap();
                            }

                            // Check whether generation finished
                            if choice.finish_reason.is_some() {
                                println!("\n{}", "─".repeat(50));
                                println!("✅ Streaming response complete!");
                                println!("   Finish reason: {:?}", choice.finish_reason);
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming response error: {}", e);
                        break;
                    }
                }
            }

            println!("\n📊 Streaming statistics:");
            println!("   Chunk count: {}", chunk_count);
            println!("   Total content length: {} characters", full_content.len());
            println!("   Full content: \"{}\"", full_content.trim());
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    println!("\n💡 Advantages of streaming responses:");
    println!("   • Display generated content in real time");
    println!("   • Better user experience");
    println!("   • Generation can be stopped early");
    println!("   • Well suited to long-form generation");

    Ok(())
}
examples/test_groq_generic.rs (line 48)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔄 Testing config-driven Groq");
    println!("====================");

    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY is not set");
        return Ok(());
    }

    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully (using GenericAdapter)");

    // Test regular chat
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Say 'Hello from Generic Groq!' in exactly those words.".to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(20);

    println!("\n💬 Testing regular chat...");
    match client.chat_completion(request.clone()).await {
        Ok(response) => {
            println!("✅ Regular chat succeeded!");
            println!(
                "   Response: '{}'",
                response.choices[0].message.content.as_text()
            );
            println!("   Tokens used: {}", response.usage.total_tokens);
        }
        Err(e) => {
            println!("❌ Regular chat failed: {}", e);
        }
    }

    // Test streaming chat
    println!("\n🌊 Testing streaming chat...");
    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            print!("   Streaming response: ");
            let mut content = String::new();

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        if let Some(choice) = chunk.choices.first() {
                            if let Some(text) = &choice.delta.content {
                                print!("{}", text);
                                content.push_str(text);
                                use std::io::{self, Write};
                                io::stdout().flush().unwrap();
                            }
                            if choice.finish_reason.is_some() {
                                println!();
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming error: {}", e);
                        break;
                    }
                }
            }

            if !content.is_empty() {
                println!("✅ Streaming chat succeeded!");
                println!("   Full content: '{}'", content.trim());
            }
        }
        Err(e) => {
            println!("❌ Streaming chat failed: {}", e);
        }
    }

    // Test model listing
    println!("\n📋 Testing model listing...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Model list retrieved successfully!");
            println!("   Available models: {:?}", models);
        }
        Err(e) => {
            println!("❌ Failed to retrieve model list: {}", e);
        }
    }

    println!("\n🎯 Config-driven Groq test results:");
    println!("   • Uses GenericAdapter instead of GroqAdapter");
    println!("   • Code shrinks from 250 lines to 10 lines of configuration");
    println!("   • Identical functionality: regular chat, streaming chat, model listing");
    println!("   • Demonstrates OpenAI compatibility and the effectiveness of the generic adapter");

    Ok(())
}
examples/test_streaming_clean.rs (line 34)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Clean streaming response test");
    println!("======================");

    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY is not set");
        return Ok(());
    }

    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Write a haiku about programming.".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(50)
    .with_temperature(0.8);

    println!(
        "\n📤 Sending streaming request: {}",
        request.messages[0].content.as_text()
    );

    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("\n🎭 AI reply:");
            print!("   ");

            let mut content_parts: Vec<String> = Vec::new();

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        if let Some(choice) = chunk.choices.first() {
                            if let Some(content) = &choice.delta.content {
                                // try to parse JSON-shaped content first
                                if content.contains("\"content\":") {
                                    if let Ok(json) =
                                        serde_json::from_str::<serde_json::Value>(content)
                                    {
                                        if let Some(text) = json["content"].as_str() {
                                            if !text.is_empty() {
                                                print!("{}", text);
                                                content_parts.push(text.to_string());
                                                use std::io::{self, Write};
                                                io::stdout().flush().unwrap();
                                            }
                                        }
                                    }
                                } else if !content.trim().is_empty() && !content.contains("data:") {
                                    // direct plain text
                                    print!("{}", content);
                                    content_parts.push(content.clone());
                                    use std::io::{self, Write};
                                    io::stdout().flush().unwrap();
                                }
                            }

                            if choice.finish_reason.is_some() {
                                println!("\n");
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming response error: {}", e);
                        break;
                    }
                }
            }

            let full_content = content_parts.join("");
            if !full_content.is_empty() {
                println!("✅ Streaming response complete!");
                println!("📝 Full content: \"{}\"", full_content.trim());
            } else {
                println!("⚠️  No usable content extracted; the SSE parsing may need improvement");
            }
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    Ok(())
}
examples/test_streaming_improved.rs (line 34)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Improved streaming response test");
    println!("======================");

    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY is not set");
        return Ok(());
    }

    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Write a creative story about a robot learning to paint. Keep it under 100 words."
                    .to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(150)
    .with_temperature(0.8);

    println!("\n📤 Sending streaming request...");

    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("🎨 AI is writing:");
            print!("   ");

            let mut content = String::new();
            let mut chunk_count = 0;

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        chunk_count += 1;

                        if let Some(choice) = chunk.choices.first() {
                            if let Some(text) = &choice.delta.content {
                                if !text.is_empty() {
                                    print!("{}", text);
                                    content.push_str(text);

                                    use std::io::{self, Write};
                                    io::stdout().flush().unwrap();
                                }
                            }

                            if choice.finish_reason.is_some() {
                                println!("\n");
                                println!("✅ Writing complete! (reason: {:?})", choice.finish_reason);
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming error: {}", e);
                        break;
                    }
                }
            }

            println!("\n📊 Statistics:");
            println!("   Chunks: {}", chunk_count);
            println!("   Characters: {}", content.len());
            println!("   Words: {}", content.split_whitespace().count());
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    // Test DeepSeek streaming response
    if std::env::var("DEEPSEEK_API_KEY").is_ok() {
        println!("\n{}", "=".repeat(50));
        println!("🧠 Testing DeepSeek streaming response");

        let deepseek_client = AiClient::new(Provider::DeepSeek)?;
        let request = ChatCompletionRequest::new(
            "deepseek-chat".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Explain quantum computing in one sentence.".to_string()),
                function_call: None,
            }],
        )
        .with_max_tokens(50);

        match deepseek_client.chat_completion_stream(request).await {
            Ok(mut stream) => {
                println!("🔬 DeepSeek reply:");
                print!("   ");

                while let Some(result) = stream.next().await {
                    match result {
                        Ok(chunk) => {
                            if let Some(choice) = chunk.choices.first() {
                                if let Some(text) = &choice.delta.content {
                                    print!("{}", text);
                                    use std::io::{self, Write};
                                    io::stdout().flush().unwrap();
                                }
                                if choice.finish_reason.is_some() {
                                    println!("\n✅ DeepSeek streaming response succeeded!");
                                    break;
                                }
                            }
                        }
                        Err(e) => {
                            println!("\n❌ DeepSeek streaming error: {}", e);
                            break;
                        }
                    }
                }
            }
            Err(e) => {
                println!("❌ DeepSeek streaming request failed: {}", e);
            }
        }
    }

    Ok(())
}

pub async fn chat_completion_stream_with_cancel(&self, request: ChatCompletionRequest) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>

Streaming chat completion request with cancel control

§Arguments
  • request - Chat completion request
§Returns
  • Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError> - Streaming response and a cancel handle on success
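
§Example
No repository example exercises the cancel path. A minimal sketch, assuming futures::StreamExt is in scope for .next() (as in the repository examples) and that CancelHandle exposes a cancel() method (an assumption; its API is not shown on this page):

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Stream a long answer.".to_string()),
            function_call: None,
        }],
    );

    let (mut stream, cancel) = client.chat_completion_stream_with_cancel(request).await?;

    let mut chunks_seen = 0;
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        if let Some(choice) = chunk.choices.first() {
            if let Some(text) = &choice.delta.content {
                print!("{}", text);
            }
        }
        chunks_seen += 1;
        // Stop the stream early after a few chunks to demonstrate cancellation.
        if chunks_seen >= 10 {
            cancel.cancel(); // hypothetical method name
            break;
        }
    }
    Ok(())
}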

pub async fn chat_completion_batch(&self, requests: Vec<ChatCompletionRequest>, concurrency_limit: Option<usize>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Batch chat completion requests

§Arguments
  • requests - List of chat completion requests
  • concurrency_limit - Maximum concurrent request count (None means unlimited)
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
     
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];
     
    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
     
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
     
    Ok(())
}
Examples found in repository
examples/batch_processing.rs (line 73)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Batch Processing Example");
    println!("==================================");

    // Create client
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Prepare multiple requests
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is the capital of France?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.7)
        .with_max_tokens(50),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is 2 + 2?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.1)
        .with_max_tokens(20),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Tell me a short joke.".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.9)
        .with_max_tokens(100),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text(
                    "What is the largest planet in our solar system?".to_string(),
                ),
                function_call: None,
            }],
        )
        .with_temperature(0.5)
        .with_max_tokens(60),
    ];

    println!(
        "📤 Prepared {} requests for batch processing",
        requests.len()
    );

    // Method 1: Batch processing with concurrency limit
    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
    let start_time = std::time::Instant::now();

    let responses = client
        .chat_completion_batch(requests.clone(), Some(2))
        .await?;

    let duration = start_time.elapsed();
    println!("⏱️  Batch processing completed in {:?}", duration);

    // Process responses
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    // Method 2: Smart batch processing (auto-select strategy)
    println!("\n🧠 Method 2: Smart batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch_smart(requests.clone()).await?;

    let duration = start_time.elapsed();
    println!("⏱️  Smart batch processing completed in {:?}", duration);

    // Count successes and failures
    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
    let failed: Vec<_> = responses
        .iter()
        .enumerate()
        .filter_map(|(i, r)| r.as_ref().err().map(|e| (i, e)))
        .collect();

    println!("📊 Results:");
    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
    println!(
        "   📈 Success rate: {:.1}%",
        (successful.len() as f64 / responses.len() as f64) * 100.0
    );

    // Method 3: Unlimited concurrent batch processing
    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch(requests, None).await?;

    let duration = start_time.elapsed();
    println!(
        "⏱️  Unlimited concurrent processing completed in {:?}",
        duration
    );

    // Display all responses
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    println!("\n🎉 Batch processing example completed successfully!");
    Ok(())
}

pub async fn chat_completion_batch_smart(&self, requests: Vec<ChatCompletionRequest>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Smart batch processing: automatically chooses a processing strategy based on the request count

§Arguments
  • requests - List of chat completion requests
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
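§Example
The call shape, taken from examples/batch_processing.rs (shown in full under chat_completion_batch above):

let responses = client.chat_completion_batch_smart(requests).await?;
println!("Processed {} requests", responses.len());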
Examples found in repository
examples/batch_processing.rs (line 99)
(Identical to the listing shown above under chat_completion_batch.)
Source

pub async fn chat_completion_batch( &self, requests: Vec<ChatCompletionRequest>, concurrency_limit: Option<usize>, ) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Batch chat completion requests

§Arguments
  • requests - List of chat completion requests
  • concurrency_limit - Maximum concurrent request count (None means unlimited)
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
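
Note the nested Result: the outer Result signals a batch-level failure, while each inner Result carries the outcome of a single request, so (as the example below shows) individual failures are reported in place rather than aborting the whole batch.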
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
     
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];
     
    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
     
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
     
    Ok(())
}

Source

pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>

Get list of supported models

§Returns
  • Result<Vec<String>, AiLibError> - Returns model list on success, error on failure
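
§Example

A minimal sketch (set GROQ_API_KEY for an actual call):

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
     
    // Fetch and print the provider's model list.
    let models = client.list_models().await?;
    for model in models {
        println!("- {}", model);
    }
     
    Ok(())
}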
Examples found in repository?
examples/list_models_smoke.rs (line 25)
4async fn main() {
5    let providers = vec![
6        Provider::Groq,
7        Provider::XaiGrok,
8        Provider::Ollama,
9        Provider::DeepSeek,
10        Provider::Anthropic,
11        Provider::AzureOpenAI,
12        Provider::HuggingFace,
13        Provider::TogetherAI,
14        Provider::Qwen,
15        Provider::OpenAI,
16        Provider::Gemini,
17        Provider::Mistral,
18        Provider::Cohere,
19        // Provider::Bedrock, // Removed: Bedrock deferred implementation/not in public API
20    ];
21
22    for p in providers {
23        println!("--- Provider: {:?} ---", p);
24        match AiClient::new(p) {
25            Ok(client) => match client.list_models().await {
26                Ok(models) => {
27                    println!("Found {} models (showing up to 5):", models.len());
28                    for m in models.into_iter().take(5) {
29                        println!(" - {}", m);
30                    }
31                }
32                Err(e) => println!("list_models error: {:?}", e),
33            },
34            Err(e) => println!("client init error: {:?}", e),
35        }
36    }
37}
examples/cohere_stream.rs (line 22)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Ensure COHERE_API_KEY env var is set if making real requests
8    let client = AiClient::new(Provider::Cohere)?;
9
10    let request = ChatCompletionRequest::new(
11        "command-xlarge-nightly".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Write a haiku about rust programming".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_temperature(0.7)
19    .with_max_tokens(60);
20
21    // List models
22    match client.list_models().await {
23        Ok(models) => println!("Models: {:?}", models),
24        Err(e) => eprintln!("Failed to list models: {}", e),
25    }
26
27    // Streaming
28    let mut stream = client.chat_completion_stream(request).await?;
29    while let Some(chunk) = stream.next().await {
30        match chunk {
31            Ok(c) => {
32                for choice in c.choices {
33                    if let Some(delta) = choice.delta.content {
34                        print!("{}", delta);
35                    }
36                }
37            }
38            Err(e) => {
39                eprintln!("Stream error: {}", e);
40                break;
41            }
42        }
43    }
44
45    Ok(())
46}
examples/basic_usage.rs (line 18)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🚀 AI-lib Basic Usage Example");
8    println!("================================");
9
10    // Switch model provider by changing Provider value
11    let client = AiClient::new(Provider::Groq)?;
12    println!(
13        "✅ Created client with provider: {:?}",
14        client.current_provider()
15    );
16
17    // Get list of supported models
18    let models = client.list_models().await?;
19    println!("📋 Available models: {:?}", models);
20
21    // Create chat request
22    let request = ChatCompletionRequest::new(
23        "llama3-8b-8192".to_string(),
24        vec![Message {
25            role: Role::User,
26            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
27            function_call: None,
28        }],
29    )
30    .with_temperature(0.7)
31    .with_max_tokens(100);
32
33    println!("📤 Sending request to model: {}", request.model);
34
35    // Send request
36    let response = client.chat_completion(request).await?;
37
38    println!("📥 Received response:");
39    println!("   ID: {}", response.id);
40    println!("   Model: {}", response.model);
41    println!(
42        "   Content: {}",
43        response.choices[0].message.content.as_text()
44    );
45    println!("   Usage: {} tokens", response.usage.total_tokens);
46
47    Ok(())
48}
examples/config_driven_example.rs (line 28)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🚀 Config-driven AI-lib Example");
8    println!("================================");
9
10    // Demonstrate the advantages of config-driven approach: easy provider switching
11    let providers = vec![
12        (Provider::Groq, "Groq"),
13        (Provider::OpenAI, "OpenAI"),
14        (Provider::DeepSeek, "DeepSeek"),
15    ];
16
17    for (provider, name) in providers {
18        println!("\n📡 Testing Provider: {}", name);
19
20        // Create client - just change the enum value
21        let client = AiClient::new(provider)?;
22        println!(
23            "✅ Client created successfully: {:?}",
24            client.current_provider()
25        );
26
27        // Get model list
28        match client.list_models().await {
29            Ok(models) => println!("📋 Available models: {:?}", models),
30            Err(e) => println!("⚠️  Failed to get model list: {}", e),
31        }
32
33        // Create test request
34        let request = ChatCompletionRequest::new(
35            "test-model".to_string(),
36            vec![Message {
37                role: Role::User,
38                content: Content::Text("Hello from ai-lib!".to_string()),
39                function_call: None,
40            }],
41        );
42
43        println!("📤 Request prepared, model: {}", request.model);
44        println!("   (Need to set corresponding API_KEY environment variable for actual calls)");
45    }
46
47    println!("\n🎯 Core advantages of config-driven approach:");
48    println!("   • Zero-code switching: just change Provider enum value");
49    println!("   • Unified interface: all providers use the same API");
50    println!("   • Rapid expansion: add new compatible providers with just configuration");
51
52    Ok(())
53}
examples/proxy_example.rs (line 44)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🌐 AI-lib Proxy Server Support Example");
8    println!("=====================================");
9
10    // Check proxy configuration
11    match std::env::var("AI_PROXY_URL") {
12        Ok(proxy_url) => {
13            println!("✅ Proxy configuration detected: {}", proxy_url);
14            println!("   All HTTP requests will go through this proxy server");
15        }
16        Err(_) => {
17            println!("ℹ️  AI_PROXY_URL environment variable not set");
18            println!("   To use proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
19        }
20    }
21
22    println!("\n🚀 Creating AI client...");
23    let client = AiClient::new(Provider::Groq)?;
24    println!(
25        "✅ Client created successfully, provider: {:?}",
26        client.current_provider()
27    );
28
29    // Create test request
30    let request = ChatCompletionRequest::new(
31        "llama3-8b-8192".to_string(),
32        vec![Message {
33            role: Role::User,
34            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
35            function_call: None,
36        }],
37    );
38
39    println!("\n📤 Preparing to send request...");
40    println!("   Model: {}", request.model);
41    println!("   Message: {}", request.messages[0].content.as_text());
42
43    // Get model list (this request will also go through proxy)
44    match client.list_models().await {
45        Ok(models) => {
46            println!("\n📋 Model list obtained through proxy:");
47            for model in models {
48                println!("   • {}", model);
49            }
50        }
51        Err(e) => {
52            println!("\n⚠️  Failed to get model list: {}", e);
53            println!("   This may be due to:");
54            println!("   • GROQ_API_KEY environment variable not set");
55            println!("   • Proxy server configuration error");
56            println!("   • Network connection issue");
57        }
58    }
59
60    println!("\n💡 Proxy Configuration Instructions:");
61    println!("   • Set environment variable: AI_PROXY_URL=http://your-proxy:port");
62    println!("   • Supports HTTP and HTTPS proxies");
63    println!("   • Supports authenticated proxies: http://user:pass@proxy:port");
64    println!("   • All AI providers will automatically use this proxy configuration");
65
66    Ok(())
67}
examples/openai_test.rs (line 26)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🤖 OpenAI Provider Test");
8    println!("======================");
9
10    // Check API key
11    match std::env::var("OPENAI_API_KEY") {
12        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
13        Err(_) => {
14            println!("❌ OPENAI_API_KEY environment variable not set");
15            println!("   Please set: export OPENAI_API_KEY=your_api_key");
16            return Ok(());
17        }
18    }
19
20    // Create OpenAI client
21    let client = AiClient::new(Provider::OpenAI)?;
22    println!("✅ OpenAI client created successfully");
23
24    // Get model list
25    println!("\n📋 Getting OpenAI model list...");
26    match client.list_models().await {
27        Ok(models) => {
28            println!("✅ Successfully got {} models", models.len());
29            println!("   Common models:");
30            for model in models.iter().filter(|m| m.contains("gpt")) {
31                println!("   • {}", model);
32            }
33        }
34        Err(e) => println!("❌ Failed to get model list: {}", e),
35    }
36
37    // Test chat completion
38    println!("\n💬 Testing chat completion...");
39    let request = ChatCompletionRequest::new(
40        "gpt-3.5-turbo".to_string(),
41        vec![Message {
42            role: Role::User,
43            content: Content::Text(
44                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
45                    .to_string(),
46            ),
47            function_call: None,
48        }],
49    )
50    .with_max_tokens(20)
51    .with_temperature(0.7);
52
53    match client.chat_completion(request).await {
54        Ok(response) => {
55            println!("✅ Chat completion successful!");
56            println!("   Model: {}", response.model);
57            println!(
58                "   Response: {}",
59                response.choices[0].message.content.as_text()
60            );
61            println!(
62                "   Token usage: {} (prompt: {}, completion: {})",
63                response.usage.total_tokens,
64                response.usage.prompt_tokens,
65                response.usage.completion_tokens
66            );
67        }
68        Err(e) => println!("❌ Chat completion failed: {}", e),
69    }
70
71    println!("\n🎯 OpenAI config-driven test completed!");
72    println!("   This demonstrates the power of config-driven architecture:");
73    println!("   • No need to write OpenAI-specific code");
74    println!("   • Just add configuration in ProviderConfigs");
75    println!("   • Automatically supports all OpenAI-compatible features");
76
77    Ok(())
78}
Source

pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>

Switch AI model provider

§Arguments
  • provider - New provider
§Returns
  • Result<(), AiLibError> - Returns () on success, error on failure
§Example
use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Switching to the same provider here simply demonstrates the call
client.switch_provider(Provider::Groq)?;
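
Switching to a different provider works the same way; a sketch (actual calls through the new provider need its own API key, e.g. OPENAI_API_KEY):

use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Re-point the client at OpenAI; subsequent requests go through the new provider.
client.switch_provider(Provider::OpenAI)?;
println!("Now using: {:?}", client.current_provider());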
Source

pub fn current_provider(&self) -> Provider

Get current provider
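
§Example

A minimal usage sketch:

use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
// Debug-print the active provider enum value.
println!("Current provider: {:?}", client.current_provider());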

Examples found in repository?
examples/basic_usage.rs (line 14)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🚀 AI-lib Basic Usage Example");
8    println!("================================");
9
10    // Switch model provider by changing Provider value
11    let client = AiClient::new(Provider::Groq)?;
12    println!(
13        "✅ Created client with provider: {:?}",
14        client.current_provider()
15    );
16
17    // Get list of supported models
18    let models = client.list_models().await?;
19    println!("📋 Available models: {:?}", models);
20
21    // Create chat request
22    let request = ChatCompletionRequest::new(
23        "llama3-8b-8192".to_string(),
24        vec![Message {
25            role: Role::User,
26            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
27            function_call: None,
28        }],
29    )
30    .with_temperature(0.7)
31    .with_max_tokens(100);
32
33    println!("📤 Sending request to model: {}", request.model);
34
35    // Send request
36    let response = client.chat_completion(request).await?;
37
38    println!("📥 Received response:");
39    println!("   ID: {}", response.id);
40    println!("   Model: {}", response.model);
41    println!(
42        "   Content: {}",
43        response.choices[0].message.content.as_text()
44    );
45    println!("   Usage: {} tokens", response.usage.total_tokens);
46
47    Ok(())
48}
examples/config_driven_example.rs (line 24)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🚀 Config-driven AI-lib Example");
8    println!("================================");
9
10    // Demonstrate the advantages of config-driven approach: easy provider switching
11    let providers = vec![
12        (Provider::Groq, "Groq"),
13        (Provider::OpenAI, "OpenAI"),
14        (Provider::DeepSeek, "DeepSeek"),
15    ];
16
17    for (provider, name) in providers {
18        println!("\n📡 Testing Provider: {}", name);
19
20        // Create client - just change the enum value
21        let client = AiClient::new(provider)?;
22        println!(
23            "✅ Client created successfully: {:?}",
24            client.current_provider()
25        );
26
27        // Get model list
28        match client.list_models().await {
29            Ok(models) => println!("📋 Available models: {:?}", models),
30            Err(e) => println!("⚠️  Failed to get model list: {}", e),
31        }
32
33        // Create test request
34        let request = ChatCompletionRequest::new(
35            "test-model".to_string(),
36            vec![Message {
37                role: Role::User,
38                content: Content::Text("Hello from ai-lib!".to_string()),
39                function_call: None,
40            }],
41        );
42
43        println!("📤 Request prepared, model: {}", request.model);
44        println!("   (Need to set corresponding API_KEY environment variable for actual calls)");
45    }
46
47    println!("\n🎯 Core advantages of config-driven approach:");
48    println!("   • Zero-code switching: just change Provider enum value");
49    println!("   • Unified interface: all providers use the same API");
50    println!("   • Rapid expansion: add new compatible providers with just configuration");
51
52    Ok(())
53}
examples/proxy_example.rs (line 26)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🌐 AI-lib Proxy Server Support Example");
8    println!("=====================================");
9
10    // Check proxy configuration
11    match std::env::var("AI_PROXY_URL") {
12        Ok(proxy_url) => {
13            println!("✅ Proxy configuration detected: {}", proxy_url);
14            println!("   All HTTP requests will go through this proxy server");
15        }
16        Err(_) => {
17            println!("ℹ️  AI_PROXY_URL environment variable not set");
18            println!("   To use proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
19        }
20    }
21
22    println!("\n🚀 Creating AI client...");
23    let client = AiClient::new(Provider::Groq)?;
24    println!(
25        "✅ Client created successfully, provider: {:?}",
26        client.current_provider()
27    );
28
29    // Create test request
30    let request = ChatCompletionRequest::new(
31        "llama3-8b-8192".to_string(),
32        vec![Message {
33            role: Role::User,
34            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
35            function_call: None,
36        }],
37    );
38
39    println!("\n📤 Preparing to send request...");
40    println!("   Model: {}", request.model);
41    println!("   Message: {}", request.messages[0].content.as_text());
42
43    // Get model list (this request will also go through proxy)
44    match client.list_models().await {
45        Ok(models) => {
46            println!("\n📋 Model list obtained through proxy:");
47            for model in models {
48                println!("   • {}", model);
49            }
50        }
51        Err(e) => {
52            println!("\n⚠️  Failed to get model list: {}", e);
53            println!("   This may be due to:");
54            println!("   • GROQ_API_KEY environment variable not set");
55            println!("   • Proxy server configuration error");
56            println!("   • Network connection issue");
57        }
58    }
59
60    println!("\n💡 Proxy Configuration Instructions:");
61    println!("   • Set environment variable: AI_PROXY_URL=http://your-proxy:port");
62    println!("   • Supports HTTP and HTTPS proxies");
63    println!("   • Supports authenticated proxies: http://user:pass@proxy:port");
64    println!("   • All AI providers will automatically use this proxy configuration");
65
66    Ok(())
67}
examples/batch_processing.rs (line 14)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🚀 AI-lib Batch Processing Example");
8    println!("==================================");
9
10    // Create client
11    let client = AiClient::new(Provider::Groq)?;
12    println!(
13        "✅ Created client with provider: {:?}",
14        client.current_provider()
15    );
16
17    // Prepare multiple requests
18    let requests = vec![
19        ChatCompletionRequest::new(
20            "llama3-8b-8192".to_string(),
21            vec![Message {
22                role: Role::User,
23                content: Content::Text("What is the capital of France?".to_string()),
24                function_call: None,
25            }],
26        )
27        .with_temperature(0.7)
28        .with_max_tokens(50),
29        ChatCompletionRequest::new(
30            "llama3-8b-8192".to_string(),
31            vec![Message {
32                role: Role::User,
33                content: Content::Text("What is 2 + 2?".to_string()),
34                function_call: None,
35            }],
36        )
37        .with_temperature(0.1)
38        .with_max_tokens(20),
39        ChatCompletionRequest::new(
40            "llama3-8b-8192".to_string(),
41            vec![Message {
42                role: Role::User,
43                content: Content::Text("Tell me a short joke.".to_string()),
44                function_call: None,
45            }],
46        )
47        .with_temperature(0.9)
48        .with_max_tokens(100),
49        ChatCompletionRequest::new(
50            "llama3-8b-8192".to_string(),
51            vec![Message {
52                role: Role::User,
53                content: Content::Text(
54                    "What is the largest planet in our solar system?".to_string(),
55                ),
56                function_call: None,
57            }],
58        )
59        .with_temperature(0.5)
60        .with_max_tokens(60),
61    ];
62
63    println!(
64        "📤 Prepared {} requests for batch processing",
65        requests.len()
66    );
67
68    // Method 1: Batch processing with concurrency limit
69    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
70    let start_time = std::time::Instant::now();
71
72    let responses = client
73        .chat_completion_batch(requests.clone(), Some(2))
74        .await?;
75
76    let duration = start_time.elapsed();
77    println!("⏱️  Batch processing completed in {:?}", duration);
78
79    // Process responses
80    for (i, response) in responses.iter().enumerate() {
81        match response {
82            Ok(resp) => {
83                println!(
84                    "✅ Request {}: {}",
85                    i + 1,
86                    resp.choices[0].message.content.as_text()
87                );
88            }
89            Err(e) => {
90                println!("❌ Request {} failed: {}", i + 1, e);
91            }
92        }
93    }
94
95    // Method 2: Smart batch processing (auto-select strategy)
96    println!("\n🧠 Method 2: Smart batch processing");
97    let start_time = std::time::Instant::now();
98
99    let responses = client.chat_completion_batch_smart(requests.clone()).await?;
100
101    let duration = start_time.elapsed();
102    println!("⏱️  Smart batch processing completed in {:?}", duration);
103
104    // Count successes and failures
105    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
106    let failed: Vec<_> = responses
107        .iter()
108        .enumerate()
109        .filter_map(|(i, r)| r.as_ref().err().map(|e| (i, e)))
110        .collect();
111
112    println!("📊 Results:");
113    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
114    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
115    println!(
116        "   📈 Success rate: {:.1}%",
117        (successful.len() as f64 / responses.len() as f64) * 100.0
118    );
119
120    // Method 3: Unlimited concurrent batch processing
121    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
122    let start_time = std::time::Instant::now();
123
124    let responses = client.chat_completion_batch(requests, None).await?;
125
126    let duration = start_time.elapsed();
127    println!(
128        "⏱️  Unlimited concurrent processing completed in {:?}",
129        duration
130    );
131
132    // Display all responses
133    for (i, response) in responses.iter().enumerate() {
134        match response {
135            Ok(resp) => {
136                println!(
137                    "✅ Request {}: {}",
138                    i + 1,
139                    resp.choices[0].message.content.as_text()
140                );
141            }
142            Err(e) => {
143                println!("❌ Request {} failed: {}", i + 1, e);
144            }
145        }
146    }
147
148    println!("\n🎉 Batch processing example completed successfully!");
149    Ok(())
150}
examples/builder_pattern.rs (line 18)
7async fn main() -> Result<(), Box<dyn std::error::Error>> {
8    println!("🚀 AI Client Builder Pattern Example");
9    println!("===================================");
10
11    // Example 1: Simplest usage - automatic environment variable detection
12    println!("\n📋 Example 1: Simplest usage");
13    println!("   Automatically detect GROQ_BASE_URL and AI_PROXY_URL from environment variables");
14
15    let client = AiClientBuilder::new(Provider::Groq).build()?;
16    println!(
17        "✅ Client created successfully, provider: {:?}",
18        client.current_provider()
19    );
20
21    // Example 2: Custom base_url
22    println!("\n📋 Example 2: Custom base_url");
23    println!("   Use custom Groq server address");
24
25    let client = AiClientBuilder::new(Provider::Groq)
26        .with_base_url("https://custom.groq.com")
27        .build()?;
28    println!("✅ Client created successfully with custom base_url");
29
30    // Example 3: Custom base_url and proxy
31    println!("\n📋 Example 3: Custom base_url and proxy");
32    println!("   Use custom server and proxy");
33
34    let client = AiClientBuilder::new(Provider::Groq)
35        .with_base_url("https://custom.groq.com")
36        .with_proxy(Some("http://proxy.example.com:8080"))
37        .build()?;
38    println!("✅ Client created successfully with custom base_url and proxy");
39
40    // Example 4: Full custom configuration
41    println!("\n📋 Example 4: Full custom configuration");
42    println!("   Custom timeout, connection pool and other advanced configurations");
43
44    let client = AiClientBuilder::new(Provider::Groq)
45        .with_base_url("https://custom.groq.com")
46        .with_proxy(Some("http://proxy.example.com:8080"))
47        .with_timeout(Duration::from_secs(60))
48        .with_pool_config(32, Duration::from_secs(90))
49        .build()?;
50    println!("✅ Client created successfully with full custom configuration");
51
52    // Example 5: Use convenient builder method
53    println!("\n📋 Example 5: Use convenient builder method");
54    println!("   Create builder through AiClient::builder()");
55
56    let client = AiClient::builder(Provider::Groq)
57        .with_base_url("https://custom.groq.com")
58        .with_proxy(Some("http://proxy.example.com:8080"))
59        .build()?;
60    println!("✅ Client created successfully using convenient builder method");
61
62    // Example 6: Environment variable priority demonstration
63    println!("\n📋 Example 6: Environment variable priority demonstration");
64    println!("   Set environment variables, then use builder");
65
66    // Set environment variables
67    std::env::set_var("GROQ_BASE_URL", "https://env.groq.com");
68    std::env::set_var("AI_PROXY_URL", "http://env.proxy.com:8080");
69
70    // Don't set any custom configuration, should use environment variables
71    let client = AiClientBuilder::new(Provider::Groq).build()?;
72    println!("✅ Client created successfully using environment variable configuration");
73
74    // Explicit settings override environment variables
75    let client = AiClientBuilder::new(Provider::Groq)
76        .with_base_url("https://explicit.groq.com")
77        .with_proxy(Some("http://explicit.proxy.com:8080"))
78        .build()?;
79    println!(
80        "✅ Client created successfully, explicit configuration overrides environment variables"
81    );
82
83    // Example 7: Different provider configurations
84    println!("\n📋 Example 7: Different provider configurations");
85
86    // Groq
87    let groq_client = AiClientBuilder::new(Provider::Groq)
88        .with_base_url("https://custom.groq.com")
89        .build()?;
90    println!("✅ Groq client created successfully");
91
92    // DeepSeek
93    let deepseek_client = AiClientBuilder::new(Provider::DeepSeek)
94        .with_base_url("https://custom.deepseek.com")
95        .with_proxy(Some("http://proxy.example.com:8080"))
96        .build()?;
97    println!("✅ DeepSeek client created successfully");
98
99    // Ollama (local deployment)
100    let ollama_client = AiClientBuilder::new(Provider::Ollama)
101        .with_base_url("http://localhost:11434")
102        .build()?;
103    println!("✅ Ollama client created successfully");
104
105    // Example 8: Error handling
106    println!("\n📋 Example 8: Error handling");
107    println!("   Try to set custom configuration for unsupported provider");
108
109    match AiClientBuilder::new(Provider::OpenAI)
110        .with_base_url("https://custom.openai.com")
111        .build()
112    {
113        Ok(_) => println!("❌ This should not succeed"),
114        Err(e) => println!("✅ Correctly caught error: {}", e),
115    }
116
117    println!("\n🎉 All examples completed!");
118    println!("\n💡 Advantages of builder pattern:");
119    println!("   1. Automatic environment variable detection, reducing configuration code");
120    println!("   2. Support for progressive custom configuration");
121    println!("   3. Method chaining for cleaner code");
122    println!("   4. Backward compatible, existing code requires no changes");
123    println!("   5. Support for advanced configuration (timeout, connection pool, etc.)");
124
125    Ok(())
126}

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,