AiClient

Struct AiClient 

pub struct AiClient { /* private fields */ }

Unified AI client

Usage example:

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // To switch model providers, just change the Provider value
    let client = AiClient::new(Provider::Groq)?;
     
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: "Hello".to_string(),
        }],
    );
     
    // Note: the GROQ_API_KEY environment variable must be set to actually call the API
    // Optional: set the AI_PROXY_URL environment variable to use a proxy server
    // let response = client.chat_completion(request).await?;
     
    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
     
    Ok(())
}

§Proxy server configuration

Configure a proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:

  • HTTP proxy: http://proxy.example.com:8080
  • HTTPS proxy: https://proxy.example.com:8080
  • With authentication: http://user:pass@proxy.example.com:8080
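For tests and programmatic setups, the variable can also be set from Rust before the client is built, as the repository's proxy examples do (a minimal sketch; it assumes AI_PROXY_URL is read at client construction time):

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumption: the proxy URL is read when AiClient::new runs, so set it first.
    std::env::set_var("AI_PROXY_URL", "http://proxy.example.com:8080");
    let client = AiClient::new(Provider::Groq)?;
    println!("Provider: {:?}", client.current_provider());
    Ok(())
}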

Implementations§

impl AiClient

pub fn new(provider: Provider) -> Result<Self, AiLibError>

Creates a new AI client

§Arguments
  • provider - The AI model provider to use
§Returns
  • Result<Self, AiLibError> - The client instance on success, or an error on failure
§Example
use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
Examples found in repository
examples/list_models_smoke.rs (line 24)
 4  async fn main() {
 5      let providers = vec![
 6          Provider::Groq,
 7          Provider::XaiGrok,
 8          Provider::Ollama,
 9          Provider::DeepSeek,
10          Provider::Anthropic,
11          Provider::AzureOpenAI,
12          Provider::HuggingFace,
13          Provider::TogetherAI,
14          Provider::Qwen,
15          Provider::OpenAI,
16          Provider::Gemini,
17          Provider::Mistral,
18          Provider::Cohere,
19          // Provider::Bedrock, // removed: Bedrock implementation deferred / not in the public API
20      ];
21
22      for p in providers {
23          println!("--- Provider: {:?} ---", p);
24          match AiClient::new(p) {
25              Ok(client) => match client.list_models().await {
26                  Ok(models) => {
27                      println!("Found {} models (showing up to 5):", models.len());
28                      for m in models.into_iter().take(5) {
29                          println!(" - {}", m);
30                      }
31                  }
32                  Err(e) => println!("list_models error: {:?}", e),
33              },
34              Err(e) => println!("client init error: {:?}", e),
35          }
36      }
37  }
examples/cohere_stream.rs (line 7)
 5  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6      // Ensure COHERE_API_KEY env var is set if making real requests
 7      let client = AiClient::new(Provider::Cohere)?;
 8
 9      let request = ChatCompletionRequest::new(
10          "command-xlarge-nightly".to_string(),
11          vec![Message { role: Role::User, content: "Write a haiku about rust programming".to_string() }]
12      ).with_temperature(0.7).with_max_tokens(60);
13
14      // List models
15      match client.list_models().await {
16          Ok(models) => println!("Models: {:?}", models),
17          Err(e) => eprintln!("Failed to list models: {}", e),
18      }
19
20      // Streaming
21      let mut stream = client.chat_completion_stream(request).await?;
22      while let Some(chunk) = stream.next().await {
23          match chunk {
24              Ok(c) => {
25                  for choice in c.choices {
26                      if let Some(delta) = choice.delta.content {
27                          print!("{}", delta);
28                      }
29                  }
30              }
31              Err(e) => {
32                  eprintln!("Stream error: {}", e);
33                  break;
34              }
35          }
36      }
37
38      Ok(())
39  }
examples/basic_usage.rs (line 9)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🚀 AI-lib Basic Usage Example");
 6      println!("================================");
 7
 8      // To switch model providers, just change the Provider value
 9      let client = AiClient::new(Provider::Groq)?;
10      println!("✅ Created client with provider: {:?}", client.current_provider());
11
12      // Fetch the list of supported models
13      let models = client.list_models().await?;
14      println!("📋 Available models: {:?}", models);
15
16      // Build a chat request
17      let request = ChatCompletionRequest::new(
18          "llama3-8b-8192".to_string(),
19          vec![Message {
20              role: Role::User,
21              content: "Hello! Please introduce yourself briefly.".to_string(),
22          }],
23      ).with_temperature(0.7)
24       .with_max_tokens(100);
25
26      println!("📤 Sending request to model: {}", request.model);
27
28      // Send the request
29      let response = client.chat_completion(request).await?;
30
31      println!("📥 Received response:");
32      println!("   ID: {}", response.id);
33      println!("   Model: {}", response.model);
34      println!("   Content: {}", response.choices[0].message.content);
35      println!("   Usage: {} tokens", response.usage.total_tokens);
36
37      Ok(())
38  }
examples/debug_request.rs (line 25)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🔍 Debugging request format");
 6      println!("===============");
 7
 8      // Build a test request
 9      let request = ChatCompletionRequest::new(
10          "gpt-3.5-turbo".to_string(),
11          vec![Message {
12              role: Role::User,
13              content: "Hello!".to_string(),
14          }],
15      ).with_max_tokens(10);
16
17      println!("📤 Raw request:");
18      println!("   Model: {}", request.model);
19      println!("   Message count: {}", request.messages.len());
20      println!("   Message[0]: {:?} - {}", request.messages[0].role, request.messages[0].content);
21      println!("   max_tokens: {:?}", request.max_tokens);
22
23      // Test OpenAI
24      println!("\n🤖 Testing OpenAI...");
25      match AiClient::new(Provider::OpenAI) {
26          Ok(client) => {
27              match client.chat_completion(request.clone()).await {
28                  Ok(response) => {
29                      println!("✅ Success!");
30                      println!("   Response: {}", response.choices[0].message.content);
31                  }
32                  Err(e) => {
33                      println!("❌ Failed: {}", e);
34
35                      // A 400 error indicates a problem with the request format
36                      if e.to_string().contains("400") {
37                          println!("   This usually means the request format is incorrect");
38                          println!("   Let's check whether the request contains the required fields...");
39                      }
40                  }
41              }
42          }
43          Err(e) => println!("❌ Client creation failed: {}", e),
44      }
45
46      Ok(())
47  }
examples/config_driven_example.rs (line 19)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🚀 Config-driven AI-lib example");
 6      println!("========================");
 7
 8      // Demonstrate the config-driven advantage: switching providers is easy
 9      let providers = vec![
10          (Provider::Groq, "Groq"),
11          (Provider::OpenAI, "OpenAI"),
12          (Provider::DeepSeek, "DeepSeek"),
13      ];
14
15      for (provider, name) in providers {
16          println!("\n📡 Testing provider: {}", name);
17
18          // Create a client - just change the enum value
19          let client = AiClient::new(provider)?;
20          println!("✅ Client created successfully: {:?}", client.current_provider());
21
22          // Fetch the model list
23          match client.list_models().await {
24              Ok(models) => println!("📋 Available models: {:?}", models),
25              Err(e) => println!("⚠️  Failed to fetch model list: {}", e),
26          }
27
28          // Build a test request
29          let request = ChatCompletionRequest::new(
30              "test-model".to_string(),
31              vec![Message {
32                  role: Role::User,
33                  content: "Hello from ai-lib!".to_string(),
34              }],
35          );
36
37          println!("📤 Request prepared, model: {}", request.model);
38          println!("   (The matching API key environment variable must be set to actually call the API)");
39      }
40
41      println!("\n🎯 Core advantages of the config-driven design:");
42      println!("   • Zero-code switching: just change the Provider enum value");
43      println!("   • Unified interface: all providers share the same API");
44      println!("   • Fast extension: adding a compatible provider only requires new configuration");
45
46      Ok(())
47  }
examples/test_without_proxy.rs (line 15)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🌐 Testing connections without a proxy");
 6      println!("======================");
 7
 8      // Temporarily remove the proxy setting
 9      std::env::remove_var("AI_PROXY_URL");
10
11      println!("ℹ️  Temporarily removed the AI_PROXY_URL setting");
12
13      // Test DeepSeek (directly reachable from mainland China)
14      println!("\n🔍 Testing DeepSeek (direct connection):");
15      match AiClient::new(Provider::DeepSeek) {
16          Ok(client) => {
17              let request = ChatCompletionRequest::new(
18                  "deepseek-chat".to_string(),
19                  vec![Message {
20                      role: Role::User,
21                      content: "Hello! Please respond with just 'Hi' to test.".to_string(),
22                  }],
23              ).with_max_tokens(5);
24
25              match client.chat_completion(request).await {
26                  Ok(response) => {
27                      println!("✅ DeepSeek direct connection succeeded!");
28                      println!("   Response: {}", response.choices[0].message.content);
29                      println!("   Token usage: {}", response.usage.total_tokens);
30                  }
31                  Err(e) => {
32                      println!("❌ DeepSeek request failed: {}", e);
33                      if e.to_string().contains("402") {
34                          println!("   (Insufficient-balance error, which means the connection itself works)");
35                      }
36                  }
37              }
38          }
39          Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
40      }
41
42      println!("\n💡 Conclusions:");
43      println!("   • DeepSeek can be reached directly, no proxy needed");
44      println!("   • OpenAI and Groq must be accessed through a proxy");
45      println!("   • A proxy may rewrite request bodies, causing format errors");
46      println!("   • Check the proxy server configuration if requests fail");
47
48      Ok(())
49  }

pub async fn chat_completion(
    &self,
    request: ChatCompletionRequest,
) -> Result<ChatCompletionResponse, AiLibError>

Sends a chat completion request

§Arguments
  • request - The chat completion request
§Returns
  • Result<ChatCompletionResponse, AiLibError> - The response on success, or an error on failure
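A minimal non-streaming call looks like this (a sketch based on examples/basic_usage.rs; it assumes the provider's API key, e.g. GROQ_API_KEY, is set in the environment):

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message { role: Role::User, content: "Hello".to_string() }],
    );
    // Await the full response, then read the first choice.
    let response = client.chat_completion(request).await?;
    println!("{}", response.choices[0].message.content);
    Ok(())
}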
Examples found in repository
examples/basic_usage.rs (line 29)
examples/debug_request.rs (line 27)
examples/test_without_proxy.rs (line 25)
examples/openai_test.rs (line 46)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🤖 OpenAI Provider Test");
 6      println!("=====================");
 7
 8      // Check the API key
 9      match std::env::var("OPENAI_API_KEY") {
10          Ok(_) => println!("✅ OPENAI_API_KEY detected"),
11          Err(_) => {
12              println!("❌ OPENAI_API_KEY environment variable not set");
13              println!("   Please set it: export OPENAI_API_KEY=your_api_key");
14              return Ok(());
15          }
16      }
17
18      // Create the OpenAI client
19      let client = AiClient::new(Provider::OpenAI)?;
20      println!("✅ OpenAI client created successfully");
21
22      // Fetch the model list
23      println!("\n📋 Fetching OpenAI model list...");
24      match client.list_models().await {
25          Ok(models) => {
26              println!("✅ Fetched {} models", models.len());
27              println!("   Common models:");
28              for model in models.iter().filter(|m| m.contains("gpt")) {
29                  println!("   • {}", model);
30              }
31          }
32          Err(e) => println!("❌ Failed to fetch model list: {}", e),
33      }
34
35      // Test chat completion
36      println!("\n💬 Testing chat completion...");
37      let request = ChatCompletionRequest::new(
38          "gpt-3.5-turbo".to_string(),
39          vec![Message {
40              role: Role::User,
41              content: "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection.".to_string(),
42          }],
43      ).with_max_tokens(20)
44       .with_temperature(0.7);
45
46      match client.chat_completion(request).await {
47          Ok(response) => {
48              println!("✅ Chat completion succeeded!");
49              println!("   Model: {}", response.model);
50              println!("   Response: {}", response.choices[0].message.content);
51              println!("   Token usage: {} (prompt: {}, completion: {})",
52                  response.usage.total_tokens,
53                  response.usage.prompt_tokens,
54                  response.usage.completion_tokens
55              );
56          }
57          Err(e) => println!("❌ Chat completion failed: {}", e),
58      }
59
60      println!("\n🎯 OpenAI config-driven test complete!");
61      println!("   This demonstrates the power of the config-driven architecture:");
62      println!("   • No OpenAI-specific code required");
63      println!("   • Only a new entry in ProviderConfigs is needed");
64      println!("   • All OpenAI-compatible features are supported automatically");
65
66      Ok(())
67  }
examples/test_all_providers.rs (line 48)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🧪 Testing all AI providers");
 6      println!("==================");
 7
 8      // Check proxy configuration
 9      if let Ok(proxy_url) = std::env::var("AI_PROXY_URL") {
10          println!("🌐 Using proxy: {}", proxy_url);
11      }
12
13      let providers = vec![
14          (Provider::Groq, "Groq", "llama3-8b-8192"),
15          (Provider::OpenAI, "OpenAI", "gpt-3.5-turbo"),
16          (Provider::DeepSeek, "DeepSeek", "deepseek-chat"),
17      ];
18
19      for (provider, name, model) in providers {
20          println!("\n🔍 Testing provider: {}", name);
21          println!("{}", "─".repeat(30));
22
23          match AiClient::new(provider) {
24              Ok(client) => {
25                  println!("✅ Client created successfully");
26
27                  // Test the model list
28                  match client.list_models().await {
29                      Ok(models) => {
30                          println!("📋 Available model count: {}", models.len());
31                          if !models.is_empty() {
32                              println!("   First 3 models: {:?}", &models[..models.len().min(3)]);
33                          }
34                      }
35                      Err(e) => println!("⚠️  Failed to fetch model list: {}", e),
36                  }
37
38                  // Test chat completion
39                  let request = ChatCompletionRequest::new(
40                      model.to_string(),
41                      vec![Message {
42                          role: Role::User,
43                          content: "Hello! Please respond with just 'Hi' to test the API.".to_string(),
44                      }],
45                  ).with_max_tokens(10);
46
47                  println!("📤 Sending test request to model: {}", model);
48                  match client.chat_completion(request).await {
49                      Ok(response) => {
50                          println!("✅ Request succeeded!");
51                          println!("   Response ID: {}", response.id);
52                          println!("   Content: {}", response.choices[0].message.content);
53                          println!("   Tokens used: {}", response.usage.total_tokens);
54                      }
55                      Err(e) => println!("❌ Request failed: {}", e),
56                  }
57              }
58              Err(e) => {
59                  println!("❌ Client creation failed: {}", e);
60              }
61          }
62      }
63
64      println!("\n💡 Tips:");
65      println!("   • Make sure the matching API key environment variables are set");
66      println!("   • GROQ_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY");
67      println!("   • Optionally set AI_PROXY_URL to use a proxy server");
68
69      Ok(())
70  }
examples/test_https_proxy.rs (line 29)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🔒 HTTPS proxy test");
 6      println!("================");
 7
 8      // Temporarily set an HTTPS proxy
 9      std::env::set_var("AI_PROXY_URL", "https://192.168.2.13:8889");
10      println!("🌐 Using HTTPS proxy: https://192.168.2.13:8889");
11
12      if std::env::var("OPENAI_API_KEY").is_err() {
13          println!("❌ OPENAI_API_KEY not set");
14          return Ok(());
15      }
16
17      // Test OpenAI
18      println!("\n🤖 Testing OpenAI (HTTPS proxy):");
19      let client = AiClient::new(Provider::OpenAI)?;
20
21      let request = ChatCompletionRequest::new(
22          "gpt-3.5-turbo".to_string(),
23          vec![Message {
24              role: Role::User,
25              content: "Say 'HTTPS proxy works!' exactly.".to_string(),
26          }],
27      ).with_max_tokens(10);
28
29      match client.chat_completion(request).await {
30          Ok(response) => {
31              println!("✅ HTTPS proxy test succeeded!");
32              println!("   Response: '{}'", response.choices[0].message.content);
33              println!("   Token usage: {}", response.usage.total_tokens);
34          }
35          Err(e) => {
36              println!("❌ HTTPS proxy test failed: {}", e);
37
38              // Analyze the error
39              let error_str = e.to_string();
40              if error_str.contains("you must provide a model parameter") {
41                  println!("   → Likely a proxy server issue, not an HTTPS protocol issue");
42              } else if error_str.contains("certificate") || error_str.contains("tls") {
43                  println!("   → An HTTPS certificate or TLS issue");
44              } else if error_str.contains("connection") {
45                  println!("   → A connection issue, possibly the proxy server configuration");
46              }
47          }
48      }
49
50      // Test Groq (for comparison)
51      println!("\n🚀 Testing Groq (HTTPS proxy comparison):");
52      if std::env::var("GROQ_API_KEY").is_ok() {
53          let groq_client = AiClient::new(Provider::Groq)?;
54          let groq_request = ChatCompletionRequest::new(
55              "llama3-8b-8192".to_string(),
56              vec![Message {
57                  role: Role::User,
58                  content: "Say 'Groq HTTPS proxy works!' exactly.".to_string(),
59              }],
60          ).with_max_tokens(10);
61
62          match groq_client.chat_completion(groq_request).await {
63              Ok(response) => {
64                  println!("✅ Groq HTTPS proxy succeeded!");
65                  println!("   Response: '{}'", response.choices[0].message.content);
66              }
67              Err(e) => {
68                  println!("❌ Groq HTTPS proxy failed: {}", e);
69              }
70          }
71      }
72
73      println!("\n💡 HTTPS proxy test conclusions:");
74      println!("   • If Groq succeeds but OpenAI fails, it is an OpenAI-specific issue");
75      println!("   • If both fail, it is likely an HTTPS proxy configuration issue");
76      println!("   • If both succeed, HTTPS proxies are fully supported");
77
78      Ok(())
79  }

pub async fn chat_completion_stream(
    &self,
    request: ChatCompletionRequest,
) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>

Streaming chat completion request

§Arguments
  • request - The chat completion request
§Returns
  • Result<impl Stream<Item = Result<ChatCompletionChunk, AiLibError>>, AiLibError> - The streaming response on success, or an error on failure
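The returned stream yields incremental ChatCompletionChunk values and is typically consumed with StreamExt::next, as in the repository's streaming examples (a sketch; it assumes the futures crate provides StreamExt and that GROQ_API_KEY is set):

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message { role: Role::User, content: "Write a haiku".to_string() }],
    );
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        // Each chunk carries a delta; print content as it arrives.
        for choice in chunk?.choices {
            if let Some(delta) = choice.delta.content {
                print!("{}", delta);
            }
        }
    }
    Ok(())
}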
Examples found in repository
examples/cohere_stream.rs (line 21)
examples/test_streaming.rs (line 34)
 5  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6      println!("🌊 Streaming response test");
 7      println!("================");
 8
 9      // Check the Groq API key
10      if std::env::var("GROQ_API_KEY").is_err() {
11          println!("❌ GROQ_API_KEY not set");
12          return Ok(());
13      }
14
15      // Create the Groq client
16      let client = AiClient::new(Provider::Groq)?;
17      println!("✅ Groq client created successfully");
18
19      // Build a streaming request
20      let request = ChatCompletionRequest::new(
21          "llama3-8b-8192".to_string(),
22          vec![Message {
23              role: Role::User,
24              content: "Please write a short poem about AI in exactly 4 lines.".to_string(),
25          }],
26      ).with_max_tokens(100)
27       .with_temperature(0.7);
28
29      println!("\n📤 Sending streaming request...");
30      println!("   Model: {}", request.model);
31      println!("   Message: {}", request.messages[0].content);
32
33      // Get the streaming response
34      match client.chat_completion_stream(request).await {
35          Ok(mut stream) => {
36              println!("\n🌊 Receiving streaming response:");
37              println!("{}", "─".repeat(50));
38
39              let mut full_content = String::new();
40              let mut chunk_count = 0;
41
42              while let Some(result) = stream.next().await {
43                  match result {
44                      Ok(chunk) => {
45                          chunk_count += 1;
46
47                          if let Some(choice) = chunk.choices.first() {
48                              if let Some(content) = &choice.delta.content {
49                                  print!("{}", content);
50                                  full_content.push_str(content);
51
52                                  // Flush stdout
53                                  use std::io::{self, Write};
54                                  io::stdout().flush().unwrap();
55                              }
56
57                              // Check whether the stream is finished
58                              if choice.finish_reason.is_some() {
59                                  println!("\n{}", "─".repeat(50));
60                                  println!("✅ Streaming response complete!");
61                                  println!("   Finish reason: {:?}", choice.finish_reason);
62                                  break;
63                              }
64                          }
65                      }
66                      Err(e) => {
67                          println!("\n❌ Streaming response error: {}", e);
68                          break;
69                      }
70                  }
71              }
72
73              println!("\n📊 Streaming response statistics:");
74              println!("   Chunk count: {}", chunk_count);
75              println!("   Total content length: {} characters", full_content.len());
76              println!("   Full content: \"{}\"", full_content.trim());
77          }
78          Err(e) => {
79              println!("❌ Streaming request failed: {}", e);
80          }
81      }
82
83      println!("\n💡 Advantages of streaming responses:");
84      println!("   • Generated content is displayed in real time");
85      println!("   • Better user experience");
86      println!("   • Generation can be stopped early");
87      println!("   • Well suited to long-form generation");
88
89      Ok(())
90  }
examples/test_groq_generic.rs (line 40)
 5  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6      println!("🔄 Testing config-driven Groq");
 7      println!("====================");
 8
 9      if std::env::var("GROQ_API_KEY").is_err() {
10          println!("❌ GROQ_API_KEY not set");
11          return Ok(());
12      }
13
14      let client = AiClient::new(Provider::Groq)?;
15      println!("✅ Groq client created successfully (using GenericAdapter)");
16
17      // Test non-streaming chat
18      let request = ChatCompletionRequest::new(
19          "llama3-8b-8192".to_string(),
20          vec![Message {
21              role: Role::User,
22              content: "Say 'Hello from Generic Groq!' in exactly those words.".to_string(),
23          }],
24      ).with_max_tokens(20);
25
26      println!("\n💬 Testing non-streaming chat...");
27      match client.chat_completion(request.clone()).await {
28          Ok(response) => {
29              println!("✅ Non-streaming chat succeeded!");
30              println!("   Response: '{}'", response.choices[0].message.content);
31              println!("   Token usage: {}", response.usage.total_tokens);
32          }
33          Err(e) => {
34              println!("❌ Non-streaming chat failed: {}", e);
35          }
36      }
37
38      // Test streaming chat
39      println!("\n🌊 Testing streaming chat...");
40      match client.chat_completion_stream(request).await {
41          Ok(mut stream) => {
42              print!("   Streaming response: ");
43              let mut content = String::new();
44
45              while let Some(result) = stream.next().await {
46                  match result {
47                      Ok(chunk) => {
48                          if let Some(choice) = chunk.choices.first() {
49                              if let Some(text) = &choice.delta.content {
50                                  print!("{}", text);
51                                  content.push_str(text);
52                                  use std::io::{self, Write};
53                                  io::stdout().flush().unwrap();
54                              }
55                              if choice.finish_reason.is_some() {
56                                  println!();
57                                  break;
58                              }
59                          }
60                      }
61                      Err(e) => {
62                          println!("\n❌ Streaming error: {}", e);
63                          break;
64                      }
65                  }
66              }
67
68              if !content.is_empty() {
69                  println!("✅ Streaming chat succeeded!");
70                  println!("   Full content: '{}'", content.trim());
71              }
72          }
73          Err(e) => {
74              println!("❌ Streaming chat failed: {}", e);
75          }
76      }
77
78      // Test the model list
79      println!("\n📋 Testing model list...");
80      match client.list_models().await {
81          Ok(models) => {
82              println!("✅ Model list fetched successfully!");
83              println!("   Available models: {:?}", models);
84          }
85          Err(e) => {
86              println!("❌ Failed to fetch model list: {}", e);
87          }
88      }
89
90      println!("\n🎯 Config-driven Groq test results:");
91      println!("   • Uses GenericAdapter instead of GroqAdapter");
92      println!("   • Code shrinks from 250 lines to 10 lines of configuration");
93      println!("   • Identical functionality: chat, streaming chat, model listing");
94      println!("   • Demonstrates OpenAI compatibility and the effectiveness of the generic adapter");
95
96      Ok(())
97  }
examples/test_streaming_clean.rs (line 28)
 5  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6      println!("🌊 Clean streaming response test");
 7      println!("======================");
 8
 9      if std::env::var("GROQ_API_KEY").is_err() {
10          println!("❌ GROQ_API_KEY not set");
11          return Ok(());
12      }
13
14      let client = AiClient::new(Provider::Groq)?;
15      println!("✅ Groq client created successfully");
16
17      let request = ChatCompletionRequest::new(
18          "llama3-8b-8192".to_string(),
19          vec![Message {
20              role: Role::User,
21              content: "Write a haiku about programming.".to_string(),
22          }],
23      ).with_max_tokens(50)
24       .with_temperature(0.8);
25
26      println!("\n📤 Sending streaming request: {}", request.messages[0].content);
27
28      match client.chat_completion_stream(request).await {
29          Ok(mut stream) => {
30              println!("\n🎭 AI reply:");
31              print!("   ");
32
33              let mut content_parts = Vec::new();
34
35              while let Some(result) = stream.next().await {
36                  match result {
37                      Ok(chunk) => {
38                          if let Some(choice) = chunk.choices.first() {
39                              if let Some(content) = &choice.delta.content {
40                                  // Try to parse JSON content
41                                  if content.contains("\"content\":") {
42                                      if let Ok(json) = serde_json::from_str::<serde_json::Value>(content) {
43                                          if let Some(text) = json["content"].as_str() {
44                                              if !text.is_empty() {
45                                                  print!("{}", text);
46                                                  content_parts.push(text.to_string());
47                                                  use std::io::{self, Write};
48                                                  io::stdout().flush().unwrap();
49                                              }
50                                          }
51                                      }
52                                  } else if !content.trim().is_empty() && !content.contains("data:") {
53                                      // Print non-JSON content directly
54                                      print!("{}", content);
55                                      content_parts.push(content.clone());
56                                      use std::io::{self, Write};
57                                      io::stdout().flush().unwrap();
58                                  }
59                              }
60
61                              if choice.finish_reason.is_some() {
62                                  println!("\n");
63                                  break;
64                              }
65                          }
66                      }
67                      Err(e) => {
68                          println!("\n❌ Streaming response error: {}", e);
69                          break;
70                      }
71                  }
72              }
73
74              let full_content = content_parts.join("");
75              if !full_content.is_empty() {
76                  println!("✅ Streaming response complete!");
77                  println!("📝 Full content: \"{}\"", full_content.trim());
78              } else {
79                  println!("⚠️  No usable content extracted; the SSE parsing may need improvement");
80              }
81          }
82          Err(e) => {
83              println!("❌ Streaming request failed: {}", e);
84          }
85      }
86
87      Ok(())
88  }
examples/test_streaming_improved.rs (line 28)
  5  async fn main() -> Result<(), Box<dyn std::error::Error>> {
  6      println!("🌊 Improved streaming response test");
  7      println!("======================");
  8
  9      if std::env::var("GROQ_API_KEY").is_err() {
 10          println!("❌ GROQ_API_KEY not set");
 11          return Ok(());
 12      }
 13
 14      let client = AiClient::new(Provider::Groq)?;
 15      println!("✅ Groq client created successfully");
 16
 17      let request = ChatCompletionRequest::new(
 18          "llama3-8b-8192".to_string(),
 19          vec![Message {
 20              role: Role::User,
 21              content: "Write a creative story about a robot learning to paint. Keep it under 100 words.".to_string(),
 22          }],
 23      ).with_max_tokens(150)
 24       .with_temperature(0.8);
 25
 26      println!("\n📤 Sending streaming request...");
 27
 28      match client.chat_completion_stream(request).await {
 29          Ok(mut stream) => {
 30              println!("🎨 AI is writing:");
 31              print!("   ");
 32
 33              let mut content = String::new();
 34              let mut chunk_count = 0;
 35
 36              while let Some(result) = stream.next().await {
 37                  match result {
 38                      Ok(chunk) => {
 39                          chunk_count += 1;
 40
 41                          if let Some(choice) = chunk.choices.first() {
 42                              if let Some(text) = &choice.delta.content {
 43                                  if !text.is_empty() {
 44                                      print!("{}", text);
 45                                      content.push_str(text);
 46
 47                                      use std::io::{self, Write};
 48                                      io::stdout().flush().unwrap();
 49                                  }
 50                              }
 51
 52                              if choice.finish_reason.is_some() {
 53                                  println!("\n");
 54                                  println!("✅ Writing complete! (reason: {:?})", choice.finish_reason);
 55                                  break;
 56                              }
 57                          }
 58                      }
 59                      Err(e) => {
 60                          println!("\n❌ Streaming error: {}", e);
 61                          break;
 62                      }
 63                  }
 64              }
 65
 66              println!("\n📊 Statistics:");
 67              println!("   Chunks: {}", chunk_count);
 68              println!("   Characters: {}", content.len());
 69              println!("   Words: {}", content.split_whitespace().count());
 70          }
 71          Err(e) => {
 72              println!("❌ Streaming request failed: {}", e);
 73          }
 74      }
 75
 76      // Test DeepSeek streaming response
 77      if std::env::var("DEEPSEEK_API_KEY").is_ok() {
 78          println!("\n{}", "=".repeat(50));
 79          println!("🧠 Testing DeepSeek streaming response");
 80
 81          let deepseek_client = AiClient::new(Provider::DeepSeek)?;
 82          let request = ChatCompletionRequest::new(
 83              "deepseek-chat".to_string(),
 84              vec![Message {
 85                  role: Role::User,
 86                  content: "Explain quantum computing in one sentence.".to_string(),
 87              }],
 88          ).with_max_tokens(50);
 89
 90          match deepseek_client.chat_completion_stream(request).await {
 91              Ok(mut stream) => {
 92                  println!("🔬 DeepSeek reply:");
 93                  print!("   ");
 94
 95                  while let Some(result) = stream.next().await {
 96                      match result {
 97                          Ok(chunk) => {
 98                              if let Some(choice) = chunk.choices.first() {
 99                                  if let Some(text) = &choice.delta.content {
100                                      print!("{}", text);
101                                      use std::io::{self, Write};
102                                      io::stdout().flush().unwrap();
103                                  }
104                                  if choice.finish_reason.is_some() {
105                                      println!("\n✅ DeepSeek streaming response succeeded!");
106                                      break;
107                                  }
108                              }
109                          }
110                          Err(e) => {
111                              println!("\n❌ DeepSeek streaming error: {}", e);
112                              break;
113                          }
114                      }
115                  }
116              }
117              Err(e) => {
118                  println!("❌ DeepSeek streaming request failed: {}", e);
119              }
120          }
121      }
122
123      Ok(())
124  }

pub async fn chat_completion_stream_with_cancel(
    &self,
    request: ChatCompletionRequest,
) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>

Streaming chat completion request with cancellation control

§Arguments
  • request - The chat completion request
§Returns
  • (Stream, CancelHandle) - The streaming response and a cancellation handle
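No repository example exercises this method, so the sketch below only shows the intended shape; it assumes (not confirmed by the docs shown here) that CancelHandle exposes a cancel() method for stopping the stream early:

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message { role: Role::User, content: "Tell a long story".to_string() }],
    );
    let (mut stream, cancel) = client.chat_completion_stream_with_cancel(request).await?;
    let mut chunks = 0;
    while let Some(chunk) = stream.next().await {
        chunks += chunk?.choices.len();
        if chunks > 10 {
            cancel.cancel(); // assumed method name: stop the underlying request early
            break;
        }
    }
    Ok(())
}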

pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>

Fetches the list of supported models

§Returns
  • Result<Vec<String>, AiLibError> - The model list on success, or an error on failure
Examples found in repository
examples/list_models_smoke.rs (line 25)
examples/cohere_stream.rs (line 15)
examples/basic_usage.rs (line 13)
examples/config_driven_example.rs (line 23)
examples/proxy_example.rs (line 38)
 4  async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5      println!("🌐 AI-lib proxy server support example");
 6      println!("============================");
 7
 8      // Check proxy configuration
 9      match std::env::var("AI_PROXY_URL") {
10          Ok(proxy_url) => {
11              println!("✅ Proxy configuration detected: {}", proxy_url);
12              println!("   All HTTP requests will go through this proxy server");
13          }
14          Err(_) => {
15              println!("ℹ️  AI_PROXY_URL environment variable not set");
16              println!("   To use a proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
17          }
18      }
19
20      println!("\n🚀 Creating AI client...");
21      let client = AiClient::new(Provider::Groq)?;
22      println!("✅ Client created successfully, provider: {:?}", client.current_provider());
23
24      // Build a test request
25      let request = ChatCompletionRequest::new(
26          "llama3-8b-8192".to_string(),
27          vec![Message {
28              role: Role::User,
29              content: "Hello! This request may go through a proxy.".to_string(),
30          }],
31      );
32
33      println!("\n📤 Preparing to send request...");
34      println!("   Model: {}", request.model);
35      println!("   Message: {}", request.messages[0].content);
36
37      // Fetch the model list (this request also goes through the proxy)
38      match client.list_models().await {
39          Ok(models) => {
40              println!("\n📋 Model list fetched through the proxy:");
41              for model in models {
42                  println!("   • {}", model);
43              }
44          }
45          Err(e) => {
46              println!("\n⚠️  Failed to fetch model list: {}", e);
47              println!("   Possible causes:");
48              println!("   • GROQ_API_KEY environment variable not set");
49              println!("   • Proxy server misconfiguration");
50              println!("   • Network connectivity problems");
51          }
52      }
53
54      println!("\n💡 Proxy configuration notes:");
55      println!("   • Set the environment variable: AI_PROXY_URL=http://your-proxy:port");
56      println!("   • HTTP and HTTPS proxies are supported");
57      println!("   • Authenticated proxies are supported: http://user:pass@proxy:port");
58      println!("   • All AI providers automatically use this proxy configuration");
59
60      Ok(())
61  }
examples/openai_test.rs (line 24)

pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>

Switches the AI model provider

§Arguments
  • provider - The new provider
§Returns
  • Result<(), AiLibError> - () on success, or an error on failure
§Example
use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Switch from Groq to Groq (demonstrates the switching API)
client.switch_provider(Provider::Groq)?;
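Switching to a genuinely different provider looks the same; a sketch (it assumes the target provider's API key, e.g. OPENAI_API_KEY, is configured):

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AiClient::new(Provider::Groq)?;
    // Reuse the same client instance, now pointed at OpenAI.
    client.switch_provider(Provider::OpenAI)?;
    assert!(matches!(client.current_provider(), Provider::OpenAI));
    Ok(())
}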

pub fn current_provider(&self) -> Provider

Returns the provider currently in use

Examples found in repository
examples/basic_usage.rs (line 10)
examples/config_driven_example.rs (line 20)
examples/proxy_example.rs (line 22)

Auto Trait Implementations§

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> PolicyExt for T
where T: ?Sized,

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow.

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V

impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.

impl<T> ErasedDestructor for T
where T: 'static,