Struct AiClient

pub struct AiClient { /* private fields */ }

Unified AI client

Usage example:

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // To switch model providers, just change the Provider value
    let client = AiClient::new(Provider::Groq)?;
     
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );
     
    // Note: the GROQ_API_KEY environment variable must be set to actually call the API
    // Optional: set the AI_PROXY_URL environment variable to use a proxy server
    // let response = client.chat_completion(request).await?;
     
    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
     
    Ok(())
}

§Proxy Server Configuration

Configure a proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:

  • HTTP proxy: http://proxy.example.com:8080
  • HTTPS proxy: https://proxy.example.com:8080
  • With authentication: http://user:pass@proxy.example.com:8080
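For quick experiments, the variable can also be set from inside the process before the client is constructed; a minimal sketch, assuming AI_PROXY_URL is read from the environment when the client is created (the proxy address below is a placeholder):

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder proxy address; replace with your own.
    std::env::set_var("AI_PROXY_URL", "http://127.0.0.1:8080");

    // The client picks up AI_PROXY_URL from the environment.
    let client = AiClient::new(Provider::Groq)?;
    println!("Client created with provider: {:?}", client.current_provider());
    Ok(())
}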

Implementations§

impl AiClient

pub fn new(provider: Provider) -> Result<Self, AiLibError>

Create a new AI client

§Arguments
  • provider - The AI model provider to use
§Returns
  • Result<Self, AiLibError> - Returns the client instance on success, or an error on failure
§Example
use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
Examples found in repository:
examples/multimodal_example.rs (line 8)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("Multimodal example: image + audio content in a message");
7
8    let _client = AiClient::new(Provider::Groq)?;
9
10    let request = ChatCompletionRequest::new(
11        "multimodal-model".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::new_image(
15                Some("https://example.com/dog.jpg".into()),
16                Some("image/jpeg".into()),
17                Some("dog.jpg".into()),
18            ),
19            function_call: None,
20        }],
21    );
22
23    println!(
24        "Prepared multimodal request; image URL: {}",
25        request.messages[0].content.as_text()
26    );
27
28    // Note: this example demonstrates the type usage only and does not call the API.
29    Ok(())
30}
More examples
examples/list_models_smoke.rs (line 24)
4async fn main() {
5    let providers = vec![
6        Provider::Groq,
7        Provider::XaiGrok,
8        Provider::Ollama,
9        Provider::DeepSeek,
10        Provider::Anthropic,
11        Provider::AzureOpenAI,
12        Provider::HuggingFace,
13        Provider::TogetherAI,
14        Provider::Qwen,
15        Provider::OpenAI,
16        Provider::Gemini,
17        Provider::Mistral,
18        Provider::Cohere,
19        // Provider::Bedrock, // removed: Bedrock implementation deferred / not in the public API
20    ];
21
22    for p in providers {
23        println!("--- Provider: {:?} ---", p);
24        match AiClient::new(p) {
25            Ok(client) => match client.list_models().await {
26                Ok(models) => {
27                    println!("Found {} models (showing up to 5):", models.len());
28                    for m in models.into_iter().take(5) {
29                        println!(" - {}", m);
30                    }
31                }
32                Err(e) => println!("list_models error: {:?}", e),
33            },
34            Err(e) => println!("client init error: {:?}", e),
35        }
36    }
37}
examples/cohere_stream.rs (line 8)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Ensure COHERE_API_KEY env var is set if making real requests
8    let client = AiClient::new(Provider::Cohere)?;
9
10    let request = ChatCompletionRequest::new(
11        "command-xlarge-nightly".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Write a haiku about rust programming".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_temperature(0.7)
19    .with_max_tokens(60);
20
21    // List models
22    match client.list_models().await {
23        Ok(models) => println!("Models: {:?}", models),
24        Err(e) => eprintln!("Failed to list models: {}", e),
25    }
26
27    // Streaming
28    let mut stream = client.chat_completion_stream(request).await?;
29    while let Some(chunk) = stream.next().await {
30        match chunk {
31            Ok(c) => {
32                for choice in c.choices {
33                    if let Some(delta) = choice.delta.content {
34                        print!("{}", delta);
35                    }
36                }
37            }
38            Err(e) => {
39                eprintln!("Stream error: {}", e);
40                break;
41            }
42        }
43    }
44
45    Ok(())
46}
examples/basic_usage.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Basic Usage Example");
7    println!("================================");
8
9    // To switch model providers, just change the Provider value
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Get the list of supported models
17    let models = client.list_models().await?;
18    println!("📋 Available models: {:?}", models);
19
20    // Create a chat request
21    let request = ChatCompletionRequest::new(
22        "llama3-8b-8192".to_string(),
23        vec![Message {
24            role: Role::User,
25            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
26            function_call: None,
27        }],
28    )
29    .with_temperature(0.7)
30    .with_max_tokens(100);
31
32    println!("📤 Sending request to model: {}", request.model);
33
34    // Send the request
35    let response = client.chat_completion(request).await?;
36
37    println!("📥 Received response:");
38    println!("   ID: {}", response.id);
39    println!("   Model: {}", response.model);
40    println!(
41        "   Content: {}",
42        response.choices[0].message.content.as_text()
43    );
44    println!("   Usage: {} tokens", response.usage.total_tokens);
45
46    Ok(())
47}
examples/config_driven_example.rs (line 20)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 Config-driven AI-lib example");
7    println!("========================");
8
9    // Demonstrate the advantage of config-driven design: easy provider switching
10    let providers = vec![
11        (Provider::Groq, "Groq"),
12        (Provider::OpenAI, "OpenAI"),
13        (Provider::DeepSeek, "DeepSeek"),
14    ];
15
16    for (provider, name) in providers {
17        println!("\n📡 Testing provider: {}", name);
18
19        // Create a client - just change the enum value
20        let client = AiClient::new(provider)?;
21        println!("✅ Client created successfully: {:?}", client.current_provider());
22
23        // Get the model list
24        match client.list_models().await {
25            Ok(models) => println!("📋 Available models: {:?}", models),
26            Err(e) => println!("⚠️  Failed to get model list: {}", e),
27        }
28
29        // Create a test request
30        let request = ChatCompletionRequest::new(
31            "test-model".to_string(),
32            vec![Message {
33                role: Role::User,
34                content: Content::Text("Hello from ai-lib!".to_string()),
35                function_call: None,
36            }],
37        );
38
39        println!("📤 Request prepared, model: {}", request.model);
40        println!("   (the corresponding API_KEY environment variable must be set to actually make the call)");
41    }
42
43    println!("\n🎯 Core advantages of the config-driven approach:");
44    println!("   • Zero-code switching: just change the Provider enum value");
45    println!("   • Unified interface: all providers use the same API");
46    println!("   • Rapid extension: adding a compatible provider only requires configuration");
47
48    Ok(())
49}
examples/debug_request.rs (line 32)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🔍 Debugging request format");
7    println!("===============");
8
9    // Create a test request
10    let request = ChatCompletionRequest::new(
11        "gpt-3.5-turbo".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Hello!".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_max_tokens(10);
19
20    println!("📤 Original request:");
21    println!("   Model: {}", request.model);
22    println!("   Message count: {}", request.messages.len());
23    println!(
24        "   Message[0]: {:?} - {}",
25        request.messages[0].role,
26        request.messages[0].content.as_text()
27    );
28    println!("   max_tokens: {:?}", request.max_tokens);
29
30    // Test OpenAI
31    println!("\n🤖 Testing OpenAI...");
32    match AiClient::new(Provider::OpenAI) {
33        Ok(client) => {
34            match client.chat_completion(request.clone()).await {
35                Ok(response) => {
36                    println!("✅ Success!");
37                    println!("   Response: {}", response.choices[0].message.content.as_text());
38                }
39                Err(e) => {
40                    println!("❌ Failed: {}", e);
41
42                    // A 400 error usually indicates a malformed request
43                    if e.to_string().contains("400") {
44                        println!("   This usually means the request format is incorrect");
45                        println!("   Let's check whether the request contains the required fields...");
46                    }
47                }
48            }
49        }
50        Err(e) => println!("❌ Client creation failed: {}", e),
51    }
52
53    Ok(())
54}

pub fn new_with_metrics(provider: Provider, metrics: Arc<dyn Metrics>) -> Result<Self, AiLibError>

Create an AiClient with an injected metrics implementation


pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self

Set the metrics implementation on the client
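A minimal sketch of wiring in a metrics implementation; it assumes Metrics and AiLibError are importable from the crate root (the Metrics trait's methods are not shown on this page, so the implementation is left to the caller):

use std::sync::Arc;
use ai_lib::{AiClient, AiLibError, Metrics, Provider};

fn build_client(metrics: Arc<dyn Metrics>) -> Result<AiClient, AiLibError> {
    // Inject the metrics implementation at construction time...
    let client = AiClient::new_with_metrics(Provider::Groq, metrics.clone())?;
    // ...or attach it to an already-constructed client, builder-style.
    let client = client.with_metrics(metrics);
    Ok(client)
}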


pub async fn chat_completion(&self, request: ChatCompletionRequest) -> Result<ChatCompletionResponse, AiLibError>

Send a chat completion request

§Arguments
  • request - The chat completion request
§Returns
  • Result<ChatCompletionResponse, AiLibError> - Returns the response on success, or an error on failure
Examples found in repository:
examples/basic_usage.rs (line 35)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Basic Usage Example");
7    println!("================================");
8
9    // To switch model providers, just change the Provider value
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Get the list of supported models
17    let models = client.list_models().await?;
18    println!("📋 Available models: {:?}", models);
19
20    // Create a chat request
21    let request = ChatCompletionRequest::new(
22        "llama3-8b-8192".to_string(),
23        vec![Message {
24            role: Role::User,
25            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
26            function_call: None,
27        }],
28    )
29    .with_temperature(0.7)
30    .with_max_tokens(100);
31
32    println!("📤 Sending request to model: {}", request.model);
33
34    // Send the request
35    let response = client.chat_completion(request).await?;
36
37    println!("📥 Received response:");
38    println!("   ID: {}", response.id);
39    println!("   Model: {}", response.model);
40    println!(
41        "   Content: {}",
42        response.choices[0].message.content.as_text()
43    );
44    println!("   Usage: {} tokens", response.usage.total_tokens);
45
46    Ok(())
47}
More examples
examples/debug_request.rs (line 34)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🔍 Debugging request format");
7    println!("===============");
8
9    // Create a test request
10    let request = ChatCompletionRequest::new(
11        "gpt-3.5-turbo".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Hello!".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_max_tokens(10);
19
20    println!("📤 Original request:");
21    println!("   Model: {}", request.model);
22    println!("   Message count: {}", request.messages.len());
23    println!(
24        "   Message[0]: {:?} - {}",
25        request.messages[0].role,
26        request.messages[0].content.as_text()
27    );
28    println!("   max_tokens: {:?}", request.max_tokens);
29
30    // Test OpenAI
31    println!("\n🤖 Testing OpenAI...");
32    match AiClient::new(Provider::OpenAI) {
33        Ok(client) => {
34            match client.chat_completion(request.clone()).await {
35                Ok(response) => {
36                    println!("✅ Success!");
37                    println!("   Response: {}", response.choices[0].message.content.as_text());
38                }
39                Err(e) => {
40                    println!("❌ Failed: {}", e);
41
42                    // A 400 error usually indicates a malformed request
43                    if e.to_string().contains("400") {
44                        println!("   This usually means the request format is incorrect");
45                        println!("   Let's check whether the request contains the required fields...");
46                    }
47                }
48            }
49        }
50        Err(e) => println!("❌ Client creation failed: {}", e),
51    }
52
53    Ok(())
54}
examples/test_without_proxy.rs (line 30)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🌐 Testing connection without a proxy");
7    println!("======================");
8
9    // Temporarily remove the proxy setting
10    std::env::remove_var("AI_PROXY_URL");
11
12    println!("ℹ️  AI_PROXY_URL setting temporarily removed");
13
14    // Test DeepSeek (directly reachable from mainland China)
15    println!("\n🔍 Testing DeepSeek (direct connection):");
16    match AiClient::new(Provider::DeepSeek) {
17        Ok(client) => {
18            let request = ChatCompletionRequest::new(
19                "deepseek-chat".to_string(),
20                vec![Message {
21                    role: Role::User,
22                    content: Content::Text(
23                        "Hello! Please respond with just 'Hi' to test.".to_string(),
24                    ),
25                    function_call: None,
26                }],
27            )
28            .with_max_tokens(5);
29
30            match client.chat_completion(request).await {
31                Ok(response) => {
32                    println!("✅ DeepSeek direct connection succeeded!");
33                    println!("   Response: {}", response.choices[0].message.content.as_text());
34                    println!("   Token usage: {}", response.usage.total_tokens);
35                }
36                Err(e) => {
37                    println!("❌ DeepSeek request failed: {}", e);
38                    if e.to_string().contains("402") {
39                        println!("   (insufficient-balance error, so the connection itself works)");
40                    }
41                }
42            }
43        }
44        Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
45    }
46
47    println!("\n💡 Conclusions:");
48    println!("   • DeepSeek can be reached directly; no proxy needed");
49    println!("   • OpenAI and Groq must be accessed through a proxy");
50    println!("   • A proxy may modify the request body, causing format errors");
51    println!("   • Check the proxy server configuration if requests fail");
52
53    Ok(())
54}
examples/openai_test.rs (line 52)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🤖 OpenAI provider test");
7    println!("=====================");
8
9    // Check the API key
10    match std::env::var("OPENAI_API_KEY") {
11        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
12        Err(_) => {
13            println!("❌ OPENAI_API_KEY environment variable not set");
14            println!("   Please set it: export OPENAI_API_KEY=your_api_key");
15            return Ok(());
16        }
17    }
18
19    // Create an OpenAI client
20    let client = AiClient::new(Provider::OpenAI)?;
21    println!("✅ OpenAI client created successfully");
22
23    // Get the model list
24    println!("\n📋 Fetching OpenAI model list...");
25    match client.list_models().await {
26        Ok(models) => {
27            println!("✅ Fetched {} models", models.len());
28            println!("   Common models:");
29            for model in models.iter().filter(|m| m.contains("gpt")) {
30                println!("   • {}", model);
31            }
32        }
33        Err(e) => println!("❌ Failed to get model list: {}", e),
34    }
35
36    // Test chat completion
37    println!("\n💬 Testing chat completion...");
38    let request = ChatCompletionRequest::new(
39        "gpt-3.5-turbo".to_string(),
40        vec![Message {
41            role: Role::User,
42            content: Content::Text(
43                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
44                    .to_string(),
45            ),
46            function_call: None,
47        }],
48    )
49    .with_max_tokens(20)
50    .with_temperature(0.7);
51
52    match client.chat_completion(request).await {
53        Ok(response) => {
54            println!("✅ Chat completion succeeded!");
55            println!("   Model: {}", response.model);
56            println!("   Response: {}", response.choices[0].message.content.as_text());
57            println!(
58                "   Token usage: {} (prompt: {}, completion: {})",
59                response.usage.total_tokens,
60                response.usage.prompt_tokens,
61                response.usage.completion_tokens
62            );
63        }
64        Err(e) => println!("❌ Chat completion failed: {}", e),
65    }
66
67    println!("\n🎯 OpenAI config-driven test complete!");
68    println!("   This demonstrates the power of the config-driven architecture:");
69    println!("   • No OpenAI-specific code required");
70    println!("   • Just add a configuration to ProviderConfigs");
71    println!("   • All OpenAI-compatible features are supported automatically");
72
73    Ok(())
74}
examples/test_all_providers.rs (line 53)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🧪 Testing all AI providers");
7    println!("==================");
8
9    // Check the proxy configuration
10    if let Ok(proxy_url) = std::env::var("AI_PROXY_URL") {
11        println!("🌐 Using proxy: {}", proxy_url);
12    }
13
14    let providers = vec![
15        (Provider::Groq, "Groq", "llama3-8b-8192"),
16        (Provider::OpenAI, "OpenAI", "gpt-3.5-turbo"),
17        (Provider::DeepSeek, "DeepSeek", "deepseek-chat"),
18    ];
19
20    for (provider, name, model) in providers {
21        println!("\n🔍 Testing provider: {}", name);
22        println!("{}", "─".repeat(30));
23
24        match AiClient::new(provider) {
25            Ok(client) => {
26                println!("✅ Client created successfully");
27
28                // Test the model list
29                match client.list_models().await {
30                    Ok(models) => {
31                        println!("📋 Number of available models: {}", models.len());
32                        if !models.is_empty() {
33                            println!("   First 3 models: {:?}", &models[..models.len().min(3)]);
34                        }
35                    }
36                    Err(e) => println!("⚠️  Failed to get model list: {}", e),
37                }
38
39                // Test chat completion
40                let request = ChatCompletionRequest::new(
41                    model.to_string(),
42                    vec![Message {
43                        role: Role::User,
44                        content: Content::Text(
45                            "Hello! Please respond with just 'Hi' to test the API.".to_string(),
46                        ),
47                        function_call: None,
48                    }],
49                )
50                .with_max_tokens(10);
51
52                println!("📤 Sending test request to model: {}", model);
53                match client.chat_completion(request).await {
54                    Ok(response) => {
55                        println!("✅ Request succeeded!");
56                        println!("   Response ID: {}", response.id);
57                        println!("   Content: {}", response.choices[0].message.content.as_text());
58                        println!("   Tokens used: {}", response.usage.total_tokens);
59                    }
60                    Err(e) => println!("❌ Request failed: {}", e),
61                }
62            }
63            Err(e) => {
64                println!("❌ Client creation failed: {}", e);
65            }
66        }
67    }
68
69    println!("\n💡 Tips:");
70    println!("   • Make sure the corresponding API key environment variables are set");
71    println!("   • GROQ_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY");
72    println!("   • Optionally set AI_PROXY_URL to use a proxy server");
73
74    Ok(())
75}
examples/function_call_openai.rs (line 40)
7async fn main() -> Result<(), Box<dyn std::error::Error>> {
8    println!("🔧 OpenAI Function Calling example (ai-lib)");
9
10    // Ensure OPENAI_API_KEY is set in env before running
11    let client = AiClient::new(Provider::OpenAI)?;
12
13    // Build a simple user message
14    let user_msg = Message {
15        role: Role::User,
16        content: Content::Text("Please call the ascii_horse tool with size=3".to_string()),
17        function_call: None,
18    };
19
20    // Define a Tool (JSON Schema for parameters)
21    let ascii_horse_tool = Tool {
22        name: "ascii_horse".to_string(),
23        description: Some("Draws an ASCII horse of given size".to_string()),
24        parameters: Some(json!({
25            "type": "object",
26            "properties": {
27                "size": { "type": "integer", "description": "Size of the horse" }
28            },
29            "required": ["size"]
30        })),
31    };
32
33    let mut req = ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![user_msg]);
34    req.functions = Some(vec![ascii_horse_tool]);
35    req.function_call = Some(FunctionCallPolicy::Auto("auto".to_string()));
36    req = req.with_max_tokens(200).with_temperature(0.0);
37
38    println!("📤 Sending request to OpenAI (model={})", req.model);
39
40    let resp = client.chat_completion(req).await?;
41
42    // Handle a possible function call from the model: execute locally and send the result back
43    for choice in resp.choices {
44        let msg = choice.message;
45        if let Some(fc) = msg.function_call {
46            println!("🛠️  Model invoked function: {}", fc.name);
47            let args = fc.arguments.unwrap_or(serde_json::json!(null));
48            println!("   arguments: {}", args);
49
50            // Simple local tool: ascii_horse
51            if fc.name == "ascii_horse" {
52                // Parse size param
53                let size = args.get("size").and_then(|v| v.as_i64()).unwrap_or(3) as usize;
54                let horse = generate_ascii_horse(size);
55                println!("⚙️ Executed ascii_horse locally, output:\n{}", horse);
56
57                // Send follow-up message with tool result as assistant message
58                let tool_msg = Message {
59                    role: Role::Assistant,
60                    content: Content::Text(horse.clone()),
61                    function_call: None,
62                };
63
64                let mut followup =
65                    ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![tool_msg]);
66                followup = followup.with_max_tokens(200).with_temperature(0.0);
67                let follow_resp = client.chat_completion(followup).await?;
68                for fc_choice in follow_resp.choices {
69                    println!(
70                        "🗨️ Final model response: {}",
71                        fc_choice.message.content.as_text()
72                    );
73                }
74            }
75        } else {
76            println!("💬 Model message: {}", msg.content.as_text());
77        }
78    }
79
80    Ok(())
81}

pub async fn chat_completion_stream(&self, request: ChatCompletionRequest) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>

Streaming chat completion request

§Arguments
  • request - The chat completion request
§Returns
  • Result<impl Stream<Item = Result<ChatCompletionChunk, AiLibError>>, AiLibError> - Returns the streaming response on success
Examples found in repository:
examples/cohere_stream.rs (line 28)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Ensure COHERE_API_KEY env var is set if making real requests
8    let client = AiClient::new(Provider::Cohere)?;
9
10    let request = ChatCompletionRequest::new(
11        "command-xlarge-nightly".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Write a haiku about rust programming".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_temperature(0.7)
19    .with_max_tokens(60);
20
21    // List models
22    match client.list_models().await {
23        Ok(models) => println!("Models: {:?}", models),
24        Err(e) => eprintln!("Failed to list models: {}", e),
25    }
26
27    // Streaming
28    let mut stream = client.chat_completion_stream(request).await?;
29    while let Some(chunk) = stream.next().await {
30        match chunk {
31            Ok(c) => {
32                for choice in c.choices {
33                    if let Some(delta) = choice.delta.content {
34                        print!("{}", delta);
35                    }
36                }
37            }
38            Err(e) => {
39                eprintln!("Stream error: {}", e);
40                break;
41            }
42        }
43    }
44
45    Ok(())
46}
More examples
examples/test_streaming.rs (line 39)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🌊 Streaming response test");
8    println!("================");
9
10    // Check the Groq API key
11    if std::env::var("GROQ_API_KEY").is_err() {
12        println!("❌ GROQ_API_KEY not set");
13        return Ok(());
14    }
15
16    // Create a Groq client
17    let client = AiClient::new(Provider::Groq)?;
18    println!("✅ Groq client created successfully");
19
20    // Create a streaming request
21    let request = ChatCompletionRequest::new(
22        "llama3-8b-8192".to_string(),
23        vec![Message {
24            role: Role::User,
25            content: Content::Text(
26                "Please write a short poem about AI in exactly 4 lines.".to_string(),
27            ),
28            function_call: None,
29        }],
30    )
31    .with_max_tokens(100)
32    .with_temperature(0.7);
33
34    println!("\n📤 Sending streaming request...");
35    println!("   Model: {}", request.model);
36    println!("   Message: {}", request.messages[0].content.as_text());
37
38    // Get the streaming response
39    match client.chat_completion_stream(request).await {
40        Ok(mut stream) => {
41            println!("\n🌊 Receiving streaming response:");
42            println!("{}", "─".repeat(50));
43
44            let mut full_content = String::new();
45            let mut chunk_count = 0;
46
47            while let Some(result) = stream.next().await {
48                match result {
49                    Ok(chunk) => {
50                        chunk_count += 1;
51
52                        if let Some(choice) = chunk.choices.first() {
53                            if let Some(content) = &choice.delta.content {
54                                print!("{}", content);
55                                full_content.push_str(content);
56
57                                // Flush the output
58                                use std::io::{self, Write};
59                                io::stdout().flush().unwrap();
60                            }
61
62                            // Check whether the stream is finished
63                            if choice.finish_reason.is_some() {
64                                println!("\n{}", "─".repeat(50));
65                                println!("✅ Streaming response complete!");
66                                println!("   Finish reason: {:?}", choice.finish_reason);
67                                break;
68                            }
69                        }
70                    }
71                    Err(e) => {
72                        println!("\n❌ Streaming response error: {}", e);
73                        break;
74                    }
75                }
76            }
77
78            println!("\n📊 Streaming response statistics:");
79            println!("   Number of chunks: {}", chunk_count);
80            println!("   Total content length: {} characters", full_content.len());
81            println!("   Full content: \"{}\"", full_content.trim());
82        }
83        Err(e) => {
84            println!("❌ Streaming request failed: {}", e);
85        }
86    }
87
88    println!("\n💡 Advantages of streaming responses:");
89    println!("   • Generated content is displayed in real time");
90    println!("   • Better user experience");
91    println!("   • Generation can be stopped early");
92    println!("   • Well suited to long text generation");
93
94    Ok(())
95}
examples/test_groq_generic.rs (line 48)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🔄 Testing config-driven Groq");
8    println!("====================");
9
10    if std::env::var("GROQ_API_KEY").is_err() {
11        println!("❌ GROQ_API_KEY not set");
12        return Ok(());
13    }
14
15    let client = AiClient::new(Provider::Groq)?;
16    println!("✅ Groq client created successfully (using GenericAdapter)");
17
18    // Test regular chat
19    let request = ChatCompletionRequest::new(
20        "llama3-8b-8192".to_string(),
21        vec![Message {
22            role: Role::User,
23            content: Content::Text(
24                "Say 'Hello from Generic Groq!' in exactly those words.".to_string(),
25            ),
26            function_call: None,
27        }],
28    )
29    .with_max_tokens(20);
30
31    println!("\n💬 Testing regular chat...");
32    match client.chat_completion(request.clone()).await {
33        Ok(response) => {
34            println!("✅ Regular chat succeeded!");
35            println!(
36                "   Response: '{}'",
37                response.choices[0].message.content.as_text()
38            );
39            println!("   Token usage: {}", response.usage.total_tokens);
40        }
41        Err(e) => {
42            println!("❌ Regular chat failed: {}", e);
43        }
44    }
45
46    // Test streaming chat
47    println!("\n🌊 Testing streaming chat...");
48    match client.chat_completion_stream(request).await {
49        Ok(mut stream) => {
50            print!("   Streaming response: ");
51            let mut content = String::new();
52
53            while let Some(result) = stream.next().await {
54                match result {
55                    Ok(chunk) => {
56                        if let Some(choice) = chunk.choices.first() {
57                            if let Some(text) = &choice.delta.content {
58                                print!("{}", text);
59                                content.push_str(text);
60                                use std::io::{self, Write};
61                                io::stdout().flush().unwrap();
62                            }
63                            if choice.finish_reason.is_some() {
64                                println!();
65                                break;
66                            }
67                        }
68                    }
69                    Err(e) => {
70                        println!("\n❌ Stream error: {}", e);
71                        break;
72                    }
73                }
74            }
75
76            if !content.is_empty() {
77                println!("✅ Streaming chat succeeded!");
78                println!("   Full content: '{}'", content.trim());
79            }
80        }
81        Err(e) => {
82            println!("❌ Streaming chat failed: {}", e);
83        }
84    }
85
86    // Test model listing
87    println!("\n📋 Testing model listing...");
88    match client.list_models().await {
89        Ok(models) => {
90            println!("✅ Model list fetched successfully!");
91            println!("   Available models: {:?}", models);
92        }
93        Err(e) => {
94            println!("❌ Failed to fetch model list: {}", e);
95        }
96    }
97
98    println!("\n🎯 Config-driven Groq test results:");
99    println!("   • Uses GenericAdapter instead of GroqAdapter");
100    println!("   • Code reduced from 250 lines to 10 lines of configuration");
101    println!("   • Identical functionality: regular chat, streaming chat, model listing");
102    println!("   • Demonstrates OpenAI compatibility and the effectiveness of the generic adapter");
103
104    Ok(())
105}
examples/test_streaming_clean.rs (line 34)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🌊 Clean streaming response test");
8    println!("======================");
9
10    if std::env::var("GROQ_API_KEY").is_err() {
11        println!("❌ GROQ_API_KEY not set");
12        return Ok(());
13    }
14
15    let client = AiClient::new(Provider::Groq)?;
16    println!("✅ Groq client created successfully");
17
18    let request = ChatCompletionRequest::new(
19        "llama3-8b-8192".to_string(),
20        vec![Message {
21            role: Role::User,
22            content: Content::Text("Write a haiku about programming.".to_string()),
23            function_call: None,
24        }],
25    )
26    .with_max_tokens(50)
27    .with_temperature(0.8);
28
29    println!(
30        "\n📤 Sending streaming request: {}",
31        request.messages[0].content.as_text()
32    );
33
34    match client.chat_completion_stream(request).await {
35        Ok(mut stream) => {
36            println!("\n🎭 AI reply:");
37            print!("   ");
38
39            let mut content_parts: Vec<String> = Vec::new();
40
41            while let Some(result) = stream.next().await {
42                match result {
43                    Ok(chunk) => {
44                        if let Some(choice) = chunk.choices.first() {
45                            if let Some(content) = &choice.delta.content {
46                                // try to parse JSON-shaped content first
47                                if content.contains("\"content\":") {
48                                    if let Ok(json) =
49                                        serde_json::from_str::<serde_json::Value>(content)
50                                    {
51                                        if let Some(text) = json["content"].as_str() {
52                                            if !text.is_empty() {
53                                                print!("{}", text);
54                                                content_parts.push(text.to_string());
55                                                use std::io::{self, Write};
56                                                io::stdout().flush().unwrap();
57                                            }
58                                        }
59                                    }
60                                } else if !content.trim().is_empty() && !content.contains("data:") {
61                                    // direct plain text
62                                    print!("{}", content);
63                                    content_parts.push(content.clone());
64                                    use std::io::{self, Write};
65                                    io::stdout().flush().unwrap();
66                                }
67                            }
68
69                            if choice.finish_reason.is_some() {
70                                println!("\n");
71                                break;
72                            }
73                        }
74                    }
75                    Err(e) => {
76                        println!("\n❌ Streaming response error: {}", e);
77                        break;
78                    }
79                }
80            }
81
82            let full_content = content_parts.join("");
83            if !full_content.is_empty() {
84                println!("✅ Streaming response complete!");
85                println!("📝 Full content: \"{}\"", full_content.trim());
86            } else {
87                println!("⚠️  No valid content extracted; SSE parsing may need improvement");
88            }
89        }
90        Err(e) => {
91            println!("❌ Streaming request failed: {}", e);
92        }
93    }
94
95    Ok(())
96}
examples/test_streaming_improved.rs (line 34)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    println!("🌊 Improved streaming response test");
8    println!("======================");
9
10    if std::env::var("GROQ_API_KEY").is_err() {
11        println!("❌ GROQ_API_KEY not set");
12        return Ok(());
13    }
14
15    let client = AiClient::new(Provider::Groq)?;
16    println!("✅ Groq client created successfully");
17
18    let request = ChatCompletionRequest::new(
19        "llama3-8b-8192".to_string(),
20        vec![Message {
21            role: Role::User,
22            content: Content::Text(
23                "Write a creative story about a robot learning to paint. Keep it under 100 words."
24                    .to_string(),
25            ),
26            function_call: None,
27        }],
28    )
29    .with_max_tokens(150)
30    .with_temperature(0.8);
31
32    println!("\n📤 Sending streaming request...");
33
34    match client.chat_completion_stream(request).await {
35        Ok(mut stream) => {
36            println!("🎨 AI is writing:");
37            print!("   ");
38
39            let mut content = String::new();
40            let mut chunk_count = 0;
41
42            while let Some(result) = stream.next().await {
43                match result {
44                    Ok(chunk) => {
45                        chunk_count += 1;
46
47                        if let Some(choice) = chunk.choices.first() {
48                            if let Some(text) = &choice.delta.content {
49                                if !text.is_empty() {
50                                    print!("{}", text);
51                                    content.push_str(text);
52
53                                    use std::io::{self, Write};
54                                    io::stdout().flush().unwrap();
55                                }
56                            }
57
58                            if choice.finish_reason.is_some() {
59                                println!("\n");
60                                println!("✅ Writing complete! (reason: {:?})", choice.finish_reason);
61                                break;
62                            }
63                        }
64                    }
65                    Err(e) => {
66                        println!("\n❌ Stream error: {}", e);
67                        break;
68                    }
69                }
70            }
71
72            println!("\n📊 Statistics:");
73            println!("   Chunks: {}", chunk_count);
74            println!("   Characters: {}", content.len());
75            println!("   Words: {}", content.split_whitespace().count());
76        }
77        Err(e) => {
78            println!("❌ Streaming request failed: {}", e);
79        }
80    }
81
82    // Test DeepSeek streaming response
83    if std::env::var("DEEPSEEK_API_KEY").is_ok() {
84        println!("\n{}", "=".repeat(50));
85        println!("🧠 Testing DeepSeek streaming response");
86
87        let deepseek_client = AiClient::new(Provider::DeepSeek)?;
88        let request = ChatCompletionRequest::new(
89            "deepseek-chat".to_string(),
90            vec![Message {
91                role: Role::User,
92                content: Content::Text("Explain quantum computing in one sentence.".to_string()),
93                function_call: None,
94            }],
95        )
96        .with_max_tokens(50);
97
98        match deepseek_client.chat_completion_stream(request).await {
99            Ok(mut stream) => {
100                println!("🔬 DeepSeek reply:");
101                print!("   ");
102
103                while let Some(result) = stream.next().await {
104                    match result {
105                        Ok(chunk) => {
106                            if let Some(choice) = chunk.choices.first() {
107                                if let Some(text) = &choice.delta.content {
108                                    print!("{}", text);
109                                    use std::io::{self, Write};
110                                    io::stdout().flush().unwrap();
111                                }
112                                if choice.finish_reason.is_some() {
113                                    println!("\n✅ DeepSeek streaming response succeeded!");
114                                    break;
115                                }
116                            }
117                        }
118                        Err(e) => {
119                            println!("\n❌ DeepSeek stream error: {}", e);
120                            break;
121                        }
122                    }
123                }
124            }
125            Err(e) => {
126                println!("❌ DeepSeek streaming request failed: {}", e);
127            }
128        }
129    }
130
131    Ok(())
132}

pub async fn chat_completion_stream_with_cancel(&self, request: ChatCompletionRequest) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>

Streaming chat completion request with cancellation control

§Arguments
  • request - The chat completion request
§Returns
  • Result<(impl Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin, CancelHandle), AiLibError> - Returns the streaming response and a cancel handle on success
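No repository example is scraped for this method, so here is a minimal sketch of holding the cancel handle while draining the stream; it assumes futures::StreamExt for next(), as in the streaming examples above (the handle's own methods are documented on CancelHandle):

use futures::StreamExt;
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Stream a short answer.".to_string()),
            function_call: None,
        }],
    );

    // Keep the handle (or move it to another task) to stop the stream early.
    let (mut stream, _cancel) = client.chat_completion_stream_with_cancel(request).await?;
    while let Some(chunk) = stream.next().await {
        for choice in chunk?.choices {
            if let Some(delta) = choice.delta.content {
                print!("{}", delta);
            }
        }
    }
    Ok(())
}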

pub async fn chat_completion_batch(&self, requests: Vec<ChatCompletionRequest>, concurrency_limit: Option<usize>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Batch chat completion requests

§Arguments
  • requests - The list of chat completion requests
  • concurrency_limit - Maximum number of concurrent requests (None means unlimited)
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns the results for all requests
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
     
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];
     
    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
     
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
     
    Ok(())
}
Examples found in repository:
examples/batch_processing.rs (line 69)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Batch Processing Example");
7    println!("==================================");
8
9    // Create the client
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Prepare multiple requests
17    let requests = vec![
18        ChatCompletionRequest::new(
19            "llama3-8b-8192".to_string(),
20            vec![Message {
21                role: Role::User,
22                content: Content::Text("What is the capital of France?".to_string()),
23                function_call: None,
24            }],
25        )
26        .with_temperature(0.7)
27        .with_max_tokens(50),
28
29        ChatCompletionRequest::new(
30            "llama3-8b-8192".to_string(),
31            vec![Message {
32                role: Role::User,
33                content: Content::Text("What is 2 + 2?".to_string()),
34                function_call: None,
35            }],
36        )
37        .with_temperature(0.1)
38        .with_max_tokens(20),
39
40        ChatCompletionRequest::new(
41            "llama3-8b-8192".to_string(),
42            vec![Message {
43                role: Role::User,
44                content: Content::Text("Tell me a short joke.".to_string()),
45                function_call: None,
46            }],
47        )
48        .with_temperature(0.9)
49        .with_max_tokens(100),
50
51        ChatCompletionRequest::new(
52            "llama3-8b-8192".to_string(),
53            vec![Message {
54                role: Role::User,
55                content: Content::Text("What is the largest planet in our solar system?".to_string()),
56                function_call: None,
57            }],
58        )
59        .with_temperature(0.5)
60        .with_max_tokens(60),
61    ];
62
63    println!("📤 Prepared {} requests for batch processing", requests.len());
64
65    // Method 1: batch processing with a concurrency limit
66    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
67    let start_time = std::time::Instant::now();
68    
69    let responses = client.chat_completion_batch(requests.clone(), Some(2)).await?;
70    
71    let duration = start_time.elapsed();
72    println!("⏱️  Batch processing completed in {:?}", duration);
73
74    // Process the responses
75    for (i, response) in responses.iter().enumerate() {
76        match response {
77            Ok(resp) => {
78                println!(
79                    "✅ Request {}: {}",
80                    i + 1,
81                    resp.choices[0].message.content.as_text()
82                );
83            }
84            Err(e) => {
85                println!("❌ Request {} failed: {}", i + 1, e);
86            }
87        }
88    }
89
90    // Method 2: smart batch processing (strategy chosen automatically)
91    println!("\n🧠 Method 2: Smart batch processing");
92    let start_time = std::time::Instant::now();
93    
94    let responses = client.chat_completion_batch_smart(requests.clone()).await?;
95    
96    let duration = start_time.elapsed();
97    println!("⏱️  Smart batch processing completed in {:?}", duration);
98
99    // Tally successes and failures
100    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
101    let failed: Vec<_> = responses.iter().enumerate().filter_map(|(i, r)| {
102        r.as_ref().err().map(|e| (i, e))
103    }).collect();
104
105    println!("📊 Results:");
106    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
107    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
108    println!("   📈 Success rate: {:.1}%", (successful.len() as f64 / responses.len() as f64) * 100.0);
109
110    // Method 3: unlimited concurrent batch processing
111    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
112    let start_time = std::time::Instant::now();
113    
114    let responses = client.chat_completion_batch(requests, None).await?;
115    
116    let duration = start_time.elapsed();
117    println!("⏱️  Unlimited concurrent processing completed in {:?}", duration);
118
119    // Display all responses
120    for (i, response) in responses.iter().enumerate() {
121        match response {
122            Ok(resp) => {
123                println!(
124                    "✅ Request {}: {}",
125                    i + 1,
126                    resp.choices[0].message.content.as_text()
127                );
128            }
129            Err(e) => {
130                println!("❌ Request {} failed: {}", i + 1, e);
131            }
132        }
133    }
134
135    println!("\n🎉 Batch processing example completed successfully!");
136    Ok(())
137}

pub async fn chat_completion_batch_smart(&self, requests: Vec<ChatCompletionRequest>) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Smart batch processing: automatically selects a processing strategy based on the number of requests

§Arguments
  • requests - The list of chat completion requests
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns the results for all requests
Examples found in repository:
examples/batch_processing.rs (line 94)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Batch Processing Example");
7    println!("==================================");
8
9    // Create the client
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Prepare multiple requests
17    let requests = vec![
18        ChatCompletionRequest::new(
19            "llama3-8b-8192".to_string(),
20            vec![Message {
21                role: Role::User,
22                content: Content::Text("What is the capital of France?".to_string()),
23                function_call: None,
24            }],
25        )
26        .with_temperature(0.7)
27        .with_max_tokens(50),
28
29        ChatCompletionRequest::new(
30            "llama3-8b-8192".to_string(),
31            vec![Message {
32                role: Role::User,
33                content: Content::Text("What is 2 + 2?".to_string()),
34                function_call: None,
35            }],
36        )
37        .with_temperature(0.1)
38        .with_max_tokens(20),
39
40        ChatCompletionRequest::new(
41            "llama3-8b-8192".to_string(),
42            vec![Message {
43                role: Role::User,
44                content: Content::Text("Tell me a short joke.".to_string()),
45                function_call: None,
46            }],
47        )
48        .with_temperature(0.9)
49        .with_max_tokens(100),
50
51        ChatCompletionRequest::new(
52            "llama3-8b-8192".to_string(),
53            vec![Message {
54                role: Role::User,
55                content: Content::Text("What is the largest planet in our solar system?".to_string()),
56                function_call: None,
57            }],
58        )
59        .with_temperature(0.5)
60        .with_max_tokens(60),
61    ];
62
63    println!("📤 Prepared {} requests for batch processing", requests.len());
64
65    // Method 1: batch processing with a concurrency limit
66    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
67    let start_time = std::time::Instant::now();
68    
69    let responses = client.chat_completion_batch(requests.clone(), Some(2)).await?;
70    
71    let duration = start_time.elapsed();
72    println!("⏱️  Batch processing completed in {:?}", duration);
73
74    // Process the responses
75    for (i, response) in responses.iter().enumerate() {
76        match response {
77            Ok(resp) => {
78                println!(
79                    "✅ Request {}: {}",
80                    i + 1,
81                    resp.choices[0].message.content.as_text()
82                );
83            }
84            Err(e) => {
85                println!("❌ Request {} failed: {}", i + 1, e);
86            }
87        }
88    }
89
90    // Method 2: smart batch processing (strategy chosen automatically)
91    println!("\n🧠 Method 2: Smart batch processing");
92    let start_time = std::time::Instant::now();
93    
94    let responses = client.chat_completion_batch_smart(requests.clone()).await?;
95    
96    let duration = start_time.elapsed();
97    println!("⏱️  Smart batch processing completed in {:?}", duration);
98
99    // Tally successes and failures
100    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
101    let failed: Vec<_> = responses.iter().enumerate().filter_map(|(i, r)| {
102        r.as_ref().err().map(|e| (i, e))
103    }).collect();
104
105    println!("📊 Results:");
106    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
107    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
108    println!("   📈 Success rate: {:.1}%", (successful.len() as f64 / responses.len() as f64) * 100.0);
109
110    // Method 3: unlimited concurrent batch processing
111    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
112    let start_time = std::time::Instant::now();
113    
114    let responses = client.chat_completion_batch(requests, None).await?;
115    
116    let duration = start_time.elapsed();
117    println!("⏱️  Unlimited concurrent processing completed in {:?}", duration);
118
119    // Display all responses
120    for (i, response) in responses.iter().enumerate() {
121        match response {
122            Ok(resp) => {
123                println!(
124                    "✅ Request {}: {}",
125                    i + 1,
126                    resp.choices[0].message.content.as_text()
127                );
128            }
129            Err(e) => {
130                println!("❌ Request {} failed: {}", i + 1, e);
131            }
132        }
133    }
134
135    println!("\n🎉 Batch processing example completed successfully!");
136    Ok(())
137}

pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>

Get the list of supported models

§Returns
  • Result<Vec<String>, AiLibError> - Returns the model list on success, or an error on failure
Examples found in repository:
examples/list_models_smoke.rs (line 25)
4async fn main() {
5    let providers = vec![
6        Provider::Groq,
7        Provider::XaiGrok,
8        Provider::Ollama,
9        Provider::DeepSeek,
10        Provider::Anthropic,
11        Provider::AzureOpenAI,
12        Provider::HuggingFace,
13        Provider::TogetherAI,
14        Provider::Qwen,
15        Provider::OpenAI,
16        Provider::Gemini,
17        Provider::Mistral,
18        Provider::Cohere,
19        // Provider::Bedrock, // removed: Bedrock implementation deferred / not in the public API
20    ];
21
22    for p in providers {
23        println!("--- Provider: {:?} ---", p);
24        match AiClient::new(p) {
25            Ok(client) => match client.list_models().await {
26                Ok(models) => {
27                    println!("Found {} models (showing up to 5):", models.len());
28                    for m in models.into_iter().take(5) {
29                        println!(" - {}", m);
30                    }
31                }
32                Err(e) => println!("list_models error: {:?}", e),
33            },
34            Err(e) => println!("client init error: {:?}", e),
35        }
36    }
37}
More examples
examples/cohere_stream.rs (line 22)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Ensure COHERE_API_KEY env var is set if making real requests
8    let client = AiClient::new(Provider::Cohere)?;
9
10    let request = ChatCompletionRequest::new(
11        "command-xlarge-nightly".to_string(),
12        vec![Message {
13            role: Role::User,
14            content: Content::Text("Write a haiku about rust programming".to_string()),
15            function_call: None,
16        }],
17    )
18    .with_temperature(0.7)
19    .with_max_tokens(60);
20
21    // List models
22    match client.list_models().await {
23        Ok(models) => println!("Models: {:?}", models),
24        Err(e) => eprintln!("Failed to list models: {}", e),
25    }
26
27    // Streaming
28    let mut stream = client.chat_completion_stream(request).await?;
29    while let Some(chunk) = stream.next().await {
30        match chunk {
31            Ok(c) => {
32                for choice in c.choices {
33                    if let Some(delta) = choice.delta.content {
34                        print!("{}", delta);
35                    }
36                }
37            }
38            Err(e) => {
39                eprintln!("Stream error: {}", e);
40                break;
41            }
42        }
43    }
44
45    Ok(())
46}
examples/basic_usage.rs (line 17)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Basic Usage Example");
7    println!("================================");
8
9    // To switch model providers, just change the Provider value
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Get the list of supported models
17    let models = client.list_models().await?;
18    println!("📋 Available models: {:?}", models);
19
20    // Create a chat request
21    let request = ChatCompletionRequest::new(
22        "llama3-8b-8192".to_string(),
23        vec![Message {
24            role: Role::User,
25            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
26            function_call: None,
27        }],
28    )
29    .with_temperature(0.7)
30    .with_max_tokens(100);
31
32    println!("📤 Sending request to model: {}", request.model);
33
34    // Send the request
35    let response = client.chat_completion(request).await?;
36
37    println!("📥 Received response:");
38    println!("   ID: {}", response.id);
39    println!("   Model: {}", response.model);
40    println!(
41        "   Content: {}",
42        response.choices[0].message.content.as_text()
43    );
44    println!("   Usage: {} tokens", response.usage.total_tokens);
45
46    Ok(())
47}
examples/config_driven_example.rs (line 24)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 Config-driven AI-lib example");
7    println!("========================");
8
9    // Demonstrate the advantage of config-driven design: easy provider switching
10    let providers = vec![
11        (Provider::Groq, "Groq"),
12        (Provider::OpenAI, "OpenAI"),
13        (Provider::DeepSeek, "DeepSeek"),
14    ];
15
16    for (provider, name) in providers {
17        println!("\n📡 Testing provider: {}", name);
18
19        // Create a client - just change the enum value
20        let client = AiClient::new(provider)?;
21        println!("✅ Client created successfully: {:?}", client.current_provider());
22
23        // Get the model list
24        match client.list_models().await {
25            Ok(models) => println!("📋 Available models: {:?}", models),
26            Err(e) => println!("⚠️  Failed to get model list: {}", e),
27        }
28
29        // Create a test request
30        let request = ChatCompletionRequest::new(
31            "test-model".to_string(),
32            vec![Message {
33                role: Role::User,
34                content: Content::Text("Hello from ai-lib!".to_string()),
35                function_call: None,
36            }],
37        );
38
39        println!("📤 Request prepared, model: {}", request.model);
40        println!("   (the corresponding API_KEY environment variable must be set to actually make the call)");
41    }
42
43    println!("\n🎯 Core advantages of the config-driven approach:");
44    println!("   • Zero-code switching: just change the Provider enum value");
45    println!("   • Unified interface: all providers use the same API");
46    println!("   • Rapid extension: adding a compatible provider only requires configuration");
47
48    Ok(())
49}
examples/proxy_example.rs (line 40)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🌐 AI-lib 代理服务器支持示例");
7    println!("============================");
8
9    // 检查代理配置
10    match std::env::var("AI_PROXY_URL") {
11        Ok(proxy_url) => {
12            println!("✅ 检测到代理配置: {}", proxy_url);
13            println!("   所有HTTP请求将通过此代理服务器");
14        }
15        Err(_) => {
16            println!("ℹ️  未设置AI_PROXY_URL环境变量");
17            println!("   如需使用代理,请设置: export AI_PROXY_URL=http://proxy.example.com:8080");
18        }
19    }
20
21    println!("\n🚀 创建AI客户端...");
22    let client = AiClient::new(Provider::Groq)?;
23    println!("✅ 客户端创建成功,提供商: {:?}", client.current_provider());
24
25    // 创建测试请求
26    let request = ChatCompletionRequest::new(
27        "llama3-8b-8192".to_string(),
28        vec![Message {
29            role: Role::User,
30            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
31            function_call: None,
32        }],
33    );
34
35    println!("\n📤 准备发送请求...");
36    println!("   模型: {}", request.model);
37    println!("   消息: {}", request.messages[0].content.as_text());
38
39    // 获取模型列表(这个请求也会通过代理)
40    match client.list_models().await {
41        Ok(models) => {
42            println!("\n📋 通过代理获取到的模型列表:");
43            for model in models {
44                println!("   • {}", model);
45            }
46        }
47        Err(e) => {
48            println!("\n⚠️  获取模型列表失败: {}", e);
49            println!("   这可能是由于:");
50            println!("   • 未设置GROQ_API_KEY环境变量");
51            println!("   • 代理服务器配置错误");
52            println!("   • 网络连接问题");
53        }
54    }
55
56    println!("\n💡 代理配置说明:");
57    println!("   • 设置环境变量: AI_PROXY_URL=http://your-proxy:port");
58    println!("   • 支持HTTP和HTTPS代理");
59    println!("   • 支持带认证的代理: http://user:pass@proxy:port");
60    println!("   • 所有AI提供商都会自动使用此代理配置");
61
62    Ok(())
63}
examples/openai_test.rs (line 25)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🤖 OpenAI Provider 测试");
7    println!("=====================");
8
9    // 检查API密钥
10    match std::env::var("OPENAI_API_KEY") {
11        Ok(_) => println!("✅ 检测到 OPENAI_API_KEY"),
12        Err(_) => {
13            println!("❌ 未设置 OPENAI_API_KEY 环境变量");
14            println!("   请设置: export OPENAI_API_KEY=your_api_key");
15            return Ok(());
16        }
17    }
18
19    // 创建OpenAI客户端
20    let client = AiClient::new(Provider::OpenAI)?;
21    println!("✅ OpenAI客户端创建成功");
22
23    // 获取模型列表
24    println!("\n📋 获取OpenAI模型列表...");
25    match client.list_models().await {
26        Ok(models) => {
27            println!("✅ 成功获取 {} 个模型", models.len());
28            println!("   常用模型:");
29            for model in models.iter().filter(|m| m.contains("gpt")) {
30                println!("   • {}", model);
31            }
32        }
33        Err(e) => println!("❌ 获取模型列表失败: {}", e),
34    }
35
36    // 测试聊天完成
37    println!("\n💬 测试聊天完成...");
38    let request = ChatCompletionRequest::new(
39        "gpt-3.5-turbo".to_string(),
40        vec![Message {
41            role: Role::User,
42            content: Content::Text(
43                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
44                    .to_string(),
45            ),
46            function_call: None,
47        }],
48    )
49    .with_max_tokens(20)
50    .with_temperature(0.7);
51
52    match client.chat_completion(request).await {
53        Ok(response) => {
54            println!("✅ 聊天完成成功!");
55            println!("   模型: {}", response.model);
56            println!("   响应: {}", response.choices[0].message.content.as_text());
57            println!(
58                "   Token使用: {} (prompt: {}, completion: {})",
59                response.usage.total_tokens,
60                response.usage.prompt_tokens,
61                response.usage.completion_tokens
62            );
63        }
64        Err(e) => println!("❌ 聊天完成失败: {}", e),
65    }
66
67    println!("\n🎯 OpenAI配置驱动测试完成!");
68    println!("   这证明了配置驱动架构的强大之处:");
69    println!("   • 无需编写OpenAI特定代码");
70    println!("   • 只需在ProviderConfigs中添加配置");
71    println!("   • 自动支持所有OpenAI兼容的功能");
72
73    Ok(())
74}
Source

pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>

Switch the AI model provider

§Arguments
  • provider - the new provider to use
§Returns
  • Result<(), AiLibError> - returns () on success, or an error on failure
§Example
use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Switch from Groq back to Groq (demonstrates the switching call)
client.switch_provider(Provider::Groq)?;
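
The inline example above switches from Groq back to Groq; a slightly more realistic sketch reuses one client across two backends. This assumes the credentials for both providers (e.g. GROQ_API_KEY and OPENAI_API_KEY) are available, since constructing or switching may require them:

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AiClient::new(Provider::Groq)?;
    println!("Before: {:?}", client.current_provider());

    // Reuse the same client instance with a different backend
    client.switch_provider(Provider::OpenAI)?;
    println!("After: {:?}", client.current_provider());

    Ok(())
}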
Source

pub fn current_provider(&self) -> Provider

Get the provider currently in use
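
§Example

A minimal sketch (assuming GROQ_API_KEY is set so that client construction succeeds):

use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
// Provider implements Debug, so the result can be printed with {:?}
println!("Current provider: {:?}", client.current_provider());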

Examples found in repository?
examples/basic_usage.rs (line 13)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Basic Usage Example");
7    println!("================================");
8
9    // To switch model providers, just change the Provider value
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Fetch the list of supported models
17    let models = client.list_models().await?;
18    println!("📋 Available models: {:?}", models);
19
20    // Build the chat request
21    let request = ChatCompletionRequest::new(
22        "llama3-8b-8192".to_string(),
23        vec![Message {
24            role: Role::User,
25            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
26            function_call: None,
27        }],
28    )
29    .with_temperature(0.7)
30    .with_max_tokens(100);
31
32    println!("📤 Sending request to model: {}", request.model);
33
34    // Send the request
35    let response = client.chat_completion(request).await?;
36
37    println!("📥 Received response:");
38    println!("   ID: {}", response.id);
39    println!("   Model: {}", response.model);
40    println!(
41        "   Content: {}",
42        response.choices[0].message.content.as_text()
43    );
44    println!("   Usage: {} tokens", response.usage.total_tokens);
45
46    Ok(())
47}
examples/config_driven_example.rs (line 21)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 配置驱动的AI-lib示例");
7    println!("========================");
8
9    // 演示配置驱动的优势:轻松切换提供商
10    let providers = vec![
11        (Provider::Groq, "Groq"),
12        (Provider::OpenAI, "OpenAI"),
13        (Provider::DeepSeek, "DeepSeek"),
14    ];
15
16    for (provider, name) in providers {
17        println!("\n📡 测试提供商: {}", name);
18
19        // 创建客户端 - 只需改变枚举值
20        let client = AiClient::new(provider)?;
21        println!("✅ 客户端创建成功: {:?}", client.current_provider());
22
23        // 获取模型列表
24        match client.list_models().await {
25            Ok(models) => println!("📋 可用模型: {:?}", models),
26            Err(e) => println!("⚠️  获取模型列表失败: {}", e),
27        }
28
29        // 创建测试请求
30        let request = ChatCompletionRequest::new(
31            "test-model".to_string(),
32            vec![Message {
33                role: Role::User,
34                content: Content::Text("Hello from ai-lib!".to_string()),
35                function_call: None,
36            }],
37        );
38
39        println!("📤 请求已准备,模型: {}", request.model);
40        println!("   (需要设置对应的API_KEY环境变量才能实际调用)");
41    }
42
43    println!("\n🎯 配置驱动的核心优势:");
44    println!("   • 零代码切换: 只需改变Provider枚举值");
45    println!("   • 统一接口: 所有提供商使用相同的API");
46    println!("   • 快速扩展: 新增兼容提供商只需添加配置");
47
48    Ok(())
49}
examples/proxy_example.rs (line 23)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🌐 AI-lib 代理服务器支持示例");
7    println!("============================");
8
9    // 检查代理配置
10    match std::env::var("AI_PROXY_URL") {
11        Ok(proxy_url) => {
12            println!("✅ 检测到代理配置: {}", proxy_url);
13            println!("   所有HTTP请求将通过此代理服务器");
14        }
15        Err(_) => {
16            println!("ℹ️  未设置AI_PROXY_URL环境变量");
17            println!("   如需使用代理,请设置: export AI_PROXY_URL=http://proxy.example.com:8080");
18        }
19    }
20
21    println!("\n🚀 创建AI客户端...");
22    let client = AiClient::new(Provider::Groq)?;
23    println!("✅ 客户端创建成功,提供商: {:?}", client.current_provider());
24
25    // 创建测试请求
26    let request = ChatCompletionRequest::new(
27        "llama3-8b-8192".to_string(),
28        vec![Message {
29            role: Role::User,
30            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
31            function_call: None,
32        }],
33    );
34
35    println!("\n📤 准备发送请求...");
36    println!("   模型: {}", request.model);
37    println!("   消息: {}", request.messages[0].content.as_text());
38
39    // 获取模型列表(这个请求也会通过代理)
40    match client.list_models().await {
41        Ok(models) => {
42            println!("\n📋 通过代理获取到的模型列表:");
43            for model in models {
44                println!("   • {}", model);
45            }
46        }
47        Err(e) => {
48            println!("\n⚠️  获取模型列表失败: {}", e);
49            println!("   这可能是由于:");
50            println!("   • 未设置GROQ_API_KEY环境变量");
51            println!("   • 代理服务器配置错误");
52            println!("   • 网络连接问题");
53        }
54    }
55
56    println!("\n💡 代理配置说明:");
57    println!("   • 设置环境变量: AI_PROXY_URL=http://your-proxy:port");
58    println!("   • 支持HTTP和HTTPS代理");
59    println!("   • 支持带认证的代理: http://user:pass@proxy:port");
60    println!("   • 所有AI提供商都会自动使用此代理配置");
61
62    Ok(())
63}
examples/batch_processing.rs (line 13)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6    println!("🚀 AI-lib Batch Processing Example");
7    println!("==================================");
8
9    // Create the client
10    let client = AiClient::new(Provider::Groq)?;
11    println!(
12        "✅ Created client with provider: {:?}",
13        client.current_provider()
14    );
15
16    // Prepare several requests
17    let requests = vec![
18        ChatCompletionRequest::new(
19            "llama3-8b-8192".to_string(),
20            vec![Message {
21                role: Role::User,
22                content: Content::Text("What is the capital of France?".to_string()),
23                function_call: None,
24            }],
25        )
26        .with_temperature(0.7)
27        .with_max_tokens(50),
28
29        ChatCompletionRequest::new(
30            "llama3-8b-8192".to_string(),
31            vec![Message {
32                role: Role::User,
33                content: Content::Text("What is 2 + 2?".to_string()),
34                function_call: None,
35            }],
36        )
37        .with_temperature(0.1)
38        .with_max_tokens(20),
39
40        ChatCompletionRequest::new(
41            "llama3-8b-8192".to_string(),
42            vec![Message {
43                role: Role::User,
44                content: Content::Text("Tell me a short joke.".to_string()),
45                function_call: None,
46            }],
47        )
48        .with_temperature(0.9)
49        .with_max_tokens(100),
50
51        ChatCompletionRequest::new(
52            "llama3-8b-8192".to_string(),
53            vec![Message {
54                role: Role::User,
55                content: Content::Text("What is the largest planet in our solar system?".to_string()),
56                function_call: None,
57            }],
58        )
59        .with_temperature(0.5)
60        .with_max_tokens(60),
61    ];
62
63    println!("📤 Prepared {} requests for batch processing", requests.len());
64
65    // Method 1: batch processing with a concurrency limit
66    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
67    let start_time = std::time::Instant::now();
68    
69    let responses = client.chat_completion_batch(requests.clone(), Some(2)).await?;
70    
71    let duration = start_time.elapsed();
72    println!("⏱️  Batch processing completed in {:?}", duration);
73
74    // Handle the responses
75    for (i, response) in responses.iter().enumerate() {
76        match response {
77            Ok(resp) => {
78                println!(
79                    "✅ Request {}: {}",
80                    i + 1,
81                    resp.choices[0].message.content.as_text()
82                );
83            }
84            Err(e) => {
85                println!("❌ Request {} failed: {}", i + 1, e);
86            }
87        }
88    }
89
90    // Method 2: smart batch processing (strategy chosen automatically)
91    println!("\n🧠 Method 2: Smart batch processing");
92    let start_time = std::time::Instant::now();
93    
94    let responses = client.chat_completion_batch_smart(requests.clone()).await?;
95    
96    let duration = start_time.elapsed();
97    println!("⏱️  Smart batch processing completed in {:?}", duration);
98
99    // Tally successes and failures
100    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
101    let failed: Vec<_> = responses.iter().enumerate().filter_map(|(i, r)| {
102        r.as_ref().err().map(|e| (i, e))
103    }).collect();
104
105    println!("📊 Results:");
106    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
107    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
108    println!("   📈 Success rate: {:.1}%", (successful.len() as f64 / responses.len() as f64) * 100.0);
109
110    // Method 3: batch processing with unlimited concurrency
111    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
112    let start_time = std::time::Instant::now();
113    
114    let responses = client.chat_completion_batch(requests, None).await?;
115    
116    let duration = start_time.elapsed();
117    println!("⏱️  Unlimited concurrent processing completed in {:?}", duration);
118
119    // Print all responses
120    for (i, response) in responses.iter().enumerate() {
121        match response {
122            Ok(resp) => {
123                println!(
124                    "✅ Request {}: {}",
125                    i + 1,
126                    resp.choices[0].message.content.as_text()
127                );
128            }
129            Err(e) => {
130                println!("❌ Request {} failed: {}", i + 1, e);
131            }
132        }
133    }
134
135    println!("\n🎉 Batch processing example completed successfully!");
136    Ok(())
137}

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,