Struct ChatCompletionRequest

pub struct ChatCompletionRequest {
    pub model: String,
    pub messages: Vec<Message>,
    pub temperature: Option<f32>,
    pub max_tokens: Option<u32>,
    pub stream: Option<bool>,
    pub top_p: Option<f32>,
    pub frequency_penalty: Option<f32>,
    pub presence_penalty: Option<f32>,
    pub functions: Option<Vec<Tool>>,
    pub function_call: Option<FunctionCallPolicy>,
}

Fields

model: String
messages: Vec<Message>
temperature: Option<f32>
max_tokens: Option<u32>
stream: Option<bool>
top_p: Option<f32>
frequency_penalty: Option<f32>
presence_penalty: Option<f32>

functions: Option<Vec<Tool>>
    Optional function/tool definitions for function calling.

function_call: Option<FunctionCallPolicy>
    Function call policy: "auto", "none", or a specific function name.
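
As a quick orientation (a minimal sketch, not taken from the repository; the import path and "any-model" are placeholder assumptions for illustration), the required fields are set through new() and the optional fields through the builder helpers documented below:

use ai_lib::{ChatCompletionRequest, Content, Message, Role};

// "any-model" is a placeholder model id, not a recommendation.
let request = ChatCompletionRequest::new(
    "any-model".to_string(),
    vec![Message {
        role: Role::User,
        content: Content::Text("Hello!".to_string()),
        function_call: None,
    }],
)
.with_temperature(0.7)
.with_max_tokens(64);

// Optional fields are public, so they can also be set directly;
// presumably (assumption) new() leaves them as None.
assert_eq!(request.stream, None);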

Implementations

impl ChatCompletionRequest

pub fn new(model: String, messages: Vec<Message>) -> Self

Examples found in repository:
examples/multimodal_example.rs (lines 10-21)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Multimodal example: image + audio content in a message");

    let _client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "multimodal-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_image(
                Some("https://example.com/dog.jpg".into()),
                Some("image/jpeg".into()),
                Some("dog.jpg".into()),
            ),
            function_call: None,
        }],
    );

    println!(
        "Prepared multimodal request; image URL: {}",
        request.messages[0].content.as_text()
    );

    // Note: this example demonstrates the type usage only and does not call the API.
    Ok(())
}
More examples:
examples/cohere_stream.rs (lines 10-17)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ensure COHERE_API_KEY env var is set if making real requests
    let client = AiClient::new(Provider::Cohere)?;

    let request = ChatCompletionRequest::new(
        "command-xlarge-nightly".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Write a haiku about rust programming".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(60);

    // List models
    match client.list_models().await {
        Ok(models) => println!("Models: {:?}", models),
        Err(e) => eprintln!("Failed to list models: {}", e),
    }

    // Streaming
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }

    Ok(())
}
examples/basic_usage.rs (lines 21-28)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Basic Usage Example");
    println!("================================");

    // To switch model providers, just change the Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Fetch the list of supported models
    let models = client.list_models().await?;
    println!("📋 Available models: {:?}", models);

    // Build the chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("📤 Sending request to model: {}", request.model);

    // Send the request
    let response = client.chat_completion(request).await?;

    println!("📥 Received response:");
    println!("   ID: {}", response.id);
    println!("   Model: {}", response.model);
    println!(
        "   Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!("   Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
examples/config_driven_example.rs (lines 30-37)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 Config-driven ai-lib example");
    println!("========================");

    // Demonstrate the advantage of config-driven design: easy provider switching
    let providers = vec![
        (Provider::Groq, "Groq"),
        (Provider::OpenAI, "OpenAI"),
        (Provider::DeepSeek, "DeepSeek"),
    ];

    for (provider, name) in providers {
        println!("\n📡 Testing provider: {}", name);

        // Create a client: just change the enum value
        let client = AiClient::new(provider)?;
        println!("✅ Client created successfully: {:?}", client.current_provider());

        // Fetch the model list
        match client.list_models().await {
            Ok(models) => println!("📋 Available models: {:?}", models),
            Err(e) => println!("⚠️  Failed to fetch model list: {}", e),
        }

        // Build a test request
        let request = ChatCompletionRequest::new(
            "test-model".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello from ai-lib!".to_string()),
                function_call: None,
            }],
        );

        println!("📤 Request prepared, model: {}", request.model);
        println!("   (set the matching API_KEY environment variable to make real calls)");
    }

    println!("\n🎯 Core advantages of the config-driven approach:");
    println!("   • Zero-code switching: just change the Provider enum value");
    println!("   • Unified interface: all providers share the same API");
    println!("   • Fast extension: adding a compatible provider only requires new configuration");

    Ok(())
}
examples/debug_request.rs (lines 10-17)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Debugging request format");
    println!("===============");

    // Build a test request
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Raw request:");
    println!("   Model: {}", request.model);
    println!("   Message count: {}", request.messages.len());
    println!(
        "   Message[0]: {:?} - {}",
        request.messages[0].role,
        request.messages[0].content.as_text()
    );
    println!("   max_tokens: {:?}", request.max_tokens);

    // Test OpenAI
    println!("\n🤖 Testing OpenAI...");
    match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            match client.chat_completion(request.clone()).await {
                Ok(response) => {
                    println!("✅ Success!");
                    println!("   Response: {}", response.choices[0].message.content.as_text());
                }
                Err(e) => {
                    println!("❌ Failed: {}", e);

                    // A 400 error indicates a problem with the request format
                    if e.to_string().contains("400") {
                        println!("   This usually means the request format is incorrect");
                        println!("   Check whether the request contains the required fields...");
                    }
                }
            }
        }
        Err(e) => println!("❌ Client creation failed: {}", e),
    }

    Ok(())
}
examples/test_without_proxy.rs (lines 18-27)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 Testing connections without a proxy");
    println!("======================");

    // Temporarily remove the proxy setting
    std::env::remove_var("AI_PROXY_URL");

    println!("ℹ️  AI_PROXY_URL has been temporarily removed");

    // Test DeepSeek (reachable without a proxy)
    println!("\n🔍 Testing DeepSeek (direct connection):");
    match AiClient::new(Provider::DeepSeek) {
        Ok(client) => {
            let request = ChatCompletionRequest::new(
                "deepseek-chat".to_string(),
                vec![Message {
                    role: Role::User,
                    content: Content::Text(
                        "Hello! Please respond with just 'Hi' to test.".to_string(),
                    ),
                    function_call: None,
                }],
            )
            .with_max_tokens(5);

            match client.chat_completion(request).await {
                Ok(response) => {
                    println!("✅ DeepSeek direct connection succeeded!");
                    println!("   Response: {}", response.choices[0].message.content.as_text());
                    println!("   Token usage: {}", response.usage.total_tokens);
                }
                Err(e) => {
                    println!("❌ DeepSeek request failed: {}", e);
                    if e.to_string().contains("402") {
                        println!("   (insufficient-balance error, so the connection itself works)");
                    }
                }
            }
        }
        Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
    }

    println!("\n💡 Conclusions:");
    println!("   • DeepSeek can be reached directly, no proxy needed");
    println!("   • OpenAI and Groq must be reached through a proxy");
    println!("   • A proxy may rewrite request content and cause format errors");
    println!("   • Check the proxy server configuration");

    Ok(())
}

pub fn with_temperature(self, temperature: f32) -> Self

Examples found in repository:
examples/cohere_stream.rs (line 18): same listing as shown under new() above.
More examples:
examples/basic_usage.rs (line 29): same listing as shown under new() above.
examples/openai_test.rs (line 50)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI provider test");
    println!("=====================");

    // Check the API key
    match std::env::var("OPENAI_API_KEY") {
        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
        Err(_) => {
            println!("❌ OPENAI_API_KEY environment variable not set");
            println!("   Set it with: export OPENAI_API_KEY=your_api_key");
            return Ok(());
        }
    }

    // Create the OpenAI client
    let client = AiClient::new(Provider::OpenAI)?;
    println!("✅ OpenAI client created successfully");

    // Fetch the model list
    println!("\n📋 Fetching OpenAI model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Fetched {} models", models.len());
            println!("   Common models:");
            for model in models.iter().filter(|m| m.contains("gpt")) {
                println!("   • {}", model);
            }
        }
        Err(e) => println!("❌ Failed to fetch model list: {}", e),
    }

    // Test chat completion
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
                    .to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.7);

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion succeeded!");
            println!("   Model: {}", response.model);
            println!("   Response: {}", response.choices[0].message.content.as_text());
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
        }
        Err(e) => println!("❌ Chat completion failed: {}", e),
    }

    println!("\n🎯 OpenAI config-driven test complete!");
    println!("   This demonstrates the strength of the config-driven architecture:");
    println!("   • No OpenAI-specific code required");
    println!("   • Only a configuration entry in ProviderConfigs is needed");
    println!("   • All OpenAI-compatible features are supported automatically");

    Ok(())
}
examples/function_call_openai.rs (line 36)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔧 OpenAI Function Calling example (ai-lib)");

    // Ensure OPENAI_API_KEY is set in env before running
    let client = AiClient::new(Provider::OpenAI)?;

    // Build a simple user message
    let user_msg = Message {
        role: Role::User,
        content: Content::Text("Please call the ascii_horse tool with size=3".to_string()),
        function_call: None,
    };

    // Define a Tool (JSON Schema for parameters)
    let ascii_horse_tool = Tool {
        name: "ascii_horse".to_string(),
        description: Some("Draws an ASCII horse of given size".to_string()),
        parameters: Some(json!({
            "type": "object",
            "properties": {
                "size": { "type": "integer", "description": "Size of the horse" }
            },
            "required": ["size"]
        })),
    };

    let mut req = ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![user_msg]);
    req.functions = Some(vec![ascii_horse_tool]);
    req.function_call = Some(FunctionCallPolicy::Auto("auto".to_string()));
    req = req.with_max_tokens(200).with_temperature(0.0);

    println!("📤 Sending request to OpenAI (model={})", req.model);

    let resp = client.chat_completion(req).await?;

    // Handle a possible function call from the model: execute locally and send the result back
    for choice in resp.choices {
        let msg = choice.message;
        if let Some(fc) = msg.function_call {
            println!("🛠️  Model invoked function: {}", fc.name);
            let args = fc.arguments.unwrap_or(serde_json::json!(null));
            println!("   arguments: {}", args);

            // Simple local tool: ascii_horse
            if fc.name == "ascii_horse" {
                // Parse size param
                let size = args.get("size").and_then(|v| v.as_i64()).unwrap_or(3) as usize;
                let horse = generate_ascii_horse(size);
                println!("⚙️ Executed ascii_horse locally, output:\n{}", horse);

                // Send follow-up message with tool result as assistant message
                let tool_msg = Message {
                    role: Role::Assistant,
                    content: Content::Text(horse.clone()),
                    function_call: None,
                };

                let mut followup =
                    ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![tool_msg]);
                followup = followup.with_max_tokens(200).with_temperature(0.0);
                let follow_resp = client.chat_completion(followup).await?;
                for fc_choice in follow_resp.choices {
                    println!(
                        "🗨️ Final model response: {}",
                        fc_choice.message.content.as_text()
                    );
                }
            }
        } else {
            println!("💬 Model message: {}", msg.content.as_text());
        }
    }

    Ok(())
}
examples/test_streaming.rs (line 32)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Streaming response test");
    println!("================");

    // Check the Groq API key
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY not set");
        return Ok(());
    }

    // Create the Groq client
    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    // Build a streaming request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Please write a short poem about AI in exactly 4 lines.".to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(100)
    .with_temperature(0.7);

    println!("\n📤 Sending streaming request...");
    println!("   Model: {}", request.model);
    println!("   Message: {}", request.messages[0].content.as_text());

    // Get the streaming response
    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("\n🌊 Receiving streaming response:");
            println!("{}", "─".repeat(50));

            let mut full_content = String::new();
            let mut chunk_count = 0;

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        chunk_count += 1;

                        if let Some(choice) = chunk.choices.first() {
                            if let Some(content) = &choice.delta.content {
                                print!("{}", content);
                                full_content.push_str(content);

                                // Flush output
                                use std::io::{self, Write};
                                io::stdout().flush().unwrap();
                            }

                            // Check for completion
                            if choice.finish_reason.is_some() {
                                println!("\n{}", "─".repeat(50));
                                println!("✅ Streaming response complete!");
                                println!("   Finish reason: {:?}", choice.finish_reason);
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming response error: {}", e);
                        break;
                    }
                }
            }

            println!("\n📊 Streaming response statistics:");
            println!("   Chunk count: {}", chunk_count);
            println!("   Total content length: {} characters", full_content.len());
            println!("   Full content: \"{}\"", full_content.trim());
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    println!("\n💡 Advantages of streaming responses:");
    println!("   • Generated content is shown in real time");
    println!("   • Better user experience");
    println!("   • Generation can be stopped early");
    println!("   • Well suited to long-form generation");

    Ok(())
}
examples/test_openai_specific.rs (line 65)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI-specific test");
    println!("==================");

    // Check the OpenAI API key
    match std::env::var("OPENAI_API_KEY") {
        Ok(key) => {
            let masked = format!("{}...{}", &key[..8], &key[key.len() - 4..]);
            println!("🔑 OpenAI API Key: {}", masked);
        }
        Err(_) => {
            println!("❌ OPENAI_API_KEY not set");
            return Ok(());
        }
    }

    // Create the OpenAI client
    println!("\n📡 Creating OpenAI client...");
    let client = match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            println!("✅ Client created successfully");
            client
        }
        Err(e) => {
            println!("❌ Client creation failed: {}", e);
            return Ok(());
        }
    };

    // Test the model list
    println!("\n📋 Fetching model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Fetched {} models", models.len());

            // Show GPT models
            let gpt_models: Vec<_> = models
                .iter()
                .filter(|m| m.contains("gpt"))
                .take(5)
                .collect();
            println!("   GPT models: {:?}", gpt_models);
        }
        Err(e) => {
            println!("❌ Failed to fetch model list: {}", e);
            return Ok(());
        }
    }

    // Test chat completion
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Summarize the following text: ...".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.0); // temperature 0 for deterministic output

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion succeeded!");
            println!("   Model: {}", response.model);
            println!(
                "   Response: '{}'",
                response.choices[0].message.content.as_text()
            );
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
            println!("   Finish reason: {:?}", response.choices[0].finish_reason);
        }
        Err(e) => {
            println!("❌ Chat completion failed: {}", e);

            // Analyze the error type
            let error_str = e.to_string();
            if error_str.contains("400") {
                println!("   → request format error");
            } else if error_str.contains("401") {
                println!("   → authentication error, check the API key");
            } else if error_str.contains("429") {
                println!("   → rate-limit error");
            } else if error_str.contains("500") {
                println!("   → server error");
            }
        }
    }

    println!("\n🎯 OpenAI test complete!");

    Ok(())
}

pub fn with_max_tokens(self, max_tokens: u32) -> Self

Examples found in repository:
examples/cohere_stream.rs (line 19): same listing as shown under new() above.
More examples:
examples/basic_usage.rs (line 30): same listing as shown under new() above.
examples/debug_request.rs (line 18): same listing as shown under new() above.
examples/test_without_proxy.rs (line 28): same listing as shown under new() above.
examples/compare_requests.rs (line 17)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Comparing request formats");
    println!("================");

    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Test request:");
    println!("   Model: {}", request.model);
    println!("   Message: {:?}", request.messages[0]);
    println!("   max_tokens: {:?}", request.max_tokens);

    // Test Groq (works correctly)
    println!("\n🟢 Groq (works correctly):");
    if let Ok(_groq_client) = AiClient::new(Provider::Groq) {
        // Groq uses a dedicated adapter, which we know works
        println!("   ✅ Uses a dedicated adapter (GroqAdapter)");
        println!("   ✅ Request format is correct");
    }

    // Test OpenAI (problematic)
    println!("\n🔴 OpenAI (problematic):");
    if let Ok(_openai_client) = AiClient::new(Provider::OpenAI) {
        println!("   ❌ Uses the config-driven adapter (GenericAdapter)");
        println!("   ❌ Request format error: 'you must provide a model parameter'");
        println!("   🔍 Possible causes:");
        println!("      - JSON serialization issue");
        println!("      - Field mapping error");
        println!("      - Request body construction error");
    }

    println!("\n💡 Possible solutions:");
    println!("   1. Check GenericAdapter's convert_request method");
    println!("   2. Make sure the JSON field names are correct");
    println!("   3. Validate the request body structure");
    println!("   4. Consider a dedicated adapter for OpenAI");

    // Suggested fixes
    println!("\n🔧 Suggested fixes:");
    println!("   Option 1: Fix GenericAdapter's request conversion logic");
    println!("   Option 2: Create a dedicated adapter for OpenAI (like Groq)");
    println!("   Option 3: Add more debug output to locate the problem");

    Ok(())
}
examples/openai_test.rs (line 49): same listing as shown under with_temperature() above.

pub fn ignore_previous(self) -> Self

Drops previous conversational messages while keeping system messages and the last non-system message. Useful for resetting context while preserving system instructions.
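
A minimal sketch of the intended behavior (the Role::System variant and the exact retained count are assumptions inferred from the description above, not confirmed by this page):

// Build a conversation: one system message plus two user/assistant exchanges.
let request = ChatCompletionRequest::new(
    "any-model".to_string(),
    vec![
        Message { role: Role::System, content: Content::Text("You are terse.".to_string()), function_call: None },
        Message { role: Role::User, content: Content::Text("First question".to_string()), function_call: None },
        Message { role: Role::Assistant, content: Content::Text("First answer".to_string()), function_call: None },
        Message { role: Role::User, content: Content::Text("Second question".to_string()), function_call: None },
    ],
)
.ignore_previous();

// Per the description, only the system message and the last
// non-system message ("Second question") should remain.
assert_eq!(request.messages.len(), 2);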

Trait Implementations

impl Clone for ChatCompletionRequest

    fn clone(&self) -> ChatCompletionRequest
        Returns a duplicate of the value.

    fn clone_from(&mut self, source: &Self)
        Performs copy-assignment from source.

impl Debug for ChatCompletionRequest

    fn fmt(&self, f: &mut Formatter<'_>) -> Result
        Formats the value using the given formatter.

impl<'de> Deserialize<'de> for ChatCompletionRequest

    fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
    where __D: Deserializer<'de>,
        Deserialize this value from the given Serde deserializer.

impl Serialize for ChatCompletionRequest

    fn serialize<__S>(&self, __serializer: __S) -> Result<__S::Ok, __S::Error>
    where __S: Serializer,
        Serialize this value into the given Serde serializer.
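
Because the struct implements Serialize and Deserialize, a request can be round-tripped through JSON. A sketch using serde_json (assumed available; the exact wire field names depend on any serde attributes not shown on this page):

let request = ChatCompletionRequest::new(
    "any-model".to_string(),
    vec![Message {
        role: Role::User,
        content: Content::Text("Hi".to_string()),
        function_call: None,
    }],
)
.with_max_tokens(10);

// Serialize to JSON and parse it back.
let json = serde_json::to_string(&request).expect("serialize");
let parsed: ChatCompletionRequest = serde_json::from_str(&json).expect("deserialize");
assert_eq!(parsed.model, "any-model");
assert_eq!(parsed.max_tokens, Some(10));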

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

    fn type_id(&self) -> TypeId
        Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

    fn borrow(&self) -> &T
        Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

    fn borrow_mut(&mut self) -> &mut T
        Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

    unsafe fn clone_to_uninit(&self, dest: *mut u8)
        🔬 Nightly-only experimental API (clone_to_uninit). Performs copy-assignment from self to dest.

impl<T> From<T> for T

    fn from(t: T) -> T
        Returns the argument unchanged.

impl<T> Instrument for T

    fn instrument(self, span: Span) -> Instrumented<Self>
        Instruments this type with the provided Span, returning an Instrumented wrapper.

    fn in_current_span(self) -> Instrumented<Self>
        Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>,

    fn into(self) -> U
        Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> PolicyExt for T
where T: ?Sized,

    fn and<P, B, E>(self, other: P) -> And<T, P>
    where T: Policy<B, E>, P: Policy<B, E>,
        Creates a new Policy that returns Action::Follow only if self and other return Action::Follow.

    fn or<P, B, E>(self, other: P) -> Or<T, P>
    where T: Policy<B, E>, P: Policy<B, E>,
        Creates a new Policy that returns Action::Follow if either self or other returns Action::Follow.

impl<T> ToOwned for T
where T: Clone,

    type Owned = T
        The resulting type after obtaining ownership.

    fn to_owned(&self) -> T
        Creates owned data from borrowed data, usually by cloning.

    fn clone_into(&self, target: &mut T)
        Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

    type Error = Infallible
        The type returned in the event of a conversion error.

    fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
        Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

    type Error = <U as TryFrom<T>>::Error
        The type returned in the event of a conversion error.

    fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
        Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

    fn vzip(self) -> V

impl<T> WithSubscriber for T

    fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
    where S: Into<Dispatch>,
        Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

    fn with_current_subscriber(self) -> WithDispatch<Self>
        Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.

impl<T> DeserializeOwned for T
where T: for<'de> Deserialize<'de>,

impl<T> ErasedDestructor for T
where T: 'static,