pub struct ChatCompletionRequest {
    pub model: String,
    pub messages: Vec<Message>,
    pub temperature: Option<f32>,
    pub max_tokens: Option<u32>,
    pub stream: Option<bool>,
    pub top_p: Option<f32>,
    pub frequency_penalty: Option<f32>,
    pub presence_penalty: Option<f32>,
}
Fields

model: String
messages: Vec<Message>
temperature: Option<f32>
max_tokens: Option<u32>
stream: Option<bool>
top_p: Option<f32>
frequency_penalty: Option<f32>
presence_penalty: Option<f32>
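All fields are public, so a request can also be assembled as a plain struct literal instead of going through the builder methods below. A minimal sketch, assuming Message and Role are importable from the crate root as in the examples on this page, and that Message has exactly the role and content fields those examples use:

use ai_lib::{ChatCompletionRequest, Message, Role};

let request = ChatCompletionRequest {
    model: "llama3-8b-8192".to_string(),
    messages: vec![Message {
        role: Role::User,
        content: "Hello!".to_string(),
    }],
    temperature: Some(0.7), // optional fields stay None when unused
    max_tokens: Some(100),
    stream: None,
    top_p: None,
    frequency_penalty: None,
    presence_penalty: None,
};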
Implementations

impl ChatCompletionRequest

pub fn new(model: String, messages: Vec<Message>) -> Self
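Judging by the repository examples below, new sets only model and messages and leaves every optional field unset; a minimal sketch of the expected behavior (the None defaults are an assumption, not documented on this page):

let request = ChatCompletionRequest::new(
    "gpt-3.5-turbo".to_string(),
    vec![Message { role: Role::User, content: "Hi".to_string() }],
);
assert!(request.temperature.is_none()); // assumed default
assert!(request.max_tokens.is_none());  // assumed default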
Examples found in repository:
examples/cohere_stream.rs (lines 9-12)
 5 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6     // Ensure COHERE_API_KEY env var is set if making real requests
 7     let client = AiClient::new(Provider::Cohere)?;
 8 
 9     let request = ChatCompletionRequest::new(
10         "command-xlarge-nightly".to_string(),
11         vec![Message { role: Role::User, content: "Write a haiku about rust programming".to_string() }]
12     ).with_temperature(0.7).with_max_tokens(60);
13 
14     // List models
15     match client.list_models().await {
16         Ok(models) => println!("Models: {:?}", models),
17         Err(e) => eprintln!("Failed to list models: {}", e),
18     }
19 
20     // Streaming
21     let mut stream = client.chat_completion_stream(request).await?;
22     while let Some(chunk) = stream.next().await {
23         match chunk {
24             Ok(c) => {
25                 for choice in c.choices {
26                     if let Some(delta) = choice.delta.content {
27                         print!("{}", delta);
28                     }
29                 }
30             }
31             Err(e) => {
32                 eprintln!("Stream error: {}", e);
33                 break;
34             }
35         }
36     }
37 
38     Ok(())
39 }
More examples
examples/basic_usage.rs (lines 17-23)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🚀 AI-lib Basic Usage Example");
 6     println!("================================");
 7 
 8     // To switch model providers, just change the Provider value
 9     let client = AiClient::new(Provider::Groq)?;
10     println!("✅ Created client with provider: {:?}", client.current_provider());
11 
12     // Fetch the list of supported models
13     let models = client.list_models().await?;
14     println!("📋 Available models: {:?}", models);
15 
16     // Build the chat request
17     let request = ChatCompletionRequest::new(
18         "llama3-8b-8192".to_string(),
19         vec![Message {
20             role: Role::User,
21             content: "Hello! Please introduce yourself briefly.".to_string(),
22         }],
23     ).with_temperature(0.7)
24      .with_max_tokens(100);
25 
26     println!("📤 Sending request to model: {}", request.model);
27 
28     // Send the request
29     let response = client.chat_completion(request).await?;
30 
31     println!("📥 Received response:");
32     println!("   ID: {}", response.id);
33     println!("   Model: {}", response.model);
34     println!("   Content: {}", response.choices[0].message.content);
35     println!("   Usage: {} tokens", response.usage.total_tokens);
36 
37     Ok(())
38 }
examples/debug_request.rs (lines 9-15)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🔍 Debugging request format");
 6     println!("===============");
 7 
 8     // Build a test request
 9     let request = ChatCompletionRequest::new(
10         "gpt-3.5-turbo".to_string(),
11         vec![Message {
12             role: Role::User,
13             content: "Hello!".to_string(),
14         }],
15     ).with_max_tokens(10);
16 
17     println!("📤 Raw request:");
18     println!("   Model: {}", request.model);
19     println!("   Message count: {}", request.messages.len());
20     println!("   Message[0]: {:?} - {}", request.messages[0].role, request.messages[0].content);
21     println!("   max_tokens: {:?}", request.max_tokens);
22 
23     // Test OpenAI
24     println!("\n🤖 Testing OpenAI...");
25     match AiClient::new(Provider::OpenAI) {
26         Ok(client) => {
27             match client.chat_completion(request.clone()).await {
28                 Ok(response) => {
29                     println!("✅ Success!");
30                     println!("   Response: {}", response.choices[0].message.content);
31                 }
32                 Err(e) => {
33                     println!("❌ Failed: {}", e);
34 
35                     // A 400 error usually means the request format is wrong
36                     if e.to_string().contains("400") {
37                         println!("   This usually indicates a malformed request");
38                         println!("   Check that the request contains the required fields...");
39                     }
40                 }
41             }
42         }
43         Err(e) => println!("❌ Client creation failed: {}", e),
44     }
45 
46     Ok(())
47 }
examples/config_driven_example.rs (lines 29-35)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🚀 Config-driven AI-lib example");
 6     println!("========================");
 7 
 8     // Demonstrate the config-driven advantage: switching providers is trivial
 9     let providers = vec![
10         (Provider::Groq, "Groq"),
11         (Provider::OpenAI, "OpenAI"),
12         (Provider::DeepSeek, "DeepSeek"),
13     ];
14 
15     for (provider, name) in providers {
16         println!("\n📡 Testing provider: {}", name);
17 
18         // Create the client; only the enum value changes
19         let client = AiClient::new(provider)?;
20         println!("✅ Client created: {:?}", client.current_provider());
21 
22         // Fetch the model list
23         match client.list_models().await {
24             Ok(models) => println!("📋 Available models: {:?}", models),
25             Err(e) => println!("⚠️ Failed to list models: {}", e),
26         }
27 
28         // Build a test request
29         let request = ChatCompletionRequest::new(
30             "test-model".to_string(),
31             vec![Message {
32                 role: Role::User,
33                 content: "Hello from ai-lib!".to_string(),
34             }],
35         );
36 
37         println!("📤 Request prepared, model: {}", request.model);
38         println!("   (set the matching API_KEY env var to actually send it)");
39     }
40 
41     println!("\n🎯 Core advantages of the config-driven design:");
42     println!("   • Zero-code switching: just change the Provider enum value");
43     println!("   • Unified interface: every provider uses the same API");
44     println!("   • Fast extension: a new compatible provider only needs config");
45 
46     Ok(())
47 }
examples/test_without_proxy.rs (lines 17-23)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🌐 Testing connections without a proxy");
 6     println!("======================");
 7 
 8     // Temporarily remove the proxy setting
 9     std::env::remove_var("AI_PROXY_URL");
10 
11     println!("ℹ️ Temporarily removed the AI_PROXY_URL setting");
12 
13     // Test DeepSeek (directly reachable from mainland China)
14     println!("\n🔍 Testing DeepSeek (direct connection):");
15     match AiClient::new(Provider::DeepSeek) {
16         Ok(client) => {
17             let request = ChatCompletionRequest::new(
18                 "deepseek-chat".to_string(),
19                 vec![Message {
20                     role: Role::User,
21                     content: "Hello! Please respond with just 'Hi' to test.".to_string(),
22                 }],
23             ).with_max_tokens(5);
24 
25             match client.chat_completion(request).await {
26                 Ok(response) => {
27                     println!("✅ DeepSeek direct connection succeeded!");
28                     println!("   Response: {}", response.choices[0].message.content);
29                     println!("   Token usage: {}", response.usage.total_tokens);
30                 }
31                 Err(e) => {
32                     println!("❌ DeepSeek request failed: {}", e);
33                     if e.to_string().contains("402") {
34                         println!("   (insufficient balance; the connection itself works)");
35                     }
36                 }
37             }
38         }
39         Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
40     }
41 
42     println!("\n💡 Conclusions:");
43     println!("   • DeepSeek works over a direct connection; no proxy needed");
44     println!("   • OpenAI and Groq must be reached through a proxy");
45     println!("   • A proxy may rewrite the request body and corrupt its format");
46     println!("   • Check the proxy server configuration if requests fail");
47 
48     Ok(())
49 }
examples/compare_requests.rs (lines 8-14)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🔍 Comparing request formats");
 6     println!("================");
 7 
 8     let request = ChatCompletionRequest::new(
 9         "test-model".to_string(),
10         vec![Message {
11             role: Role::User,
12             content: "Hello!".to_string(),
13         }],
14     ).with_max_tokens(10);
15 
16     println!("📤 Test request:");
17     println!("   Model: {}", request.model);
18     println!("   Message: {:?}", request.messages[0]);
19     println!("   max_tokens: {:?}", request.max_tokens);
20 
21     // Groq (known to work)
22     println!("\n🟢 Groq (working):");
23     if let Ok(_groq_client) = AiClient::new(Provider::Groq) {
24         // Groq uses a dedicated adapter that we know works
25         println!("   ✅ Uses a dedicated adapter (GroqAdapter)");
26         println!("   ✅ Request format is correct");
27     }
28 
29     // OpenAI (currently broken)
30     println!("\n🔴 OpenAI (broken):");
31     if let Ok(_openai_client) = AiClient::new(Provider::OpenAI) {
32         println!("   ❌ Uses the config-driven adapter (GenericAdapter)");
33         println!("   ❌ Malformed request: 'you must provide a model parameter'");
34         println!("   🔍 Possible causes:");
35         println!("      - JSON serialization problem");
36         println!("      - Wrong field mapping");
37         println!("      - Incorrectly built request body");
38     }
39 
40     println!("\n💡 Solutions:");
41     println!("   1. Inspect GenericAdapter's convert_request method");
42     println!("   2. Verify the JSON field names");
43     println!("   3. Validate the request body structure");
44     println!("   4. Consider a dedicated adapter for OpenAI");
45 
46     // Suggested fixes
47     println!("\n🔧 Suggested fixes:");
48     println!("   Option 1: repair GenericAdapter's request-conversion logic");
49     println!("   Option 2: create a dedicated OpenAI adapter (like Groq's)");
50     println!("   Option 3: add more debug output to locate the problem");
51 
52     Ok(())
53 }
Additional examples can be found in:
- examples/proxy_example.rs
- examples/openai_test.rs
- examples/test_all_providers.rs
- examples/test_https_proxy.rs
- examples/test_anthropic.rs
- examples/test_openai_specific.rs
- examples/test_streaming.rs
- examples/test_gemini.rs
- examples/test_groq_generic.rs
- examples/test_streaming_clean.rs
- examples/test_proxy_working.rs
- examples/test_all_config_driven.rs
- examples/test_retry_mechanism.rs
- examples/test_hybrid_architecture.rs
- examples/test_streaming_improved.rs
pub fn with_temperature(self, temperature: f32) -> Self
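A consuming builder setter: it takes self by value and returns it, so calls can be chained as in the repository examples. Typical sampling temperatures range from 0.0 (most deterministic) to roughly 1.0 or above (more varied); a short sketch:

let request = ChatCompletionRequest::new(
    "llama3-8b-8192".to_string(),
    vec![Message { role: Role::User, content: "Hello!".to_string() }],
)
.with_temperature(0.2); // low temperature for near-deterministic output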
Examples found in repository:
examples/cohere_stream.rs (line 12): full listing shown under ChatCompletionRequest::new above.
More examples
examples/basic_usage.rs (line 23): full listing shown under ChatCompletionRequest::new above.
examples/openai_test.rs (line 44)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🤖 OpenAI provider test");
 6     println!("=====================");
 7 
 8     // Check for the API key
 9     match std::env::var("OPENAI_API_KEY") {
10         Ok(_) => println!("✅ OPENAI_API_KEY detected"),
11         Err(_) => {
12             println!("❌ OPENAI_API_KEY environment variable is not set");
13             println!("   Set it with: export OPENAI_API_KEY=your_api_key");
14             return Ok(());
15         }
16     }
17 
18     // Create the OpenAI client
19     let client = AiClient::new(Provider::OpenAI)?;
20     println!("✅ OpenAI client created");
21 
22     // Fetch the model list
23     println!("\n📋 Fetching OpenAI model list...");
24     match client.list_models().await {
25         Ok(models) => {
26             println!("✅ Retrieved {} models", models.len());
27             println!("   Common models:");
28             for model in models.iter().filter(|m| m.contains("gpt")) {
29                 println!("   • {}", model);
30             }
31         }
32         Err(e) => println!("❌ Failed to list models: {}", e),
33     }
34 
35     // Test chat completion
36     println!("\n💬 Testing chat completion...");
37     let request = ChatCompletionRequest::new(
38         "gpt-3.5-turbo".to_string(),
39         vec![Message {
40             role: Role::User,
41             content: "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection.".to_string(),
42         }],
43     ).with_max_tokens(20)
44      .with_temperature(0.7);
45 
46     match client.chat_completion(request).await {
47         Ok(response) => {
48             println!("✅ Chat completion succeeded!");
49             println!("   Model: {}", response.model);
50             println!("   Response: {}", response.choices[0].message.content);
51             println!("   Token usage: {} (prompt: {}, completion: {})",
52                 response.usage.total_tokens,
53                 response.usage.prompt_tokens,
54                 response.usage.completion_tokens
55             );
56         }
57         Err(e) => println!("❌ Chat completion failed: {}", e),
58     }
59 
60     println!("\n🎯 OpenAI config-driven test complete!");
61     println!("   This shows the strength of the config-driven architecture:");
62     println!("   • No OpenAI-specific code required");
63     println!("   • Only a ProviderConfigs entry is needed");
64     println!("   • All OpenAI-compatible features work automatically");
65 
66     Ok(())
67 }
examples/test_openai_specific.rs (line 61)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🤖 OpenAI focused test");
 6     println!("==================");
 7 
 8     // Check the OpenAI API key
 9     match std::env::var("OPENAI_API_KEY") {
10         Ok(key) => {
11             let masked = format!("{}...{}", &key[..8], &key[key.len()-4..]);
12             println!("🔑 OpenAI API Key: {}", masked);
13         }
14         Err(_) => {
15             println!("❌ OPENAI_API_KEY is not set");
16             return Ok(());
17         }
18     }
19 
20     // Create the OpenAI client
21     println!("\n📡 Creating OpenAI client...");
22     let client = match AiClient::new(Provider::OpenAI) {
23         Ok(client) => {
24             println!("✅ Client created");
25             client
26         }
27         Err(e) => {
28             println!("❌ Client creation failed: {}", e);
29             return Ok(());
30         }
31     };
32 
33     // Test the model list
34     println!("\n📋 Fetching model list...");
35     match client.list_models().await {
36         Ok(models) => {
37             println!("✅ Retrieved {} models", models.len());
38 
39             // Show GPT models
40             let gpt_models: Vec<_> = models.iter()
41                 .filter(|m| m.contains("gpt"))
42                 .take(5)
43                 .collect();
44             println!("   GPT models: {:?}", gpt_models);
45         }
46         Err(e) => {
47             println!("❌ Failed to list models: {}", e);
48             return Ok(());
49         }
50     }
51 
52     // Test chat completion
53     println!("\n💬 Testing chat completion...");
54     let request = ChatCompletionRequest::new(
55         "gpt-3.5-turbo".to_string(),
56         vec![Message {
57             role: Role::User,
58             content: "Say 'Hello from OpenAI!' in exactly those words.".to_string(),
59         }],
60     ).with_max_tokens(20)
61      .with_temperature(0.0); // temperature 0 for repeatable output
62 
63     match client.chat_completion(request).await {
64         Ok(response) => {
65             println!("✅ Chat completion succeeded!");
66             println!("   Model: {}", response.model);
67             println!("   Response: '{}'", response.choices[0].message.content);
68             println!("   Token usage: {} (prompt: {}, completion: {})",
69                 response.usage.total_tokens,
70                 response.usage.prompt_tokens,
71                 response.usage.completion_tokens
72             );
73             println!("   Finish reason: {:?}", response.choices[0].finish_reason);
74         }
75         Err(e) => {
76             println!("❌ Chat completion failed: {}", e);
77 
78             // Classify the error
79             let error_str = e.to_string();
80             if error_str.contains("400") {
81                 println!("   → Malformed request");
82             } else if error_str.contains("401") {
83                 println!("   → Authentication error; check the API key");
84             } else if error_str.contains("429") {
85                 println!("   → Rate limited");
86             } else if error_str.contains("500") {
87                 println!("   → Server error");
88             }
89         }
90     }
91 
92     println!("\n🎯 OpenAI test complete!");
93 
94     Ok(())
95 }
examples/test_streaming.rs (line 27)
 5 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 6     println!("🌊 Streaming response test");
 7     println!("================");
 8 
 9     // Check the Groq API key
10     if std::env::var("GROQ_API_KEY").is_err() {
11         println!("❌ GROQ_API_KEY is not set");
12         return Ok(());
13     }
14 
15     // Create the Groq client
16     let client = AiClient::new(Provider::Groq)?;
17     println!("✅ Groq client created");
18 
19     // Build the streaming request
20     let request = ChatCompletionRequest::new(
21         "llama3-8b-8192".to_string(),
22         vec![Message {
23             role: Role::User,
24             content: "Please write a short poem about AI in exactly 4 lines.".to_string(),
25         }],
26     ).with_max_tokens(100)
27      .with_temperature(0.7);
28 
29     println!("\n📤 Sending streaming request...");
30     println!("   Model: {}", request.model);
31     println!("   Message: {}", request.messages[0].content);
32 
33     // Consume the streaming response
34     match client.chat_completion_stream(request).await {
35         Ok(mut stream) => {
36             println!("\n🌊 Receiving streamed chunks:");
37             println!("{}", "─".repeat(50));
38 
39             let mut full_content = String::new();
40             let mut chunk_count = 0;
41 
42             while let Some(result) = stream.next().await {
43                 match result {
44                     Ok(chunk) => {
45                         chunk_count += 1;
46 
47                         if let Some(choice) = chunk.choices.first() {
48                             if let Some(content) = &choice.delta.content {
49                                 print!("{}", content);
50                                 full_content.push_str(content);
51 
52                                 // Flush stdout so the text appears immediately
53                                 use std::io::{self, Write};
54                                 io::stdout().flush().unwrap();
55                             }
56 
57                             // Check whether the stream has finished
58                             if choice.finish_reason.is_some() {
59                                 println!("\n{}", "─".repeat(50));
60                                 println!("✅ Stream complete!");
61                                 println!("   Finish reason: {:?}", choice.finish_reason);
62                                 break;
63                             }
64                         }
65                     }
66                     Err(e) => {
67                         println!("\n❌ Stream error: {}", e);
68                         break;
69                     }
70                 }
71             }
72 
73             println!("\n📊 Stream statistics:");
74             println!("   Chunk count: {}", chunk_count);
75             println!("   Total content length: {} chars", full_content.len());
76             println!("   Full content: \"{}\"", full_content.trim());
77         }
78         Err(e) => {
79             println!("❌ Streaming request failed: {}", e);
80         }
81     }
82 
83     println!("\n💡 Advantages of streaming responses:");
84     println!("   • Content appears in real time as it is generated");
85     println!("   • Better user experience");
86     println!("   • Generation can be stopped early");
87     println!("   • Well suited to long outputs");
88 
89     Ok(())
90 }
examples/test_gemini.rs (line 32)
 4 async fn main() -> Result<(), Box<dyn std::error::Error>> {
 5     println!("🤖 Google Gemini dedicated-adapter test");
 6     println!("===============================");
 7 
 8     // Check for the API key
 9     match std::env::var("GEMINI_API_KEY") {
10         Ok(_) => println!("✅ GEMINI_API_KEY detected"),
11         Err(_) => {
12             println!("❌ GEMINI_API_KEY environment variable is not set");
13             println!("   Set it with: export GEMINI_API_KEY=your_api_key");
14             println!("   Get an API key at: https://aistudio.google.com/app/apikey");
15             return Ok(());
16         }
17     }
18 
19     // Create the Gemini client
20     let client = AiClient::new(Provider::Gemini)?;
21     println!("✅ Gemini client created (uses GeminiAdapter)");
22 
23     // Test chat completion
24     println!("\n💬 Testing Gemini chat...");
25     let request = ChatCompletionRequest::new(
26         "gemini-1.5-flash".to_string(),
27         vec![Message {
28             role: Role::User,
29             content: "Hello Gemini! Please respond with 'Hello from Google Gemini via ai-lib!' to confirm the connection works.".to_string(),
30         }],
31     ).with_max_tokens(50)
32      .with_temperature(0.7);
33 
34     match client.chat_completion(request).await {
35         Ok(response) => {
36             println!("✅ Gemini chat succeeded!");
37             println!("   Model: {}", response.model);
38             println!("   Response: '{}'", response.choices[0].message.content);
39             println!("   Token usage: {} (prompt: {}, completion: {})",
40                 response.usage.total_tokens,
41                 response.usage.prompt_tokens,
42                 response.usage.completion_tokens
43             );
44         }
45         Err(e) => {
46             println!("❌ Gemini chat failed: {}", e);
47 
48             // Classify the error
49             let error_str = e.to_string();
50             if error_str.contains("401") || error_str.contains("403") {
51                 println!("   → Authentication error; check GEMINI_API_KEY");
52             } else if error_str.contains("400") {
53                 println!("   → Malformed request");
54             } else if error_str.contains("429") {
55                 println!("   → Rate limited; retry later");
56             }
57         }
58     }
59 
60     // Test the model list
61     println!("\n📋 Testing model list...");
62     match client.list_models().await {
63         Ok(models) => {
64             println!("✅ Available models: {:?}", models);
65         }
66         Err(e) => {
67             println!("❌ Failed to list models: {}", e);
68         }
69     }
70 
71     println!("\n🎯 GeminiAdapter specifics:");
72     println!("   • 🔧 Different API shape: contents array vs messages array");
73     println!("   • 🔑 URL-parameter auth: ?key=<API_KEY> vs an Authorization header");
74     println!("   • 📊 Different response path: candidates[0].content.parts[0].text");
75     println!("   • 🎭 Role mapping: assistant → model");
76     println!("   • ⚙️ Config field: generationConfig vs top-level parameters");
77 
78     println!("\n🏗️ Hybrid-architecture validation:");
79     println!("   ✅ Dedicated adapters coexist with config-driven adapters");
80     println!("   ✅ Unified ChatApi interface; callers see no difference");
81     println!("   ✅ Handles special API formats and auth schemes flexibly");
82 
83     Ok(())
84 }
pub fn with_max_tokens(self, max_tokens: u32) -> Self
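Also a consuming builder setter. max_tokens presumably caps how many tokens the provider may generate for the completion; exact semantics depend on each provider's API. A short sketch:

let request = ChatCompletionRequest::new(
    "deepseek-chat".to_string(),
    vec![Message { role: Role::User, content: "Reply with one word.".to_string() }],
)
.with_max_tokens(5); // keep the completion very short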
Examples found in repository:
examples/cohere_stream.rs (line 12): full listing shown under ChatCompletionRequest::new above.
More examples
examples/basic_usage.rs (line 24): full listing shown under ChatCompletionRequest::new above.
examples/debug_request.rs (line 15): full listing shown under ChatCompletionRequest::new above.
examples/test_without_proxy.rs (line 23): full listing shown under ChatCompletionRequest::new above.
examples/compare_requests.rs (line 14): full listing shown under ChatCompletionRequest::new above.
examples/openai_test.rs (line 43): full listing shown under with_temperature above.
Additional examples can be found in:
- examples/test_all_providers.rs
- examples/test_https_proxy.rs
- examples/test_anthropic.rs
- examples/test_openai_specific.rs
- examples/test_streaming.rs
- examples/test_gemini.rs
- examples/test_groq_generic.rs
- examples/test_streaming_clean.rs
- examples/test_proxy_working.rs
- examples/test_all_config_driven.rs
- examples/test_retry_mechanism.rs
- examples/test_hybrid_architecture.rs
- examples/test_streaming_improved.rs
Trait Implementations
impl Clone for ChatCompletionRequest
fn clone(&self) -> ChatCompletionRequest
Returns a duplicate of the value.
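Cloning is useful when the same request should be sent more than once, since chat_completion takes the request by value; examples/debug_request.rs does exactly this with request.clone(). A sketch, assuming a client built as in the examples above:

let first = client.chat_completion(request.clone()).await?;
let second = client.chat_completion(request).await?; // reuse the original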
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.

impl Debug for ChatCompletionRequest
impl<'de> Deserialize<'de> for ChatCompletionRequest
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
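Because the type implements Deserialize, a request can be parsed from JSON, for example with serde_json. A minimal sketch; note that the wire spelling of role values depends on the serde attributes of Role, which this page does not show, so the "user" spelling below is an assumption:

// Assumes serde_json = "1" is available as a dependency.
let json = r#"{
    "model": "gpt-3.5-turbo",
    "messages": [{ "role": "user", "content": "Hello!" }],
    "max_tokens": 10
}"#;
let request: ChatCompletionRequest = serde_json::from_str(json)?;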
Auto Trait Implementations
impl Freeze for ChatCompletionRequest
impl RefUnwindSafe for ChatCompletionRequest
impl Send for ChatCompletionRequest
impl Sync for ChatCompletionRequest
impl Unpin for ChatCompletionRequest
impl UnwindSafe for ChatCompletionRequest
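Send and Sync mean a request can be moved to or shared with other threads and tasks. A trivial sketch relying only on Send:

let req = request.clone();
std::thread::spawn(move || {
    // the request was moved into this thread; Send makes this compile
    println!("model = {}", req.model);
})
.join()
.unwrap();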
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.