Struct ChatCompletionRequest

pub struct ChatCompletionRequest {
    pub model: String,
    pub messages: Vec<Message>,
    pub temperature: Option<f32>,
    pub max_tokens: Option<u32>,
    pub stream: Option<bool>,
    pub top_p: Option<f32>,
    pub frequency_penalty: Option<f32>,
    pub presence_penalty: Option<f32>,
    pub functions: Option<Vec<Tool>>,
    pub function_call: Option<FunctionCallPolicy>,
}

Fields

model: String
messages: Vec<Message>
temperature: Option<f32>
max_tokens: Option<u32>
stream: Option<bool>
top_p: Option<f32>
frequency_penalty: Option<f32>
presence_penalty: Option<f32>

functions: Option<Vec<Tool>>

Optional function/tool definitions for function calling.

function_call: Option<FunctionCallPolicy>

Function call policy: "auto"/"none"/a specific function name.
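
The optional fields are normally filled in through the builder methods documented below rather than by struct literal. A minimal sketch mirroring the repository examples (the use path is an assumption; adjust it to the crate's actual re-exports):

// Sketch, not a verbatim crate example: the import path is assumed.
use ai_lib::{ChatCompletionRequest, Content, Message, Role};

fn build_request() -> ChatCompletionRequest {
    ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    // Optional sampling fields are set via the builder methods below.
    .with_temperature(0.7)
    .with_max_tokens(64)
    // functions / function_call are plain pub fields; set them directly,
    // as examples/function_call_openai.rs does further down.
}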

Implementations

impl ChatCompletionRequest

pub fn new(model: String, messages: Vec<Message>) -> Self

Creates a request for the given model and messages.

Examples found in repository

examples/multimodal_example.rs (lines 10-21)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Multimodal example: image + audio content in a message");

    let _client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "multimodal-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_image(
                Some("https://example.com/dog.jpg".into()),
                Some("image/jpeg".into()),
                Some("dog.jpg".into()),
            ),
            function_call: None,
        }],
    );

    println!(
        "Prepared multimodal request; image URL: {}",
        request.messages[0].content.as_text()
    );

    // Note: this example demonstrates the type usage only and does not call the API.
    Ok(())
}
Additional examples:
examples/explicit_config.rs (lines 18-25)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Explicit configuration example");
    let opts = ConnectionOptions {
        base_url: None,                                      // fallback to provider default
        proxy: Some("http://proxy.example.com:8080".into()), // or None to use AI_PROXY_URL
        api_key: None,                                       // rely on environment for now
        timeout: Some(std::time::Duration::from_secs(40)),
        disable_proxy: false,
    };

    let client = AiClient::with_options(Provider::Groq, opts)?;

    let req = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Ping from explicit config".into()),
            function_call: None,
        }],
    );

    // This may fail if GROQ_API_KEY not set; we only show structure.
    match client.chat_completion(req).await {
        Ok(resp) => println!("Response model: {}", resp.model),
        Err(e) => println!(
            "Request failed (expected in example without API key): {}",
            e
        ),
    }
    Ok(())
}
examples/cohere_stream.rs (lines 10-17)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ensure COHERE_API_KEY env var is set if making real requests
    let client = AiClient::new(Provider::Cohere)?;

    let request = ChatCompletionRequest::new(
        "command-xlarge-nightly".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Write a haiku about rust programming".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(60);

    // List models
    match client.list_models().await {
        Ok(models) => println!("Models: {:?}", models),
        Err(e) => eprintln!("Failed to list models: {}", e),
    }

    // Streaming
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(c) => {
                for choice in c.choices {
                    if let Some(delta) = choice.delta.content {
                        print!("{}", delta);
                    }
                }
            }
            Err(e) => {
                eprintln!("Stream error: {}", e);
                break;
            }
        }
    }

    Ok(())
}
examples/basic_usage.rs (lines 22-29)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Basic Usage Example");
    println!("================================");

    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Get list of supported models
    let models = client.list_models().await?;
    println!("📋 Available models: {:?}", models);

    // Create chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("📤 Sending request to model: {}", request.model);

    // Send request
    let response = client.chat_completion(request).await?;

    println!("📥 Received response:");
    println!("   ID: {}", response.id);
    println!("   Model: {}", response.model);
    println!(
        "   Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!("   Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
examples/config_driven_example.rs (lines 34-41)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 Config-driven AI-lib Example");
    println!("================================");

    // Demonstrate the advantages of config-driven approach: easy provider switching
    let providers = vec![
        (Provider::Groq, "Groq"),
        (Provider::OpenAI, "OpenAI"),
        (Provider::DeepSeek, "DeepSeek"),
    ];

    for (provider, name) in providers {
        println!("\n📡 Testing Provider: {}", name);

        // Create client - just change the enum value
        let client = AiClient::new(provider)?;
        println!(
            "✅ Client created successfully: {:?}",
            client.current_provider()
        );

        // Get model list
        match client.list_models().await {
            Ok(models) => println!("📋 Available models: {:?}", models),
            Err(e) => println!("⚠️  Failed to get model list: {}", e),
        }

        // Create test request
        let request = ChatCompletionRequest::new(
            "test-model".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello from ai-lib!".to_string()),
                function_call: None,
            }],
        );

        println!("📤 Request prepared, model: {}", request.model);
        println!("   (Need to set corresponding API_KEY environment variable for actual calls)");
    }

    println!("\n🎯 Core advantages of config-driven approach:");
    println!("   • Zero-code switching: just change Provider enum value");
    println!("   • Unified interface: all providers use the same API");
    println!("   • Rapid expansion: add new compatible providers with just configuration");

    Ok(())
}
examples/debug_request.rs (lines 11-18)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Debug Request Format");
    println!("======================");

    // Create test request
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Original Request:");
    println!("   Model: {}", request.model);
    println!("   Message count: {}", request.messages.len());
    println!(
        "   Message[0]: {:?} - {}",
        request.messages[0].role,
        request.messages[0].content.as_text()
    );
    println!("   max_tokens: {:?}", request.max_tokens);

    // Test OpenAI
    println!("\n🤖 Testing OpenAI...");
    match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            match client.chat_completion(request.clone()).await {
                Ok(response) => {
                    println!("✅ Success!");
                    println!(
                        "   Response: {}",
                        response.choices[0].message.content.as_text()
                    );
                }
                Err(e) => {
                    println!("❌ Failed: {}", e);

                    // If it's a 400 error, it indicates request format issues
                    if e.to_string().contains("400") {
                        println!("   This usually indicates incorrect request format");
                        println!("   Let's check if the request contains necessary fields...");
                    }
                }
            }
        }
        Err(e) => println!("❌ Client creation failed: {}", e),
    }

    Ok(())
}
pub fn with_temperature(self, temperature: f32) -> Self

Sets temperature on the request and returns it, builder style.

Examples found in repository

examples/cohere_stream.rs (line 18): see the full listing under new above.
Additional examples:
examples/basic_usage.rs (line 30): see the full listing under new above.
examples/openai_test.rs (line 51)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI Provider Test");
    println!("======================");

    // Check API key
    match std::env::var("OPENAI_API_KEY") {
        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
        Err(_) => {
            println!("❌ OPENAI_API_KEY environment variable not set");
            println!("   Please set: export OPENAI_API_KEY=your_api_key");
            return Ok(());
        }
    }

    // Create OpenAI client
    let client = AiClient::new(Provider::OpenAI)?;
    println!("✅ OpenAI client created successfully");

    // Get model list
    println!("\n📋 Getting OpenAI model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Successfully got {} models", models.len());
            println!("   Common models:");
            for model in models.iter().filter(|m| m.contains("gpt")) {
                println!("   • {}", model);
            }
        }
        Err(e) => println!("❌ Failed to get model list: {}", e),
    }

    // Test chat completion
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
                    .to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.7);

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion successful!");
            println!("   Model: {}", response.model);
            println!(
                "   Response: {}",
                response.choices[0].message.content.as_text()
            );
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
        }
        Err(e) => println!("❌ Chat completion failed: {}", e),
    }

    println!("\n🎯 OpenAI config-driven test completed!");
    println!("   This demonstrates the power of config-driven architecture:");
    println!("   • No need to write OpenAI-specific code");
    println!("   • Just add configuration in ProviderConfigs");
    println!("   • Automatically supports all OpenAI-compatible features");

    Ok(())
}
examples/function_call_openai.rs (line 36)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔧 OpenAI Function Calling example (ai-lib)");

    // Ensure OPENAI_API_KEY is set in env before running
    let client = AiClient::new(Provider::OpenAI)?;

    // Build a simple user message
    let user_msg = Message {
        role: Role::User,
        content: Content::Text("Please call the ascii_horse tool with size=3".to_string()),
        function_call: None,
    };

    // Define a Tool (JSON Schema for parameters)
    let ascii_horse_tool = Tool {
        name: "ascii_horse".to_string(),
        description: Some("Draws an ASCII horse of given size".to_string()),
        parameters: Some(json!({
            "type": "object",
            "properties": {
                "size": { "type": "integer", "description": "Size of the horse" }
            },
            "required": ["size"]
        })),
    };

    let mut req = ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![user_msg]);
    req.functions = Some(vec![ascii_horse_tool]);
    req.function_call = Some(FunctionCallPolicy::Auto("auto".to_string()));
    req = req.with_max_tokens(200).with_temperature(0.0);

    println!("📤 Sending request to OpenAI (model={})", req.model);

    let resp = client.chat_completion(req).await?;

    // Handle a possible function call from the model: execute locally and send the result back
    for choice in resp.choices {
        let msg = choice.message;
        if let Some(fc) = msg.function_call {
            println!("🛠️  Model invoked function: {}", fc.name);
            let args = fc.arguments.unwrap_or(serde_json::json!(null));
            println!("   arguments: {}", args);

            // Simple local tool: ascii_horse
            if fc.name == "ascii_horse" {
                // Parse size param
                let size = args.get("size").and_then(|v| v.as_i64()).unwrap_or(3) as usize;
                let horse = generate_ascii_horse(size);
                println!("⚙️ Executed ascii_horse locally, output:\n{}", horse);

                // Send follow-up message with tool result as assistant message
                let tool_msg = Message {
                    role: Role::Assistant,
                    content: Content::Text(horse.clone()),
                    function_call: None,
                };

                let mut followup =
                    ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![tool_msg]);
                followup = followup.with_max_tokens(200).with_temperature(0.0);
                let follow_resp = client.chat_completion(followup).await?;
                for fc_choice in follow_resp.choices {
                    println!(
                        "🗨️ Final model response: {}",
                        fc_choice.message.content.as_text()
                    );
                }
            }
        } else {
            println!("💬 Model message: {}", msg.content.as_text());
        }
    }

    Ok(())
}
examples/test_streaming.rs (line 32)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌊 Streaming Response Test");
    println!("================");

    // Check Groq API key
    if std::env::var("GROQ_API_KEY").is_err() {
        println!("❌ GROQ_API_KEY not set");
        return Ok(());
    }

    // Create Groq client
    let client = AiClient::new(Provider::Groq)?;
    println!("✅ Groq client created successfully");

    // Create streaming request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Please write a short poem about AI in exactly 4 lines.".to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(100)
    .with_temperature(0.7);

    println!("\n📤 Sending streaming request...");
    println!("   Model: {}", request.model);
    println!("   Message: {}", request.messages[0].content.as_text());

    // Get the streaming response
    match client.chat_completion_stream(request).await {
        Ok(mut stream) => {
            println!("\n🌊 Receiving streaming response:");
            println!("{}", "─".repeat(50));

            let mut full_content = String::new();
            let mut chunk_count = 0;

            while let Some(result) = stream.next().await {
                match result {
                    Ok(chunk) => {
                        chunk_count += 1;

                        if let Some(choice) = chunk.choices.first() {
                            if let Some(content) = &choice.delta.content {
                                print!("{}", content);
                                full_content.push_str(content);

                                // Flush stdout so tokens appear immediately
                                use std::io::{self, Write};
                                io::stdout().flush().unwrap();
                            }

                            // Check whether the stream is finished
                            if choice.finish_reason.is_some() {
                                println!("\n{}", "─".repeat(50));
                                println!("✅ Streaming response complete!");
                                println!("   Finish reason: {:?}", choice.finish_reason);
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        println!("\n❌ Streaming error: {}", e);
                        break;
                    }
                }
            }

            println!("\n📊 Streaming statistics:");
            println!("   Chunk count: {}", chunk_count);
            println!("   Total content length: {} characters", full_content.len());
            println!("   Full content: \"{}\"", full_content.trim());
        }
        Err(e) => {
            println!("❌ Streaming request failed: {}", e);
        }
    }

    println!("\n💡 Advantages of streaming responses:");
    println!("   • Generated content is displayed in real time");
    println!("   • Better user experience");
    println!("   • Generation can be stopped early");
    println!("   • Well suited to long-form text generation");

    Ok(())
}
examples/test_openai_specific.rs (line 65)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI Focused Test");
    println!("==================");

    // Check OpenAI API key
    match std::env::var("OPENAI_API_KEY") {
        Ok(key) => {
            let masked = format!("{}...{}", &key[..8], &key[key.len() - 4..]);
            println!("🔑 OpenAI API Key: {}", masked);
        }
        Err(_) => {
            println!("❌ OPENAI_API_KEY not set");
            return Ok(());
        }
    }

    // Create OpenAI client
    println!("\n📡 Creating OpenAI client...");
    let client = match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            println!("✅ Client created successfully");
            client
        }
        Err(e) => {
            println!("❌ Client creation failed: {}", e);
            return Ok(());
        }
    };

    // Test model listing
    println!("\n📋 Fetching model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Fetched {} models", models.len());

            // Show GPT models
            let gpt_models: Vec<_> = models
                .iter()
                .filter(|m| m.contains("gpt"))
                .take(5)
                .collect();
            println!("   GPT models: {:?}", gpt_models);
        }
        Err(e) => {
            println!("❌ Failed to fetch model list: {}", e);
            return Ok(());
        }
    }

    // Test chat completion
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Summarize the following text: ...".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.0); // Temperature 0 for deterministic output

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion succeeded!");
            println!("   Model: {}", response.model);
            println!(
                "   Response: '{}'",
                response.choices[0].message.content.as_text()
            );
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
            println!("   Finish reason: {:?}", response.choices[0].finish_reason);
        }
        Err(e) => {
            println!("❌ Chat completion failed: {}", e);

            // Classify the error type
            let error_str = e.to_string();
            if error_str.contains("400") {
                println!("   → Request format error");
            } else if error_str.contains("401") {
                println!("   → Authentication error; check the API key");
            } else if error_str.contains("429") {
                println!("   → Rate limit error");
            } else if error_str.contains("500") {
                println!("   → Server error");
            }
        }
    }

    println!("\n🎯 OpenAI test complete!");

    Ok(())
}
pub fn with_max_tokens(self, max_tokens: u32) -> Self

Sets max_tokens on the request and returns it, builder style.

Examples found in repository

examples/cohere_stream.rs (line 19): see the full listing under new above.
Additional examples:
examples/basic_usage.rs (line 31): see the full listing under new above.
examples/debug_request.rs (line 19): see the full listing under new above.
examples/test_without_proxy.rs (line 28)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 Testing connections without a proxy");
    println!("======================");

    // Temporarily remove the proxy setting
    std::env::remove_var("AI_PROXY_URL");

    println!("ℹ️  AI_PROXY_URL setting temporarily removed");

    // Test DeepSeek (reachable without a proxy)
    println!("\n🔍 Testing DeepSeek (direct connection):");
    match AiClient::new(Provider::DeepSeek) {
        Ok(client) => {
            let request = ChatCompletionRequest::new(
                "deepseek-chat".to_string(),
                vec![Message {
                    role: Role::User,
                    content: Content::Text(
                        "Hello! Please respond with just 'Hi' to test.".to_string(),
                    ),
                    function_call: None,
                }],
            )
            .with_max_tokens(5);

            match client.chat_completion(request).await {
                Ok(response) => {
                    println!("✅ DeepSeek direct connection succeeded!");
                    println!("   Response: {}", response.choices[0].message.content.as_text());
                    println!("   Token usage: {}", response.usage.total_tokens);
                }
                Err(e) => {
                    println!("❌ DeepSeek request failed: {}", e);
                    if e.to_string().contains("402") {
                        println!("   (Insufficient-balance error, so the connection itself works)");
                    }
                }
            }
        }
        Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
    }

    println!("\n💡 Conclusions:");
    println!("   • DeepSeek is reachable directly; no proxy is needed");
    println!("   • OpenAI and Groq must be reached through a proxy");
    println!("   • A proxy may rewrite request contents and cause format errors");
    println!("   • Check the proxy server's configuration");

    Ok(())
}
examples/compare_requests.rs (line 18)

async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Compare Request Formats");
    println!("==========================");

    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Test Request:");
    println!("   Model: {}", request.model);
    println!("   Message: {:?}", request.messages[0]);
    println!("   max_tokens: {:?}", request.max_tokens);

    // Test Groq (working normally)
    println!("\n🟢 Groq (working normally):");
    if let Ok(_groq_client) = AiClient::new(Provider::Groq) {
        // Groq uses independent adapter, we know it works normally
        println!("   ✅ Uses independent adapter (GroqAdapter)");
        println!("   ✅ Request format correct");
    }

    // Test OpenAI (has issues)
    println!("\n🔴 OpenAI (has issues):");
    if let Ok(_openai_client) = AiClient::new(Provider::OpenAI) {
        println!("   ❌ Uses config-driven adapter (GenericAdapter)");
        println!("   ❌ Request format error: 'you must provide a model parameter'");
        println!("   🔍 Possible issues:");
        println!("      - JSON serialization problem");
        println!("      - Field mapping error");
        println!("      - Request body construction error");
    }

    println!("\n💡 Solutions:");
    println!("   1. Check GenericAdapter's convert_request method");
    println!("   2. Ensure JSON field names are correct");
    println!("   3. Verify request body structure");
    println!("   4. Consider creating independent adapter for OpenAI");

    // Suggested fixes
    println!("\n🔧 Suggested Fixes:");
    println!("   Option 1: Fix GenericAdapter's request conversion logic");
    println!("   Option 2: Create independent adapter for OpenAI (like Groq)");
    println!("   Option 3: Add more debug information to locate the issue");

    Ok(())
}
examples/openai_test.rs (line 50): see the full listing under with_temperature above.
pub fn ignore_previous(self) -> Self

Drops previous conversational messages while keeping system messages and the last non-system message. Useful for resetting context while preserving system instructions.
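
A sketch of the documented behavior. The import path and the Role::System variant are assumptions, and the msg helper is hypothetical shorthand, not a crate API:

use ai_lib::{ChatCompletionRequest, Content, Message, Role};

// Hypothetical helper, for brevity only.
fn msg(role: Role, text: &str) -> Message {
    Message {
        role,
        content: Content::Text(text.to_string()),
        function_call: None,
    }
}

fn reset_context() -> ChatCompletionRequest {
    ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![
            msg(Role::System, "You are terse."),  // kept: system message
            msg(Role::User, "First question"),    // dropped: earlier history
            msg(Role::Assistant, "First answer"), // dropped: earlier history
            msg(Role::User, "New question"),      // kept: last non-system message
        ],
    )
    // Per the description above, only the system message and the last
    // non-system message remain after this call.
    .ignore_previous()
}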

Trait Implementations

impl Clone for ChatCompletionRequest

fn clone(&self) -> ChatCompletionRequest

Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Debug for ChatCompletionRequest

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl<'de> Deserialize<'de> for ChatCompletionRequest

fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where __D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer.

impl Serialize for ChatCompletionRequest

fn serialize<__S>(&self, __serializer: __S) -> Result<__S::Ok, __S::Error>
where __S: Serializer,

Serialize this value into the given Serde serializer.
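
Because the type implements Serialize, a request can be dumped as JSON to inspect exactly what will be sent to a provider. A sketch using serde_json (the import path is an assumption, and whether unset Option fields serialize as null or are skipped depends on the crate's serde attributes):

use ai_lib::{ChatCompletionRequest, Content, Message, Role};

fn main() -> Result<(), serde_json::Error> {
    let req = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    // Inspect the serialized request body before sending it.
    println!("{}", serde_json::to_string_pretty(&req)?);
    Ok(())
}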

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬 This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> PolicyExt for T
where T: ?Sized,

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow.

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V

impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.

impl<T> DeserializeOwned for T
where T: for<'de> Deserialize<'de>,

impl<T> ErasedDestructor for T
where T: 'static,