pub enum Content {
    Text(String),
    Json(Value),
    Image {
        url: Option<String>,
        mime: Option<String>,
        name: Option<String>,
    },
    Audio {
        url: Option<String>,
        mime: Option<String>,
    },
}
Message content, represented as an enum so messages can carry multimodal and structured content in addition to plain text
Variants
Text(String)
Plain text content
Json(Value)
Generic JSON content for structured payloads (e.g. function call args)
Image
Reference to an image (url) or metadata; adapters may upload or inline as needed
Audio
Reference to audio content
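Since the fields of a public enum's variants are themselves public, a Content value can be built directly as well as through the convenience constructors below. A minimal sketch, assuming Value is serde_json::Value and using placeholder URLs for the image and audio references:

use serde_json::json;

// Plain text
let text = Content::Text("Hello!".to_string());

// Structured JSON payload (e.g. function call arguments)
let args = Content::Json(json!({ "size": "small" }));

// Image reference; every field is optional
let image = Content::Image {
    url: Some("https://example.com/dog.jpg".to_string()),
    mime: Some("image/jpeg".to_string()),
    name: Some("dog.jpg".to_string()),
};

// Audio reference (placeholder URL and MIME type)
let audio = Content::Audio {
    url: Some("https://example.com/clip.ogg".to_string()),
    mime: Some("audio/ogg".to_string()),
};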
Implementations
impl Content
pub fn as_text(&self) -> String
Return a best-effort textual representation for legacy code paths
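A small sketch of the expected behavior: for Text the inner string is presumably returned unchanged, and the multimodal example below relies on as_text() to recover an image's URL:

// Text: the textual representation is the string itself
let text = Content::new_text("Hello!");
println!("{}", text.as_text()); // expected: Hello!

// Image: examples/multimodal_example.rs prints the image URL via as_text()
let image = Content::new_image(
    Some("https://example.com/dog.jpg".to_string()),
    Some("image/jpeg".to_string()),
    Some("dog.jpg".to_string()),
);
println!("{}", image.as_text()); // expected: the image URL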
Examples found in repository
examples/multimodal_example.rs (line 25)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Multimodal example: image + audio content in a message");

    let _client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "multimodal-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_image(
                Some("https://example.com/dog.jpg".into()),
                Some("image/jpeg".into()),
                Some("dog.jpg".into()),
            ),
            function_call: None,
        }],
    );

    println!(
        "Prepared multimodal request; image URL: {}",
        request.messages[0].content.as_text()
    );

    // Note: this example demonstrates the type usage only and does not call the API.
    Ok(())
}
More examples
examples/basic_usage.rs (line 43)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Basic Usage Example");
    println!("================================");

    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

    // Get list of supported models
    let models = client.list_models().await?;
    println!("📋 Available models: {:?}", models);

    // Create chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("📤 Sending request to model: {}", request.model);

    // Send request
    let response = client.chat_completion(request).await?;

    println!("📥 Received response:");
    println!(" ID: {}", response.id);
    println!(" Model: {}", response.model);
    println!(
        " Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!(" Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
examples/debug_request.rs (line 27)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Debug Request Format");
    println!("======================");

    // Create test request
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    println!("📤 Original Request:");
    println!(" Model: {}", request.model);
    println!(" Message count: {}", request.messages.len());
    println!(
        " Message[0]: {:?} - {}",
        request.messages[0].role,
        request.messages[0].content.as_text()
    );
    println!(" max_tokens: {:?}", request.max_tokens);

    // Test OpenAI
    println!("\n🤖 Testing OpenAI...");
    match AiClient::new(Provider::OpenAI) {
        Ok(client) => {
            match client.chat_completion(request.clone()).await {
                Ok(response) => {
                    println!("✅ Success!");
                    println!(
                        " Response: {}",
                        response.choices[0].message.content.as_text()
                    );
                }
                Err(e) => {
                    println!("❌ Failed: {}", e);

                    // If it's a 400 error, it indicates request format issues
                    if e.to_string().contains("400") {
                        println!(" This usually indicates incorrect request format");
                        println!(" Let's check if the request contains necessary fields...");
                    }
                }
            }
        }
        Err(e) => println!("❌ Client creation failed: {}", e),
    }

    Ok(())
}
examples/test_without_proxy.rs (line 33)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 Testing connection without a proxy");
    println!("======================");

    // Temporarily remove the proxy setting
    std::env::remove_var("AI_PROXY_URL");

    println!("ℹ️ Temporarily removed the AI_PROXY_URL setting");

    // Test DeepSeek (reachable directly from mainland China)
    println!("\n🔍 Testing DeepSeek (direct connection):");
    match AiClient::new(Provider::DeepSeek) {
        Ok(client) => {
            let request = ChatCompletionRequest::new(
                "deepseek-chat".to_string(),
                vec![Message {
                    role: Role::User,
                    content: Content::Text(
                        "Hello! Please respond with just 'Hi' to test.".to_string(),
                    ),
                    function_call: None,
                }],
            )
            .with_max_tokens(5);

            match client.chat_completion(request).await {
                Ok(response) => {
                    println!("✅ DeepSeek direct connection succeeded!");
                    println!(" Response: {}", response.choices[0].message.content.as_text());
                    println!(" Token usage: {}", response.usage.total_tokens);
                }
                Err(e) => {
                    println!("❌ DeepSeek request failed: {}", e);
                    if e.to_string().contains("402") {
                        println!(" (This is an insufficient-balance error, which means the connection itself works)");
                    }
                }
            }
        }
        Err(e) => println!("❌ DeepSeek client creation failed: {}", e),
    }

    println!("\n💡 Conclusions:");
    println!(" • DeepSeek can be reached directly; no proxy is needed");
    println!(" • OpenAI and Groq must be accessed through a proxy");
    println!(" • A proxy may modify request contents and cause format errors");
    println!(" • Check the proxy server configuration if requests fail");

    Ok(())
}
examples/ascii_horse.rs (line 87)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Example: Function Calling with a local tool 'ascii_horse'");

    // Define the tool
    let tool = Tool {
        name: "ascii_horse".to_string(),
        description: Some("Return an ASCII art horse. Accepts size: 'small'|'large'.".to_string()),
        parameters: Some(json!({
            "type": "object",
            "properties": {
                "size": { "type": "string", "enum": ["small", "large"] }
            },
            "required": ["size"]
        })),
    };

    // Build a request that offers the tool to the model
    let mut request = ChatCompletionRequest::new(
        "example-model".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::new_text("Please draw an ASCII horse for me."),
            function_call: None,
        }],
    );
    request.functions = Some(vec![tool.clone()]);
    request.function_call = Some(FunctionCallPolicy::Auto("ascii_horse".to_string()));

    println!(
        "Prepared request with functions: {}",
        serde_json::to_string_pretty(&request.functions).unwrap()
    );

    // Simulate model returning a function call (in a real run this would come from the provider)
    let simulated_call = FunctionCall {
        name: "ascii_horse".to_string(),
        arguments: Some(json!({ "size": "small" })),
    };
    println!(
        "Model requested function call: {}",
        serde_json::to_string_pretty(&simulated_call).unwrap()
    );

    // Execute the local tool
    let size_arg = simulated_call
        .arguments
        .as_ref()
        .and_then(|v| v.get("size"))
        .and_then(|s| s.as_str())
        .unwrap_or("small");

    let tool_output = ascii_horse(size_arg);

    // Convert tool output to a Message and append to conversation
    let tool_message = Message {
        role: Role::Assistant,
        content: Content::new_text(tool_output.clone()),
        function_call: None,
    };

    // In a normal flow you'd send the updated messages back to the model to continue the conversation.
    println!(
        "Tool output (appended as assistant message):\n\n{}\n",
        tool_message.content.as_text()
    );

    Ok(())
}
examples/proxy_example.rs (line 41)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌐 AI-lib Proxy Server Support Example");
    println!("=====================================");

    // Check proxy configuration
    match std::env::var("AI_PROXY_URL") {
        Ok(proxy_url) => {
            println!("✅ Proxy configuration detected: {}", proxy_url);
            println!(" All HTTP requests will go through this proxy server");
        }
        Err(_) => {
            println!("ℹ️ AI_PROXY_URL environment variable not set");
            println!(" To use proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
        }
    }

    println!("\n🚀 Creating AI client...");
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Client created successfully, provider: {:?}",
        client.current_provider()
    );

    // Create test request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! This request may go through a proxy.".to_string()),
            function_call: None,
        }],
    );

    println!("\n📤 Preparing to send request...");
    println!(" Model: {}", request.model);
    println!(" Message: {}", request.messages[0].content.as_text());

    // Get model list (this request will also go through proxy)
    match client.list_models().await {
        Ok(models) => {
            println!("\n📋 Model list obtained through proxy:");
            for model in models {
                println!(" • {}", model);
            }
        }
        Err(e) => {
            println!("\n⚠️ Failed to get model list: {}", e);
            println!(" This may be due to:");
            println!(" • GROQ_API_KEY environment variable not set");
            println!(" • Proxy server configuration error");
            println!(" • Network connection issue");
        }
    }

    println!("\n💡 Proxy Configuration Instructions:");
    println!(" • Set environment variable: AI_PROXY_URL=http://your-proxy:port");
    println!(" • Supports HTTP and HTTPS proxies");
    println!(" • Supports authenticated proxies: http://user:pass@proxy:port");
    println!(" • All AI providers will automatically use this proxy configuration");

    Ok(())
}
Additional examples can be found in:
- examples/openai_test.rs
- examples/test_all_providers.rs
- examples/function_call_openai.rs
- examples/test_https_proxy.rs
- examples/test_anthropic.rs
- examples/test_streaming.rs
- examples/test_openai_specific.rs
- examples/test_gemini.rs
- examples/test_groq_generic.rs
- examples/test_streaming_clean.rs
- examples/test_proxy_working.rs
- examples/test_retry_mechanism.rs
- examples/batch_processing.rs
- examples/test_all_config_driven.rs
- examples/test_hybrid_architecture.rs
pub fn new_text<S: Into<String>>(s: S) -> Self
Convenience constructor for text content
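A short sketch: because the parameter is generic over Into<String>, both &str and String arguments work:

let from_str = Content::new_text("Please draw an ASCII horse for me.");
let from_string = Content::new_text(String::from("Hello!"));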
Examples found in repository
examples/ascii_horse.rs (line 45)
(Same example as shown in full under as_text above; line 45 is the Content::new_text call that builds the user message.)
pub fn new_image(
    url: Option<String>,
    mime: Option<String>,
    name: Option<String>,
) -> Self
Convenience constructor for image content
Examples found in repository
examples/multimodal_example.rs (lines 14-18)
(Same example as shown in full under as_text above; lines 14-18 are the Content::new_image call that builds the image message.)
Trait Implementations
impl<'de> Deserialize<'de> for Content
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
Auto Trait Implementations
impl Freeze for Content
impl RefUnwindSafe for Content
impl Send for Content
impl Sync for Content
impl Unpin for Content
impl UnwindSafe for Content
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.