pub enum Content {
    Text(String),
    Json(Value),
    Image {
        url: Option<String>,
        mime: Option<String>,
        name: Option<String>,
    },
    Audio {
        url: Option<String>,
        mime: Option<String>,
    },
}
Message content. Modeled as an enum to support multimodal and structured content.
Variants
Text(String)
Plain text content
Json(Value)
Generic JSON content for structured payloads (e.g. function call args)
Image
Reference to an image (url) or metadata; adapters may upload or inline as needed
Audio
Reference to audio content
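For illustration, each variant can be constructed directly. A minimal sketch, assuming serde_json::Value is the Value type in scope (URLs and file names are placeholders):

use serde_json::json;

// Plain text content
let text = Content::Text("Hello!".to_string());

// Structured JSON payload, e.g. function call args
let args = Content::Json(json!({ "size": "small" }));

// Image referenced by URL, with optional MIME type and name
let image = Content::Image {
    url: Some("https://example.com/dog.jpg".into()),
    mime: Some("image/jpeg".into()),
    name: Some("dog.jpg".into()),
};

// Audio referenced by URL, with optional MIME type
let audio = Content::Audio {
    url: Some("https://example.com/clip.ogg".into()),
    mime: Some("audio/ogg".into()),
};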
Implementations
impl Content
pub fn as_text(&self) -> String
Return a best-effort textual representation for legacy code paths
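The exact fallback text is implementation-defined; for non-text variants it yields whatever text can be recovered (the multimodal example below prints the image URL this way). A plausible shape for such a helper, as an illustration only and not the crate's actual code:

// Illustrative sketch of a best-effort text fallback
match self {
    Content::Text(s) => s.clone(),
    Content::Json(v) => v.to_string(),
    Content::Image { url, name, .. } => {
        url.clone().or_else(|| name.clone()).unwrap_or_default()
    }
    Content::Audio { url, .. } => url.clone().unwrap_or_default(),
}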
Examples found in repository
examples/multimodal_example.rs (line 25)
5 async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 println!("Multimodal example: image + audio content in a message");
7
8 let _client = AiClient::new(Provider::Groq)?;
9
10 let request = ChatCompletionRequest::new(
11 "multimodal-model".to_string(),
12 vec![Message {
13 role: Role::User,
14 content: Content::new_image(
15 Some("https://example.com/dog.jpg".into()),
16 Some("image/jpeg".into()),
17 Some("dog.jpg".into()),
18 ),
19 function_call: None,
20 }],
21 );
22
23 println!(
24 "Prepared multimodal request; image URL: {}",
25 request.messages[0].content.as_text()
26 );
27
28 // Note: this example demonstrates the type usage only and does not call the API.
29 Ok(())
30}
More examples
examples/basic_usage.rs (line 43)
6 async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 println!("🚀 AI-lib Basic Usage Example");
8 println!("================================");
9
10 // Switch model provider by changing Provider value
11 let client = AiClient::new(Provider::Groq)?;
12 println!(
13 "✅ Created client with provider: {:?}",
14 client.current_provider()
15 );
16
17 // Get list of supported models
18 let models = client.list_models().await?;
19 println!("📋 Available models: {:?}", models);
20
21 // Create chat request
22 let request = ChatCompletionRequest::new(
23 "llama3-8b-8192".to_string(),
24 vec![Message {
25 role: Role::User,
26 content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
27 function_call: None,
28 }],
29 )
30 .with_temperature(0.7)
31 .with_max_tokens(100);
32
33 println!("📤 Sending request to model: {}", request.model);
34
35 // Send request
36 let response = client.chat_completion(request).await?;
37
38 println!("📥 Received response:");
39 println!(" ID: {}", response.id);
40 println!(" Model: {}", response.model);
41 println!(
42 " Content: {}",
43 response.choices[0].message.content.as_text()
44 );
45 println!(" Usage: {} tokens", response.usage.total_tokens);
46
47 Ok(())
48}
examples/ascii_horse.rs (line 87)
24 async fn main() -> Result<(), Box<dyn std::error::Error>> {
25 println!("Example: Function Calling with a local tool 'ascii_horse'");
26
27 // Define the tool
28 let tool = Tool {
29 name: "ascii_horse".to_string(),
30 description: Some("Return an ASCII art horse. Accepts size: 'small'|'large'.".to_string()),
31 parameters: Some(json!({
32 "type": "object",
33 "properties": {
34 "size": { "type": "string", "enum": ["small", "large"] }
35 },
36 "required": ["size"]
37 })),
38 };
39
40 // Build a request that offers the tool to the model
41 let mut request = ChatCompletionRequest::new(
42 "example-model".to_string(),
43 vec![Message {
44 role: Role::User,
45 content: Content::new_text("Please draw an ASCII horse for me."),
46 function_call: None,
47 }],
48 );
49 request.functions = Some(vec![tool.clone()]);
50 request.function_call = Some(FunctionCallPolicy::Auto("ascii_horse".to_string()));
51
52 println!(
53 "Prepared request with functions: {}",
54 serde_json::to_string_pretty(&request.functions).unwrap()
55 );
56
57 // Simulate model returning a function call (in a real run this would come from the provider)
58 let simulated_call = FunctionCall {
59 name: "ascii_horse".to_string(),
60 arguments: Some(json!({ "size": "small" })),
61 };
62 println!(
63 "Model requested function call: {}",
64 serde_json::to_string_pretty(&simulated_call).unwrap()
65 );
66
67 // Execute the local tool
68 let size_arg = simulated_call
69 .arguments
70 .as_ref()
71 .and_then(|v| v.get("size"))
72 .and_then(|s| s.as_str())
73 .unwrap_or("small");
74
75 let tool_output = ascii_horse(size_arg);
76
77 // Convert tool output to a Message and append to conversation
78 let tool_message = Message {
79 role: Role::Assistant,
80 content: Content::new_text(tool_output.clone()),
81 function_call: None,
82 };
83
84 // In a normal flow you'd send the updated messages back to the model to continue the conversation.
85 println!(
86 "Tool output (appended as assistant message):\n\n{}\n",
87 tool_message.content.as_text()
88 );
89
90 Ok(())
91}
examples/proxy_example.rs (line 41)
6 async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 println!("🌐 AI-lib Proxy Server Support Example");
8 println!("=====================================");
9
10 // Check proxy configuration
11 match std::env::var("AI_PROXY_URL") {
12 Ok(proxy_url) => {
13 println!("✅ Proxy configuration detected: {}", proxy_url);
14 println!(" All HTTP requests will go through this proxy server");
15 }
16 Err(_) => {
17 println!("ℹ️ AI_PROXY_URL environment variable not set");
18 println!(" To use proxy, set: export AI_PROXY_URL=http://proxy.example.com:8080");
19 }
20 }
21
22 println!("\n🚀 Creating AI client...");
23 let client = AiClient::new(Provider::Groq)?;
24 println!(
25 "✅ Client created successfully, provider: {:?}",
26 client.current_provider()
27 );
28
29 // Create test request
30 let request = ChatCompletionRequest::new(
31 "llama3-8b-8192".to_string(),
32 vec![Message {
33 role: Role::User,
34 content: Content::Text("Hello! This request may go through a proxy.".to_string()),
35 function_call: None,
36 }],
37 );
38
39 println!("\n📤 Preparing to send request...");
40 println!(" Model: {}", request.model);
41 println!(" Message: {}", request.messages[0].content.as_text());
42
43 // Get model list (this request will also go through proxy)
44 match client.list_models().await {
45 Ok(models) => {
46 println!("\n📋 Model list obtained through proxy:");
47 for model in models {
48 println!(" • {}", model);
49 }
50 }
51 Err(e) => {
52 println!("\n⚠️ Failed to get model list: {}", e);
53 println!(" This may be due to:");
54 println!(" • GROQ_API_KEY environment variable not set");
55 println!(" • Proxy server configuration error");
56 println!(" • Network connection issue");
57 }
58 }
59
60 println!("\n💡 Proxy Configuration Instructions:");
61 println!(" • Set environment variable: AI_PROXY_URL=http://your-proxy:port");
62 println!(" • Supports HTTP and HTTPS proxies");
63 println!(" • Supports authenticated proxies: http://user:pass@proxy:port");
64 println!(" • All AI providers will automatically use this proxy configuration");
65
66 Ok(())
67}
examples/function_call_openai.rs (line 71)
7 async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 println!("🔧 OpenAI Function Calling example (ai-lib)");
9
10 // Ensure OPENAI_API_KEY is set in env before running
11 let client = AiClient::new(Provider::OpenAI)?;
12
13 // Build a simple user message
14 let user_msg = Message {
15 role: Role::User,
16 content: Content::Text("Please call the ascii_horse tool with size=3".to_string()),
17 function_call: None,
18 };
19
20 // Define a Tool (JSON Schema for parameters)
21 let ascii_horse_tool = Tool {
22 name: "ascii_horse".to_string(),
23 description: Some("Draws an ASCII horse of given size".to_string()),
24 parameters: Some(json!({
25 "type": "object",
26 "properties": {
27 "size": { "type": "integer", "description": "Size of the horse" }
28 },
29 "required": ["size"]
30 })),
31 };
32
33 let mut req = ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![user_msg]);
34 req.functions = Some(vec![ascii_horse_tool]);
35 req.function_call = Some(FunctionCallPolicy::Auto("auto".to_string()));
36 req = req.with_max_tokens(200).with_temperature(0.0);
37
38 println!("📤 Sending request to OpenAI (model={})", req.model);
39
40 let resp = client.chat_completion(req).await?;
41
42 // Handle a possible function call from the model: execute locally and send the result back
43 for choice in resp.choices {
44 let msg = choice.message;
45 if let Some(fc) = msg.function_call {
46 println!("🛠️ Model invoked function: {}", fc.name);
47 let args = fc.arguments.unwrap_or(serde_json::json!(null));
48 println!(" arguments: {}", args);
49
50 // Simple local tool: ascii_horse
51 if fc.name == "ascii_horse" {
52 // Parse size param
53 let size = args.get("size").and_then(|v| v.as_i64()).unwrap_or(3) as usize;
54 let horse = generate_ascii_horse(size);
55 println!("⚙️ Executed ascii_horse locally, output:\n{}", horse);
56
57 // Send follow-up message with tool result as assistant message
58 let tool_msg = Message {
59 role: Role::Assistant,
60 content: Content::Text(horse.clone()),
61 function_call: None,
62 };
63
64 let mut followup =
65 ChatCompletionRequest::new("gpt-4o-mini".to_string(), vec![tool_msg]);
66 followup = followup.with_max_tokens(200).with_temperature(0.0);
67 let follow_resp = client.chat_completion(followup).await?;
68 for fc_choice in follow_resp.choices {
69 println!(
70 "🗨️ Final model response: {}",
71 fc_choice.message.content.as_text()
72 );
73 }
74 }
75 } else {
76 println!("💬 Model message: {}", msg.content.as_text());
77 }
78 }
79
80 Ok(())
81}
examples/model_override_demo.rs (line 43)
7 async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 // Check environment variables
9 if std::env::var("GROQ_API_KEY").is_err() {
10 println!("❌ Please set GROQ_API_KEY environment variable");
11 println!(" Example: export GROQ_API_KEY=your_api_key_here");
12 return Ok(());
13 }
14
15 println!("🚀 Model Override Feature Demo");
16 println!("==============================");
17 println!();
18
19 // 1. Basic usage - maintain original simplicity
20 println!("📋 1. Basic Usage - Using Default Model");
21 let reply = AiClient::quick_chat_text(Provider::Groq, "Hello!").await?;
22 println!(" ✅ Response: {}", reply);
23 println!();
24
25 // 2. Explicitly specify model
26 println!("📋 2. Explicitly Specify Model");
27 let reply = AiClient::quick_chat_text_with_model(
28 Provider::Groq,
29 "Hello!",
30 "llama-3.1-8b-instant"
31 ).await?;
32 println!(" ✅ Response: {}", reply);
33 println!();
34
35 // 3. Using ModelOptions
36 println!("📋 3. Using ModelOptions");
37 let client = AiClient::new(Provider::Groq)?;
38 let mut request = client.build_simple_request("Hello!");
39 request.model = "llama-3.1-70b-versatile".to_string();
40
41 let response = client.chat_completion(request).await?;
42
43 let reply = response.choices[0].message.content.as_text();
44 println!(" ✅ Response: {}", reply);
45 println!();
46
47 // 4. AiClientBuilder custom default model
48 println!("📋 4. AiClientBuilder Custom Default Model");
49 let client = AiClient::builder(Provider::Groq)
50 .with_default_chat_model("llama-3.1-8b-instant")
51 .build()?;
52
53 let request = client.build_simple_request("Hello!");
54 println!(" Using model: {}", request.model);
55
56 let response = client.chat_completion(request).await?;
57 match &response.choices[0].message.content {
58 Content::Text(text) => {
59 println!(" ✅ Response: {}", text);
60 }
61 _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
62 }
63 println!();
64
65 // 5. Explicitly specify model in build_simple_request
66 println!("📋 5. Explicitly Specify Model in build_simple_request");
67 let client = AiClient::new(Provider::Groq)?;
68 let request = client.build_simple_request_with_model("Hello!", "llama-3.1-70b-versatile");
69
70 println!(" Using model: {}", request.model);
71
72 let response = client.chat_completion(request).await?;
73 match &response.choices[0].message.content {
74 Content::Text(text) => {
75 println!(" ✅ Response: {}", text);
76 }
77 _ => println!(" ✅ Response: {:?}", response.choices[0].message.content),
78 }
79 println!();
80
81 println!("🎉 Demo completed!");
82 println!("==================");
83 println!("✅ All model override features are working correctly");
84 println!("✅ Backward compatibility is guaranteed");
85 println!("✅ Flexible model specification methods are provided");
86
87 Ok(())
88}
pub fn new_text<S: Into<String>>(s: S) -> Self
Convenience constructor for text content
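Since new_text accepts anything convertible into a String, the following two values are equivalent:

let a = Content::new_text("Hello!");
let b = Content::Text("Hello!".to_string());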
Examples found in repository
examples/ascii_horse.rs (line 45)
43 vec![Message {
44 role: Role::User,
45 content: Content::new_text("Please draw an ASCII horse for me."),
46 function_call: None,
47 }],
(Excerpt; the full listing appears under as_text above.)
pub fn new_image(
    url: Option<String>,
    mime: Option<String>,
    name: Option<String>,
) -> Self
Convenience constructor for image content
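All three arguments are optional, so callers can supply a URL, metadata, or both. A minimal sketch (placeholder URL and name):

// Full reference: URL, MIME type, and display name
let image = Content::new_image(
    Some("https://example.com/dog.jpg".into()),
    Some("image/jpeg".into()),
    Some("dog.jpg".into()),
);

// Metadata-only reference (no URL); an adapter may upload or inline later
let named_only = Content::new_image(None, None, Some("dog.jpg".into()));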
Examples found in repository
examples/multimodal_example.rs (lines 14-18)
12 vec![Message {
13 role: Role::User,
14 content: Content::new_image(
15 Some("https://example.com/dog.jpg".into()),
16 Some("image/jpeg".into()),
17 Some("dog.jpg".into()),
18 ),
19 function_call: None,
20 }],
(Excerpt; the full listing appears under as_text above.)
Trait Implementations
impl<'de> Deserialize<'de> for Content
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
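Since the impl is derived, a reasonable assumption is serde's default externally tagged enum representation; the crate may override this with serde attributes, so treat this sketch as illustrative only:

// Assumes the derive-default, externally tagged representation
let text: Content = serde_json::from_str(r#"{"Text":"Hello!"}"#)?;
let json_content: Content = serde_json::from_value(
    serde_json::json!({ "Json": { "size": "small" } }),
)?;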
Auto Trait Implementations
impl Freeze for Content
impl RefUnwindSafe for Content
impl Send for Content
impl Sync for Content
impl Unpin for Content
impl UnwindSafe for Content
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.