batch_processing/batch_processing.rs

use ai_lib::types::common::Content;
use ai_lib::{AiClient, ChatCompletionRequest, Message, Provider, Role};
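
// This example issues the same four chat requests three ways: with an
// explicit concurrency limit, with ai-lib's "smart" batching, and with
// unlimited concurrency, timing each run for comparison.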

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 AI-lib Batch Processing Example");
    println!("==================================");

    let client = AiClient::new(Provider::Groq)?;
    println!(
        "✅ Created client with provider: {:?}",
        client.current_provider()
    );

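    // Four independent requests, each with its own temperature and token cap.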
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is the capital of France?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.7)
        .with_max_tokens(50),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("What is 2 + 2?".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.1)
        .with_max_tokens(20),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Tell me a short joke.".to_string()),
                function_call: None,
            }],
        )
        .with_temperature(0.9)
        .with_max_tokens(100),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text(
                    "What is the largest planet in our solar system?".to_string(),
                ),
                function_call: None,
            }],
        )
        .with_temperature(0.5)
        .with_max_tokens(60),
    ];

    println!("📤 Prepared {} requests for batch processing", requests.len());

    // Method 1: bounded concurrency. At most two requests are in flight at
    // any time; `requests` is cloned so the same set can be reused below.
    println!("\n🔄 Method 1: Batch processing with concurrency limit (2)");
    let start_time = std::time::Instant::now();

    let responses = client
        .chat_completion_batch(requests.clone(), Some(2))
        .await?;

    let duration = start_time.elapsed();
    println!("⏱️ Batch processing completed in {:?}", duration);

    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    // Method 2: "smart" batching, which lets the library decide how to
    // schedule the requests.
    println!("\n🧠 Method 2: Smart batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch_smart(requests.clone()).await?;

    let duration = start_time.elapsed();
    println!("⏱️ Smart batch processing completed in {:?}", duration);

    // Split the per-request results into successes and failures. A failed
    // request surfaces as an `Err` entry instead of failing the whole batch.
    let successful: Vec<_> = responses.iter().filter_map(|r| r.as_ref().ok()).collect();
    let failed: Vec<_> = responses
        .iter()
        .enumerate()
        .filter_map(|(i, r)| r.as_ref().err().map(|e| (i, e)))
        .collect();

    println!("📊 Results:");
    println!("   ✅ Successful: {}/{}", successful.len(), responses.len());
    println!("   ❌ Failed: {}/{}", failed.len(), responses.len());
    println!(
        "   📈 Success rate: {:.1}%",
        (successful.len() as f64 / responses.len() as f64) * 100.0
    );

    // Method 3: no concurrency cap; every request is dispatched at once.
    println!("\n🚀 Method 3: Unlimited concurrent batch processing");
    let start_time = std::time::Instant::now();

    let responses = client.chat_completion_batch(requests, None).await?;

    let duration = start_time.elapsed();
    println!("⏱️ Unlimited concurrent processing completed in {:?}", duration);

    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => {
                println!(
                    "✅ Request {}: {}",
                    i + 1,
                    resp.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Request {} failed: {}", i + 1, e);
            }
        }
    }

    println!("\n🎉 Batch processing example completed successfully!");
    Ok(())
}
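
A note on running this: AiClient::new(Provider::Groq) needs Groq credentials,
which ai-lib reads from the environment (conventionally a GROQ_API_KEY
variable). With that set, `cargo run --example batch_processing`, or the
equivalent for how the example is registered in your Cargo.toml, should run it.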