chat_comprehensive.rs

#![allow(clippy::uninlined_format_args)]
//! Comprehensive chat completions example.
//!
//! This example demonstrates advanced chat completion features including:
//! - Multi-turn conversation management
//! - Message history tracking
//! - System, user, and assistant messages
//! - Streaming chat responses (simulated until the client supports streaming)
//! - Token usage tracking
//! - Error handling patterns
//!
//! Run with: `cargo run --example chat_comprehensive`

use openai_ergonomic::{Client, Error, Response};
use std::collections::VecDeque;
use std::io::{self, Write};

/// Represents a conversation turn with role and content.
#[derive(Debug, Clone)]
struct ConversationTurn {
    role: String,
    content: String,
    token_count: Option<i32>,
}

/// Manages conversation history and token tracking.
#[derive(Debug)]
struct ConversationManager {
    history: VecDeque<ConversationTurn>,
    max_history: usize,
    total_tokens_used: i32,
    system_message: Option<String>,
}

impl ConversationManager {
    /// Create a new conversation manager with optional system message.
    const fn new(system_message: Option<String>, max_history: usize) -> Self {
        Self {
            history: VecDeque::new(),
            max_history,
            total_tokens_used: 0,
            system_message,
        }
    }

    /// Add a user message to the conversation history.
    fn add_user_message(&mut self, content: String) {
        self.add_turn(ConversationTurn {
            role: "user".to_string(),
            content,
            token_count: None,
        });
    }

    /// Add an assistant message to the conversation history.
    fn add_assistant_message(&mut self, content: String, token_count: Option<i32>) {
        self.add_turn(ConversationTurn {
            role: "assistant".to_string(),
            content,
            token_count,
        });
    }

    /// Add a turn to the history, managing the maximum size.
    fn add_turn(&mut self, turn: ConversationTurn) {
        if self.history.len() >= self.max_history {
            self.history.pop_front();
        }
        self.history.push_back(turn);
    }

    /// Update total token usage from a response.
    fn update_token_usage(&mut self, prompt_tokens: i32, completion_tokens: i32) {
        let total = prompt_tokens + completion_tokens;
        self.total_tokens_used += total;
    }

    /// Display conversation history.
    fn display_history(&self) {
        println!("\n=== Conversation History ===");

        if let Some(ref system) = self.system_message {
            println!("System: {system}");
            println!();
        }

        for (i, turn) in self.history.iter().enumerate() {
            let token_info = turn
                .token_count
                .map_or_else(String::new, |tokens| format!(" ({tokens} tokens)"));

            // Capitalize the role for display (e.g. "user" -> "User").
            let role_display = turn.role.chars().next().map_or_else(String::new, |first| {
                first.to_uppercase().collect::<String>() + &turn.role[1..]
            });

            println!("{}. {}{}: {}", i + 1, role_display, token_info, turn.content);
        }

        println!("\nTotal tokens used: {}", self.total_tokens_used);
        println!("Messages in history: {}", self.history.len());
        println!("=============================\n");
    }

    /// Get conversation turns for API request.
    fn get_conversation_for_api(&self) -> Vec<(String, String)> {
        let mut messages = Vec::new();

        // Add system message if present
        if let Some(ref system) = self.system_message {
            messages.push(("system".to_string(), system.clone()));
        }

        // Add conversation history
        for turn in &self.history {
            messages.push((turn.role.clone(), turn.content.clone()));
        }

        messages
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("OpenAI Ergonomic - Comprehensive Chat Example");
    println!("============================================");
    println!();

    // Create client from environment variables
    let client = match Client::from_env() {
        Ok(client_builder) => {
            println!("āœ“ Client initialized successfully");
            client_builder.build()
        }
        Err(e) => {
            eprintln!("āœ— Failed to initialize client: {e}");
            eprintln!("Make sure OPENAI_API_KEY environment variable is set");
            return Err(e.into());
        }
    };

    // Initialize conversation manager with system message
    let system_message = "You are a helpful AI assistant. Provide concise, informative responses. \
                          Always be polite and professional. If asked about your capabilities, \
                          explain what you can help with clearly."
        .to_string();

    let mut conversation = ConversationManager::new(Some(system_message), 10);

    println!("āœ“ Conversation manager initialized (max history: 10 messages)");
    println!("āœ“ System message configured");
    println!();

    // Demonstrate conversation features
    demonstrate_basic_chat(&client, &mut conversation).await?;
    demonstrate_multi_turn_chat(&client, &mut conversation).await?;
    demonstrate_streaming_chat(&client, &mut conversation).await?;
    demonstrate_token_tracking(&client, &mut conversation).await?;
    demonstrate_error_handling(&client).await?;

    // Final conversation summary
    conversation.display_history();

    println!("šŸŽ‰ Chat comprehensive example completed successfully!");
    println!("This example demonstrated:");
    println!("  • Multi-turn conversation management");
    println!("  • Message history tracking and rotation");
    println!("  • System message configuration");
    println!("  • Token usage monitoring");
    println!("  • Error handling patterns");
    println!("  • Streaming response handling");

    Ok(())
}

/// Demonstrate basic chat completion.
async fn demonstrate_basic_chat(
    client: &Client,
    conversation: &mut ConversationManager,
) -> Result<(), Box<dyn std::error::Error>> {
    println!("šŸ“ Example 1: Basic Chat Completion");
    println!("----------------------------------");

    let user_message = "Hello! Can you explain what you can help me with?";
    conversation.add_user_message(user_message.to_string());

    println!("User: {user_message}");
    print!("Assistant: ");
    io::stdout().flush()?;

    // Build the chat request with conversation history
    let messages = conversation.get_conversation_for_api();
    let mut chat_builder = client.chat();

    for (role, content) in messages {
        match role.as_str() {
            "system" => chat_builder = chat_builder.system(content),
            "user" => chat_builder = chat_builder.user(content),
            "assistant" => chat_builder = chat_builder.assistant(content),
            _ => {} // Ignore unknown roles
        }
    }

    // Send the request
    let response = client.send_chat(chat_builder.temperature(0.7)).await?;

    if let Some(content) = response.content() {
        println!("{content}");
        conversation.add_assistant_message(content.to_string(), None);

        // Track token usage if available
        if let Some(usage) = response.usage() {
            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
        }
    } else {
        println!("No response content received");
    }

    println!();
    Ok(())
}

/// Demonstrate multi-turn conversation.
async fn demonstrate_multi_turn_chat(
    client: &Client,
    conversation: &mut ConversationManager,
) -> Result<(), Box<dyn std::error::Error>> {
    println!("šŸ’¬ Example 2: Multi-turn Conversation");
    println!("------------------------------------");

    let questions = vec![
        "What's the capital of France?",
        "What's the population of that city?",
        "Can you tell me an interesting fact about it?",
    ];

    for question in questions {
        conversation.add_user_message(question.to_string());

        println!("User: {question}");
        print!("Assistant: ");
        io::stdout().flush()?;

        // Build chat request with full conversation history
        let messages = conversation.get_conversation_for_api();
        let mut chat_builder = client.chat();

        for (role, content) in messages {
            match role.as_str() {
                "system" => chat_builder = chat_builder.system(content),
                "user" => chat_builder = chat_builder.user(content),
                "assistant" => chat_builder = chat_builder.assistant(content),
                _ => {}
            }
        }

        let response = client.send_chat(chat_builder.temperature(0.3)).await?;

        if let Some(content) = response.content() {
            println!("{content}");
            conversation.add_assistant_message(content.to_string(), None);

            // Track token usage
            if let Some(usage) = response.usage() {
                conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
            }
        }

        println!();
        // Small delay between questions for readability
        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
    }

    Ok(())
}

/// Demonstrate streaming chat response.
async fn demonstrate_streaming_chat(
    _client: &Client,
    conversation: &mut ConversationManager,
) -> Result<(), Box<dyn std::error::Error>> {
    println!("šŸ”„ Example 3: Streaming Chat Response");
    println!("------------------------------------");

    // Add user message for streaming example
    let streaming_question = "Can you write a short poem about programming?";
    conversation.add_user_message(streaming_question.to_string());

    println!("User: {streaming_question}");
    println!("Assistant (streaming): ");

    // Note: Streaming is not yet fully implemented in the client, so this
    // example simulates the output; a hypothetical sketch of the intended API
    // follows below.
    println!("🚧 Streaming functionality is being implemented...");
    println!("Future implementation will show real-time token-by-token responses");

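    // A sketch of what the streaming call might look like once the client
    // supports it (the `_client` parameter would then be used as a real
    // client). `send_chat_stream` and `content_delta` are illustrative
    // assumptions, not the current openai-ergonomic API, and `.next()` would
    // come from `futures::StreamExt`:
    //
    //     let builder = client.chat().user(streaming_question);
    //     let mut stream = client.send_chat_stream(builder).await?;
    //     while let Some(chunk) = stream.next().await {
    //         if let Some(delta) = chunk?.content_delta() {
    //             print!("{delta}");
    //             io::stdout().flush()?;
    //         }
    //     }
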
    // Simulate what streaming would look like
    let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";

    // Simulate typing effect
    for ch in simulated_response.chars() {
        print!("{ch}");
        io::stdout().flush()?;
        tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
    }
    println!("\n");

    // Add the response to conversation history
    conversation.add_assistant_message(simulated_response.to_string(), None);

    Ok(())
}

/// Demonstrate token usage tracking.
async fn demonstrate_token_tracking(
    client: &Client,
    conversation: &mut ConversationManager,
) -> Result<(), Box<dyn std::error::Error>> {
    println!("šŸ“Š Example 4: Token Usage Tracking");
    println!("---------------------------------");

    let efficiency_question = "In one sentence, what is machine learning?";
    conversation.add_user_message(efficiency_question.to_string());

    println!("User: {efficiency_question}");
    print!("Assistant: ");
    io::stdout().flush()?;

    // Build chat request
    let messages = conversation.get_conversation_for_api();
    let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo

    for (role, content) in messages {
        match role.as_str() {
            "system" => chat_builder = chat_builder.system(content),
            "user" => chat_builder = chat_builder.user(content),
            "assistant" => chat_builder = chat_builder.assistant(content),
            _ => {}
        }
    }

    let response = client.send_chat(chat_builder).await?;

    if let Some(content) = response.content() {
        println!("{content}");

        // Display detailed token usage
        if let Some(usage) = response.usage() {
            println!("\nšŸ“ˆ Token Usage Breakdown:");
            println!("  Prompt tokens: {}", usage.prompt_tokens);
            println!("  Completion tokens: {}", usage.completion_tokens);
            println!("  Total tokens: {}", usage.total_tokens);

            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);

            conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
        } else {
            conversation.add_assistant_message(content.to_string(), None);
        }
    }

    println!();
    Ok(())
}

/// Demonstrate error handling patterns.
async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
    println!("āš ļø  Example 5: Error Handling Patterns");
    println!("------------------------------------");

    println!("Testing various error scenarios...\n");

    // Test 1: Invalid model
    println!("Test 1: Invalid model name");
    let invalid_model_builder = client.chat()
        .user("Hello")
        // Note: we can't easily test an invalid model without modifying the
        // builder; this block just shows the pattern for handling errors.
        .temperature(0.7);

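    // If the builder exposed a way to set the model (for example a hypothetical
    // `.model("not-a-real-model")`, an assumption rather than a documented
    // method), requesting a nonexistent model would exercise the `Error::Api`
    // branch below.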
    match client.send_chat(invalid_model_builder).await {
        Ok(_) => println!("āœ“ Request succeeded (model validation not yet implemented)"),
        Err(e) => match &e {
            Error::Api {
                status, message, ..
            } => {
                println!("āœ— API Error ({status}): {message}");
            }
            Error::Http(reqwest_err) => {
                println!("āœ— HTTP Error: {reqwest_err}");
            }
            Error::InvalidRequest(msg) => {
                println!("āœ— Invalid Request: {msg}");
            }
            _ => {
                println!("āœ— Unexpected Error: {e}");
            }
        },
    }

    // Test 2: Empty message validation
    println!("\nTest 2: Empty message validation");
    let empty_builder = client.chat(); // No messages added

    match client.send_chat(empty_builder).await {
        Ok(_) => println!("āœ— Empty request unexpectedly succeeded"),
        Err(Error::InvalidRequest(msg)) => {
            println!("āœ“ Validation caught empty request: {msg}");
        }
        Err(e) => {
            println!("āœ— Unexpected error type: {e}");
        }
    }

    // Test 3: Configuration errors
    println!("\nTest 3: Configuration validation");
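    // Configuration problems (for example a missing OPENAI_API_KEY) surface
    // when the client is constructed; see the `Client::from_env()` match in
    // `main` for that handling pattern. By this point the client exists, so
    // its configuration is known to be valid.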
    println!("āœ“ Client configuration is valid (created successfully)");

    println!("\nšŸ›”ļø  Error handling patterns demonstrated:");
    println!("  • API error classification");
    println!("  • Request validation");
    println!("  • Network error handling");
    println!("  • Configuration validation");

    println!();
    Ok(())
}