1#![allow(clippy::uninlined_format_args)]
2use openai_ergonomic::{Client, Error, Response};
15use std::collections::VecDeque;
16use std::io::{self, Write};
17
/// One message in the chat history: who said it and what was said.
#[derive(Debug, Clone)]
struct ConversationTurn {
    // Message author: "user" or "assistant" (the system prompt is stored
    // separately on `ConversationManager`).
    role: String,
    // The message text.
    content: String,
    // Completion tokens attributed to this turn when the API reported usage;
    // `None` when usage was unavailable or not recorded.
    token_count: Option<i32>,
}
25
/// Tracks a rolling window of conversation turns plus aggregate token usage.
#[derive(Debug)]
struct ConversationManager {
    // Oldest-first turns; trimmed from the front once `max_history` is reached.
    history: VecDeque<ConversationTurn>,
    // Maximum number of turns retained in `history`.
    max_history: usize,
    // Running sum of prompt + completion tokens across all requests.
    total_tokens_used: i32,
    // Optional system prompt, prepended when building API request messages.
    system_message: Option<String>,
}
34
35impl ConversationManager {
36 const fn new(system_message: Option<String>, max_history: usize) -> Self {
38 Self {
39 history: VecDeque::new(),
40 max_history,
41 total_tokens_used: 0,
42 system_message,
43 }
44 }
45
46 fn add_user_message(&mut self, content: String) {
48 self.add_turn(ConversationTurn {
49 role: "user".to_string(),
50 content,
51 token_count: None,
52 });
53 }
54
55 fn add_assistant_message(&mut self, content: String, token_count: Option<i32>) {
57 self.add_turn(ConversationTurn {
58 role: "assistant".to_string(),
59 content,
60 token_count,
61 });
62 }
63
64 fn add_turn(&mut self, turn: ConversationTurn) {
66 if self.history.len() >= self.max_history {
67 self.history.pop_front();
68 }
69 self.history.push_back(turn);
70 }
71
72 fn update_token_usage(&mut self, prompt_tokens: i32, completion_tokens: i32) {
74 let total = prompt_tokens + completion_tokens;
75 self.total_tokens_used += total;
76 }
77
78 fn display_history(&self) {
80 println!("\n=== Conversation History ===");
81
82 if let Some(ref system) = self.system_message {
83 println!("System: {system}");
84 println!();
85 }
86
87 for (i, turn) in self.history.iter().enumerate() {
88 let token_info = turn
89 .token_count
90 .map_or_else(String::new, |tokens| format!(" ({tokens} tokens)"));
91
92 println!(
93 "{}. {}{}: {}",
94 i + 1,
95 turn.role
96 .chars()
97 .next()
98 .unwrap()
99 .to_uppercase()
100 .collect::<String>()
101 + &turn.role[1..],
102 token_info,
103 turn.content
104 );
105 }
106
107 println!("\nTotal tokens used: {}", self.total_tokens_used);
108 println!("Messages in history: {}", self.history.len());
109 println!("=============================\n");
110 }
111
112 fn get_conversation_for_api(&self) -> Vec<(String, String)> {
114 let mut messages = Vec::new();
115
116 if let Some(ref system) = self.system_message {
118 messages.push(("system".to_string(), system.clone()));
119 }
120
121 for turn in &self.history {
123 messages.push((turn.role.clone(), turn.content.clone()));
124 }
125
126 messages
127 }
128}
129
/// Entry point: builds the client from the environment, then walks through
/// five chat demonstrations that share one `ConversationManager`.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("OpenAI Ergonomic - Comprehensive Chat Example");
    println!("============================================");
    println!();

    // Build the client from OPENAI_API_KEY; exit early with guidance if unset.
    let client = match Client::from_env() {
        Ok(client_builder) => {
            println!("ā Client initialized successfully");
            client_builder.build()
        }
        Err(e) => {
            eprintln!("ā Failed to initialize client: {e}");
            eprintln!("Make sure OPENAI_API_KEY environment variable is set");
            return Err(e.into());
        }
    };

    // System prompt shared by every request in this example.
    let system_message = "You are a helpful AI assistant. Provide concise, informative responses. \
        Always be polite and professional. If asked about your capabilities, \
        explain what you can help with clearly."
        .to_string();

    // Keep at most 10 turns of context to bound request size.
    let mut conversation = ConversationManager::new(Some(system_message), 10);

    println!("ā Conversation manager initialized (max history: 10 messages)");
    println!("ā System message configured");
    println!();

    // Run each demonstration in order against the same shared conversation.
    demonstrate_basic_chat(&client, &mut conversation).await?;
    demonstrate_multi_turn_chat(&client, &mut conversation).await?;
    demonstrate_streaming_chat(&client, &mut conversation).await?;
    demonstrate_token_tracking(&client, &mut conversation).await?;
    demonstrate_error_handling(&client).await?;

    conversation.display_history();

    println!("š Chat comprehensive example completed successfully!");
    println!("This example demonstrated:");
    println!(" ⢠Multi-turn conversation management");
    println!(" ⢠Message history tracking and rotation");
    println!(" ⢠System message configuration");
    println!(" ⢠Token usage monitoring");
    println!(" ⢠Error handling patterns");
    println!(" ⢠Streaming response handling");

    Ok(())
}
182
183async fn demonstrate_basic_chat(
185 client: &Client,
186 conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188 println!("š Example 1: Basic Chat Completion");
189 println!("----------------------------------");
190
191 let user_message = "Hello! Can you explain what you can help me with?";
192 conversation.add_user_message(user_message.to_string());
193
194 println!("User: {user_message}");
195 print!("Assistant: ");
196 io::stdout().flush()?;
197
198 let messages = conversation.get_conversation_for_api();
200 let mut chat_builder = client.chat();
201
202 for (role, content) in messages {
203 match role.as_str() {
204 "system" => chat_builder = chat_builder.system(content),
205 "user" => chat_builder = chat_builder.user(content),
206 "assistant" => chat_builder = chat_builder.assistant(content),
207 _ => {} }
209 }
210
211 let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214 if let Some(content) = response.content() {
215 println!("{content}");
216 conversation.add_assistant_message(content.to_string(), None);
217
218 if let Some(usage) = response.usage() {
220 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221 }
222 } else {
223 println!("No response content received");
224 }
225
226 println!();
227 Ok(())
228}
229
230async fn demonstrate_multi_turn_chat(
232 client: &Client,
233 conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235 println!("š¬ Example 2: Multi-turn Conversation");
236 println!("------------------------------------");
237
238 let questions = vec![
239 "What's the capital of France?",
240 "What's the population of that city?",
241 "Can you tell me an interesting fact about it?",
242 ];
243
244 for question in questions {
245 conversation.add_user_message(question.to_string());
246
247 println!("User: {question}");
248 print!("Assistant: ");
249 io::stdout().flush()?;
250
251 let messages = conversation.get_conversation_for_api();
253 let mut chat_builder = client.chat();
254
255 for (role, content) in messages {
256 match role.as_str() {
257 "system" => chat_builder = chat_builder.system(content),
258 "user" => chat_builder = chat_builder.user(content),
259 "assistant" => chat_builder = chat_builder.assistant(content),
260 _ => {}
261 }
262 }
263
264 let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266 if let Some(content) = response.content() {
267 println!("{content}");
268 conversation.add_assistant_message(content.to_string(), None);
269
270 if let Some(usage) = response.usage() {
272 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273 }
274 }
275
276 println!();
277 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279 }
280
281 Ok(())
282}
283
284async fn demonstrate_streaming_chat(
286 _client: &Client,
287 conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289 println!("š Example 3: Streaming Chat Response");
290 println!("------------------------------------");
291
292 let streaming_question = "Can you write a short poem about programming?";
294 conversation.add_user_message(streaming_question.to_string());
295
296 println!("User: {streaming_question}");
297 println!("Assistant (streaming): ");
298
299 println!("š§ Streaming functionality is being implemented...");
302 println!("Future implementation will show real-time token-by-token responses");
303
304 let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307 for char in simulated_response.chars() {
309 print!("{char}");
310 io::stdout().flush()?;
311 tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312 }
313 println!("\n");
314
315 conversation.add_assistant_message(simulated_response.to_string(), None);
317
318 Ok(())
319}
320
321async fn demonstrate_token_tracking(
323 client: &Client,
324 conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326 println!("š Example 4: Token Usage Tracking");
327 println!("---------------------------------");
328
329 let efficiency_question = "In one sentence, what is machine learning?";
330 conversation.add_user_message(efficiency_question.to_string());
331
332 println!("User: {efficiency_question}");
333 print!("Assistant: ");
334 io::stdout().flush()?;
335
336 let messages = conversation.get_conversation_for_api();
338 let mut chat_builder = client.chat().max_completion_tokens(50); for (role, content) in messages {
341 match role.as_str() {
342 "system" => chat_builder = chat_builder.system(content),
343 "user" => chat_builder = chat_builder.user(content),
344 "assistant" => chat_builder = chat_builder.assistant(content),
345 _ => {}
346 }
347 }
348
349 let response = client.send_chat(chat_builder).await?;
350
351 if let Some(content) = response.content() {
352 println!("{content}");
353
354 if let Some(usage) = response.usage() {
356 println!("\nš Token Usage Breakdown:");
357 println!(" Prompt tokens: {}", usage.prompt_tokens);
358 println!(" Completion tokens: {}", usage.completion_tokens);
359 println!(" Total tokens: {}", usage.total_tokens);
360
361 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363 conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364 } else {
365 conversation.add_assistant_message(content.to_string(), None);
366 }
367 }
368
369 println!();
370 Ok(())
371}
372
/// Exercises the client's error paths: a request expected to fail model
/// validation, an empty (invalid) request, and a configuration check.
async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
    println!("ā ļø Example 5: Error Handling Patterns");
    println!("------------------------------------");

    println!("Testing various error scenarios...\n");

    println!("Test 1: Invalid model name");
    // NOTE(review): despite the label, no invalid model is actually set on
    // this builder — it uses the client default, so the request may simply
    // succeed (see the Ok arm below).
    let invalid_model_builder = client.chat()
        .user("Hello")
        .temperature(0.7);

    match client.send_chat(invalid_model_builder).await {
        Ok(_) => println!("ā Request succeeded (model validation not yet implemented)"),
        Err(e) => match &e {
            // Server-side rejection carrying an HTTP status and message.
            Error::Api {
                status, message, ..
            } => {
                println!("ā API Error ({status}): {message}");
            }
            // Transport-level failure (connection, TLS, timeout, ...).
            Error::Http(reqwest_err) => {
                println!("ā HTTP Error: {reqwest_err}");
            }
            // Client-side validation failure before the request was sent.
            Error::InvalidRequest(msg) => {
                println!("ā Invalid Request: {msg}");
            }
            _ => {
                println!("ā Unexpected Error: {e}");
            }
        },
    }

    println!("\nTest 2: Empty message validation");
    // A builder with no messages should be rejected by client-side validation.
    let empty_builder = client.chat();
    match client.send_chat(empty_builder).await {
        Ok(_) => println!("ā Empty request unexpectedly succeeded"),
        Err(Error::InvalidRequest(msg)) => {
            println!("ā Validation caught empty request: {msg}");
        }
        Err(e) => {
            println!("ā Unexpected error type: {e}");
        }
    }

    println!("\nTest 3: Configuration validation");
    // Reaching this point means Client::from_env() succeeded earlier in main.
    println!("ā Client configuration is valid (created successfully)");

    println!("\nš”ļø Error handling patterns demonstrated:");
    println!(" ⢠API error classification");
    println!(" ⢠Request validation");
    println!(" ⢠Network error handling");
    println!(" ⢠Configuration validation");

    println!();
    Ok(())
}