1use openai_ergonomic::responses::tool_function;
31use openai_ergonomic::{Client, Error, Response, Result, ToolCallExt};
32use serde_json::json;
33use std::io::{self, Write};
34
/// Quickstart walkthrough for the `openai-ergonomic` crate.
///
/// Runs seven sequential demo steps against the live OpenAI API:
/// client setup, basic chat, system-context chat, simulated streaming,
/// tool/function calling, error-handling patterns, and a small
/// multi-question "assistant" loop that tracks token usage.
///
/// Requires the `OPENAI_API_KEY` environment variable; exits with the
/// underlying error if client construction fails.
///
/// NOTE(review): the emoji glyphs in the string literals below appear
/// mojibake'd (e.g. "ā", "š"), and several literals contain an embedded
/// newline — presumably a mangled "✅" etc.; confirm against the original
/// source before release. Literals are preserved byte-for-byte here.
#[tokio::main]
#[allow(clippy::too_many_lines)] async fn main() -> Result<()> {
    // Emit info-level tracing; adjust via the env filter string if needed.
    tracing_subscriber::fmt().with_env_filter("info").init();

    println!("š OpenAI Ergonomic Quickstart");
    println!("==============================\n");

    println!("š Step 1: Setting up the client");

    // Build the client from OPENAI_API_KEY. A failure here aborts the whole
    // walkthrough after printing setup hints, propagating the original error.
    let client = match Client::from_env() {
        Ok(client_builder) => {
            println!("ā
Client created successfully!");
            client_builder.build()
        }
        Err(e) => {
            eprintln!("ā Failed to create client: {e}");
            eprintln!("š” Make sure you've set OPENAI_API_KEY environment variable");
            eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
            return Err(e);
        }
    };

    println!("\nš Step 2: Basic chat completion");

    // Simplest possible call: a single user message, no extra configuration.
    let builder = client.chat_simple("What is Rust programming language in one sentence?");
    let response = client.send_chat(builder).await;

    match response {
        Ok(chat_response) => {
            println!("ā
Got response!");
            if let Some(content) = chat_response.content() {
                println!("š¤ AI: {content}");
            }

            // Token accounting comes from the raw inner response payload here.
            if let Some(usage) = &chat_response.inner().usage {
                println!(
                    "š Usage: {} prompt + {} completion = {} total tokens",
                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
                );
            }
        }
        Err(e) => {
            println!("ā Chat completion failed: {e}");
        }
    }

    println!("\nš Step 3: Chat with system context");

    // Same call shape, but with a system prompt steering the assistant's role.
    let builder = client.chat_with_system(
        "You are a helpful coding mentor who explains things simply",
        "Explain what a HashMap is in Rust",
    );
    let response = client.send_chat(builder).await;

    match response {
        Ok(chat_response) => {
            println!("ā
Got contextual response!");
            if let Some(content) = chat_response.content() {
                println!("šØāš« Mentor: {content}");
            }
        }
        Err(e) => {
            println!("ā Contextual chat failed: {e}");
        }
    }

    println!("\nš Step 4: Streaming response (real-time)");

    print!("š AI is typing");
    // Flush so the "typing" prefix is visible before the request completes.
    io::stdout().flush().unwrap();

    let builder = client
        .responses()
        .user("Write a short haiku about programming")
        .temperature(0.7)
        .stream(true);
    let response = client.send_responses(builder).await;

    match response {
        Ok(chat_response) => {
            print!(": ");
            io::stdout().flush().unwrap();

            // NOTE(review): this prints the full response one character at a
            // time with a 30 ms delay to *simulate* streaming — the response
            // was already fully received above.
            if let Some(content) = chat_response.content() {
                for char in content.chars() {
                    print!("{char}");
                    io::stdout().flush().unwrap();
                    tokio::time::sleep(std::time::Duration::from_millis(30)).await;
                }
            }
            println!();
        }
        Err(e) => {
            println!("\nā Failed to get streaming response: {e}");
        }
    }

    println!("\nš Step 5: Using tools/functions");

    // Declare a callable tool via a JSON Schema; the model decides whether
    // to request it. Only "location" is required; "unit" is optional.
    let weather_tool = tool_function(
        "get_current_weather",
        "Get the current weather for a given location",
        json!({
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name, e.g. 'San Francisco, CA'"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "Temperature unit"
                }
            },
            "required": ["location"]
        }),
    );

    let builder = client
        .responses()
        .user("What's the weather like in Tokyo?")
        .tool(weather_tool);
    let response = client.send_responses(builder).await;

    match response {
        Ok(chat_response) => {
            println!("ā
Got response with potential tool calls!");

            // The model may answer directly OR request one or more tool calls;
            // handle both outcomes.
            let tool_calls = chat_response.tool_calls();
            if !tool_calls.is_empty() {
                println!("š§ AI requested tool calls:");
                for tool_call in tool_calls {
                    let function_name = tool_call.function_name();
                    println!(" Function: {function_name}");
                    let function_args = tool_call.function_arguments();
                    println!(" Arguments: {function_args}");

                    // This example does not execute the tool; a real app would
                    // run it and send the result back to the model.
                    println!(" š” In a real app, you'd call your weather API here");
                }
            } else if let Some(content) = chat_response.content() {
                println!("š¤ AI: {content}");
            }
        }
        Err(e) => {
            println!("ā Tool calling example failed: {e}");
        }
    }

    println!("\nš Step 6: Error handling patterns");

    // Deliberately send an empty message to exercise the error variants below.
    let builder = client.chat_simple("");
    let bad_response = client.send_chat(builder).await;

    match bad_response {
        Ok(response) => {
            println!("ā
Unexpectedly succeeded with empty message");
            if let Some(content) = response.content() {
                println!("š¤ AI: {content}");
            }
        }
        // Structured API error: HTTP status + server-provided message.
        Err(Error::Api {
            status, message, ..
        }) => {
            println!("ā API Error (HTTP {status}):");
            println!(" Message: {message}");
            println!("š” This is normal - we sent an invalid request");
        }
        Err(Error::RateLimit { .. }) => {
            println!("ā Rate limited - you're sending requests too fast");
            println!("š” In a real app, you'd implement exponential backoff");
        }
        Err(Error::Http(_)) => {
            println!("ā HTTP/Network error");
            println!("š” Check your internet connection and API key");
        }
        // Catch-all for any other error variants.
        Err(e) => {
            println!("ā Other error: {e}");
        }
    }

    println!("\nš Step 7: Complete real-world example");
    println!("Building a simple AI assistant that can:");
    println!("- Answer questions with context");
    println!("- Track conversation costs");
    println!("- Handle errors gracefully");

    // Running token total across all questions, used for the cost estimate.
    let mut total_tokens = 0;

    // NOTE(review): each question is sent as an independent request — the
    // follow-up questions ("that city") have no conversation history, so the
    // model may lack the antecedent; confirm if multi-turn was intended.
    let questions = [
        "What is the capital of France?",
        "What's special about that city?",
        "How many people live there?",
    ];

    for (i, question) in questions.iter().enumerate() {
        println!("\nš¤ User: {question}");

        // Low temperature for factual, deterministic-leaning answers.
        let builder = client
            .responses()
            .system(
                "You are a knowledgeable geography expert. Keep answers concise but informative.",
            )
            .user(*question)
            .temperature(0.1);
        let response = client.send_responses(builder).await;

        match response {
            Ok(chat_response) => {
                if let Some(content) = chat_response.content() {
                    println!("š¤ Assistant: {content}");
                }

                if let Some(usage) = chat_response.usage() {
                    total_tokens += usage.total_tokens;
                    println!(
                        "š This exchange: {} tokens (Running total: {})",
                        usage.total_tokens, total_tokens
                    );
                }
            }
            Err(e) => {
                // A single failed question does not abort the loop.
                println!("ā Question {} failed: {}", i + 1, e);
            }
        }
    }

    println!("\nš Quickstart Complete!");
    println!("======================");
    println!("You've successfully:");
    println!("ā
Created an OpenAI client");
    println!("ā
Made basic chat completions");
    println!("ā
Used streaming responses");
    println!("ā
Implemented tool/function calling");
    println!("ā
Handled errors gracefully");
    println!("ā
Built a complete conversational AI");
    println!("\nš Total tokens used in examples: {total_tokens}");
    // Rough estimate only: flat $0.03 per 1K tokens, regardless of
    // prompt/completion split or actual model used.
    println!(
        "š° Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
        f64::from(total_tokens) * 0.03 / 1000.0
    );

    println!("\nš Next Steps:");
    println!("- Check out other examples in the examples/ directory");
    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
    println!("- Explore advanced features like vision, audio, and assistants");
    println!("- Build your own AI-powered applications!");

    Ok(())
}
339
340#[allow(dead_code)]
345async fn robust_chat_call(client: &Client, message: &str) -> Result<String> {
346 const MAX_RETRIES: usize = 3;
347 let mut last_error = None;
348
349 for attempt in 1..=MAX_RETRIES {
350 let builder = client.chat_simple(message);
351 match client.send_chat(builder).await {
352 Ok(response) => {
353 if let Some(content) = response.content() {
354 return Ok(content.to_string());
355 }
356 return Err(Error::Api {
357 status: 200,
358 message: "No content in response".to_string(),
359 error_type: None,
360 error_code: None,
361 });
362 }
363 Err(Error::RateLimit { .. }) if attempt < MAX_RETRIES => {
364 let delay = std::time::Duration::from_millis(1000 * attempt as u64);
366 tokio::time::sleep(delay).await;
367 tokio::time::sleep(std::time::Duration::from_millis(500)).await;
369 }
370 Err(e) => {
371 last_error = Some(e);
372 if attempt < MAX_RETRIES {
373 tokio::time::sleep(std::time::Duration::from_millis(500)).await;
375 }
376 }
377 }
378 }
379
380 Err(last_error.unwrap_or_else(|| Error::Api {
381 status: 0,
382 message: "Unknown error after retries".to_string(),
383 error_type: None,
384 error_code: None,
385 }))
386}