// examples/quickstart.rs

//! # `OpenAI` Ergonomic Quickstart Guide
//!
//! This example demonstrates how to get started with the openai-ergonomic crate
//! in under 5 minutes. It covers the most common use cases and patterns you'll
//! need for building AI-powered applications.
//!
//! ## Setup Instructions
//!
//! 1. Set your `OpenAI` API key:
//!    ```bash
//!    export OPENAI_API_KEY="sk-your-api-key-here"
//!    ```
//!
//! 2. Run this example:
//!    ```bash
//!    cargo run --example quickstart
//!    ```
//!
//! ## What This Example Shows
//!
//! - Environment setup and client creation
//! - Basic chat completions
//! - Streaming responses (real-time text generation)
//! - Function/tool calling for external data
//! - Robust error handling patterns
//! - Usage tracking and cost monitoring
//!
//! This example is designed to be your first step into building with `OpenAI`.

30use openai_ergonomic::responses::tool_function;
31use openai_ergonomic::{Client, Error, Response, Result, ToolCallExt};
32use serde_json::json;
33use std::io::{self, Write};
34
35#[tokio::main]
36#[allow(clippy::too_many_lines)] // This is an example showing many features
37async fn main() -> Result<()> {
38    // Initialize logging to see what's happening under the hood
39    tracing_subscriber::fmt().with_env_filter("info").init();
40
41    println!("šŸš€ OpenAI Ergonomic Quickstart");
42    println!("==============================\n");
43
44    // ==========================================
45    // 1. ENVIRONMENT SETUP & CLIENT CREATION
46    // ==========================================
47
48    println!("šŸ“‹ Step 1: Setting up the client");
49
50    // The simplest way to get started - reads OPENAI_API_KEY from environment
51    let client = match Client::from_env() {
52        Ok(client_builder) => {
53            println!("āœ… Client created successfully!");
54            client_builder.build()
55        }
56        Err(e) => {
57            eprintln!("āŒ Failed to create client: {e}");
58            eprintln!("šŸ’” Make sure you've set OPENAI_API_KEY environment variable");
59            eprintln!("   Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60            return Err(e);
61        }
62    };
63
64    // ==========================================
65    // 2. BASIC CHAT COMPLETION
66    // ==========================================
67
68    println!("\nšŸ“‹ Step 2: Basic chat completion");
69
70    // The simplest way to get a response from ChatGPT
71    let builder = client.chat_simple("What is Rust programming language in one sentence?");
72    let response = client.send_chat(builder).await;
73
74    match response {
75        Ok(chat_response) => {
76            println!("āœ… Got response!");
77            if let Some(content) = chat_response.content() {
78                println!("šŸ¤– AI: {content}");
79            }
80
81            // Show usage information for cost tracking
82            if let Some(usage) = &chat_response.inner().usage {
83                println!(
84                    "šŸ“Š Usage: {} prompt + {} completion = {} total tokens",
85                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86                );
87            }
88        }
89        Err(e) => {
90            println!("āŒ Chat completion failed: {e}");
91            // Continue with other examples even if this one fails
92        }
93    }
94
95    // ==========================================
96    // 3. CHAT WITH SYSTEM MESSAGE
97    // ==========================================
98
99    println!("\nšŸ“‹ Step 3: Chat with system context");
100
101    // System messages help set the AI's behavior and context
102    let builder = client.chat_with_system(
103        "You are a helpful coding mentor who explains things simply",
104        "Explain what a HashMap is in Rust",
105    );
106    let response = client.send_chat(builder).await;
107
108    match response {
109        Ok(chat_response) => {
110            println!("āœ… Got contextual response!");
111            if let Some(content) = chat_response.content() {
112                println!("šŸ‘Øā€šŸ« Mentor: {content}");
113            }
114        }
115        Err(e) => {
116            println!("āŒ Contextual chat failed: {e}");
117        }
118    }
119
120    // ==========================================
121    // 4. STREAMING RESPONSES
122    // ==========================================
123
124    println!("\nšŸ“‹ Step 4: Streaming response (real-time)");
125
126    // Streaming lets you see the response as it's being generated
127    // This is great for chatbots and interactive applications
128    print!("šŸ”„ AI is typing");
129    io::stdout().flush().unwrap();
130
131    let builder = client
132        .responses()
133        .user("Write a short haiku about programming")
134        .temperature(0.7)
135        .stream(true);
136    // Note: Full streaming implementation is in development
137    // For now, we'll demonstrate non-streaming responses with real-time simulation
138    let response = client.send_responses(builder).await;
139
140    match response {
141        Ok(chat_response) => {
142            print!(": ");
143            io::stdout().flush().unwrap();
144
145            // Simulate streaming by printing character by character
146            if let Some(content) = chat_response.content() {
147                for char in content.chars() {
148                    print!("{char}");
149                    io::stdout().flush().unwrap();
150                    // Small delay to simulate streaming
151                    tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152                }
153            }
154            println!(); // New line after "streaming"
155        }
156        Err(e) => {
157            println!("\nāŒ Failed to get streaming response: {e}");
158        }
159    }
160
161    // ==========================================
162    // 5. FUNCTION/TOOL CALLING
163    // ==========================================
164
165    println!("\nšŸ“‹ Step 5: Using tools/functions");
166
167    // Tools let the AI call external functions to get real data
168    // Here we define a weather function as an example
169    let weather_tool = tool_function(
170        "get_current_weather",
171        "Get the current weather for a given location",
172        json!({
173            "type": "object",
174            "properties": {
175                "location": {
176                    "type": "string",
177                    "description": "The city name, e.g. 'San Francisco, CA'"
178                },
179                "unit": {
180                    "type": "string",
181                    "enum": ["celsius", "fahrenheit"],
182                    "description": "Temperature unit"
183                }
184            },
185            "required": ["location"]
186        }),
187    );
188
189    let builder = client
190        .responses()
191        .user("What's the weather like in Tokyo?")
192        .tool(weather_tool);
193    let response = client.send_responses(builder).await;
194
195    match response {
196        Ok(chat_response) => {
197            println!("āœ… Got response with potential tool calls!");
198
199            // Check if the AI wants to call our weather function
200            let tool_calls = chat_response.tool_calls();
201            if !tool_calls.is_empty() {
202                println!("šŸ”§ AI requested tool calls:");
203                for tool_call in tool_calls {
204                    let function_name = tool_call.function_name();
205                    println!("   Function: {function_name}");
206                    let function_args = tool_call.function_arguments();
207                    println!("   Arguments: {function_args}");
208
209                    // In a real app, you'd execute the function here
210                    // and send the result back to the AI
211                    println!("   šŸ’” In a real app, you'd call your weather API here");
212                }
213            } else if let Some(content) = chat_response.content() {
214                println!("šŸ¤– AI: {content}");
215            }
216        }
217        Err(e) => {
218            println!("āŒ Tool calling example failed: {e}");
219        }
220    }
221
222    // ==========================================
223    // 6. ERROR HANDLING PATTERNS
224    // ==========================================
225
226    println!("\nšŸ“‹ Step 6: Error handling patterns");
227
228    // Show how to handle different types of errors gracefully
229    let builder = client.chat_simple(""); // Empty message might cause an error
230    let bad_response = client.send_chat(builder).await;
231
232    match bad_response {
233        Ok(response) => {
234            println!("āœ… Unexpectedly succeeded with empty message");
235            if let Some(content) = response.content() {
236                println!("šŸ¤– AI: {content}");
237            }
238        }
239        Err(Error::Api {
240            status, message, ..
241        }) => {
242            println!("āŒ API Error (HTTP {status}):");
243            println!("   Message: {message}");
244            println!("šŸ’” This is normal - we sent an invalid request");
245        }
246        Err(Error::RateLimit { .. }) => {
247            println!("āŒ Rate limited - you're sending requests too fast");
248            println!("šŸ’” In a real app, you'd implement exponential backoff");
249        }
250        Err(Error::Http(_)) => {
251            println!("āŒ HTTP/Network error");
252            println!("šŸ’” Check your internet connection and API key");
253        }
254        Err(e) => {
255            println!("āŒ Other error: {e}");
256        }
257    }
258
259    // ==========================================
260    // 7. COMPLETE REAL-WORLD EXAMPLE
261    // ==========================================
262
263    println!("\nšŸ“‹ Step 7: Complete real-world example");
264    println!("Building a simple AI assistant that can:");
265    println!("- Answer questions with context");
266    println!("- Track conversation costs");
267    println!("- Handle errors gracefully");
268
269    let mut total_tokens = 0;
270
271    // Simulate a conversation with context and cost tracking
272    let questions = [
273        "What is the capital of France?",
274        "What's special about that city?",
275        "How many people live there?",
276    ];
277
278    for (i, question) in questions.iter().enumerate() {
279        println!("\nšŸ‘¤ User: {question}");
280
281        let builder = client
282            .responses()
283            .system(
284                "You are a knowledgeable geography expert. Keep answers concise but informative.",
285            )
286            .user(*question)
287            .temperature(0.1); // Lower temperature for more factual responses
288        let response = client.send_responses(builder).await;
289
290        match response {
291            Ok(chat_response) => {
292                if let Some(content) = chat_response.content() {
293                    println!("šŸ¤– Assistant: {content}");
294                }
295
296                // Track token usage for cost monitoring
297                if let Some(usage) = chat_response.usage() {
298                    total_tokens += usage.total_tokens;
299                    println!(
300                        "šŸ“Š This exchange: {} tokens (Running total: {})",
301                        usage.total_tokens, total_tokens
302                    );
303                }
304            }
305            Err(e) => {
306                println!("āŒ Question {} failed: {}", i + 1, e);
307                // In a real app, you might retry or log this error
308            }
309        }
310    }
311
312    // ==========================================
313    // 8. WRAP UP & NEXT STEPS
314    // ==========================================
315
316    println!("\nšŸŽ‰ Quickstart Complete!");
317    println!("======================");
318    println!("You've successfully:");
319    println!("āœ… Created an OpenAI client");
320    println!("āœ… Made basic chat completions");
321    println!("āœ… Used streaming responses");
322    println!("āœ… Implemented tool/function calling");
323    println!("āœ… Handled errors gracefully");
324    println!("āœ… Built a complete conversational AI");
325    println!("\nšŸ“Š Total tokens used in examples: {total_tokens}");
326    println!(
327        "šŸ’° Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328        f64::from(total_tokens) * 0.03 / 1000.0
329    );
330
331    println!("\nšŸš€ Next Steps:");
332    println!("- Check out other examples in the examples/ directory");
333    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334    println!("- Explore advanced features like vision, audio, and assistants");
335    println!("- Build your own AI-powered applications!");
336
337    Ok(())
338}
339
340/// Example helper function demonstrating custom error handling.
341///
342/// In real applications, you might want to wrap API calls in functions
343/// like this to add custom retry logic, logging, or error transformation.
344#[allow(dead_code)]
345async fn robust_chat_call(client: &Client, message: &str) -> Result<String> {
346    const MAX_RETRIES: usize = 3;
347    let mut last_error = None;
348
349    for attempt in 1..=MAX_RETRIES {
350        let builder = client.chat_simple(message);
351        match client.send_chat(builder).await {
352            Ok(response) => {
353                if let Some(content) = response.content() {
354                    return Ok(content.to_string());
355                }
356                return Err(Error::Api {
357                    status: 200,
358                    message: "No content in response".to_string(),
359                    error_type: None,
360                    error_code: None,
361                });
362            }
363            Err(Error::RateLimit { .. }) if attempt < MAX_RETRIES => {
364                // Exponential backoff for rate limits
365                let delay = std::time::Duration::from_millis(1000 * attempt as u64);
366                tokio::time::sleep(delay).await;
367                // Brief delay before retry
368                tokio::time::sleep(std::time::Duration::from_millis(500)).await;
369            }
370            Err(e) => {
371                last_error = Some(e);
372                if attempt < MAX_RETRIES {
373                    // Brief delay before retry
374                    tokio::time::sleep(std::time::Duration::from_millis(500)).await;
375                }
376            }
377        }
378    }
379
380    Err(last_error.unwrap_or_else(|| Error::Api {
381        status: 0,
382        message: "Unknown error after retries".to_string(),
383        error_type: None,
384        error_code: None,
385    }))
386}