1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
//! # `OpenAI` Ergonomic Quickstart Guide
//!
//! This example demonstrates how to get started with the openai-ergonomic crate
//! in under 5 minutes. It covers the most common use cases and patterns you'll
//! need for building AI-powered applications.
//!
//! ## Setup Instructions
//!
//! 1. Set your `OpenAI` API key:
//! ```bash
//! export OPENAI_API_KEY="sk-your-api-key-here"
//! ```
//!
//! 2. Run this example:
//! ```bash
//! cargo run --example quickstart
//! ```
//!
//! ## What This Example Shows
//!
//! - Environment setup and client creation
//! - Basic chat completions
//! - Streaming responses (real-time text generation)
//! - Function/tool calling for external data
//! - Robust error handling patterns
//! - Usage tracking and cost monitoring
//!
//! This example is designed to be your first step into building with `OpenAI`.
use futures::StreamExt;
use openai_ergonomic::responses::tool_function;
use openai_ergonomic::{Client, Error, Response, Result, ToolCallExt};
use serde_json::json;
use std::io::{self, Write};
#[tokio::main]
#[allow(clippy::too_many_lines)] // This is an example showing many features
async fn main() -> Result<()> {
    // Initialize logging to see what's happening under the hood.
    // The "info" filter can be overridden via the RUST_LOG environment variable.
    tracing_subscriber::fmt().with_env_filter("info").init();
    println!(" OpenAI Ergonomic Quickstart");
    println!("==============================\n");
    // ==========================================
    // 1. ENVIRONMENT SETUP & CLIENT CREATION
    // ==========================================
    println!(" Step 1: Setting up the client");
    // The simplest way to get started - reads OPENAI_API_KEY from environment.
    // `from_env` returns a builder; `build()` produces the usable client.
    let client = match Client::from_env() {
        Ok(client_builder) => {
            println!(" Client created successfully!");
            client_builder.build()
        }
        Err(e) => {
            // Without a valid key nothing below can work, so bail out early.
            eprintln!(" Failed to create client: {e}");
            eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
            eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
            return Err(e);
        }
    };
    // ==========================================
    // 2. BASIC CHAT COMPLETION
    // ==========================================
    println!("\n Step 2: Basic chat completion");
    // The simplest way to get a response from ChatGPT: a single user message.
    let builder = client.chat_simple("What is Rust programming language in one sentence?");
    let response = client.send_chat(builder).await;
    match response {
        Ok(chat_response) => {
            println!(" Got response!");
            if let Some(content) = chat_response.content() {
                println!(" AI: {content}");
            }
            // Show usage information for cost tracking.
            if let Some(usage) = &chat_response.inner().usage {
                println!(
                    " Usage: {} prompt + {} completion = {} total tokens",
                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
                );
            }
        }
        Err(e) => {
            println!(" Chat completion failed: {e}");
            // Continue with other examples even if this one fails
        }
    }
    // ==========================================
    // 3. CHAT WITH SYSTEM MESSAGE
    // ==========================================
    println!("\n Step 3: Chat with system context");
    // System messages help set the AI's behavior and context before the
    // user's question is considered.
    let builder = client.chat_with_system(
        "You are a helpful coding mentor who explains things simply",
        "Explain what a HashMap is in Rust",
    );
    let response = client.send_chat(builder).await;
    match response {
        Ok(chat_response) => {
            println!(" Got contextual response!");
            if let Some(content) = chat_response.content() {
                println!(" Mentor: {content}");
            }
        }
        Err(e) => {
            println!(" Contextual chat failed: {e}");
        }
    }
    // ==========================================
    // 4. STREAMING RESPONSES
    // ==========================================
    println!("\n Step 4: Streaming response (real-time)");
    // Streaming lets you see the response as it's being generated.
    // This is great for chatbots and interactive applications.
    print!(" AI is typing: ");
    // Flush so the prompt appears before the first streamed chunk.
    io::stdout().flush().unwrap();
    let builder = client
        .responses()
        .user("Write a short haiku about programming")
        .temperature(0.7);
    // Use send_responses_stream for real streaming
    let stream_result = client.send_responses_stream(builder).await;
    match stream_result {
        Ok(mut stream) => {
            // Process each chunk as it arrives; chunks may carry no content.
            while let Some(chunk_result) = stream.next().await {
                match chunk_result {
                    Ok(chunk) => {
                        if let Some(content) = chunk.content() {
                            print!("{content}");
                            // Flush per chunk so text appears immediately.
                            io::stdout().flush().unwrap();
                        }
                    }
                    Err(e) => {
                        println!("\n Error processing chunk: {e}");
                        break;
                    }
                }
            }
            println!(); // New line after streaming
        }
        Err(e) => {
            println!("\n Failed to get streaming response: {e}");
        }
    }
    // ==========================================
    // 5. FUNCTION/TOOL CALLING
    // ==========================================
    println!("\n Step 5: Using tools/functions");
    // Tools let the AI call external functions to get real data.
    // Here we define a weather function as an example; the schema below is
    // JSON Schema describing the function's parameters.
    let weather_tool = tool_function(
        "get_current_weather",
        "Get the current weather for a given location",
        json!({
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name, e.g. 'San Francisco, CA'"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "Temperature unit"
                }
            },
            "required": ["location"]
        }),
    );
    let builder = client
        .responses()
        .user("What's the weather like in Tokyo?")
        .tool(weather_tool);
    let response = client.send_responses(builder).await;
    match response {
        Ok(chat_response) => {
            println!(" Got response with potential tool calls!");
            // Check if the AI wants to call our weather function
            let tool_calls = chat_response.tool_calls();
            if !tool_calls.is_empty() {
                println!(" AI requested tool calls:");
                for tool_call in tool_calls {
                    let function_name = tool_call.function_name();
                    println!(" Function: {function_name}");
                    let function_args = tool_call.function_arguments();
                    println!(" Arguments: {function_args}");
                    // In a real app, you'd execute the function here
                    // and send the result back to the AI
                    println!(" In a real app, you'd call your weather API here");
                }
            } else if let Some(content) = chat_response.content() {
                // The model answered directly without requesting a tool call.
                println!(" AI: {content}");
            }
        }
        Err(e) => {
            println!(" Tool calling example failed: {e}");
        }
    }
    // ==========================================
    // 6. ERROR HANDLING PATTERNS
    // ==========================================
    println!("\n Step 6: Error handling patterns");
    // Show how to handle different types of errors gracefully by
    // deliberately provoking a failure.
    let builder = client.chat_simple(""); // Empty message might cause an error
    let bad_response = client.send_chat(builder).await;
    match bad_response {
        Ok(response) => {
            println!(" Unexpectedly succeeded with empty message");
            if let Some(content) = response.content() {
                println!(" AI: {content}");
            }
        }
        Err(Error::Api {
            status, message, ..
        }) => {
            // The server rejected the request (4xx/5xx with a structured body).
            println!(" API Error (HTTP {status}):");
            println!(" Message: {message}");
            println!(" This is normal - we sent an invalid request");
        }
        Err(Error::RateLimit { .. }) => {
            println!(" Rate limited - you're sending requests too fast");
            println!(" In a real app, you'd implement exponential backoff");
        }
        Err(Error::Http(_)) => {
            // Transport-level failure before any API response was received.
            println!(" HTTP/Network error");
            println!(" Check your internet connection and API key");
        }
        Err(e) => {
            println!(" Other error: {e}");
        }
    }
    // ==========================================
    // 7. COMPLETE REAL-WORLD EXAMPLE
    // ==========================================
    println!("\n Step 7: Complete real-world example");
    println!("Building a simple AI assistant that can:");
    println!("- Answer questions with context");
    println!("- Track conversation costs");
    println!("- Handle errors gracefully");
    // Running token total across the whole conversation, for cost tracking.
    let mut total_tokens = 0;
    // Simulate a conversation with context and cost tracking.
    // NOTE(review): each request is independent here — the follow-up questions
    // ("that city", "there") rely on the model, not on shared history.
    let questions = [
        "What is the capital of France?",
        "What's special about that city?",
        "How many people live there?",
    ];
    for (i, question) in questions.iter().enumerate() {
        println!("\n User: {question}");
        let builder = client
            .responses()
            .system(
                "You are a knowledgeable geography expert. Keep answers concise but informative.",
            )
            .user(*question)
            .temperature(0.1); // Lower temperature for more factual responses
        let response = client.send_responses(builder).await;
        match response {
            Ok(chat_response) => {
                if let Some(content) = chat_response.content() {
                    println!(" Assistant: {content}");
                }
                // Track token usage for cost monitoring
                if let Some(usage) = chat_response.usage() {
                    total_tokens += usage.total_tokens;
                    println!(
                        " This exchange: {} tokens (Running total: {})",
                        usage.total_tokens, total_tokens
                    );
                }
            }
            Err(e) => {
                println!(" Question {} failed: {}", i + 1, e);
                // In a real app, you might retry or log this error
            }
        }
    }
    // ==========================================
    // 8. WRAP UP & NEXT STEPS
    // ==========================================
    println!("\n Quickstart Complete!");
    println!("======================");
    println!("You've successfully:");
    println!(" Created an OpenAI client");
    println!(" Made basic chat completions");
    println!(" Used streaming responses");
    println!(" Implemented tool/function calling");
    println!(" Handled errors gracefully");
    println!(" Built a complete conversational AI");
    println!("\n Total tokens used in examples: {total_tokens}");
    println!(
        // Rough ballpark only: $0.03 / 1K tokens, a historical GPT-4 rate.
        " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
        f64::from(total_tokens) * 0.03 / 1000.0
    );
    println!("\n Next Steps:");
    println!("- Check out other examples in the examples/ directory");
    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
    println!("- Explore advanced features like vision, audio, and assistants");
    println!("- Build your own AI-powered applications!");
    Ok(())
}
/// Example helper function demonstrating custom error handling.
///
/// Wraps a chat call in a retry loop: rate-limit errors back off
/// exponentially (1s, 2s, 4s, ...), while other transient failures wait a
/// short fixed delay before retrying. Up to `MAX_RETRIES` attempts are made.
///
/// In real applications, you might want to wrap API calls in functions
/// like this to add custom retry logic, logging, or error transformation.
///
/// # Errors
///
/// Returns the last error observed if every attempt fails, or an
/// `Error::Api` when a successful response contains no content.
#[allow(dead_code)]
async fn robust_chat_call(client: &Client, message: &str) -> Result<String> {
    const MAX_RETRIES: usize = 3;
    let mut last_error = None;
    for attempt in 1..=MAX_RETRIES {
        let builder = client.chat_simple(message);
        match client.send_chat(builder).await {
            Ok(response) => {
                if let Some(content) = response.content() {
                    return Ok(content.to_string());
                }
                // A 2xx reply with no content is still a failure for callers
                // expecting text, so surface it as an API error.
                return Err(Error::Api {
                    status: 200,
                    message: "No content in response".to_string(),
                    error_type: None,
                    error_code: None,
                });
            }
            Err(Error::RateLimit { .. }) if attempt < MAX_RETRIES => {
                // True exponential backoff for rate limits: 1s, then 2s, 4s...
                // (attempt <= MAX_RETRIES, so the shift cannot overflow).
                let delay = std::time::Duration::from_millis(1000u64 << (attempt - 1));
                tokio::time::sleep(delay).await;
            }
            Err(e) => {
                last_error = Some(e);
                if attempt < MAX_RETRIES {
                    // Brief fixed delay before retrying other errors.
                    tokio::time::sleep(std::time::Duration::from_millis(500)).await;
                }
            }
        }
    }
    // All attempts failed; report the last error we saw. The fallback arm is
    // unreachable in practice (the loop always records an error before
    // exiting) but keeps the function total.
    Err(last_error.unwrap_or_else(|| Error::Api {
        status: 0,
        message: "Unknown error after retries".to_string(),
        error_type: None,
        error_code: None,
    }))
}