pub struct ChatCompletionResponseWrapper { /* private fields */ }
Wrapper for chat completion responses with ergonomic helpers.
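A minimal usage sketch, assembled from the repository examples further down this page; the `Client` setup and `send_chat` call are assumed to behave as those examples show.

async fn show(client: &Client) -> Result<()> {
    // `send_chat` is assumed to return a `ChatCompletionResponseWrapper`,
    // matching the repository examples below.
    let response = client.send_chat(client.chat_simple("Hello")).await?;
    if let Some(content) = response.content() {
        println!("Assistant: {content}");
    }
    for tool_call in response.tool_calls() {
        println!("Tool requested: {}", tool_call.function_name());
    }
    Ok(())
}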
Implementations
impl ChatCompletionResponseWrapper
pub fn new(response: CreateChatCompletionResponse) -> Self
Create a new response wrapper.
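A minimal sketch, assuming `raw` is a `CreateChatCompletionResponse` obtained from a lower-level generated-client call; the import paths here are assumptions, not guaranteed by this page.

use openai_client_base::models::CreateChatCompletionResponse; // path assumed
use openai_ergonomic::ChatCompletionResponseWrapper;          // path assumed

// Wrap a raw generated-client response to gain the ergonomic helpers.
fn wrap(raw: CreateChatCompletionResponse) -> ChatCompletionResponseWrapper {
    ChatCompletionResponseWrapper::new(raw)
}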
pub fn with_base_url(
    response: CreateChatCompletionResponse,
    base_url: String,
) -> Self
Create a response wrapper with a base URL for generating links.
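A sketch of the same construction with an explicit base URL; the URL value is only illustrative.

// Record the base URL alongside the response so link-generating helpers
// know which host the response came from.
fn wrap_with_links(raw: CreateChatCompletionResponse) -> ChatCompletionResponseWrapper {
    ChatCompletionResponseWrapper::with_base_url(raw, "https://api.openai.com/v1".to_string())
}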
pub fn content(&self) -> Option<&str>
Get the first message content from the response.
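A short sketch of the common pattern; complete programs follow in the repository examples below.

fn print_text(response: &ChatCompletionResponseWrapper) {
    // `content()` is `None` when the first choice carries no text,
    // e.g. when the model answered only with tool calls.
    match response.content() {
        Some(text) => println!("Assistant: {text}"),
        None => println!("(no text content; check tool_calls())"),
    }
}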
Examples found in repository
examples/error_handling.rs (line 68)
57async fn basic_error_handling() {
58 let client = match Client::from_env() {
59 Ok(client_builder) => client_builder.build(),
60 Err(e) => {
61 println!("Failed to create client: {}", e);
62 return;
63 }
64 };
65
66 match client.send_chat(client.chat_simple("Hello")).await {
67 Ok(response) => {
68 if let Some(content) = response.content() {
69 println!("Success: {}", content);
70 } else {
71 println!("Success: (no content)");
72 }
73 }
74 Err(e) => println!("Error: {}", e),
75 }
76}
77
78async fn pattern_matching_errors() {
79 let Ok(client_builder) = Client::from_env() else {
80 return;
81 };
82 let client = client_builder.build();
83
84 // Simulate various errors by using invalid parameters
85 let builder = client.chat().user("test");
86 let result = client.send_chat(builder).await;
87
88 match result {
89 Ok(_) => println!("Unexpected success"),
90 Err(e) => match e {
91 Error::Api { message, .. } => {
92 println!("API Error: {}", message);
93 }
94 Error::RateLimit(message) => {
95 println!("Rate limited: {}", message);
96 }
97 Error::Authentication(message) => {
98 println!("Authentication failed: {}", message);
99 }
100 Error::Http(source) => {
101 println!("Network error: {}", source);
102 }
103 Error::Json(source) => {
104 println!("Serialization error: {}", source);
105 }
106 Error::Stream(message) => {
107 println!("Stream error: {}", message);
108 }
109 Error::InvalidRequest(message) => {
110 println!("Invalid request: {}", message);
111 }
112 Error::Config(message) => {
113 println!("Configuration error: {}", message);
114 }
115 _ => {
116 println!("Other error: {}", e);
117 }
118 },
119 }
120}
121
122async fn rate_limit_handling() {
123 const MAX_RETRIES: u32 = 3;
124
125 let Ok(client_builder) = Client::from_env() else {
126 return;
127 };
128 let client = client_builder.build();
129
130 // Retry logic for rate limiting
131 let mut retries = 0;
132
133 loop {
134 match client.send_chat(client.chat_simple("Hello")).await {
135 Ok(response) => {
136 if let Some(content) = response.content() {
137 println!("Success: {}", content);
138 } else {
139 println!("Success: (no content)");
140 }
141 break;
142 }
143 Err(Error::RateLimit(_message)) => {
144 if retries >= MAX_RETRIES {
145 println!("Max retries exceeded");
146 break;
147 }
148
149 let wait_time = Duration::from_secs(1);
150 println!("Rate limited. Waiting {:?} before retry...", wait_time);
151 sleep(wait_time).await;
152 retries += 1;
153 }
154 Err(e) => {
155 println!("Other error: {}", e);
156 break;
157 }
158 }
159 }
160}
161
162async fn token_limit_handling() {
163 let Ok(client_builder) = Client::from_env() else {
164 return;
165 };
166 let client = client_builder.build();
167
168 // Generate a very long prompt that might exceed token limits
169 let long_text = "Lorem ipsum ".repeat(10000);
170
171 match client.send_chat(client.chat_simple(&long_text)).await {
172 Ok(_) => println!("Processed long text successfully"),
173 Err(Error::InvalidRequest(message)) if message.contains("token") => {
174 println!("Token limit issue: {}", message);
175
176 // Retry with truncated text
177 let truncated = &long_text[..1000];
178 println!("Retrying with truncated text...");
179
180 match client.send_chat(client.chat_simple(truncated)).await {
181 Ok(response) => {
182 if let Some(content) = response.content() {
183 println!("Success with truncated: {}", content);
184 } else {
185 println!("Success with truncated: (no content)");
186 }
187 }
188 Err(e) => println!("Still failed: {}", e),
189 }
190 }
191 Err(e) => println!("Other error: {}", e),
192 }
193}
194
195async fn auth_error_handling() -> Result<()> {
196 // Try with invalid API key
197 let config = Config::builder().api_key("invalid-api-key").build();
198 let invalid_client = Client::builder(config)?.build();
199
200 match invalid_client
201 .send_chat(invalid_client.chat_simple("Hello"))
202 .await
203 {
204 Ok(_) => println!("Unexpected success"),
205 Err(Error::Authentication(message)) => {
206 println!("Authentication failed as expected: {}", message);
207
208 // Suggest remediation
209 println!("Suggestions:");
210 println!("1. Check your OPENAI_API_KEY environment variable");
211 println!("2. Verify API key at https://platform.openai.com/api-keys");
212 println!("3. Ensure your API key has necessary permissions");
213 }
214 Err(e) => println!("Unexpected error type: {}", e),
215 }
216
217 Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262 let client = Client::from_env()?.build();
263
264 // Wrap errors with custom context
265 let result = client
266 .send_chat(client.chat_simple("Analyze this data"))
267 .await
268 .map_err(|e| {
269 eprintln!("Context: Failed during data analysis task");
270 eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271 eprintln!("Original error: {}", e);
272 e
273 })?;
274
275 if let Some(content) = result.content() {
276 println!("Result: {}", content);
277 } else {
278 println!("Result: (no content)");
279 }
280 Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284 let client = Client::from_env()?.build();
285
286 // Strategy 1: Fallback to simpler model
287 let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288 println!("Fallback strategy result: {}", result);
289
290 // Strategy 2: Circuit breaker pattern
291 let circuit_breaker = CircuitBreaker::new();
292 if circuit_breaker.is_open() {
293 println!("Circuit breaker is open, skipping API calls");
294 return Ok(());
295 }
296
297 match client.send_chat(client.chat_simple("Test")).await {
298 Ok(response) => {
299 circuit_breaker.record_success();
300 if let Some(content) = response.content() {
301 println!("Circuit breaker success: {}", content);
302 } else {
303 println!("Circuit breaker success: (no content)");
304 }
305 }
306 Err(e) => {
307 circuit_breaker.record_failure();
308 println!("Circuit breaker failure: {}", e);
309 }
310 }
311
312 // Strategy 3: Request hedging (parallel requests with first success wins)
313 let hedge_result = hedged_request(&client).await?;
314 println!("Hedged request result: {}", hedge_result);
315
316 Ok(())
317}
318
319async fn try_with_fallback(client: &Client, primary: &str, _fallback: &str) -> Result<String> {
320 // Try primary model first
321 let builder = client.chat().user("Hello");
322 match client.send_chat(builder).await {
323 Ok(response) => Ok(response.content().unwrap_or("").to_string()),
324 Err(e) => {
325 println!("Primary model failed ({}): {}, trying fallback", primary, e);
326
327 // Try fallback model
328 let fallback_builder = client.chat().user("Hello");
329 client
330 .send_chat(fallback_builder)
331 .await
332 .map(|r| r.content().unwrap_or("").to_string())
333 }
334 }
335}
336
337async fn hedged_request(client: &Client) -> Result<String> {
338 use futures::future::select;
339 use std::pin::pin;
340
341 // Launch two requests in parallel
342 let request1 = async {
343 client
344 .send_chat(client.chat_simple("Hello from request 1"))
345 .await
346 };
347 let request2 = async {
348 client
349 .send_chat(client.chat_simple("Hello from request 2"))
350 .await
351 };
352
353 let fut1 = pin!(request1);
354 let fut2 = pin!(request2);
355
356 // Return first successful response
357 match select(fut1, fut2).await {
358 futures::future::Either::Left((result, _)) => {
359 println!("Request 1 completed first");
360 result.map(|r| r.content().unwrap_or("").to_string())
361 }
362 futures::future::Either::Right((result, _)) => {
363 println!("Request 2 completed first");
364 result.map(|r| r.content().unwrap_or("").to_string())
365 }
366 }
367}
More examples
examples/retry_patterns.rs (line 71)
63async fn simple_retry(client: &Client) -> Result<()> {
64 const MAX_RETRIES: u32 = 3;
65
66 for attempt in 1..=MAX_RETRIES {
67 println!("Attempt {}/{}", attempt, MAX_RETRIES);
68
69 match client.send_chat(client.chat_simple("Hello")).await {
70 Ok(response) => {
71 if let Some(content) = response.content() {
72 println!("Success: {}", content);
73 } else {
74 println!("Success: (no content)");
75 }
76 return Ok(());
77 }
78 Err(e) if attempt < MAX_RETRIES => {
79 println!("Failed (attempt {}): {}. Retrying...", attempt, e);
80 sleep(Duration::from_secs(1)).await;
81 }
82 Err(e) => {
83 println!("All retries exhausted");
84 return Err(e);
85 }
86 }
87 }
88
89 Ok(())
90}
91
92async fn exponential_backoff(client: &Client) -> Result<()> {
93 const MAX_RETRIES: u32 = 5;
94 const BASE_DELAY: Duration = Duration::from_millis(100);
95 const MAX_DELAY: Duration = Duration::from_secs(32);
96
97 let mut delay = BASE_DELAY;
98
99 for attempt in 1..=MAX_RETRIES {
100 match client
101 .send_chat(client.chat_simple("Hello with backoff"))
102 .await
103 {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Success after {} attempts: {}", attempt, content);
107 } else {
108 println!("Success after {} attempts: (no content)", attempt);
109 }
110 return Ok(());
111 }
112 Err(Error::RateLimit(_message)) => {
113 // Use default delay for rate limiting
114 let wait_time = delay;
115 println!(
116 "Rate limited (attempt {}). Waiting {:?}...",
117 attempt, wait_time
118 );
119 sleep(wait_time).await;
120
121 // Double the delay for next attempt
122 delay = (delay * 2).min(MAX_DELAY);
123 }
124 Err(e) if attempt < MAX_RETRIES => {
125 println!("Error (attempt {}): {}. Waiting {:?}...", attempt, e, delay);
126 sleep(delay).await;
127
128 // Exponential increase with cap
129 delay = (delay * 2).min(MAX_DELAY);
130 }
131 Err(e) => return Err(e),
132 }
133 }
134
135 Ok(())
136}
137
138async fn retry_with_jitter(client: &Client) -> Result<()> {
139 const MAX_RETRIES: u32 = 5;
140 const BASE_DELAY_MS: u64 = 100;
141
142 for attempt in 1..=MAX_RETRIES {
143 match client
144 .send_chat(client.chat_simple("Hello with jitter"))
145 .await
146 {
147 Ok(response) => {
148 if let Some(content) = response.content() {
149 println!("Success: {}", content);
150 } else {
151 println!("Success: (no content)");
152 }
153 return Ok(());
154 }
155 Err(e) if attempt < MAX_RETRIES => {
156 // Calculate delay with jitter using random() instead of thread_rng for Send compatibility
157 let base = BASE_DELAY_MS * 2_u64.pow(attempt - 1);
158 let jitter = rand::random::<u64>() % (base / 2 + 1);
159 let delay = Duration::from_millis(base + jitter);
160
161 println!(
162 "Attempt {} failed: {}. Retrying in {:?} (with jitter)...",
163 attempt, e, delay
164 );
165 sleep(delay).await;
166 }
167 Err(e) => return Err(e),
168 }
169 }
170
171 Ok(())
172}
173
174async fn circuit_breaker_example(client: &Client) -> Result<()> {
175 let circuit_breaker = Arc::new(CircuitBreaker::new(3, Duration::from_secs(5)));
176
177 for i in 1..=10 {
178 println!("Request {}: ", i);
179
180 // Check circuit state
181 match circuit_breaker
182 .call(|| async {
183 client
184 .send_chat(client.chat_simple("Circuit breaker test"))
185 .await
186 })
187 .await
188 {
189 Ok(response) => {
190 if let Some(content) = response.content() {
191 println!(" Success: {}", content);
192 } else {
193 println!(" Success: (no content)");
194 }
195 }
196 Err(CircuitBreakerError::Open) => {
197 println!(" Circuit is OPEN - skipping request");
198 sleep(Duration::from_secs(1)).await;
199 }
200 Err(CircuitBreakerError::RequestFailed(e)) => {
201 println!(" Request failed: {}", e);
202 }
203 }
204
205 // Small delay between requests
206 sleep(Duration::from_millis(500)).await;
207 }
208
209 Ok(())
210}
211
212async fn timeout_management(client: &Client) {
213 // Example 1: Per-request timeout
214 println!("Per-request timeout:");
215 match timeout(
216 Duration::from_secs(5),
217 client.send_chat(client.chat_simple("Hello")),
218 )
219 .await
220 {
221 Ok(Ok(response)) => {
222 if let Some(content) = response.content() {
223 println!("Response received: {}", content);
224 } else {
225 println!("Response received: (no content)");
226 }
227 }
228 Ok(Err(e)) => println!("API error: {}", e),
229 Err(_) => println!("Request timed out after 5 seconds"),
230 }
231
232 // Example 2: Deadline-based timeout
233 println!("\nDeadline-based timeout:");
234 let deadline = Instant::now() + Duration::from_secs(10);
235
236 while Instant::now() < deadline {
237 let remaining = deadline - Instant::now();
238 println!("Time remaining: {:?}", remaining);
239
240 match timeout(
241 remaining,
242 client.send_chat(client.chat_simple("Quick response")),
243 )
244 .await
245 {
246 Ok(Ok(response)) => {
247 if let Some(content) = response.content() {
248 println!("Got response: {}", content);
249 } else {
250 println!("Got response: (no content)");
251 }
252 break;
253 }
254 Ok(Err(e)) => {
255 println!("Error: {}. Retrying...", e);
256 sleep(Duration::from_secs(1)).await;
257 }
258 Err(_) => {
259 println!("Deadline exceeded");
260 break;
261 }
262 }
263 }
264
265 // Example 3: Adaptive timeout
266 println!("\nAdaptive timeout:");
267 let mut adaptive_timeout = Duration::from_secs(2);
268
269 for _attempt in 1..=3 {
270 let start = Instant::now();
271
272 match timeout(
273 adaptive_timeout,
274 client.send_chat(client.chat_simple("Adaptive")),
275 )
276 .await
277 {
278 Ok(Ok(response)) => {
279 let elapsed = start.elapsed();
280 println!(
281 "Success in {:?}. Next timeout would be {:?}.",
282 elapsed,
283 elapsed * 2
284 );
285 // Adjust timeout based on actual response time for potential future requests
286 // adaptive_timeout = elapsed * 2; // Not used since we break out of the loop
287 if let Some(content) = response.content() {
288 println!("Response: {}", content);
289 } else {
290 println!("Response: (no content)");
291 }
292 break;
293 }
294 Ok(Err(e)) => println!("Error: {}", e),
295 Err(_) => {
296 println!(
297 "Timeout after {:?}. Increasing for next attempt.",
298 adaptive_timeout
299 );
300 adaptive_timeout *= 2;
301 }
302 }
303 }
304}
305
306async fn request_hedging(client: &Client) -> Result<()> {
307 use futures::future::{select, Either};
308 use std::pin::pin;
309
310 println!("Launching hedged requests...");
311
312 // Launch multiple requests with staggered starts
313 let request1 = async {
314 println!("Request 1 started");
315 client
316 .send_chat(client.chat_simple("Hedged request 1"))
317 .await
318 };
319
320 let request2 = async {
321 sleep(Duration::from_millis(200)).await;
322 println!("Request 2 started (200ms delay)");
323 client
324 .send_chat(client.chat_simple("Hedged request 2"))
325 .await
326 };
327
328 let fut1 = pin!(request1);
329 let fut2 = pin!(request2);
330
331 // Return first successful response
332 match select(fut1, fut2).await {
333 Either::Left((result, _)) => {
334 println!("Request 1 won the race");
335 result.map(|r| {
336 if let Some(content) = r.content() {
337 println!("Result: {}", content);
338 } else {
339 println!("Result: (no content)");
340 }
341 })
342 }
343 Either::Right((result, _)) => {
344 println!("Request 2 won the race");
345 result.map(|r| {
346 if let Some(content) = r.content() {
347 println!("Result: {}", content);
348 } else {
349 println!("Result: (no content)");
350 }
351 })
352 }
353 }
354}
355
356async fn fallback_chain(client: &Client) -> Result<()> {
357 // Define fallback chain
358 let strategies = vec![
359 ("GPT-4o", "gpt-4o", 1024),
360 ("GPT-4o-mini", "gpt-4o-mini", 512),
361 ("GPT-3.5", "gpt-3.5-turbo", 256),
362 ];
363
364 let prompt = "Explain quantum computing";
365
366 for (name, _model, max_tokens) in strategies {
367 println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369 let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370 match client.send_chat(builder).await {
371 Ok(response) => {
372 println!("Success with {}", name);
373 if let Some(content) = response.content() {
374 println!("Response: {}...", &content[..content.len().min(100)]);
375 }
376 return Ok(());
377 }
378 Err(e) => {
379 println!("Failed with {}: {}", name, e);
380 }
381 }
382 }
383
384 println!("All fallback strategies exhausted");
385 Ok(())
386}
387
388async fn idempotency_example(_client: &Client) -> Result<()> {
389 // Generate idempotency key
390 let idempotency_key = generate_idempotency_key();
391 println!("Using idempotency key: {}", idempotency_key);
392
393 // Simulate retrying the same request
394 for attempt in 1..=3 {
395 println!("\nAttempt {} with same idempotency key", attempt);
396
397 // In a real implementation, you'd pass the idempotency key in headers
398 let mut headers = std::collections::HashMap::new();
399 headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400 println!(" Would send {} headers", headers.len());
401
402 let config = Config::builder()
403 .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404 .build();
405
406 // Note: Headers (including idempotency key) are not yet supported in current API
407
408 let client_with_idempotency = Client::builder(config)?.build();
409
410 match client_with_idempotency
411 .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412 .await
413 {
414 Ok(response) => {
415 if let Some(content) = response.content() {
416 println!("Response: {}", content);
417 } else {
418 println!("Response: (no content)");
419 }
420 // Server should return same response for same idempotency key
421 }
422 Err(e) => println!("Error: {}", e),
423 }
424
425 if attempt < 3 {
426 sleep(Duration::from_secs(1)).await;
427 }
428 }
429
430 Ok(())
431}
examples/tool_calling.rs (line 204)
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212 // This example demonstrates proper multi-turn tool calling with full message history
213
214 println!("=== Conversation with Tools (Full Implementation) ===");
215
216 // Initialize the conversation
217 let mut builder = client
218 .chat()
219 .user("What's the weather in Tokyo?")
220 .tools(vec![get_weather_tool()]);
221
222 // First request - the model will call the tool
223 let response = client.send_chat(builder.clone()).await?;
224
225 // Check for tool calls
226 let tool_calls = response.tool_calls();
227 if !tool_calls.is_empty() {
228 println!("Step 1: Model requests tool call");
229 for tool_call in &tool_calls {
230 println!(" Tool: {}", tool_call.function_name());
231 println!(" Args: {}", tool_call.function_arguments());
232 }
233
234 // IMPORTANT: Add the assistant's response (with tool calls) to the history
235 // This is the key step for maintaining proper conversation context!
236 builder = builder.assistant_with_tool_calls(
237 response.content().unwrap_or(""),
238 tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239 );
240
241 // Execute the tools and add results
242 println!("\nStep 2: Execute tools and add results to conversation");
243 for tool_call in tool_calls {
244 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245 let result = execute_weather_function(params)?;
246 println!(" Tool result: {}", result);
247
248 // Add the tool result to the conversation history
249 builder = builder.tool(tool_call.id(), result);
250 }
251
252 // Send the follow-up request with tool results
253 println!("\nStep 3: Send follow-up request with tool results");
254 let final_response = client
255 .send_chat(builder.tools(vec![get_weather_tool()]))
256 .await?;
257
258 if let Some(content) = final_response.content() {
259 println!(" Final assistant response: {}", content);
260 }
261 }
262
263 println!("\nNote: This demonstrates the complete tool calling loop with proper");
264 println!("message history management using assistant_with_tool_calls()");
265
266 Ok(())
267}examples/auth_patterns.rs (line 82)
61async fn env_var_auth() -> Result<()> {
62 // Standard environment variables:
63 // - OPENAI_API_KEY: Your API key
64 // - OPENAI_ORG_ID: Optional organization ID
65 // - OPENAI_PROJECT_ID: Optional project ID
66 // - OPENAI_BASE_URL: Optional custom base URL
67
68 // Check if environment variables are set
69 if env::var("OPENAI_API_KEY").is_err() {
70 println!("Warning: OPENAI_API_KEY not set");
71 println!("Set it with: export OPENAI_API_KEY=your-key-here");
72 return Ok(());
73 }
74
75 // Create client from environment
76 let client = Client::from_env()?.build();
77 println!("Client created from environment variables");
78
79 // Test the client
80 match client.send_chat(client.chat_simple("Hello")).await {
81 Ok(response) => {
82 if let Some(content) = response.content() {
83 println!("Response: {}", content);
84 } else {
85 println!("Response: (no content)");
86 }
87 }
88 Err(e) => println!("Error: {}", e),
89 }
90
91 Ok(())
92}
93
94async fn direct_api_key() -> Result<()> {
95 // Create client with direct API key
96 let api_key = "sk-your-api-key-here"; // Replace with actual key
97 let config = Config::builder().api_key(api_key).build();
98 let client = Client::builder(config)?.build();
99
100 println!("Client created with direct API key");
101
102 // Note: This will fail with invalid key
103 match client.send_chat(client.chat_simple("Hello")).await {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Response: {}", content);
107 } else {
108 println!("Response: (no content)");
109 }
110 }
111 Err(e) => println!("Expected error with demo key: {}", e),
112 }
113
114 Ok(())
115}
examples/models.rs (line 190)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160 // Task-specific model recommendations
161 let task_models = vec![
162 ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163 ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164 ("Code generation", "gpt-4o", "Excellent code understanding"),
165 ("Vision tasks", "gpt-4o", "Native vision support"),
166 (
167 "Quick responses",
168 "gpt-4o-mini",
169 "Low latency, good quality",
170 ),
171 (
172 "Bulk processing",
173 "gpt-3.5-turbo",
174 "Best cost/performance ratio",
175 ),
176 ];
177
178 for (task, model, reason) in task_models {
179 println!("Task: {}", task);
180 println!(" Recommended: {}", model);
181 println!(" Reason: {}", reason);
182
183 // Demo the model
184 let builder = client
185 .chat()
186 .user(format!("Say 'Hello from {}'", model))
187 .max_completion_tokens(10);
188 let response = client.send_chat(builder).await?;
189
190 if let Some(content) = response.content() {
191 println!(" Response: {}\n", content);
192 }
193 }
194
195 Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199 let models = get_model_registry();
200 let test_prompt = "Explain the theory of relativity in one sentence";
201 let estimated_input_tokens = 15;
202 let estimated_output_tokens = 50;
203
204 println!("Cost comparison for same task:");
205 println!("Prompt: '{}'\n", test_prompt);
206
207 let mut costs = Vec::new();
208
209 for (name, info) in &models {
210 if !info.deprecated {
211 let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212 let output_cost =
213 (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214 let total_cost = input_cost + output_cost;
215
216 costs.push((name.clone(), total_cost));
217 }
218 }
219
220 costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222 println!("{:<20} {:>15}", "Model", "Estimated Cost");
223 println!("{:-<35}", "");
224 for (model, cost) in costs {
225 println!("{:<20} ${:>14.6}", model, cost);
226 }
227
228 // Demonstrate cheapest vs best
229 println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230 let builder = client.chat().user(test_prompt);
231 let cheap_response = client.send_chat(builder).await?;
232
233 if let Some(content) = cheap_response.content() {
234 println!("Response: {}", content);
235 }
236
237 Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241 use std::time::Instant;
242
243 let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244 let test_prompt = "Write a haiku about programming";
245
246 println!("Performance comparison:");
247 println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248 println!("{:-<45}", "");
249
250 for model in models_to_test {
251 let start = Instant::now();
252
253 let builder = client.chat().user(test_prompt);
254 let response = client.send_chat(builder).await?;
255
256 let elapsed = start.elapsed();
257
258 if let Some(usage) = response.usage() {
259 let total_tokens = f64::from(usage.total_tokens);
260 let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262 println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263 }
264 }
265
266 Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270 // Handle deprecated model migration
271 let deprecated_mappings = HashMap::from([
272 ("text-davinci-003", "gpt-3.5-turbo"),
273 ("gpt-4-32k", "gpt-4o"),
274 ("gpt-4-vision-preview", "gpt-4o"),
275 ]);
276
277 let requested_model = "text-davinci-003"; // Deprecated model
278
279 if let Some(replacement) = deprecated_mappings.get(requested_model) {
280 println!(
281 "Warning: {} is deprecated. Using {} instead.",
282 requested_model, replacement
283 );
284
285 let builder = client.chat().user("Hello from migrated model");
286 let response = client.send_chat(builder).await?;
287
288 if let Some(content) = response.content() {
289 println!("Response from {}: {}", replacement, content);
290 }
291 }
292
293 Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297 // Select model based on runtime conditions
298
299 #[derive(Debug)]
300 struct RequestContext {
301 urgency: Urgency,
302 complexity: Complexity,
303 budget: Budget,
304 needs_vision: bool,
305 }
306
307 #[derive(Debug)]
308 enum Urgency {
309 Low,
310 Medium,
311 High,
312 }
313
314 #[derive(Debug)]
315 enum Complexity {
316 Simple,
317 Moderate,
318 Complex,
319 }
320
321 #[derive(Debug)]
322 enum Budget {
323 Tight,
324 Normal,
325 Flexible,
326 }
327
328 const fn select_model(ctx: &RequestContext) -> &'static str {
329 match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330 // High urgency + simple = fast cheap model, or tight budget = cheapest
331 (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333 // Complex + flexible budget = best model
334 (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336 // Vision required
337 _ if ctx.needs_vision => "gpt-4o",
338
339 // Default balanced choice
340 _ => "gpt-4o-mini",
341 }
342 }
343
344 // Example contexts
345 let contexts = [
346 RequestContext {
347 urgency: Urgency::High,
348 complexity: Complexity::Simple,
349 budget: Budget::Tight,
350 needs_vision: false,
351 },
352 RequestContext {
353 urgency: Urgency::Low,
354 complexity: Complexity::Complex,
355 budget: Budget::Flexible,
356 needs_vision: false,
357 },
358 RequestContext {
359 urgency: Urgency::Medium,
360 complexity: Complexity::Moderate,
361 budget: Budget::Normal,
362 needs_vision: true,
363 },
364 ];
365
366 for (i, ctx) in contexts.iter().enumerate() {
367 let model = select_model(ctx);
368 println!("Context {}: {:?}", i + 1, ctx);
369 println!(" Selected model: {}", model);
370
371 let builder = client
372 .chat()
373 .user(format!("Hello from dynamically selected {}", model))
374 .max_completion_tokens(20);
375 let response = client.send_chat(builder).await?;
376
377 if let Some(content) = response.content() {
378 println!(" Response: {}\n", content);
379 }
380 }
381
382 Ok(())
383}
examples/responses_comprehensive.rs (line 132)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Additional examples can be found in:
- examples/vision_chat.rs
- examples/moderations.rs
- examples/chat_comprehensive.rs
- examples/structured_outputs.rs
- examples/tool_calling_simple.rs
- examples/langfuse_simple.rs
- examples/tool_calling_multiturn.rs
- examples/azure_comprehensive.rs
- examples/azure_openai.rs
- examples/http_middleware_retry.rs
- examples/langfuse.rs
- examples/quickstart.rs
pub fn choices(&self) -> &[CreateChatCompletionResponseChoicesInner]
Get all choices from the response.
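A sketch for walking every choice rather than only the first; printing with `{:?}` assumes the generated choice type derives `Debug`.

fn inspect_choices(response: &ChatCompletionResponseWrapper) {
    // Iterate every choice, not just the first; useful when more than one
    // completion was requested.
    for (i, choice) in response.choices().iter().enumerate() {
        println!("choice {i}: {choice:?}");
    }
}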
pub fn tool_calls(&self) -> Vec<&ChatCompletionMessageToolCallsInner>
Get tool calls from the first choice, if any.
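A short sketch of the dispatch pattern that the repository examples below carry out in full.

fn list_tool_calls(response: &ChatCompletionResponseWrapper) {
    // `function_name()`, `function_arguments()` and `id()` are the accessors
    // used throughout the examples below.
    for tool_call in response.tool_calls() {
        println!("{}({})", tool_call.function_name(), tool_call.function_arguments());
        // Execute the tool here, then feed the result back into the
        // conversation keyed by tool_call.id() (see conversation_with_tools below).
    }
}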
Examples found in repository
examples/tool_calling.rs (line 136)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129 let builder = client
130 .chat()
131 .user("What's the weather like in San Francisco?")
132 .tools(vec![get_weather_tool()]);
133 let response = client.send_chat(builder).await?;
134
135 // Check for tool calls
136 let tool_calls = response.tool_calls();
137 if !tool_calls.is_empty() {
138 for tool_call in tool_calls {
139 println!("Tool called: {}", tool_call.function_name());
140 println!("Arguments: {}", tool_call.function_arguments());
141
142 // Execute the function
143 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144 let result = execute_weather_function(params)?;
145 println!("Function result: {}", result);
146 }
147 }
148
149 Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153 let builder = client
154 .chat()
155 .user("What's the weather in NYC and what time is it there?")
156 .tools(vec![get_weather_tool(), get_time_tool()]);
157 let response = client.send_chat(builder).await?;
158
159 for tool_call in response.tool_calls() {
160 match tool_call.function_name() {
161 "get_weather" => {
162 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163 let result = execute_weather_function(params)?;
164 println!("Weather result: {}", result);
165 }
166 "get_current_time" => {
167 let params: serde_json::Value =
168 serde_json::from_str(tool_call.function_arguments())?;
169 if let Some(timezone) = params["timezone"].as_str() {
170 let result = execute_time_function(timezone);
171 println!("Time result: {}", result);
172 }
173 }
174 _ => println!("Unknown tool: {}", tool_call.function_name()),
175 }
176 }
177
178 Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212 // This example demonstrates proper multi-turn tool calling with full message history
213
214 println!("=== Conversation with Tools (Full Implementation) ===");
215
216 // Initialize the conversation
217 let mut builder = client
218 .chat()
219 .user("What's the weather in Tokyo?")
220 .tools(vec![get_weather_tool()]);
221
222 // First request - the model will call the tool
223 let response = client.send_chat(builder.clone()).await?;
224
225 // Check for tool calls
226 let tool_calls = response.tool_calls();
227 if !tool_calls.is_empty() {
228 println!("Step 1: Model requests tool call");
229 for tool_call in &tool_calls {
230 println!(" Tool: {}", tool_call.function_name());
231 println!(" Args: {}", tool_call.function_arguments());
232 }
233
234 // IMPORTANT: Add the assistant's response (with tool calls) to the history
235 // This is the key step for maintaining proper conversation context!
236 builder = builder.assistant_with_tool_calls(
237 response.content().unwrap_or(""),
238 tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239 );
240
241 // Execute the tools and add results
242 println!("\nStep 2: Execute tools and add results to conversation");
243 for tool_call in tool_calls {
244 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245 let result = execute_weather_function(params)?;
246 println!(" Tool result: {}", result);
247
248 // Add the tool result to the conversation history
249 builder = builder.tool(tool_call.id(), result);
250 }
251
252 // Send the follow-up request with tool results
253 println!("\nStep 3: Send follow-up request with tool results");
254 let final_response = client
255 .send_chat(builder.tools(vec![get_weather_tool()]))
256 .await?;
257
258 if let Some(content) = final_response.content() {
259 println!(" Final assistant response: {}", content);
260 }
261 }
262
263 println!("\nNote: This demonstrates the complete tool calling loop with proper");
264 println!("message history management using assistant_with_tool_calls()");
265
266 Ok(())
267}
268
269fn streaming_with_tools(_client: &Client) {
270 println!("Streaming response with tools:");
271
272 // Note: Streaming with tool calls is more complex and requires
273 // proper handling of partial tool call chunks. For now, this is
274 // a placeholder showing the concept.
275
276 println!("This would demonstrate streaming tool calls if streaming API was available");
277 println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
278}
279
280async fn parallel_tool_calls(client: &Client) -> Result<()> {
281 let builder = client
282 .chat()
283 .user("Check the weather in Tokyo, London, and New York")
284 .tools(vec![get_weather_tool()]);
285 let response = client.send_chat(builder).await?;
286
287 // Modern models can call multiple tools in parallel
288 let tool_calls = response.tool_calls();
289 println!("Parallel tool calls: {}", tool_calls.len());
290
291 // Collect arguments first to avoid lifetime issues
292 let args_vec: Vec<String> = tool_calls
293 .iter()
294 .map(|tc| tc.function_arguments().to_string())
295 .collect();
296
297 // Execute all in parallel using tokio
298 let mut handles = Vec::new();
299 for args in args_vec {
300 let handle = tokio::spawn(async move {
301 let params: WeatherParams = serde_json::from_str(&args)?;
302 execute_weather_function(params)
303 });
304 handles.push(handle);
305 }
306
307 // Wait for all results
308 for (i, handle) in handles.into_iter().enumerate() {
309 match handle.await {
310 Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
311 Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
312 Err(e) => println!("Task {} panicked: {}", i + 1, e),
313 }
314 }
315
316 Ok(())
317}
More examples
examples/tool_calling_simple.rs (line 55)
42async fn main() -> Result<()> {
43 println!("=== Tool Calling Example ===");
44
45 let client = Client::from_env()?.build();
46
47 // Simple tool call
48 let builder = client
49 .chat()
50 .user("What's the weather like in San Francisco?")
51 .tools(vec![get_weather_tool()]);
52 let response = client.send_chat(builder).await?;
53
54 // Check for tool calls
55 let tool_calls = response.tool_calls();
56 if !tool_calls.is_empty() {
57 for tool_call in tool_calls {
58 println!("Tool called: {}", tool_call.function_name());
59 println!("Arguments: {}", tool_call.function_arguments());
60
61 // Execute the function
62 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63 let result = execute_weather_function(&params);
64 println!("Function result: {}", result);
65 }
66 } else if let Some(content) = response.content() {
67 println!("Response: {}", content);
68 }
69
70 // Forced tool choice
71 println!("\n=== Forced Tool Choice ===");
72 let builder = client
73 .chat()
74 .user("Tell me about Paris")
75 .tools(vec![get_weather_tool()])
76 .tool_choice(ToolChoiceHelper::specific("get_weather"));
77 let response = client.send_chat(builder).await?;
78
79 for tool_call in response.tool_calls() {
80 println!("Forced tool: {}", tool_call.function_name());
81 }
82
83 // No tools
84 println!("\n=== No Tools Mode ===");
85 let builder = client
86 .chat()
87 .user("What's the weather?")
88 .tools(vec![get_weather_tool()])
89 .tool_choice(ToolChoiceHelper::none());
90 let response = client.send_chat(builder).await?;
91
92 if let Some(content) = response.content() {
93 println!("Response without tools: {}", content);
94 }
95
96 Ok(())
97}
examples/tool_calling_multiturn.rs (line 182)
160async fn handle_tool_loop(
161 client: &Client,
162 mut chat_builder: openai_ergonomic::builders::chat::ChatCompletionBuilder,
163 tools: &[openai_client_base::models::ChatCompletionTool],
164 storage: &Arc<Mutex<HashMap<String, String>>>,
165) -> Result<String> {
166 const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
167 let mut iteration = 0;
168
169 loop {
170 iteration += 1;
171 if iteration > MAX_ITERATIONS {
172 return Err(std::io::Error::other("Max iterations reached in tool loop").into());
173 }
174
175 println!("\n [Iteration {}]", iteration);
176
177 // Send request with tools
178 let request = chat_builder.clone().tools(tools.to_vec());
179 let response = client.send_chat(request).await?;
180
181 // Check if there are tool calls
182 let tool_calls = response.tool_calls();
183 if tool_calls.is_empty() {
184 // No more tool calls, return the final response
185 if let Some(content) = response.content() {
186 return Ok(content.to_string());
187 }
188 return Err(std::io::Error::other("No content in final response").into());
189 }
190
191 // Process tool calls
192 println!(" Tool calls: {}", tool_calls.len());
193
194 // IMPORTANT: Add assistant message with tool calls to history
195 // This is the key step that maintains proper conversation context!
196 chat_builder = chat_builder.assistant_with_tool_calls(
197 response.content().unwrap_or(""),
198 tool_calls.iter().map(|tc| (*tc).clone()).collect(),
199 );
200
201 // Execute each tool call and add results to history
202 for tool_call in tool_calls {
203 let tool_name = tool_call.function_name();
204 let tool_args = tool_call.function_arguments();
205 let tool_id = tool_call.id();
206
207 println!(" → {}: {}", tool_name, tool_args);
208
209 let result = match execute_tool(tool_name, tool_args, storage) {
210 Ok(result) => {
211 println!(" ✓ Result: {}", result);
212 result
213 }
214 Err(e) => {
215 let error_msg = format!("Error: {}", e);
216 eprintln!(" ✗ {}", error_msg);
217 error_msg
218 }
219 };
220
221 // Add tool result to the conversation
222 chat_builder = chat_builder.tool(tool_id, result);
223 }
224 }
225}
examples/responses_comprehensive.rs (line 214)
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
examples/quickstart.rs (line 200)
37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}
Sourcepub fn is_refusal(&self) -> bool
pub fn is_refusal(&self) -> bool
Check if the response was refused.
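A minimal usage sketch, assuming a response obtained from client.send_chat as in the repository examples above:
// Sketch only: `response` is a ChatCompletionResponseWrapper returned by
// client.send_chat(builder).await?, as in the examples above.
if response.is_refusal() {
    eprintln!("The model refused this request.");
} else if let Some(content) = response.content() {
    println!("Assistant: {content}");
}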
Sourcepub fn finish_reason(&self) -> Option<String>
pub fn finish_reason(&self) -> Option<String>
Get the finish reason for the first choice.
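A compact sketch of branching on the finish reason; the string values matched here ("stop", "length", "tool_calls") are the usual OpenAI finish reasons and are assumed, not guaranteed by this wrapper:
// Sketch only: finish_reason() returns Option<String> for the first choice.
match response.finish_reason().as_deref() {
    Some("stop") => println!("Model finished normally."),
    Some("length") => println!("Response was truncated by the token limit."),
    Some("tool_calls") => println!("Model requested one or more tool calls."),
    Some(other) => println!("Finished with reason: {other}"),
    None => println!("No finish reason reported."),
}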
Examples found in repository?
examples/responses_comprehensive.rs (line 144)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
Sourcepub fn inner(&self) -> &CreateChatCompletionResponse
pub fn inner(&self) -> &CreateChatCompletionResponse
Get the inner response object.
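A minimal sketch of dropping down to the raw CreateChatCompletionResponse for fields without a dedicated helper; the usage access mirrors the quickstart example referenced below, while the id field is an assumption about the underlying response type:
// Sketch only: `response` is a ChatCompletionResponseWrapper.
let raw = response.inner(); // &CreateChatCompletionResponse
if let Some(usage) = &raw.usage {
    println!("Total tokens: {}", usage.total_tokens);
}
// Other fields of the raw response are reachable the same way
// (field name assumed from the underlying OpenAI response shape).
println!("Response id: {}", raw.id);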
Examples found in repository?
examples/quickstart.rs (line 82)
Trait Implementations§
Source§impl Clone for ChatCompletionResponseWrapper
impl Clone for ChatCompletionResponseWrapper
Source§fn clone(&self) -> ChatCompletionResponseWrapper
fn clone(&self) -> ChatCompletionResponseWrapper
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Auto Trait Implementations§
impl Freeze for ChatCompletionResponseWrapper
impl RefUnwindSafe for ChatCompletionResponseWrapper
impl Send for ChatCompletionResponseWrapper
impl Sync for ChatCompletionResponseWrapper
impl Unpin for ChatCompletionResponseWrapper
impl UnwindSafe for ChatCompletionResponseWrapper
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more