pub struct ChatCompletionResponseWrapper { /* private fields */ }
Wrapper for chat completion responses with ergonomic helpers.
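A minimal end-to-end sketch, assuming a client built from the environment inside an async fn returning Result, as in the repository examples below:

let client = Client::from_env()?.build();
// send_chat returns a ChatCompletionResponseWrapper
let response = client.send_chat(client.chat_simple("Hello")).await?;
if let Some(content) = response.content() {
    println!("{}", content);
}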
Implementations
impl ChatCompletionResponseWrapper
pub fn new(response: CreateChatCompletionResponse) -> Self
Create a new response wrapper.
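A hedged sketch, assuming raw is a CreateChatCompletionResponse obtained from a lower-level API call:

// `raw` is assumed to come from a direct openai-client-base call
let wrapped = ChatCompletionResponseWrapper::new(raw);
println!("content: {:?}", wrapped.content());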
pub fn with_base_url(response: CreateChatCompletionResponse, base_url: String) -> Self
Create a response wrapper with a base URL for generating links.
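A hedged sketch; how the wrapper uses the base URL for link generation is not shown on this page, so treat the URL value as an assumption:

// Assumed: base_url is consumed by link-generating helpers on the wrapper
let wrapped = ChatCompletionResponseWrapper::with_base_url(
    raw,
    "https://api.openai.com/v1".to_string(),
);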
pub fn content(&self) -> Option<&str>
Get the first message content from the response.
Examples found in repository
examples/error_handling.rs (line 68)
57async fn basic_error_handling() {
58 let client = match Client::from_env() {
59 Ok(client_builder) => client_builder.build(),
60 Err(e) => {
61 println!("Failed to create client: {}", e);
62 return;
63 }
64 };
65
66 match client.send_chat(client.chat_simple("Hello")).await {
67 Ok(response) => {
68 if let Some(content) = response.content() {
69 println!("Success: {}", content);
70 } else {
71 println!("Success: (no content)");
72 }
73 }
74 Err(e) => println!("Error: {}", e),
75 }
76}
77
78async fn pattern_matching_errors() {
79 let Ok(client_builder) = Client::from_env() else {
80 return;
81 };
82 let client = client_builder.build();
83
84 // Simulate various errors by using invalid parameters
85 let builder = client.chat().user("test");
86 let result = client.send_chat(builder).await;
87
88 match result {
89 Ok(_) => println!("Unexpected success"),
90 Err(e) => match e {
91 Error::Api { message, .. } => {
92 println!("API Error: {}", message);
93 }
94 Error::RateLimit(message) => {
95 println!("Rate limited: {}", message);
96 }
97 Error::Authentication(message) => {
98 println!("Authentication failed: {}", message);
99 }
100 Error::Http(source) => {
101 println!("Network error: {}", source);
102 }
103 Error::Json(source) => {
104 println!("Serialization error: {}", source);
105 }
106 Error::Stream(message) => {
107 println!("Stream error: {}", message);
108 }
109 Error::InvalidRequest(message) => {
110 println!("Invalid request: {}", message);
111 }
112 Error::Config(message) => {
113 println!("Configuration error: {}", message);
114 }
115 _ => {
116 println!("Other error: {}", e);
117 }
118 },
119 }
120}
121
122async fn rate_limit_handling() {
123 const MAX_RETRIES: u32 = 3;
124
125 let Ok(client_builder) = Client::from_env() else {
126 return;
127 };
128 let client = client_builder.build();
129
130 // Retry logic for rate limiting
131 let mut retries = 0;
132
133 loop {
134 match client.send_chat(client.chat_simple("Hello")).await {
135 Ok(response) => {
136 if let Some(content) = response.content() {
137 println!("Success: {}", content);
138 } else {
139 println!("Success: (no content)");
140 }
141 break;
142 }
143 Err(Error::RateLimit(_message)) => {
144 if retries >= MAX_RETRIES {
145 println!("Max retries exceeded");
146 break;
147 }
148
149 let wait_time = Duration::from_secs(1);
150 println!("Rate limited. Waiting {:?} before retry...", wait_time);
151 sleep(wait_time).await;
152 retries += 1;
153 }
154 Err(e) => {
155 println!("Other error: {}", e);
156 break;
157 }
158 }
159 }
160}
161
162async fn token_limit_handling() {
163 let Ok(client_builder) = Client::from_env() else {
164 return;
165 };
166 let client = client_builder.build();
167
168 // Generate a very long prompt that might exceed token limits
169 let long_text = "Lorem ipsum ".repeat(10000);
170
171 match client.send_chat(client.chat_simple(&long_text)).await {
172 Ok(_) => println!("Processed long text successfully"),
173 Err(Error::InvalidRequest(message)) if message.contains("token") => {
174 println!("Token limit issue: {}", message);
175
176 // Retry with truncated text
177 let truncated = &long_text[..1000];
178 println!("Retrying with truncated text...");
179
180 match client.send_chat(client.chat_simple(truncated)).await {
181 Ok(response) => {
182 if let Some(content) = response.content() {
183 println!("Success with truncated: {}", content);
184 } else {
185 println!("Success with truncated: (no content)");
186 }
187 }
188 Err(e) => println!("Still failed: {}", e),
189 }
190 }
191 Err(e) => println!("Other error: {}", e),
192 }
193}
194
195async fn auth_error_handling() -> Result<()> {
196 // Try with invalid API key
197 let config = Config::builder().api_key("invalid-api-key").build();
198 let invalid_client = Client::builder(config)?.build();
199
200 match invalid_client
201 .send_chat(invalid_client.chat_simple("Hello"))
202 .await
203 {
204 Ok(_) => println!("Unexpected success"),
205 Err(Error::Authentication(message)) => {
206 println!("Authentication failed as expected: {}", message);
207
208 // Suggest remediation
209 println!("Suggestions:");
210 println!("1. Check your OPENAI_API_KEY environment variable");
211 println!("2. Verify API key at https://platform.openai.com/api-keys");
212 println!("3. Ensure your API key has necessary permissions");
213 }
214 Err(e) => println!("Unexpected error type: {}", e),
215 }
216
217 Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262 let client = Client::from_env()?.build();
263
264 // Wrap errors with custom context
265 let result = client
266 .send_chat(client.chat_simple("Analyze this data"))
267 .await
268 .map_err(|e| {
269 eprintln!("Context: Failed during data analysis task");
270 eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271 eprintln!("Original error: {}", e);
272 e
273 })?;
274
275 if let Some(content) = result.content() {
276 println!("Result: {}", content);
277 } else {
278 println!("Result: (no content)");
279 }
280 Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284 let client = Client::from_env()?.build();
285
286 // Strategy 1: Fallback to simpler model
287 let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288 println!("Fallback strategy result: {}", result);
289
290 // Strategy 2: Circuit breaker pattern
291 let circuit_breaker = CircuitBreaker::new();
292 if circuit_breaker.is_open() {
293 println!("Circuit breaker is open, skipping API calls");
294 return Ok(());
295 }
296
297 match client.send_chat(client.chat_simple("Test")).await {
298 Ok(response) => {
299 circuit_breaker.record_success();
300 if let Some(content) = response.content() {
301 println!("Circuit breaker success: {}", content);
302 } else {
303 println!("Circuit breaker success: (no content)");
304 }
305 }
306 Err(e) => {
307 circuit_breaker.record_failure();
308 println!("Circuit breaker failure: {}", e);
309 }
310 }
311
312 // Strategy 3: Request hedging (parallel requests with first success wins)
313 let hedge_result = hedged_request(&client).await?;
314 println!("Hedged request result: {}", hedge_result);
315
316 Ok(())
317}
318
319async fn try_with_fallback(client: &Client, primary: &str, _fallback: &str) -> Result<String> {
320 // Try primary model first
321 let builder = client.chat().user("Hello");
322 match client.send_chat(builder).await {
323 Ok(response) => Ok(response.content().unwrap_or("").to_string()),
324 Err(e) => {
325 println!("Primary model failed ({}): {}, trying fallback", primary, e);
326
327 // Try fallback model
328 let fallback_builder = client.chat().user("Hello");
329 client
330 .send_chat(fallback_builder)
331 .await
332 .map(|r| r.content().unwrap_or("").to_string())
333 }
334 }
335}
336
337async fn hedged_request(client: &Client) -> Result<String> {
338 use futures::future::select;
339 use std::pin::pin;
340
341 // Launch two requests in parallel
342 let request1 = async {
343 client
344 .send_chat(client.chat_simple("Hello from request 1"))
345 .await
346 };
347 let request2 = async {
348 client
349 .send_chat(client.chat_simple("Hello from request 2"))
350 .await
351 };
352
353 let fut1 = pin!(request1);
354 let fut2 = pin!(request2);
355
356 // Return first successful response
357 match select(fut1, fut2).await {
358 futures::future::Either::Left((result, _)) => {
359 println!("Request 1 completed first");
360 result.map(|r| r.content().unwrap_or("").to_string())
361 }
362 futures::future::Either::Right((result, _)) => {
363 println!("Request 2 completed first");
364 result.map(|r| r.content().unwrap_or("").to_string())
365 }
366 }
367}
More examples
examples/retry_patterns.rs (line 71)
63async fn simple_retry(client: &Client) -> Result<()> {
64 const MAX_RETRIES: u32 = 3;
65
66 for attempt in 1..=MAX_RETRIES {
67 println!("Attempt {}/{}", attempt, MAX_RETRIES);
68
69 match client.send_chat(client.chat_simple("Hello")).await {
70 Ok(response) => {
71 if let Some(content) = response.content() {
72 println!("Success: {}", content);
73 } else {
74 println!("Success: (no content)");
75 }
76 return Ok(());
77 }
78 Err(e) if attempt < MAX_RETRIES => {
79 println!("Failed (attempt {}): {}. Retrying...", attempt, e);
80 sleep(Duration::from_secs(1)).await;
81 }
82 Err(e) => {
83 println!("All retries exhausted");
84 return Err(e);
85 }
86 }
87 }
88
89 Ok(())
90}
91
92async fn exponential_backoff(client: &Client) -> Result<()> {
93 const MAX_RETRIES: u32 = 5;
94 const BASE_DELAY: Duration = Duration::from_millis(100);
95 const MAX_DELAY: Duration = Duration::from_secs(32);
96
97 let mut delay = BASE_DELAY;
98
99 for attempt in 1..=MAX_RETRIES {
100 match client
101 .send_chat(client.chat_simple("Hello with backoff"))
102 .await
103 {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Success after {} attempts: {}", attempt, content);
107 } else {
108 println!("Success after {} attempts: (no content)", attempt);
109 }
110 return Ok(());
111 }
112 Err(Error::RateLimit(_message)) => {
113 // Use default delay for rate limiting
114 let wait_time = delay;
115 println!(
116 "Rate limited (attempt {}). Waiting {:?}...",
117 attempt, wait_time
118 );
119 sleep(wait_time).await;
120
121 // Double the delay for next attempt
122 delay = (delay * 2).min(MAX_DELAY);
123 }
124 Err(e) if attempt < MAX_RETRIES => {
125 println!("Error (attempt {}): {}. Waiting {:?}...", attempt, e, delay);
126 sleep(delay).await;
127
128 // Exponential increase with cap
129 delay = (delay * 2).min(MAX_DELAY);
130 }
131 Err(e) => return Err(e),
132 }
133 }
134
135 Ok(())
136}
137
138async fn retry_with_jitter(client: &Client) -> Result<()> {
139 const MAX_RETRIES: u32 = 5;
140 const BASE_DELAY_MS: u64 = 100;
141
142 for attempt in 1..=MAX_RETRIES {
143 match client
144 .send_chat(client.chat_simple("Hello with jitter"))
145 .await
146 {
147 Ok(response) => {
148 if let Some(content) = response.content() {
149 println!("Success: {}", content);
150 } else {
151 println!("Success: (no content)");
152 }
153 return Ok(());
154 }
155 Err(e) if attempt < MAX_RETRIES => {
156 // Calculate delay with jitter using random() instead of thread_rng for Send compatibility
157 let base = BASE_DELAY_MS * 2_u64.pow(attempt - 1);
158 let jitter = rand::random::<u64>() % (base / 2 + 1);
159 let delay = Duration::from_millis(base + jitter);
160
161 println!(
162 "Attempt {} failed: {}. Retrying in {:?} (with jitter)...",
163 attempt, e, delay
164 );
165 sleep(delay).await;
166 }
167 Err(e) => return Err(e),
168 }
169 }
170
171 Ok(())
172}
173
174async fn circuit_breaker_example(client: &Client) -> Result<()> {
175 let circuit_breaker = Arc::new(CircuitBreaker::new(3, Duration::from_secs(5)));
176
177 for i in 1..=10 {
178 println!("Request {}: ", i);
179
180 // Check circuit state
181 match circuit_breaker
182 .call(|| async {
183 client
184 .send_chat(client.chat_simple("Circuit breaker test"))
185 .await
186 })
187 .await
188 {
189 Ok(response) => {
190 if let Some(content) = response.content() {
191 println!(" Success: {}", content);
192 } else {
193 println!(" Success: (no content)");
194 }
195 }
196 Err(CircuitBreakerError::Open) => {
197 println!(" Circuit is OPEN - skipping request");
198 sleep(Duration::from_secs(1)).await;
199 }
200 Err(CircuitBreakerError::RequestFailed(e)) => {
201 println!(" Request failed: {}", e);
202 }
203 }
204
205 // Small delay between requests
206 sleep(Duration::from_millis(500)).await;
207 }
208
209 Ok(())
210}
211
212async fn timeout_management(client: &Client) {
213 // Example 1: Per-request timeout
214 println!("Per-request timeout:");
215 match timeout(
216 Duration::from_secs(5),
217 client.send_chat(client.chat_simple("Hello")),
218 )
219 .await
220 {
221 Ok(Ok(response)) => {
222 if let Some(content) = response.content() {
223 println!("Response received: {}", content);
224 } else {
225 println!("Response received: (no content)");
226 }
227 }
228 Ok(Err(e)) => println!("API error: {}", e),
229 Err(_) => println!("Request timed out after 5 seconds"),
230 }
231
232 // Example 2: Deadline-based timeout
233 println!("\nDeadline-based timeout:");
234 let deadline = Instant::now() + Duration::from_secs(10);
235
236 while Instant::now() < deadline {
237 let remaining = deadline - Instant::now();
238 println!("Time remaining: {:?}", remaining);
239
240 match timeout(
241 remaining,
242 client.send_chat(client.chat_simple("Quick response")),
243 )
244 .await
245 {
246 Ok(Ok(response)) => {
247 if let Some(content) = response.content() {
248 println!("Got response: {}", content);
249 } else {
250 println!("Got response: (no content)");
251 }
252 break;
253 }
254 Ok(Err(e)) => {
255 println!("Error: {}. Retrying...", e);
256 sleep(Duration::from_secs(1)).await;
257 }
258 Err(_) => {
259 println!("Deadline exceeded");
260 break;
261 }
262 }
263 }
264
265 // Example 3: Adaptive timeout
266 println!("\nAdaptive timeout:");
267 let mut adaptive_timeout = Duration::from_secs(2);
268
269 for _attempt in 1..=3 {
270 let start = Instant::now();
271
272 match timeout(
273 adaptive_timeout,
274 client.send_chat(client.chat_simple("Adaptive")),
275 )
276 .await
277 {
278 Ok(Ok(response)) => {
279 let elapsed = start.elapsed();
280 println!(
281 "Success in {:?}. Next timeout would be {:?}.",
282 elapsed,
283 elapsed * 2
284 );
285 // Adjust timeout based on actual response time for potential future requests
286 // adaptive_timeout = elapsed * 2; // Not used since we break out of the loop
287 if let Some(content) = response.content() {
288 println!("Response: {}", content);
289 } else {
290 println!("Response: (no content)");
291 }
292 break;
293 }
294 Ok(Err(e)) => println!("Error: {}", e),
295 Err(_) => {
296 println!(
297 "Timeout after {:?}. Increasing for next attempt.",
298 adaptive_timeout
299 );
300 adaptive_timeout *= 2;
301 }
302 }
303 }
304}
305
306async fn request_hedging(client: &Client) -> Result<()> {
307 use futures::future::{select, Either};
308 use std::pin::pin;
309
310 println!("Launching hedged requests...");
311
312 // Launch multiple requests with staggered starts
313 let request1 = async {
314 println!("Request 1 started");
315 client
316 .send_chat(client.chat_simple("Hedged request 1"))
317 .await
318 };
319
320 let request2 = async {
321 sleep(Duration::from_millis(200)).await;
322 println!("Request 2 started (200ms delay)");
323 client
324 .send_chat(client.chat_simple("Hedged request 2"))
325 .await
326 };
327
328 let fut1 = pin!(request1);
329 let fut2 = pin!(request2);
330
331 // Return first successful response
332 match select(fut1, fut2).await {
333 Either::Left((result, _)) => {
334 println!("Request 1 won the race");
335 result.map(|r| {
336 if let Some(content) = r.content() {
337 println!("Result: {}", content);
338 } else {
339 println!("Result: (no content)");
340 }
341 })
342 }
343 Either::Right((result, _)) => {
344 println!("Request 2 won the race");
345 result.map(|r| {
346 if let Some(content) = r.content() {
347 println!("Result: {}", content);
348 } else {
349 println!("Result: (no content)");
350 }
351 })
352 }
353 }
354}
355
356async fn fallback_chain(client: &Client) -> Result<()> {
357 // Define fallback chain
358 let strategies = vec![
359 ("GPT-4o", "gpt-4o", 1024),
360 ("GPT-4o-mini", "gpt-4o-mini", 512),
361 ("GPT-3.5", "gpt-3.5-turbo", 256),
362 ];
363
364 let prompt = "Explain quantum computing";
365
366 for (name, _model, max_tokens) in strategies {
367 println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369 let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370 match client.send_chat(builder).await {
371 Ok(response) => {
372 println!("Success with {}", name);
373 if let Some(content) = response.content() {
374 println!("Response: {}...", &content[..content.len().min(100)]);
375 }
376 return Ok(());
377 }
378 Err(e) => {
379 println!("Failed with {}: {}", name, e);
380 }
381 }
382 }
383
384 println!("All fallback strategies exhausted");
385 Ok(())
386}
387
388async fn idempotency_example(_client: &Client) -> Result<()> {
389 // Generate idempotency key
390 let idempotency_key = generate_idempotency_key();
391 println!("Using idempotency key: {}", idempotency_key);
392
393 // Simulate retrying the same request
394 for attempt in 1..=3 {
395 println!("\nAttempt {} with same idempotency key", attempt);
396
397 // In a real implementation, you'd pass the idempotency key in headers
398 let mut headers = std::collections::HashMap::new();
399 headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400 println!(" Would send {} headers", headers.len());
401
402 let config = Config::builder()
403 .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404 .build();
405
406 // Note: Headers (including idempotency key) are not yet supported in current API
407
408 let client_with_idempotency = Client::builder(config)?.build();
409
410 match client_with_idempotency
411 .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412 .await
413 {
414 Ok(response) => {
415 if let Some(content) = response.content() {
416 println!("Response: {}", content);
417 } else {
418 println!("Response: (no content)");
419 }
420 // Server should return same response for same idempotency key
421 }
422 Err(e) => println!("Error: {}", e),
423 }
424
425 if attempt < 3 {
426 sleep(Duration::from_secs(1)).await;
427 }
428 }
429
430 Ok(())
431}
examples/tool_calling.rs (line 204)
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}examples/auth_patterns.rs (line 82)
61async fn env_var_auth() -> Result<()> {
62 // Standard environment variables:
63 // - OPENAI_API_KEY: Your API key
64 // - OPENAI_ORG_ID: Optional organization ID
65 // - OPENAI_PROJECT_ID: Optional project ID
66 // - OPENAI_BASE_URL: Optional custom base URL
67
68 // Check if environment variables are set
69 if env::var("OPENAI_API_KEY").is_err() {
70 println!("Warning: OPENAI_API_KEY not set");
71 println!("Set it with: export OPENAI_API_KEY=your-key-here");
72 return Ok(());
73 }
74
75 // Create client from environment
76 let client = Client::from_env()?.build();
77 println!("Client created from environment variables");
78
79 // Test the client
80 match client.send_chat(client.chat_simple("Hello")).await {
81 Ok(response) => {
82 if let Some(content) = response.content() {
83 println!("Response: {}", content);
84 } else {
85 println!("Response: (no content)");
86 }
87 }
88 Err(e) => println!("Error: {}", e),
89 }
90
91 Ok(())
92}
93
94async fn direct_api_key() -> Result<()> {
95 // Create client with direct API key
96 let api_key = "sk-your-api-key-here"; // Replace with actual key
97 let config = Config::builder().api_key(api_key).build();
98 let client = Client::builder(config)?.build();
99
100 println!("Client created with direct API key");
101
102 // Note: This will fail with invalid key
103 match client.send_chat(client.chat_simple("Hello")).await {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Response: {}", content);
107 } else {
108 println!("Response: (no content)");
109 }
110 }
111 Err(e) => println!("Expected error with demo key: {}", e),
112 }
113
114 Ok(())
115}
examples/models.rs (line 194)
163async fn model_selection_by_task(client: &Client) -> Result<()> {
164 // Task-specific model recommendations
165 let task_models = vec![
166 ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
167 ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
168 ("Code generation", "gpt-4o", "Excellent code understanding"),
169 ("Vision tasks", "gpt-4o", "Native vision support"),
170 (
171 "Quick responses",
172 "gpt-4o-mini",
173 "Low latency, good quality",
174 ),
175 (
176 "Bulk processing",
177 "gpt-3.5-turbo",
178 "Best cost/performance ratio",
179 ),
180 ];
181
182 for (task, model, reason) in task_models {
183 println!("Task: {}", task);
184 println!(" Recommended: {}", model);
185 println!(" Reason: {}", reason);
186
187 // Demo the model
188 let builder = client
189 .chat()
190 .user(format!("Say 'Hello from {}'", model))
191 .max_completion_tokens(10);
192 let response = client.send_chat(builder).await?;
193
194 if let Some(content) = response.content() {
195 println!(" Response: {}\n", content);
196 }
197 }
198
199 Ok(())
200}
201
202async fn cost_optimization(client: &Client) -> Result<()> {
203 let models = get_model_registry();
204 let test_prompt = "Explain the theory of relativity in one sentence";
205 let estimated_input_tokens = 15;
206 let estimated_output_tokens = 50;
207
208 println!("Cost comparison for same task:");
209 println!("Prompt: '{}'\n", test_prompt);
210
211 let mut costs = Vec::new();
212
213 for (name, info) in &models {
214 if !info.deprecated {
215 let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
216 let output_cost =
217 (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
218 let total_cost = input_cost + output_cost;
219
220 costs.push((name.clone(), total_cost));
221 }
222 }
223
224 costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
225
226 println!("{:<20} {:>15}", "Model", "Estimated Cost");
227 println!("{:-<35}", "");
228 for (model, cost) in costs {
229 println!("{:<20} ${:>14.6}", model, cost);
230 }
231
232 // Demonstrate cheapest vs best
233 println!("\nRunning with cheapest model (gpt-3.5-turbo):");
234 let builder = client.chat().user(test_prompt);
235 let cheap_response = client.send_chat(builder).await?;
236
237 if let Some(content) = cheap_response.content() {
238 println!("Response: {}", content);
239 }
240
241 Ok(())
242}
243
244async fn performance_testing(client: &Client) -> Result<()> {
245 use std::time::Instant;
246
247 let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
248 let test_prompt = "Write a haiku about programming";
249
250 println!("Performance comparison:");
251 println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
252 println!("{:-<45}", "");
253
254 for model in models_to_test {
255 let start = Instant::now();
256
257 let builder = client.chat().user(test_prompt);
258 let response = client.send_chat(builder).await?;
259
260 let elapsed = start.elapsed();
261
262 if let Some(usage) = response.usage() {
263 let total_tokens = f64::from(usage.total_tokens);
264 let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
265
266 println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
267 }
268 }
269
270 Ok(())
271}
272
273async fn model_migration(client: &Client) -> Result<()> {
274 // Handle deprecated model migration
275 let deprecated_mappings = HashMap::from([
276 ("text-davinci-003", "gpt-3.5-turbo"),
277 ("gpt-4-32k", "gpt-4o"),
278 ("gpt-4-vision-preview", "gpt-4o"),
279 ]);
280
281 let requested_model = "text-davinci-003"; // Deprecated model
282
283 if let Some(replacement) = deprecated_mappings.get(requested_model) {
284 println!(
285 "Warning: {} is deprecated. Using {} instead.",
286 requested_model, replacement
287 );
288
289 let builder = client.chat().user("Hello from migrated model");
290 let response = client.send_chat(builder).await?;
291
292 if let Some(content) = response.content() {
293 println!("Response from {}: {}", replacement, content);
294 }
295 }
296
297 Ok(())
298}
299
300async fn dynamic_model_selection(client: &Client) -> Result<()> {
301 // Select model based on runtime conditions
302
303 #[derive(Debug)]
304 struct RequestContext {
305 urgency: Urgency,
306 complexity: Complexity,
307 budget: Budget,
308 needs_vision: bool,
309 }
310
311 #[derive(Debug)]
312 enum Urgency {
313 Low,
314 Medium,
315 High,
316 }
317
318 #[derive(Debug)]
319 enum Complexity {
320 Simple,
321 Moderate,
322 Complex,
323 }
324
325 #[derive(Debug)]
326 enum Budget {
327 Tight,
328 Normal,
329 Flexible,
330 }
331
332 const fn select_model(ctx: &RequestContext) -> &'static str {
333 match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
334 // High urgency + simple = fast cheap model, or tight budget = cheapest
335 (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
336
337 // Complex + flexible budget = best model
338 (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
339
340 // Vision required
341 _ if ctx.needs_vision => "gpt-4o",
342
343 // Default balanced choice
344 _ => "gpt-4o-mini",
345 }
346 }
347
348 // Example contexts
349 let contexts = [
350 RequestContext {
351 urgency: Urgency::High,
352 complexity: Complexity::Simple,
353 budget: Budget::Tight,
354 needs_vision: false,
355 },
356 RequestContext {
357 urgency: Urgency::Low,
358 complexity: Complexity::Complex,
359 budget: Budget::Flexible,
360 needs_vision: false,
361 },
362 RequestContext {
363 urgency: Urgency::Medium,
364 complexity: Complexity::Moderate,
365 budget: Budget::Normal,
366 needs_vision: true,
367 },
368 ];
369
370 for (i, ctx) in contexts.iter().enumerate() {
371 let model = select_model(ctx);
372 println!("Context {}: {:?}", i + 1, ctx);
373 println!(" Selected model: {}", model);
374
375 let builder = client
376 .chat()
377 .user(format!("Hello from dynamically selected {}", model))
378 .max_completion_tokens(20);
379 let response = client.send_chat(builder).await?;
380
381 if let Some(content) = response.content() {
382 println!(" Response: {}\n", content);
383 }
384 }
385
386 Ok(())
387}
examples/responses_comprehensive.rs (line 132)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!("🤖 Assistant: {content}");
134 } else {
135 println!("⚠️ No content in response");
136 }
137
138 // Show response metadata
139 println!("📊 Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!("🔧 Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!("🤖 Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!("🌐 Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!("🤖 Assistant response: {content}");
285 }
286
287 println!("💡 Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!("📊 Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n📝 Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!("📊 Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!("🎨 Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n🧠 Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n📋 Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
pub fn choices(&self) -> &[CreateChatCompletionResponseChoicesInner]
Get all choices from the response.
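No repository example covers choices(); a minimal sketch, where the finish_reason field name follows the OpenAI schema and should be treated as an assumption about CreateChatCompletionResponseChoicesInner:

let choices = response.choices();
println!("{} choice(s) returned", choices.len());
for choice in choices {
    // finish_reason follows the OpenAI schema; the field name is an assumption here
    println!("finish_reason: {:?}", choice.finish_reason);
}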
pub fn tool_calls(&self) -> Vec<&ChatCompletionMessageToolCallsInner>
Get tool calls from the first choice, if any.
Examples found in repository
examples/tool_calling.rs (line 136)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129 let builder = client
130 .chat()
131 .user("What's the weather like in San Francisco?")
132 .tools(vec![get_weather_tool()]);
133 let response = client.send_chat(builder).await?;
134
135 // Check for tool calls
136 let tool_calls = response.tool_calls();
137 if !tool_calls.is_empty() {
138 for tool_call in tool_calls {
139 println!("Tool called: {}", tool_call.function_name());
140 println!("Arguments: {}", tool_call.function_arguments());
141
142 // Execute the function
143 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144 let result = execute_weather_function(params)?;
145 println!("Function result: {}", result);
146 }
147 }
148
149 Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153 let builder = client
154 .chat()
155 .user("What's the weather in NYC and what time is it there?")
156 .tools(vec![get_weather_tool(), get_time_tool()]);
157 let response = client.send_chat(builder).await?;
158
159 for tool_call in response.tool_calls() {
160 match tool_call.function_name() {
161 "get_weather" => {
162 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163 let result = execute_weather_function(params)?;
164 println!("Weather result: {}", result);
165 }
166 "get_current_time" => {
167 let params: serde_json::Value =
168 serde_json::from_str(tool_call.function_arguments())?;
169 if let Some(timezone) = params["timezone"].as_str() {
170 let result = execute_time_function(timezone);
171 println!("Time result: {}", result);
172 }
173 }
174 _ => println!("Unknown tool: {}", tool_call.function_name()),
175 }
176 }
177
178 Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212 // This is a simplified version that demonstrates the concept
213 // without getting into the complexities of message history management
214
215 println!("=== Conversation with Tools (Simplified) ===");
216
217 // First request with tool call
218 let builder = client
219 .chat()
220 .user("What's the weather in Tokyo?")
221 .tools(vec![get_weather_tool()]);
222 let response = client.send_chat(builder).await?;
223
224 // Check for tool calls and simulate responses
225 for tool_call in response.tool_calls() {
226 println!("Tool called: {}", tool_call.function_name());
227 println!("Arguments: {}", tool_call.function_arguments());
228
229 // In a real implementation, you would:
230 // 1. Parse the arguments
231 // 2. Execute the actual function
232 // 3. Create tool messages with results
233 // 4. Send another request with the tool results
234
235 println!("Simulated weather result: Sunny, 24°C");
236 }
237
238 println!("Note: Full conversation with tool results requires complex message handling");
239 println!("This simplified version demonstrates tool calling detection");
240
241 Ok(())
242}
243
244fn streaming_with_tools(_client: &Client) {
245 println!("Streaming response with tools:");
246
247 // Note: Streaming with tool calls is more complex and requires
248 // proper handling of partial tool call chunks. For now, this is
249 // a placeholder showing the concept.
250
251 println!("This would demonstrate streaming tool calls if streaming API was available");
252 println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
253}
254
255async fn parallel_tool_calls(client: &Client) -> Result<()> {
256 let builder = client
257 .chat()
258 .user("Check the weather in Tokyo, London, and New York")
259 .tools(vec![get_weather_tool()]);
260 let response = client.send_chat(builder).await?;
261
262 // Modern models can call multiple tools in parallel
263 let tool_calls = response.tool_calls();
264 println!("Parallel tool calls: {}", tool_calls.len());
265
266 // Collect arguments first to avoid lifetime issues
267 let args_vec: Vec<String> = tool_calls
268 .iter()
269 .map(|tc| tc.function_arguments().to_string())
270 .collect();
271
272 // Execute all in parallel using tokio
273 let mut handles = Vec::new();
274 for args in args_vec {
275 let handle = tokio::spawn(async move {
276 let params: WeatherParams = serde_json::from_str(&args)?;
277 execute_weather_function(params)
278 });
279 handles.push(handle);
280 }
281
282 // Wait for all results
283 for (i, handle) in handles.into_iter().enumerate() {
284 match handle.await {
285 Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
286 Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
287 Err(e) => println!("Task {} panicked: {}", i + 1, e),
288 }
289 }
290
291 Ok(())
292}
More examples
examples/tool_calling_simple.rs (line 55)
42async fn main() -> Result<()> {
43 println!("=== Tool Calling Example ===");
44
45 let client = Client::from_env()?.build();
46
47 // Simple tool call
48 let builder = client
49 .chat()
50 .user("What's the weather like in San Francisco?")
51 .tools(vec![get_weather_tool()]);
52 let response = client.send_chat(builder).await?;
53
54 // Check for tool calls
55 let tool_calls = response.tool_calls();
56 if !tool_calls.is_empty() {
57 for tool_call in tool_calls {
58 println!("Tool called: {}", tool_call.function_name());
59 println!("Arguments: {}", tool_call.function_arguments());
60
61 // Execute the function
62 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63            let result = execute_weather_function(&params);
64 println!("Function result: {}", result);
65 }
66 } else if let Some(content) = response.content() {
67 println!("Response: {}", content);
68 }
69
70 // Forced tool choice
71 println!("\n=== Forced Tool Choice ===");
72 let builder = client
73 .chat()
74 .user("Tell me about Paris")
75 .tools(vec![get_weather_tool()])
76 .tool_choice(ToolChoiceHelper::specific("get_weather"));
77 let response = client.send_chat(builder).await?;
78
79 for tool_call in response.tool_calls() {
80 println!("Forced tool: {}", tool_call.function_name());
81 }
82
83 // No tools
84 println!("\n=== No Tools Mode ===");
85 let builder = client
86 .chat()
87 .user("What's the weather?")
88 .tools(vec![get_weather_tool()])
89 .tool_choice(ToolChoiceHelper::none());
90 let response = client.send_chat(builder).await?;
91
92 if let Some(content) = response.content() {
93 println!("Response without tools: {}", content);
94 }
95
96 Ok(())
97}
examples/responses_comprehensive.rs (line 214)
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!("🔧 Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!("🤖 Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!("🌐 Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!("🤖 Assistant response: {content}");
285 }
286
287 println!("💡 Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
examples/quickstart.rs (line 200)
37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!("🚀 OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!("📋 Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!("✅ Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!("❌ Failed to create client: {e}");
58 eprintln!("💡 Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n📋 Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!("✅ Got response!");
77 if let Some(content) = chat_response.content() {
78 println!("🤖 AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 "📊 Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!("❌ Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n📋 Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!("✅ Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!("👨🏫 Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!("❌ Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n📋 Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!("🔄 AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n❌ Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n📋 Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!("✅ Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!("🔧 AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" 💡 In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!("🤖 AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!("❌ Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n📋 Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!("✅ Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!("🤖 AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!("❌ API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!("💡 This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!("❌ Rate limited - you're sending requests too fast");
248 println!("💡 In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!("❌ HTTP/Network error");
252 println!("💡 Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!("❌ Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n📋 Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n👤 User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!("🤖 Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 "📊 This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!("❌ Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n🎉 Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!("✅ Created an OpenAI client");
320 println!("✅ Made basic chat completions");
321 println!("✅ Used streaming responses");
322 println!("✅ Implemented tool/function calling");
323 println!("✅ Handled errors gracefully");
324 println!("✅ Built a complete conversational AI");
325 println!("\n📊 Total tokens used in examples: {total_tokens}");
326 println!(
327 "💰 Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n🚀 Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}
Sourcepub fn is_refusal(&self) -> bool
pub fn is_refusal(&self) -> bool
Check whether the model refused the request, i.e. the response contains a refusal rather than normal content.
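A minimal usage sketch, assuming a client built via Client::from_env() as in the quickstart example shown under inner() below; the hypothetical prompt is illustrative only:
// Hypothetical sketch: branch on a refusal before reading the content.
// `client` is assumed to come from Client::from_env(), as in the quickstart.
let response = client.send_chat(client.chat_simple("Hello")).await?;
if response.is_refusal() {
    println!("⚠️ The model refused this request");
} else if let Some(content) = response.content() {
    println!("🤖 AI: {content}");
}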
Sourcepub fn finish_reason(&self) -> Option<String>
pub fn finish_reason(&self) -> Option<String>
Get the finish reason for the first choice.
Examples found in repository?
examples/responses_comprehensive.rs (line 144)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!("🤖 Assistant: {content}");
134 } else {
135 println!("⚠️ No content in response");
136 }
137
138 // Show response metadata
139 println!("📊 Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
Sourcepub fn inner(&self) -> &CreateChatCompletionResponse
pub fn inner(&self) -> &CreateChatCompletionResponse
Get the inner response object.
Examples found in repository?
examples/quickstart.rs (line 82)
37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!("🚀 OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!("📋 Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!("✅ Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!("❌ Failed to create client: {e}");
58 eprintln!("💡 Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n📋 Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!("✅ Got response!");
77 if let Some(content) = chat_response.content() {
78 println!("🤖 AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 "📊 Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!("❌ Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n📋 Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!("✅ Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!("👨🏫 Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!("❌ Contextual chat failed: {e}");
117 }
118 }
119
337    Ok(())
338}
Trait Implementations§
Source§impl Clone for ChatCompletionResponseWrapper
impl Clone for ChatCompletionResponseWrapper
Source§fn clone(&self) -> ChatCompletionResponseWrapper
fn clone(&self) -> ChatCompletionResponseWrapper
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Auto Trait Implementations§
impl Freeze for ChatCompletionResponseWrapper
impl RefUnwindSafe for ChatCompletionResponseWrapper
impl Send for ChatCompletionResponseWrapper
impl Sync for ChatCompletionResponseWrapper
impl Unpin for ChatCompletionResponseWrapper
impl UnwindSafe for ChatCompletionResponseWrapper
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more