Struct Client

Source
pub struct Client<T = ()> { /* private fields */ }

Main client for interacting with the OpenAI API.

The client provides ergonomic methods for all OpenAI API endpoints, with built-in retry logic, rate limiting, error handling, and support for middleware through interceptors.

Use Client::from_env() or Client::builder(config) to obtain a ClientBuilder, then call .build() to create the client.

Example

let client = Client::from_env()?.build();
let response = client.send_chat(client.chat_simple("Hello!")).await?;
if let Some(content) = response.content() {
    println!("{content}");
}

Implementations

Source

impl Client

Source

pub fn builder(config: Config) -> Result<ClientBuilder>

Create a new client builder with the given configuration.
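A minimal construction sketch (the API key value is a placeholder):

let config = Config::builder().api_key("sk-...").build();
let client = Client::builder(config)?.build();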

Examples found in repository
examples/testing_patterns.rs (line 144)
138    fn client(&self) -> Result<Client> {
139        let config = Config::builder()
140            .api_key("test-api-key")
141            .api_base(&self.base_url())
142            .build();
143
144        Ok(Client::builder(config)?.build())
145    }
146
147    /// Configure error simulation
148    fn configure_errors(&self, config: ErrorSimulationConfig) {
149        *self.error_config.lock().unwrap() = config;
150    }
151
152    /// Mock a chat completion response
153    async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154        let mock_response = serde_json::json!({
155            "id": "chatcmpl-123",
156            "object": "chat.completion",
157            "created": 1677652288,
158            "model": "gpt-3.5-turbo",
159            "choices": [{
160                "index": 0,
161                "message": {
162                    "role": "assistant",
163                    "content": response_text
164                },
165                "finish_reason": "stop"
166            }],
167            "usage": {
168                "prompt_tokens": 50,
169                "completion_tokens": 20,
170                "total_tokens": 70
171            }
172        });
173
174        self.server
175            .mock("POST", "/v1/chat/completions")
176            .match_body(mockito::Matcher::JsonString(
177                serde_json::json!({
178                    "model": "gpt-3.5-turbo",
179                    "messages": [{"role": "user", "content": expected_prompt}]
180                })
181                .to_string(),
182            ))
183            .with_status(200)
184            .with_header("content-type", "application/json")
185            .with_body(mock_response.to_string())
186            .create_async()
187            .await;
188    }
189
190    /// Mock a streaming chat completion response
191    async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192        let mut sse_data = String::new();
193
194        for (i, chunk) in response_chunks.iter().enumerate() {
195            let chunk_response = serde_json::json!({
196                "id": "chatcmpl-123",
197                "object": "chat.completion.chunk",
198                "created": 1677652288,
199                "model": "gpt-3.5-turbo",
200                "choices": [{
201                    "index": 0,
202                    "delta": {
203                        "content": chunk
204                    },
205                    "finish_reason": if i == response_chunks.len() - 1 { Some("stop") } else { None }
206                }]
207            });
208
209            sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210        }
211
212        sse_data.push_str("data: [DONE]\n\n");
213
214        self.server
215            .mock("POST", "/v1/chat/completions")
216            .match_header("accept", "text/event-stream")
217            .with_status(200)
218            .with_header("content-type", "text/event-stream")
219            .with_body(sse_data)
220            .create_async()
221            .await;
222    }
223
224    /// Mock an error response (rate limit, server error, etc.)
225    async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226        let (status, body) = match error_type {
227            ErrorType::RateLimit => (
228                429,
229                serde_json::json!({
230                    "error": {
231                        "type": "rate_limit_exceeded",
232                        "message": "Rate limit exceeded, please try again later"
233                    }
234                })
235                .to_string(),
236            ),
237            ErrorType::ServerError => (
238                500,
239                serde_json::json!({
240                    "error": {
241                        "type": "server_error",
242                        "message": "Internal server error"
243                    }
244                })
245                .to_string(),
246            ),
247            ErrorType::InvalidRequest => (
248                400,
249                serde_json::json!({
250                    "error": {
251                        "type": "invalid_request_error",
252                        "message": "Invalid request parameters"
253                    }
254                })
255                .to_string(),
256            ),
257            ErrorType::Unauthorized => (
258                401,
259                serde_json::json!({
260                    "error": {
261                        "type": "invalid_request_error",
262                        "message": "Incorrect API key provided"
263                    }
264                })
265                .to_string(),
266            ),
267        };
268
269        self.server
270            .mock("POST", endpoint)
271            .with_status(status)
272            .with_header("content-type", "application/json")
273            .with_body(body)
274            .create_async()
275            .await;
276    }
277
278    /// Get logged requests for verification
279    fn get_request_log(&self) -> Vec<MockRequest> {
280        self.request_log.lock().unwrap().clone()
281    }
282
283    /// Clear request log
284    fn clear_request_log(&self) {
285        self.request_log.lock().unwrap().clear();
286    }
287
288    /// Verify that a specific request was made
289    fn verify_request(&self, method: &str, path: &str) -> bool {
290        let log = self.request_log.lock().unwrap();
291        log.iter()
292            .any(|req| req.method == method && req.path == path)
293    }
294 }
295
296 /// Types of errors to simulate in testing
297 #[derive(Debug, Clone)]
298 enum ErrorType {
299    RateLimit,
300    ServerError,
301    InvalidRequest,
302    Unauthorized,
303 }
304
305 /// Test utilities for OpenAI API testing
306 struct TestUtils;
307
308 impl TestUtils {
309    /// Create a test client with mock configuration
310    fn create_test_client() -> Result<Client> {
311        let config = Config::builder()
312            .api_key("test-api-key")
313            .api_base("http://localhost:1234") // Mock server URL
314            .max_retries(2)
315            .build();
316
317        Ok(Client::builder(config)?.build())
318    }
319
320    /// Assert that a response contains expected content
321    fn assert_response_content(response: &str, expected_content: &str) {
322        assert!(
323            response.contains(expected_content),
324            "Response '{}' does not contain expected content '{}'",
325            response,
326            expected_content
327        );
328    }
329
330    /// Assert token usage is within expected bounds
331    fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
332        assert!(
333            usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
334            "Token usage {} is outside expected range {}-{}",
335            usage.total_tokens,
336            min_tokens,
337            max_tokens
338        );
339    }
340
341    /// Create test data for batch testing
342    fn create_test_prompts(count: usize) -> Vec<String> {
343        (0..count)
344            .map(|i| format!("Test prompt number {}", i + 1))
345            .collect()
346    }
347
348    /// Measure execution time of an async operation
349    async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
350    where
351        F: std::future::Future<Output = std::result::Result<T, E>>,
352    {
353        let start = Instant::now();
354        let result = operation.await;
355        let duration = start.elapsed();
356        (result, duration)
357    }
358
359    /// Create a mock response with custom token usage
360    fn create_mock_response_with_usage(
361        content: &str,
362        prompt_tokens: i32,
363        completion_tokens: i32,
364    ) -> String {
365        serde_json::json!({
366            "id": "chatcmpl-test",
367            "object": "chat.completion",
368            "created": 1677652288,
369            "model": "gpt-3.5-turbo",
370            "choices": [{
371                "index": 0,
372                "message": {
373                    "role": "assistant",
374                    "content": content
375                },
376                "finish_reason": "stop"
377            }],
378            "usage": {
379                "prompt_tokens": prompt_tokens,
380                "completion_tokens": completion_tokens,
381                "total_tokens": prompt_tokens + completion_tokens
382            }
383        })
384        .to_string()
385    }
386 }
387
388 /// Token usage information for testing
389 #[derive(Debug, Clone, Serialize, Deserialize)]
390 struct TokenUsage {
391    prompt_tokens: i32,
392    completion_tokens: i32,
393    total_tokens: i32,
394 }
395
396 /// Integration test runner for live API testing
397 struct IntegrationTestRunner {
398    client: Client,
399    test_results: Vec<IntegrationTestResult>,
400 }
401
402 /// Result of an integration test
403 #[derive(Debug, Clone)]
404 struct IntegrationTestResult {
405    test_name: String,
406    success: bool,
407    duration: Duration,
408    error_message: Option<String>,
409    response_data: Option<String>,
410 }
411
412 impl IntegrationTestRunner {
413    /// Create a new integration test runner
414    fn new(client: Client) -> Self {
415        Self {
416            client,
417            test_results: Vec::new(),
418        }
419    }
420
421    /// Run a basic chat completion test
422    async fn test_basic_chat_completion(&mut self) -> Result<()> {
423        let test_name = "basic_chat_completion";
424        info!("Running integration test: {}", test_name);
425
426        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
427            // Note: This would use real API in integration tests
428            // self.client.chat_simple("Hello, world!").await
429
430            // For demonstration, we'll simulate a successful response
431            Ok("Hello! How can I help you today?".to_string())
432        })
433        .await;
434
435        let test_result = match result {
436            Ok(response) => {
437                info!(" Basic chat completion test passed in {:?}", duration);
438                IntegrationTestResult {
439                    test_name: test_name.to_string(),
440                    success: true,
441                    duration,
442                    error_message: None,
443                    response_data: Some(response),
444                }
445            }
446            Err(e) => {
447                error!(" Basic chat completion test failed: {}", e);
448                IntegrationTestResult {
449                    test_name: test_name.to_string(),
450                    success: false,
451                    duration,
452                    error_message: Some(e.to_string()),
453                    response_data: None,
454                }
455            }
456        };
457
458        self.test_results.push(test_result);
459        Ok(())
460    }
461
462    /// Test streaming functionality
463    async fn test_streaming_completion(&mut self) -> Result<()> {
464        let test_name = "streaming_completion";
465        info!("Running integration test: {}", test_name);
466
467        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
468            // Note: This would use real streaming API in integration tests
469            // let mut stream = self.client.chat().user("Tell me a story").stream().await?;
470            // let mut chunks = Vec::new();
471            // while let Some(chunk) = stream.next().await {
472            //     chunks.push(chunk?.content());
473            // }
474            // Ok(chunks.join(""))
475
476            // For demonstration, simulate streaming chunks
477            let chunks = vec!["Once", " upon", " a", " time..."];
478            Ok(chunks.join(""))
479        })
480        .await;
481
482        let test_result = match result {
483            Ok(response) => {
484                info!(" Streaming completion test passed in {:?}", duration);
485                IntegrationTestResult {
486                    test_name: test_name.to_string(),
487                    success: true,
488                    duration,
489                    error_message: None,
490                    response_data: Some(response),
491                }
492            }
493            Err(e) => {
494                error!(" Streaming completion test failed: {}", e);
495                IntegrationTestResult {
496                    test_name: test_name.to_string(),
497                    success: false,
498                    duration,
499                    error_message: Some(e.to_string()),
500                    response_data: None,
501                }
502            }
503        };
504
505        self.test_results.push(test_result);
506        Ok(())
507    }
508
509    /// Test error handling
510    async fn test_error_handling(&mut self) -> Result<()> {
511        let test_name = "error_handling";
512        info!("Running integration test: {}", test_name);
513
514        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
515            // Test with invalid API key to trigger authentication error
516            let bad_config = Config::builder().api_key("invalid-key").build();
517
518            let _bad_client = Client::builder(bad_config)?.build();
519
520            // This should fail with an authentication error
521            // bad_client.chat_simple("Test").await
522
523            // For demonstration, simulate an auth error
524            Err(Error::InvalidRequest("Authentication failed".to_string()))
525        })
526        .await;
527
528        let test_result = match result {
529            Ok(_) => {
530                warn!("Error handling test unexpectedly succeeded");
531                IntegrationTestResult {
532                    test_name: test_name.to_string(),
533                    success: false,
534                    duration,
535                    error_message: Some(
536                        "Expected authentication error but request succeeded".to_string(),
537                    ),
538                    response_data: None,
539                }
540            }
541            Err(e) => {
542                info!(
543                    " Error handling test passed (correctly failed) in {:?}",
544                    duration
545                );
546                IntegrationTestResult {
547                    test_name: test_name.to_string(),
548                    success: true,
549                    duration,
550                    error_message: None,
551                    response_data: Some(format!("Expected error: {}", e)),
552                }
553            }
554        };
555
556        self.test_results.push(test_result);
557        Ok(())
558    }
More examples
examples/auth_patterns.rs (line 98)
94 async fn direct_api_key() -> Result<()> {
95    // Create client with direct API key
96    let api_key = "sk-your-api-key-here"; // Replace with actual key
97    let config = Config::builder().api_key(api_key).build();
98    let client = Client::builder(config)?.build();
99
100    println!("Client created with direct API key");
101
102    // Note: This will fail with invalid key
103    match client.send_chat(client.chat_simple("Hello")).await {
104        Ok(response) => {
105            if let Some(content) = response.content() {
106                println!("Response: {}", content);
107            } else {
108                println!("Response: (no content)");
109            }
110        }
111        Err(e) => println!("Expected error with demo key: {}", e),
112    }
113
114    Ok(())
115 }
116
117 fn organization_config() -> Result<()> {
118    // Configure client with organization ID
119    let config = Config::builder()
120        .api_key("your-api-key")
121        .organization("org-123456789")
122        .build();
123
124    let _client = Client::builder(config)?.build();
125    println!("Client configured with organization ID");
126
127    // Organization ID is sent in headers with all requests
128    // Useful for:
129    // - Usage tracking per organization
130    // - Access control
131    // - Billing segregation
132
133    Ok(())
134 }
135
136 fn project_config() -> Result<()> {
137    // Configure client with project ID
138    let config = Config::builder()
139        .api_key("your-api-key")
140        .project("proj-abc123")
141        .build();
142
143    let _client = Client::builder(config)?.build();
144    println!("Client configured with project ID");
145
146    // Project ID helps with:
147    // - Fine-grained usage tracking
148    // - Project-specific rate limits
149    // - Cost allocation
150
151    Ok(())
152 }
153
154 fn custom_headers() -> Result<()> {
155    // Note: Custom headers are not yet supported in the current API
156    // This would typically be used for:
157    // - Request tracing
158    // - A/B testing
159    // - Custom routing
160
161    let config = Config::builder().api_key("your-api-key").build();
162
163    let _client = Client::builder(config)?.build();
164    println!("Client configured (custom headers not yet supported)");
165
166    // TODO: Add support for custom headers in the future
167    println!("Custom headers feature planned for future implementation");
168
169    Ok(())
170 }
171
172 fn proxy_config() -> Result<()> {
173    // Note: Proxy configuration is not yet supported in the current API
174    // This would typically be used for:
175    // - Enterprise security policies
176    // - Request monitoring
177    // - Network isolation
178
179    let config = Config::builder().api_key("your-api-key").build();
180
181    let _client = Client::builder(config)?.build();
182    println!("Client configured (proxy support not yet available)");
183
184    // TODO: Add proxy support in the future
185    println!("Proxy configuration feature planned for future implementation");
186
187    Ok(())
188 }
189
190 fn multiple_clients() -> Result<()> {
191    use reqwest_middleware::ClientBuilder;
192    use std::time::Duration;
193
194    // Create multiple clients for different use cases
195
196    // Production client with retries and longer timeout
197    let prod_http_client = ClientBuilder::new(
198        reqwest::Client::builder()
199            .timeout(Duration::from_secs(60))
200            .build()
201            .expect("Failed to build reqwest client"),
202    )
203    .build();
204
205    let prod_config = Config::builder()
206        .api_key("prod-api-key")
207        .organization("org-prod")
208        .http_client(prod_http_client)
209        .max_retries(5)
210        .build();
211    let prod_client = Client::builder(prod_config)?.build();
212
213    // Development client with debug logging and shorter timeout
214    let dev_http_client = ClientBuilder::new(
215        reqwest::Client::builder()
216            .timeout(Duration::from_secs(10))
217            .build()
218            .expect("Failed to build reqwest client"),
219    )
220    .build();
221
222    let dev_config = Config::builder()
223        .api_key("dev-api-key")
224        .organization("org-dev")
225        .api_base("https://api.openai-dev.com") // Custom endpoint
226        .http_client(dev_http_client)
227        .build();
228    let dev_client = Client::builder(dev_config)?.build();
229
230    // Test client with mocked responses
231    let test_config = Config::builder()
232        .api_key("test-api-key")
233        .api_base("http://localhost:8080") // Local mock server
234        .build();
235    let _test_client = Client::builder(test_config)?.build();
236
237    println!("Created multiple clients:");
238    println!("- Production client with retries");
239    println!("- Development client with custom endpoint");
240    println!("- Test client with mock server");
241
242    // Use appropriate client based on context
243    let _client = if cfg!(debug_assertions) {
244        &dev_client
245    } else {
246        &prod_client
247    };
248
249    println!(
250        "Using {} client",
251        if cfg!(debug_assertions) {
252            "dev"
253        } else {
254            "prod"
255        }
256    );
257
258    Ok(())
259 }
260
261 fn config_validation() -> Result<()> {
262    // Validate configuration before use
263
264    fn validate_api_key(key: &str) -> bool {
265        // OpenAI API keys typically start with "sk-"
266        key.starts_with("sk-") && key.len() > 20
267    }
268
269    fn validate_org_id(org: &str) -> bool {
270        // Organization IDs typically start with "org-"
271        org.starts_with("org-") && org.len() > 4
272    }
273
274    let api_key = "sk-test-key-123456789";
275    let org_id = "org-12345";
276
277    if !validate_api_key(api_key) {
278        println!("Warning: API key format appears invalid");
279    }
280
281    if !validate_org_id(org_id) {
282        println!("Warning: Organization ID format appears invalid");
283    }
284
285    // Build config only if validation passes
286    if validate_api_key(api_key) {
287        let config = Config::builder()
288            .api_key(api_key)
289            .organization(org_id)
290            .build();
291
292        let _client = Client::builder(config)?.build();
293        println!("Configuration validated and client created");
294    }
295
296    Ok(())
297 }
examples/error_handling.rs (line 198)
195 async fn auth_error_handling() -> Result<()> {
196    // Try with invalid API key
197    let config = Config::builder().api_key("invalid-api-key").build();
198    let invalid_client = Client::builder(config)?.build();
199
200    match invalid_client
201        .send_chat(invalid_client.chat_simple("Hello"))
202        .await
203    {
204        Ok(_) => println!("Unexpected success"),
205        Err(Error::Authentication(message)) => {
206            println!("Authentication failed as expected: {}", message);
207
208            // Suggest remediation
209            println!("Suggestions:");
210            println!("1. Check your OPENAI_API_KEY environment variable");
211            println!("2. Verify API key at https://platform.openai.com/api-keys");
212            println!("3. Ensure your API key has necessary permissions");
213        }
214        Err(e) => println!("Unexpected error type: {}", e),
215    }
216
217    Ok(())
218 }
219
220 async fn network_error_handling() -> Result<()> {
221    use openai_ergonomic::Config;
222    use reqwest_middleware::ClientBuilder;
223
224    // Create a reqwest client with very short timeout to simulate network issues
225    let reqwest_client = reqwest::Client::builder()
226        .timeout(Duration::from_secs(1))
227        .build()
228        .expect("Failed to build reqwest client");
229
230    let http_client = ClientBuilder::new(reqwest_client).build();
231
232    let config = Config::builder()
233        .api_key("test-key")
234        .http_client(http_client)
235        .build();
236
237    let client = Client::builder(config)?.build();
238
239    match client.send_chat(client.chat_simple("Hello")).await {
240        Ok(_) => println!("Unexpected success"),
241        Err(Error::Http(source)) => {
242            println!("Network error as expected: {}", source);
243
244            // Implement exponential backoff
245            let mut backoff = Duration::from_millis(100);
246            for attempt in 1..=3 {
247                println!("Retry attempt {} after {:?}", attempt, backoff);
248                sleep(backoff).await;
249                backoff *= 2;
250
251                // In real scenario, retry with proper timeout
252                // match client.send_chat(client.chat_simple("Hello")).await { ... }
253            }
254        }
255        Err(e) => println!("Other error: {}", e),
256    }
257
258    Ok(())
259 }
examples/retry_patterns.rs (line 408)
388 async fn idempotency_example(_client: &Client) -> Result<()> {
389    // Generate idempotency key
390    let idempotency_key = generate_idempotency_key();
391    println!("Using idempotency key: {}", idempotency_key);
392
393    // Simulate retrying the same request
394    for attempt in 1..=3 {
395        println!("\nAttempt {} with same idempotency key", attempt);
396
397        // In a real implementation, you'd pass the idempotency key in headers
398        let mut headers = std::collections::HashMap::new();
399        headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400        println!("  Would send {} headers", headers.len());
401
402        let config = Config::builder()
403            .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404            .build();
405
406        // Note: Headers (including idempotency key) are not yet supported in current API
407
408        let client_with_idempotency = Client::builder(config)?.build();
409
410        match client_with_idempotency
411            .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412            .await
413        {
414            Ok(response) => {
415                if let Some(content) = response.content() {
416                    println!("Response: {}", content);
417                } else {
418                    println!("Response: (no content)");
419                }
420                // Server should return same response for same idempotency key
421            }
422            Err(e) => println!("Error: {}", e),
423        }
424
425        if attempt < 3 {
426            sleep(Duration::from_secs(1)).await;
427        }
428    }
429
430    Ok(())
431 }
examples/moderations.rs (line 52)
34 async fn main() -> Result<()> {
35    use openai_ergonomic::Config;
36
37    println!("=== Content Moderation Example ===\n");
38
39    // Initialize client
40    let client = if let Ok(c) = Client::from_env() {
41        c.build()
42    } else {
43        println!("Note: OPENAI_API_KEY not found. Running in demo mode.");
44        println!("Set OPENAI_API_KEY to test real API calls.\n");
45        println!("To use the Moderations API:");
46        println!("  let client = Client::from_env()?.build();");
47        println!("  let builder = client.moderations().check(\"text to moderate\");");
48        println!("  let response = client.moderations().create(builder).await?;");
49        println!();
50        println!("Running demonstration examples...\n");
51        // Create a dummy client for demo purposes
52        Client::builder(Config::builder().api_key("demo-key").build())?.build()
53    };
54
55    // Example 1: Basic moderation
56    println!("1. Basic Moderation:");
57    basic_moderation(&client);
58
59    // Example 2: Category detection
60    println!("\n2. Category Detection:");
61    category_detection(&client);
62
63    // Example 3: Custom thresholds
64    println!("\n3. Custom Thresholds:");
65    custom_thresholds(&client);
66
67    // Example 4: Multi-language moderation
68    println!("\n4. Multi-language Moderation:");
69    multilingual_moderation(&client);
70
71    // Example 5: Batch moderation
72    println!("\n5. Batch Moderation:");
73    batch_moderation(&client);
74
75    // Example 6: Response filtering
76    println!("\n6. Response Filtering:");
77    response_filtering(&client).await?;
78
79    // Example 7: Policy enforcement
80    println!("\n7. Policy Enforcement:");
81    policy_enforcement(&client);
82
83    // Example 8: Moderation pipeline
84    println!("\n8. Moderation Pipeline:");
85    moderation_pipeline(&client).await?;
86
87    Ok(())
88 }
examples/http_middleware_retry.rs (line 43)
19 async fn main() -> Result<()> {
20    println!("=== HTTP Middleware with Retry Example ===\n");
21
22    // Example 1: Basic client with retry middleware
23    println!("1. Creating client with retry middleware");
24
25    // Create a retry policy with exponential backoff
26    // This will retry transient errors up to 3 times with exponential delays
27    let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29    // Build an HTTP client with retry middleware
30    let http_client = ClientBuilder::new(reqwest::Client::new())
31        .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32        .build();
33
34    // Create OpenAI client with custom HTTP client
35    let config = Config::builder()
36        .api_key(
37            std::env::var("OPENAI_API_KEY")
38                .expect("OPENAI_API_KEY environment variable must be set"),
39        )
40        .http_client(http_client)
41        .build();
42
43    let client = Client::builder(config)?.build();
44
45    // Use the client normally - retries are handled automatically
46    println!("Sending chat completion request (retries are automatic)...");
47
48    let builder = client.chat_simple("Hello! How are you today?");
49    match client.send_chat(builder).await {
50        Ok(response) => {
51            println!("\nSuccess! Response received:");
52            if let Some(content) = response.content() {
53                println!("{content}");
54            }
55        }
56        Err(e) => {
57            eprintln!("\nError after retries: {e}");
58        }
59    }
60
61    // Example 2: Custom retry policy with more retries and custom delays
62    println!("\n2. Creating client with custom retry policy");
63
64    let custom_retry_policy = ExponentialBackoff::builder()
65        .retry_bounds(
66            std::time::Duration::from_millis(100), // minimum delay
67            std::time::Duration::from_secs(30),    // maximum delay
68        )
69        .build_with_max_retries(5); // up to 5 retries
70
71    let custom_http_client = ClientBuilder::new(
72        reqwest::Client::builder()
73            .timeout(std::time::Duration::from_secs(60))
74            .build()
75            .expect("Failed to build reqwest client"),
76    )
77    .with(RetryTransientMiddleware::new_with_policy(
78        custom_retry_policy,
79    ))
80    .build();
81
82    let custom_config = Config::builder()
83        .api_key(
84            std::env::var("OPENAI_API_KEY")
85                .expect("OPENAI_API_KEY environment variable must be set"),
86        )
87        .http_client(custom_http_client)
88        .build();
89
90    let custom_client = Client::builder(custom_config)?.build();
91
92    println!("Sending request with custom retry policy (up to 5 retries)...");
93
94    let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95    match custom_client.send_chat(builder).await {
96        Ok(response) => {
97            println!("\nSuccess! Response received:");
98            if let Some(content) = response.content() {
99                println!("{content}");
100            }
101        }
102        Err(e) => {
103            eprintln!("\nError after all retries: {e}");
104        }
105    }
106
107    // Example 3: Using the builder pattern for more complex requests
108    println!("\n3. Using builder pattern with retry middleware");
109
110    let builder = custom_client
111        .responses()
112        .user("What are the three laws of robotics?")
113        .max_completion_tokens(200)
114        .temperature(0.7);
115
116    let response = custom_client.send_responses(builder).await?;
117
118    println!("\nResponse received:");
119    if let Some(content) = response.content() {
120        println!("{content}");
121    }
122
123    println!("\nToken usage:");
124    if let Some(usage) = response.usage() {
125        let prompt = usage.prompt_tokens;
126        let completion = usage.completion_tokens;
127        let total = usage.total_tokens;
128        println!("  Prompt tokens: {prompt}");
129        println!("  Completion tokens: {completion}");
130        println!("  Total tokens: {total}");
131    }
132
133    println!("\n=== Example completed successfully! ===");
134    println!("\nKey benefits of using reqwest-middleware:");
135    println!("  - Automatic retry of transient failures");
136    println!("  - Exponential backoff to avoid overwhelming servers");
137    println!("  - Composable middleware for logging, metrics, etc.");
138    println!("  - Transparent to application code - works with any request");
139
140    Ok(())
141 }
Source

pub fn from_env() -> Result<ClientBuilder>

Create a new client builder with default configuration from environment variables.
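A one-line sketch, assuming OPENAI_API_KEY is set in the environment:

let client = Client::from_env()?.build();
let response = client.send_chat(client.chat_simple("Hello")).await?;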

Examples found in repository
examples/error_handling.rs (line 58)
57 async fn basic_error_handling() {
58    let client = match Client::from_env() {
59        Ok(client_builder) => client_builder.build(),
60        Err(e) => {
61            println!("Failed to create client: {}", e);
62            return;
63        }
64    };
65
66    match client.send_chat(client.chat_simple("Hello")).await {
67        Ok(response) => {
68            if let Some(content) = response.content() {
69                println!("Success: {}", content);
70            } else {
71                println!("Success: (no content)");
72            }
73        }
74        Err(e) => println!("Error: {}", e),
75    }
76 }
77
78 async fn pattern_matching_errors() {
79    let Ok(client_builder) = Client::from_env() else {
80        return;
81    };
82    let client = client_builder.build();
83
84    // Simulate various errors by using invalid parameters
85    let builder = client.chat().user("test");
86    let result = client.send_chat(builder).await;
87
88    match result {
89        Ok(_) => println!("Unexpected success"),
90        Err(e) => match e {
91            Error::Api { message, .. } => {
92                println!("API Error: {}", message);
93            }
94            Error::RateLimit(message) => {
95                println!("Rate limited: {}", message);
96            }
97            Error::Authentication(message) => {
98                println!("Authentication failed: {}", message);
99            }
100            Error::Http(source) => {
101                println!("Network error: {}", source);
102            }
103            Error::Json(source) => {
104                println!("Serialization error: {}", source);
105            }
106            Error::Stream(message) => {
107                println!("Stream error: {}", message);
108            }
109            Error::InvalidRequest(message) => {
110                println!("Invalid request: {}", message);
111            }
112            Error::Config(message) => {
113                println!("Configuration error: {}", message);
114            }
115            _ => {
116                println!("Other error: {}", e);
117            }
118        },
119    }
120 }
121
122 async fn rate_limit_handling() {
123    const MAX_RETRIES: u32 = 3;
124
125    let Ok(client_builder) = Client::from_env() else {
126        return;
127    };
128    let client = client_builder.build();
129
130    // Retry logic for rate limiting
131    let mut retries = 0;
132
133    loop {
134        match client.send_chat(client.chat_simple("Hello")).await {
135            Ok(response) => {
136                if let Some(content) = response.content() {
137                    println!("Success: {}", content);
138                } else {
139                    println!("Success: (no content)");
140                }
141                break;
142            }
143            Err(Error::RateLimit(_message)) => {
144                if retries >= MAX_RETRIES {
145                    println!("Max retries exceeded");
146                    break;
147                }
148
149                let wait_time = Duration::from_secs(1);
150                println!("Rate limited. Waiting {:?} before retry...", wait_time);
151                sleep(wait_time).await;
152                retries += 1;
153            }
154            Err(e) => {
155                println!("Other error: {}", e);
156                break;
157            }
158        }
159    }
160 }
161
162 async fn token_limit_handling() {
163    let Ok(client_builder) = Client::from_env() else {
164        return;
165    };
166    let client = client_builder.build();
167
168    // Generate a very long prompt that might exceed token limits
169    let long_text = "Lorem ipsum ".repeat(10000);
170
171    match client.send_chat(client.chat_simple(&long_text)).await {
172        Ok(_) => println!("Processed long text successfully"),
173        Err(Error::InvalidRequest(message)) if message.contains("token") => {
174            println!("Token limit issue: {}", message);
175
176            // Retry with truncated text
177            let truncated = &long_text[..1000];
178            println!("Retrying with truncated text...");
179
180            match client.send_chat(client.chat_simple(truncated)).await {
181                Ok(response) => {
182                    if let Some(content) = response.content() {
183                        println!("Success with truncated: {}", content);
184                    } else {
185                        println!("Success with truncated: (no content)");
186                    }
187                }
188                Err(e) => println!("Still failed: {}", e),
189            }
190        }
191        Err(e) => println!("Other error: {}", e),
192    }
193 }
194
195 async fn auth_error_handling() -> Result<()> {
196    // Try with invalid API key
197    let config = Config::builder().api_key("invalid-api-key").build();
198    let invalid_client = Client::builder(config)?.build();
199
200    match invalid_client
201        .send_chat(invalid_client.chat_simple("Hello"))
202        .await
203    {
204        Ok(_) => println!("Unexpected success"),
205        Err(Error::Authentication(message)) => {
206            println!("Authentication failed as expected: {}", message);
207
208            // Suggest remediation
209            println!("Suggestions:");
210            println!("1. Check your OPENAI_API_KEY environment variable");
211            println!("2. Verify API key at https://platform.openai.com/api-keys");
212            println!("3. Ensure your API key has necessary permissions");
213        }
214        Err(e) => println!("Unexpected error type: {}", e),
215    }
216
217    Ok(())
218 }
219
220 async fn network_error_handling() -> Result<()> {
221    use openai_ergonomic::Config;
222    use reqwest_middleware::ClientBuilder;
223
224    // Create a reqwest client with very short timeout to simulate network issues
225    let reqwest_client = reqwest::Client::builder()
226        .timeout(Duration::from_secs(1))
227        .build()
228        .expect("Failed to build reqwest client");
229
230    let http_client = ClientBuilder::new(reqwest_client).build();
231
232    let config = Config::builder()
233        .api_key("test-key")
234        .http_client(http_client)
235        .build();
236
237    let client = Client::builder(config)?.build();
238
239    match client.send_chat(client.chat_simple("Hello")).await {
240        Ok(_) => println!("Unexpected success"),
241        Err(Error::Http(source)) => {
242            println!("Network error as expected: {}", source);
243
244            // Implement exponential backoff
245            let mut backoff = Duration::from_millis(100);
246            for attempt in 1..=3 {
247                println!("Retry attempt {} after {:?}", attempt, backoff);
248                sleep(backoff).await;
249                backoff *= 2;
250
251                // In real scenario, retry with proper timeout
252                // match client.send_chat(client.chat_simple("Hello")).await { ... }
253            }
254        }
255        Err(e) => println!("Other error: {}", e),
256    }
257
258    Ok(())
259 }
260
261 async fn custom_error_context() -> Result<()> {
262    let client = Client::from_env()?.build();
263
264    // Wrap errors with custom context
265    let result = client
266        .send_chat(client.chat_simple("Analyze this data"))
267        .await
268        .map_err(|e| {
269            eprintln!("Context: Failed during data analysis task");
270            eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271            eprintln!("Original error: {}", e);
272            e
273        })?;
274
275    if let Some(content) = result.content() {
276        println!("Result: {}", content);
277    } else {
278        println!("Result: (no content)");
279    }
280    Ok(())
281 }
282
283 async fn error_recovery_strategies() -> Result<()> {
284    let client = Client::from_env()?.build();
285
286    // Strategy 1: Fallback to simpler model
287    let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288    println!("Fallback strategy result: {}", result);
289
290    // Strategy 2: Circuit breaker pattern
291    let circuit_breaker = CircuitBreaker::new();
292    if circuit_breaker.is_open() {
293        println!("Circuit breaker is open, skipping API calls");
294        return Ok(());
295    }
296
297    match client.send_chat(client.chat_simple("Test")).await {
298        Ok(response) => {
299            circuit_breaker.record_success();
300            if let Some(content) = response.content() {
301                println!("Circuit breaker success: {}", content);
302            } else {
303                println!("Circuit breaker success: (no content)");
304            }
305        }
306        Err(e) => {
307            circuit_breaker.record_failure();
308            println!("Circuit breaker failure: {}", e);
309        }
310    }
311
312    // Strategy 3: Request hedging (parallel requests with first success wins)
313    let hedge_result = hedged_request(&client).await?;
314    println!("Hedged request result: {}", hedge_result);
315
316    Ok(())
317 }
More examples
examples/chat_streaming.rs (line 22)
18 async fn main() -> Result<()> {
19    println!("=== Chat Streaming Examples ===\n");
20
21    // Initialize client
22    let client = Client::from_env()?.build();
23
24    // Example 1: Basic streaming
25    println!("1. Basic Streaming:");
26    basic_streaming(&client).await?;
27
28    // Example 2: Streaming with parameters
29    println!("\n2. Streaming with Parameters:");
30    streaming_with_parameters(&client).await?;
31
32    // Example 3: Collect full content
33    println!("\n3. Collect Full Content:");
34    collect_content(&client).await?;
35
36    // Example 4: Stream with system message
37    println!("\n4. Stream with System Message:");
38    streaming_with_system(&client).await?;
39
40    // Example 5: Multiple user turns
41    println!("\n5. Multiple User Turns:");
42    multiple_turns(&client).await?;
43
44    println!("\n=== All examples completed successfully ===");
45
46    Ok(())
47 }
examples/assistants_code_interpreter.rs (line 48)
44 async fn main() -> Result<(), Box<dyn std::error::Error>> {
45    println!(" OpenAI Ergonomic - Code Interpreter Assistant Example\n");
46
47    // Initialize client from environment variables
48    let _client = match Client::from_env() {
49        Ok(client_builder) => {
50            println!(" Client initialized successfully");
51            client_builder.build()
52        }
53        Err(e) => {
54            eprintln!(" Failed to initialize client: {e}");
55            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
56            return Err(e.into());
57        }
58    };
59
60    // Demonstrate different code interpreter use cases
61    run_data_analysis_example()?;
62    run_mathematical_computation_example()?;
63    run_visualization_example()?;
64    run_file_processing_example()?;
65
66    println!("\n Code Interpreter examples completed successfully!");
67    Ok(())
68 }
examples/assistants_file_search.rs (line 58)
54 async fn main() -> Result<(), Box<dyn std::error::Error>> {
55    println!(" OpenAI Ergonomic - File Search Assistant Example (RAG)\n");
56
57    // Initialize client from environment variables
58    let _client = match Client::from_env() {
59        Ok(client_builder) => {
60            println!(" Client initialized successfully");
61            client_builder.build()
62        }
63        Err(e) => {
64            eprintln!(" Failed to initialize client: {e}");
65            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
66            return Err(e.into());
67        }
68    };
69
70    // Demonstrate different RAG use cases
71    run_knowledge_base_example()?;
72    run_document_qa_example()?;
73    run_research_assistant_example()?;
74    run_citation_example()?;
75    run_multi_document_analysis_example()?;
76
77    println!("\n File Search RAG examples completed successfully!");
78    Ok(())
79 }
examples/vector_stores.rs (line 57)
53 async fn main() -> Result<(), Box<dyn std::error::Error>> {
54    println!(" OpenAI Ergonomic - Vector Stores Example\n");
55
56    // Initialize client from environment variables
57    let _client = match Client::from_env() {
58        Ok(client_builder) => {
59            println!(" Client initialized successfully");
60            client_builder.build()
61        }
62        Err(e) => {
63            eprintln!(" Failed to initialize client: {e}");
64            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
65            return Err(e.into());
66        }
67    };
68
69    // Demonstrate different vector store use cases
70    run_basic_vector_store_example()?;
71    run_document_management_example()?;
72    run_semantic_search_example()?;
73    run_enterprise_knowledge_base_example()?;
74    run_vector_store_lifecycle_example()?;
75    run_advanced_search_patterns_example()?;
76
77    println!("\n Vector Stores examples completed successfully!");
78    Ok(())
79 }
examples/tool_calling.rs (line 97)
95 async fn main() -> Result<()> {
96    // Initialize client from environment
97    let client = Client::from_env()?.build();
98
99    println!("=== Tool Calling Example ===\n");
100
101    // Example 1: Simple tool call
102    println!("1. Simple Tool Call:");
103    simple_tool_call(&client).await?;
104
105    // Example 2: Multiple tools
106    println!("\n2. Multiple Tools:");
107    multiple_tools(&client).await?;
108
109    // Example 3: Tool choice control
110    println!("\n3. Tool Choice Control:");
111    tool_choice_control(&client).await?;
112
113    // Example 4: Conversation with tool calls
114    println!("\n4. Conversation with Tool Calls:");
115    conversation_with_tools(&client).await?;
116
117    // Example 5: Streaming with tools (simplified)
118    println!("\n5. Streaming with Tools (Simplified):");
119    streaming_with_tools(&client);
120
121    // Example 6: Parallel tool calls (simplified)
122    println!("\n6. Parallel Tool Calls (Simplified):");
123    parallel_tool_calls(&client).await?;
124
125    Ok(())
126 }
Source

impl<T> Client<T>

Source

pub fn config(&self) -> &Config

Get a reference to the client configuration.
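For instance, to inspect the effective settings (accessors as used in the repository examples below):

let client = Client::from_env()?.build();
println!("base URL: {:?}", client.config().base_url());
println!("organization: {:?}", client.config().organization_id());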

Examples found in repository
examples/audio_speech.rs (line 403)
401 fn create_configuration(client: &Client) -> Configuration {
402    let mut configuration = Configuration::new();
403    configuration.bearer_access_token = Some(client.config().api_key().to_string());
404
405    if let Some(base_url) = client.config().base_url() {
406        configuration.base_path = base_url.to_string();
407    }
408
409    if let Some(org_id) = client.config().organization_id() {
410        configuration.user_agent = Some(format!(
411            "openai-ergonomic/{} org/{}",
412            env!("CARGO_PKG_VERSION"),
413            org_id
414        ));
415    }
416
417    configuration
418 }
More examples
examples/audio_transcription.rs (line 572)
570 fn create_configuration(client: &Client) -> Configuration {
571    let mut configuration = Configuration::new();
572    configuration.bearer_access_token = Some(client.config().api_key().to_string());
573
574    if let Some(base_url) = client.config().base_url() {
575        configuration.base_path = base_url.to_string();
576    }
577
578    if let Some(org_id) = client.config().organization_id() {
579        configuration.user_agent = Some(format!(
580            "openai-ergonomic/{} org/{}",
581            env!("CARGO_PKG_VERSION"),
582            org_id
583        ));
584    }
585
586    configuration
587 }
Source

pub fn http_client(&self) -> &HttpClient

Get a reference to the HTTP client.
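A hedged sketch, assuming HttpClient is the reqwest-middleware client installed via Config::builder().http_client(..) as in the retry examples above; the /v1/models URL is only illustrative:

// Reuse the client's middleware stack (retries, logging, etc.) for a custom request.
let response = client
    .http_client()
    .get("https://api.openai.com/v1/models")
    .bearer_auth(client.config().api_key())
    .send()
    .await?;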

Source

impl<T: Default + Send + Sync + 'static> Client<T>

Source

pub fn chat(&self) -> ChatCompletionBuilder

Create a chat completion builder.
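A typical call chain (every method shown appears in the repository examples below):

let builder = client
    .chat()
    .system("You are a helpful assistant")
    .user("What is the capital of France?")
    .temperature(0.7)
    .max_tokens(100);
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}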

Examples found in repository
examples/chat_streaming.rs (line 52)
49 async fn basic_streaming(client: &Client) -> Result<()> {
50    println!("Question: Tell me a short joke");
51
52    let builder = client.chat().user("Tell me a short joke");
53
54    let mut stream = client.send_chat_stream(builder).await?;
55
56    print!("Response: ");
57    while let Some(chunk) = stream.next().await {
58        let chunk = chunk?;
59        if let Some(content) = chunk.content() {
60            print!("{}", content);
61        }
62    }
63    println!();
64
65    Ok(())
66 }
67
68 async fn streaming_with_parameters(client: &Client) -> Result<()> {
69    println!("Question: Write a creative tagline for a bakery");
70
71    let builder = client
72        .chat()
73        .user("Write a creative tagline for a bakery")
74        .temperature(0.9)
75        .max_tokens(50);
76
77    let mut stream = client.send_chat_stream(builder).await?;
78
79    print!("Response: ");
80    while let Some(chunk) = stream.next().await {
81        let chunk = chunk?;
82        if let Some(content) = chunk.content() {
83            print!("{}", content);
84        }
85    }
86    println!();
87
88    Ok(())
89 }
90
91 async fn collect_content(client: &Client) -> Result<()> {
92    println!("Question: What is the capital of France?");
93
94    let builder = client.chat().user("What is the capital of France?");
95
96    let mut stream = client.send_chat_stream(builder).await?;
97
98    // Manually collect all content
99    let mut content = String::new();
100    while let Some(chunk) = stream.next().await {
101        let chunk = chunk?;
102        if let Some(text) = chunk.content() {
103            content.push_str(text);
104        }
105    }
106    println!("Full response: {}", content);
107
108    Ok(())
109 }
110
111 async fn streaming_with_system(client: &Client) -> Result<()> {
112    println!("System: You are a helpful assistant that speaks like a pirate");
113    println!("Question: Tell me about the weather");
114
115    let builder = client
116        .chat()
117        .system("You are a helpful assistant that speaks like a pirate")
118        .user("Tell me about the weather")
119        .max_tokens(100);
120
121    let mut stream = client.send_chat_stream(builder).await?;
122
123    print!("Response: ");
124    while let Some(chunk) = stream.next().await {
125        let chunk = chunk?;
126        if let Some(content) = chunk.content() {
127            print!("{}", content);
128        }
129    }
130    println!();
131
132    Ok(())
133 }
134
135 async fn multiple_turns(client: &Client) -> Result<()> {
136    println!("Building a conversation with multiple turns...\n");
137
138    // First turn
139    println!("User: What is 2+2?");
140    let builder = client.chat().user("What is 2+2?");
141
142    let mut stream = client.send_chat_stream(builder).await?;
143
144    print!("Assistant: ");
145    let mut first_response = String::new();
146    while let Some(chunk) = stream.next().await {
147        let chunk = chunk?;
148        if let Some(content) = chunk.content() {
149            print!("{}", content);
150            first_response.push_str(content);
151        }
152    }
153    println!();
154
155    // Second turn - continuing the conversation
156    println!("\nUser: Now multiply that by 3");
157    let builder = client
158        .chat()
159        .user("What is 2+2?")
160        .assistant(&first_response)
161        .user("Now multiply that by 3");
162
163    let mut stream = client.send_chat_stream(builder).await?;
164
165    print!("Assistant: ");
166    while let Some(chunk) = stream.next().await {
167        let chunk = chunk?;
168        if let Some(content) = chunk.content() {
169            print!("{}", content);
170        }
171    }
172    println!();
173
174    Ok(())
175 }
More examples
examples/langfuse_streaming.rs (line 97)
94 async fn basic_streaming(client: &Client<LangfuseState<Span>>) -> Result<()> {
95    println!("Question: Tell me a short joke");
96
97    let builder = client.chat().user("Tell me a short joke");
98
99    let mut stream = client.send_chat_stream(builder).await?;
100
101    print!("Response: ");
102    let mut chunk_count = 0;
103    while let Some(chunk) = stream.next().await {
104        let chunk = chunk?;
105        if let Some(content) = chunk.content() {
106            print!("{}", content);
107            chunk_count += 1;
108        }
109    }
110    println!(
111        "\n(Received {} chunks, all traced to Langfuse)",
112        chunk_count
113    );
114
115    Ok(())
116 }
117
118 async fn streaming_with_parameters(client: &Client<LangfuseState<Span>>) -> Result<()> {
119    println!("Question: Write a creative tagline for a bakery");
120
121    let builder = client
122        .chat()
123        .user("Write a creative tagline for a bakery")
124        .temperature(0.9)
125        .max_tokens(50);
126
127    let mut stream = client.send_chat_stream(builder).await?;
128
129    print!("Response: ");
130    let mut chunk_count = 0;
131    while let Some(chunk) = stream.next().await {
132        let chunk = chunk?;
133        if let Some(content) = chunk.content() {
134            print!("{}", content);
135            chunk_count += 1;
136        }
137    }
138    println!(
139        "\n(Received {} chunks, all traced to Langfuse)",
140        chunk_count
141    );
142
143    Ok(())
144 }
145
146 async fn collect_content(client: &Client<LangfuseState<Span>>) -> Result<()> {
147    println!("Question: What is the capital of France?");
148
149    let builder = client.chat().user("What is the capital of France?");
150
151    let mut stream = client.send_chat_stream(builder).await?;
152
153    // Manually collect content (interceptor hooks are still called for each chunk)
154    let mut content = String::new();
155    while let Some(chunk) = stream.next().await {
156        let chunk = chunk?;
157        if let Some(text) = chunk.content() {
158            content.push_str(text);
159        }
160    }
161    println!("Full response: {}", content);
162    println!("(All chunks were traced to Langfuse during collection)");
163
164    Ok(())
165 }
examples/tool_calling.rs (line 130)
128 async fn simple_tool_call(client: &Client) -> Result<()> {
129    let builder = client
130        .chat()
131        .user("What's the weather like in San Francisco?")
132        .tools(vec![get_weather_tool()]);
133    let response = client.send_chat(builder).await?;
134
135    // Check for tool calls
136    let tool_calls = response.tool_calls();
137    if !tool_calls.is_empty() {
138        for tool_call in tool_calls {
139            println!("Tool called: {}", tool_call.function_name());
140            println!("Arguments: {}", tool_call.function_arguments());
141
142            // Execute the function
143            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144            let result = execute_weather_function(params)?;
145            println!("Function result: {}", result);
146        }
147    }
148
149    Ok(())
150 }
151
152 async fn multiple_tools(client: &Client) -> Result<()> {
153    let builder = client
154        .chat()
155        .user("What's the weather in NYC and what time is it there?")
156        .tools(vec![get_weather_tool(), get_time_tool()]);
157    let response = client.send_chat(builder).await?;
158
159    for tool_call in response.tool_calls() {
160        match tool_call.function_name() {
161            "get_weather" => {
162                let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163                let result = execute_weather_function(params)?;
164                println!("Weather result: {}", result);
165            }
166            "get_current_time" => {
167                let params: serde_json::Value =
168                    serde_json::from_str(tool_call.function_arguments())?;
169                if let Some(timezone) = params["timezone"].as_str() {
170                    let result = execute_time_function(timezone);
171                    println!("Time result: {}", result);
172                }
173            }
174            _ => println!("Unknown tool: {}", tool_call.function_name()),
175        }
176    }
177
178    Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182    // Force specific tool
183    println!("Forcing weather tool:");
184    let builder = client
185        .chat()
186        .user("Tell me about Paris")
187        .tools(vec![get_weather_tool(), get_time_tool()])
188        .tool_choice(ToolChoiceHelper::specific("get_weather"));
189    let response = client.send_chat(builder).await?;
190
191    for tool_call in response.tool_calls() {
192        println!("Forced tool: {}", tool_call.function_name());
193    }
194
195    // Disable tools
196    println!("\nDisabling tools:");
197    let builder = client
198        .chat()
199        .user("What's the weather?")
200        .tools(vec![get_weather_tool()])
201        .tool_choice(ToolChoiceHelper::none());
202    let response = client.send_chat(builder).await?;
203
204    if let Some(content) = response.content() {
205        println!("Response without tools: {}", content);
206    }
207
208    Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267}
268
269fn streaming_with_tools(_client: &Client) {
270    println!("Streaming response with tools:");
271
272    // Note: Streaming with tool calls is more complex and requires
273    // proper handling of partial tool call chunks. For now, this is
274    // a placeholder showing the concept.
275
276    println!("This would demonstrate streaming tool calls if streaming API was available");
277    println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
278}
279
280async fn parallel_tool_calls(client: &Client) -> Result<()> {
281    let builder = client
282        .chat()
283        .user("Check the weather in Tokyo, London, and New York")
284        .tools(vec![get_weather_tool()]);
285    let response = client.send_chat(builder).await?;
286
287    // Modern models can call multiple tools in parallel
288    let tool_calls = response.tool_calls();
289    println!("Parallel tool calls: {}", tool_calls.len());
290
291    // Collect arguments first to avoid lifetime issues
292    let args_vec: Vec<String> = tool_calls
293        .iter()
294        .map(|tc| tc.function_arguments().to_string())
295        .collect();
296
297    // Execute all in parallel using tokio
298    let mut handles = Vec::new();
299    for args in args_vec {
300        let handle = tokio::spawn(async move {
301            let params: WeatherParams = serde_json::from_str(&args)?;
302            execute_weather_function(params)
303        });
304        handles.push(handle);
305    }
306
307    // Wait for all results
308    for (i, handle) in handles.into_iter().enumerate() {
309        match handle.await {
310            Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
311            Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
312            Err(e) => println!("Task {} panicked: {}", i + 1, e),
313        }
314    }
315
316    Ok(())
317}
examples/retry_patterns.rs (line 369)
356async fn fallback_chain(client: &Client) -> Result<()> {
357    // Define fallback chain
358    let strategies = vec![
359        ("GPT-4o", "gpt-4o", 1024),
360        ("GPT-4o-mini", "gpt-4o-mini", 512),
361        ("GPT-3.5", "gpt-3.5-turbo", 256),
362    ];
363
364    let prompt = "Explain quantum computing";
365
366    for (name, _model, max_tokens) in strategies {
367        println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369        let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370        match client.send_chat(builder).await {
371            Ok(response) => {
372                println!("Success with {}", name);
373                if let Some(content) = response.content() {
374                    println!("Response: {}...", content.chars().take(100).collect::<String>());
375                }
376                return Ok(());
377            }
378            Err(e) => {
379                println!("Failed with {}: {}", name, e);
380            }
381        }
382    }
383
384    println!("All fallback strategies exhausted");
385    Ok(())
386}
examples/models.rs (line 185)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160    // Task-specific model recommendations
161    let task_models = vec![
162        ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163        ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164        ("Code generation", "gpt-4o", "Excellent code understanding"),
165        ("Vision tasks", "gpt-4o", "Native vision support"),
166        (
167            "Quick responses",
168            "gpt-4o-mini",
169            "Low latency, good quality",
170        ),
171        (
172            "Bulk processing",
173            "gpt-3.5-turbo",
174            "Best cost/performance ratio",
175        ),
176    ];
177
178    for (task, model, reason) in task_models {
179        println!("Task: {}", task);
180        println!("  Recommended: {}", model);
181        println!("  Reason: {}", reason);
182
183        // Demo the model
184        let builder = client
185            .chat()
186            .user(format!("Say 'Hello from {}'", model))
187            .max_completion_tokens(10);
188        let response = client.send_chat(builder).await?;
189
190        if let Some(content) = response.content() {
191            println!("  Response: {}\n", content);
192        }
193    }
194
195    Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199    let models = get_model_registry();
200    let test_prompt = "Explain the theory of relativity in one sentence";
201    let estimated_input_tokens = 15;
202    let estimated_output_tokens = 50;
203
204    println!("Cost comparison for same task:");
205    println!("Prompt: '{}'\n", test_prompt);
206
207    let mut costs = Vec::new();
208
209    for (name, info) in &models {
210        if !info.deprecated {
211            let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212            let output_cost =
213                (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214            let total_cost = input_cost + output_cost;
215
216            costs.push((name.clone(), total_cost));
217        }
218    }
219
220    costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222    println!("{:<20} {:>15}", "Model", "Estimated Cost");
223    println!("{:-<35}", "");
224    for (model, cost) in costs {
225        println!("{:<20} ${:>14.6}", model, cost);
226    }
227
228    // Demonstrate cheapest vs best
229    println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230    let builder = client.chat().user(test_prompt);
231    let cheap_response = client.send_chat(builder).await?;
232
233    if let Some(content) = cheap_response.content() {
234        println!("Response: {}", content);
235    }
236
237    Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241    use std::time::Instant;
242
243    let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244    let test_prompt = "Write a haiku about programming";
245
246    println!("Performance comparison:");
247    println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248    println!("{:-<45}", "");
249
250    for model in models_to_test {
251        let start = Instant::now();
252
253        let builder = client.chat().user(test_prompt);
254        let response = client.send_chat(builder).await?;
255
256        let elapsed = start.elapsed();
257
258        if let Some(usage) = response.usage() {
259            let total_tokens = f64::from(usage.total_tokens);
260            let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262            println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263        }
264    }
265
266    Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270    // Handle deprecated model migration
271    let deprecated_mappings = HashMap::from([
272        ("text-davinci-003", "gpt-3.5-turbo"),
273        ("gpt-4-32k", "gpt-4o"),
274        ("gpt-4-vision-preview", "gpt-4o"),
275    ]);
276
277    let requested_model = "text-davinci-003"; // Deprecated model
278
279    if let Some(replacement) = deprecated_mappings.get(requested_model) {
280        println!(
281            "Warning: {} is deprecated. Using {} instead.",
282            requested_model, replacement
283        );
284
285        let builder = client.chat().user("Hello from migrated model");
286        let response = client.send_chat(builder).await?;
287
288        if let Some(content) = response.content() {
289            println!("Response from {}: {}", replacement, content);
290        }
291    }
292
293    Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297    // Select model based on runtime conditions
298
299    #[derive(Debug)]
300    struct RequestContext {
301        urgency: Urgency,
302        complexity: Complexity,
303        budget: Budget,
304        needs_vision: bool,
305    }
306
307    #[derive(Debug)]
308    enum Urgency {
309        Low,
310        Medium,
311        High,
312    }
313
314    #[derive(Debug)]
315    enum Complexity {
316        Simple,
317        Moderate,
318        Complex,
319    }
320
321    #[derive(Debug)]
322    enum Budget {
323        Tight,
324        Normal,
325        Flexible,
326    }
327
328    const fn select_model(ctx: &RequestContext) -> &'static str {
329        match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330            // High urgency + simple = fast cheap model, or tight budget = cheapest
331            (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333            // Complex + flexible budget = best model
334            (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336            // Vision required
337            _ if ctx.needs_vision => "gpt-4o",
338
339            // Default balanced choice
340            _ => "gpt-4o-mini",
341        }
342    }
343
344    // Example contexts
345    let contexts = [
346        RequestContext {
347            urgency: Urgency::High,
348            complexity: Complexity::Simple,
349            budget: Budget::Tight,
350            needs_vision: false,
351        },
352        RequestContext {
353            urgency: Urgency::Low,
354            complexity: Complexity::Complex,
355            budget: Budget::Flexible,
356            needs_vision: false,
357        },
358        RequestContext {
359            urgency: Urgency::Medium,
360            complexity: Complexity::Moderate,
361            budget: Budget::Normal,
362            needs_vision: true,
363        },
364    ];
365
366    for (i, ctx) in contexts.iter().enumerate() {
367        let model = select_model(ctx);
368        println!("Context {}: {:?}", i + 1, ctx);
369        println!("  Selected model: {}", model);
370
371        let builder = client
372            .chat()
373            .user(format!("Hello from dynamically selected {}", model))
374            .max_completion_tokens(20);
375        let response = client.send_chat(builder).await?;
376
377        if let Some(content) = response.content() {
378            println!("  Response: {}\n", content);
379        }
380    }
381
382    Ok(())
383}
examples/vision_chat.rs (line 88)
72async fn demonstrate_basic_image_analysis(
73    client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75    println!("  Example 1: Basic Image Analysis");
76    println!("----------------------------------");
77
78    let image_url = SAMPLE_IMAGE_URLS[0];
79    let question = "What do you see in this image? Please describe it in detail.";
80
81    println!("Image URL: {image_url}");
82    println!("Question: {question}");
83    print!("Assistant: ");
84    io::stdout().flush()?;
85
86    // Use the convenient user_with_image_url method
87    let chat_builder = client
88        .chat()
89        .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90        .user_with_image_url(question, image_url)
91        .temperature(0.3);
92
93    let response = client.send_chat(chat_builder).await?;
94
95    if let Some(content) = response.content() {
96        println!("{content}");
97
98        // Show usage information
99        if let Some(usage) = response.usage() {
100            println!("\n Token usage:");
101            println!("  Prompt tokens: {}", usage.prompt_tokens);
102            println!("  Completion tokens: {}", usage.completion_tokens);
103            println!("  Total tokens: {}", usage.total_tokens);
104        }
105    } else {
106        println!("No response content received");
107    }
108
109    println!();
110    Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297    println!("  Example 6: Error Handling Patterns");
298    println!("------------------------------------");
299
300    println!("Testing various error scenarios...\n");
301
302    // Test 1: Invalid image URL
303    println!("Test 1: Invalid image URL");
304    let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306    let invalid_builder = client
307        .chat()
308        .user_with_image_url("What do you see?", invalid_url)
309        .temperature(0.3);
310
311    match client.send_chat(invalid_builder).await {
312        Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313        Err(e) => match &e {
314            Error::Api {
315                status, message, ..
316            } => {
317                println!(" API properly rejected invalid URL ({status}): {message}");
318            }
319            Error::Http(reqwest_err) => {
320                println!(" HTTP error caught: {reqwest_err}");
321            }
322            Error::InvalidRequest(msg) => {
323                println!(" Validation caught invalid URL: {msg}");
324            }
325            _ => {
326                println!("ℹ  Other error type: {e}");
327            }
328        },
329    }
330
331    // Test 2: Empty message with image
332    println!("\nTest 2: Empty text with image");
333    let empty_text_builder = client
334        .chat()
335        .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336        .temperature(0.3);
337
338    match client.send_chat(empty_text_builder).await {
339        Ok(response) => {
340            if let Some(content) = response.content() {
341                println!(
342                    " API handled empty text gracefully: {}",
343                    content.chars().take(50).collect::<String>()
344                );
345            }
346        }
347        Err(e) => {
348            println!("ℹ  Empty text error: {e}");
349        }
350    }
351
352    // Test 3: Malformed base64 data
353    println!("\nTest 3: Malformed base64 image data");
354    let malformed_base64 = "this-is-not-valid-base64!@#$%";
355    let malformed_parts = vec![
356        text_part("What is this?"),
357        image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358    ];
359
360    let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362    match client.send_chat(malformed_builder).await {
363        Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364        Err(e) => match &e {
365            Error::Api {
366                status, message, ..
367            } => {
368                println!(" API properly rejected malformed base64 ({status}): {message}");
369            }
370            _ => {
371                println!("ℹ  Other error for malformed base64: {e}");
372            }
373        },
374    }
375
376    println!("\n  Error handling patterns demonstrated:");
377    println!("  • Invalid image URL handling");
378    println!("  • Empty text with image handling");
379    println!("  • Malformed base64 data validation");
380    println!("  • API error classification");
381    println!("  • Network error handling");
382
383    println!();
384    Ok(())
385}
Source

pub fn chat_simple(&self, message: impl Into<String>) -> ChatCompletionBuilder

Create a chat completion builder with a single user message.
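
§Example

A minimal sketch of the typical flow (assuming `OPENAI_API_KEY` is set in the environment): the builder returned by `chat_simple` is passed to `send_chat`.

let client = Client::from_env()?.build();
let builder = client.chat_simple("Say hello in one word");
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}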

Examples found in repository?
examples/error_handling.rs (line 66)
57async fn basic_error_handling() {
58    let client = match Client::from_env() {
59        Ok(client_builder) => client_builder.build(),
60        Err(e) => {
61            println!("Failed to create client: {}", e);
62            return;
63        }
64    };
65
66    match client.send_chat(client.chat_simple("Hello")).await {
67        Ok(response) => {
68            if let Some(content) = response.content() {
69                println!("Success: {}", content);
70            } else {
71                println!("Success: (no content)");
72            }
73        }
74        Err(e) => println!("Error: {}", e),
75    }
76}
77
78async fn pattern_matching_errors() {
79    let Ok(client_builder) = Client::from_env() else {
80        return;
81    };
82    let client = client_builder.build();
83
84    // Simulate various errors by using invalid parameters
85    let builder = client.chat().user("test");
86    let result = client.send_chat(builder).await;
87
88    match result {
89        Ok(_) => println!("Unexpected success"),
90        Err(e) => match e {
91            Error::Api { message, .. } => {
92                println!("API Error: {}", message);
93            }
94            Error::RateLimit(message) => {
95                println!("Rate limited: {}", message);
96            }
97            Error::Authentication(message) => {
98                println!("Authentication failed: {}", message);
99            }
100            Error::Http(source) => {
101                println!("Network error: {}", source);
102            }
103            Error::Json(source) => {
104                println!("Serialization error: {}", source);
105            }
106            Error::Stream(message) => {
107                println!("Stream error: {}", message);
108            }
109            Error::InvalidRequest(message) => {
110                println!("Invalid request: {}", message);
111            }
112            Error::Config(message) => {
113                println!("Configuration error: {}", message);
114            }
115            _ => {
116                println!("Other error: {}", e);
117            }
118        },
119    }
120}
121
122async fn rate_limit_handling() {
123    const MAX_RETRIES: u32 = 3;
124
125    let Ok(client_builder) = Client::from_env() else {
126        return;
127    };
128    let client = client_builder.build();
129
130    // Retry logic for rate limiting
131    let mut retries = 0;
132
133    loop {
134        match client.send_chat(client.chat_simple("Hello")).await {
135            Ok(response) => {
136                if let Some(content) = response.content() {
137                    println!("Success: {}", content);
138                } else {
139                    println!("Success: (no content)");
140                }
141                break;
142            }
143            Err(Error::RateLimit(_message)) => {
144                if retries >= MAX_RETRIES {
145                    println!("Max retries exceeded");
146                    break;
147                }
148
149                let wait_time = Duration::from_secs(1);
150                println!("Rate limited. Waiting {:?} before retry...", wait_time);
151                sleep(wait_time).await;
152                retries += 1;
153            }
154            Err(e) => {
155                println!("Other error: {}", e);
156                break;
157            }
158        }
159    }
160}
161
162async fn token_limit_handling() {
163    let Ok(client_builder) = Client::from_env() else {
164        return;
165    };
166    let client = client_builder.build();
167
168    // Generate a very long prompt that might exceed token limits
169    let long_text = "Lorem ipsum ".repeat(10000);
170
171    match client.send_chat(client.chat_simple(&long_text)).await {
172        Ok(_) => println!("Processed long text successfully"),
173        Err(Error::InvalidRequest(message)) if message.contains("token") => {
174            println!("Token limit issue: {}", message);
175
176            // Retry with truncated text
177            let truncated = &long_text[..1000];
178            println!("Retrying with truncated text...");
179
180            match client.send_chat(client.chat_simple(truncated)).await {
181                Ok(response) => {
182                    if let Some(content) = response.content() {
183                        println!("Success with truncated: {}", content);
184                    } else {
185                        println!("Success with truncated: (no content)");
186                    }
187                }
188                Err(e) => println!("Still failed: {}", e),
189            }
190        }
191        Err(e) => println!("Other error: {}", e),
192    }
193}
194
195async fn auth_error_handling() -> Result<()> {
196    // Try with invalid API key
197    let config = Config::builder().api_key("invalid-api-key").build();
198    let invalid_client = Client::builder(config)?.build();
199
200    match invalid_client
201        .send_chat(invalid_client.chat_simple("Hello"))
202        .await
203    {
204        Ok(_) => println!("Unexpected success"),
205        Err(Error::Authentication(message)) => {
206            println!("Authentication failed as expected: {}", message);
207
208            // Suggest remediation
209            println!("Suggestions:");
210            println!("1. Check your OPENAI_API_KEY environment variable");
211            println!("2. Verify API key at https://platform.openai.com/api-keys");
212            println!("3. Ensure your API key has necessary permissions");
213        }
214        Err(e) => println!("Unexpected error type: {}", e),
215    }
216
217    Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221    use openai_ergonomic::Config;
222    use reqwest_middleware::ClientBuilder;
223
224    // Create a reqwest client with very short timeout to simulate network issues
225    let reqwest_client = reqwest::Client::builder()
226        .timeout(Duration::from_secs(1))
227        .build()
228        .expect("Failed to build reqwest client");
229
230    let http_client = ClientBuilder::new(reqwest_client).build();
231
232    let config = Config::builder()
233        .api_key("test-key")
234        .http_client(http_client)
235        .build();
236
237    let client = Client::builder(config)?.build();
238
239    match client.send_chat(client.chat_simple("Hello")).await {
240        Ok(_) => println!("Unexpected success"),
241        Err(Error::Http(source)) => {
242            println!("Network error as expected: {}", source);
243
244            // Implement exponential backoff
245            let mut backoff = Duration::from_millis(100);
246            for attempt in 1..=3 {
247                println!("Retry attempt {} after {:?}", attempt, backoff);
248                sleep(backoff).await;
249                backoff *= 2;
250
251                // In real scenario, retry with proper timeout
252                // match client.send_chat(client.chat_simple("Hello")).await { ... }
253            }
254        }
255        Err(e) => println!("Other error: {}", e),
256    }
257
258    Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262    let client = Client::from_env()?.build();
263
264    // Wrap errors with custom context
265    let result = client
266        .send_chat(client.chat_simple("Analyze this data"))
267        .await
268        .map_err(|e| {
269            eprintln!("Context: Failed during data analysis task");
270            eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271            eprintln!("Original error: {}", e);
272            e
273        })?;
274
275    if let Some(content) = result.content() {
276        println!("Result: {}", content);
277    } else {
278        println!("Result: (no content)");
279    }
280    Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284    let client = Client::from_env()?.build();
285
286    // Strategy 1: Fallback to simpler model
287    let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288    println!("Fallback strategy result: {}", result);
289
290    // Strategy 2: Circuit breaker pattern
291    let circuit_breaker = CircuitBreaker::new();
292    if circuit_breaker.is_open() {
293        println!("Circuit breaker is open, skipping API calls");
294        return Ok(());
295    }
296
297    match client.send_chat(client.chat_simple("Test")).await {
298        Ok(response) => {
299            circuit_breaker.record_success();
300            if let Some(content) = response.content() {
301                println!("Circuit breaker success: {}", content);
302            } else {
303                println!("Circuit breaker success: (no content)");
304            }
305        }
306        Err(e) => {
307            circuit_breaker.record_failure();
308            println!("Circuit breaker failure: {}", e);
309        }
310    }
311
312    // Strategy 3: Request hedging (parallel requests with first success wins)
313    let hedge_result = hedged_request(&client).await?;
314    println!("Hedged request result: {}", hedge_result);
315
316    Ok(())
317}
318
319async fn try_with_fallback(client: &Client, primary: &str, _fallback: &str) -> Result<String> {
320    // Try primary model first
321    let builder = client.chat().user("Hello");
322    match client.send_chat(builder).await {
323        Ok(response) => Ok(response.content().unwrap_or("").to_string()),
324        Err(e) => {
325            println!("Primary model failed ({}): {}, trying fallback", primary, e);
326
327            // Try fallback model
328            let fallback_builder = client.chat().user("Hello");
329            client
330                .send_chat(fallback_builder)
331                .await
332                .map(|r| r.content().unwrap_or("").to_string())
333        }
334    }
335}
336
337async fn hedged_request(client: &Client) -> Result<String> {
338    use futures::future::select;
339    use std::pin::pin;
340
341    // Launch two requests in parallel
342    let request1 = async {
343        client
344            .send_chat(client.chat_simple("Hello from request 1"))
345            .await
346    };
347    let request2 = async {
348        client
349            .send_chat(client.chat_simple("Hello from request 2"))
350            .await
351    };
352
353    let fut1 = pin!(request1);
354    let fut2 = pin!(request2);
355
356    // Return first successful response
357    match select(fut1, fut2).await {
358        futures::future::Either::Left((result, _)) => {
359            println!("Request 1 completed first");
360            result.map(|r| r.content().unwrap_or("").to_string())
361        }
362        futures::future::Either::Right((result, _)) => {
363            println!("Request 2 completed first");
364            result.map(|r| r.content().unwrap_or("").to_string())
365        }
366    }
367}
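The `CircuitBreaker` used in `error_recovery_strategies` above is defined further down in the example file and is not shown in this excerpt. Below is a minimal sketch of the pattern with the same surface (`is_open`/`record_success`/`record_failure`), using only `std`; the repository's actual implementation may differ.

use std::sync::Mutex;
use std::time::{Duration, Instant};

struct CircuitBreaker {
    state: Mutex<BreakerState>,
    failure_threshold: u32,
    cooldown: Duration,
}

struct BreakerState {
    consecutive_failures: u32,
    opened_at: Option<Instant>,
}

impl CircuitBreaker {
    fn new() -> Self {
        Self {
            state: Mutex::new(BreakerState {
                consecutive_failures: 0,
                opened_at: None,
            }),
            failure_threshold: 3,
            cooldown: Duration::from_secs(30),
        }
    }

    /// Open means "skip API calls"; the breaker closes again after the cooldown.
    fn is_open(&self) -> bool {
        let state = self.state.lock().unwrap();
        matches!(state.opened_at, Some(opened) if opened.elapsed() < self.cooldown)
    }

    fn record_success(&self) {
        let mut state = self.state.lock().unwrap();
        state.consecutive_failures = 0;
        state.opened_at = None;
    }

    fn record_failure(&self) {
        let mut state = self.state.lock().unwrap();
        state.consecutive_failures += 1;
        if state.consecutive_failures >= self.failure_threshold {
            state.opened_at = Some(Instant::now());
        }
    }
}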
examples/retry_patterns.rs (line 69)
63async fn simple_retry(client: &Client) -> Result<()> {
64    const MAX_RETRIES: u32 = 3;
65
66    for attempt in 1..=MAX_RETRIES {
67        println!("Attempt {}/{}", attempt, MAX_RETRIES);
68
69        match client.send_chat(client.chat_simple("Hello")).await {
70            Ok(response) => {
71                if let Some(content) = response.content() {
72                    println!("Success: {}", content);
73                } else {
74                    println!("Success: (no content)");
75                }
76                return Ok(());
77            }
78            Err(e) if attempt < MAX_RETRIES => {
79                println!("Failed (attempt {}): {}. Retrying...", attempt, e);
80                sleep(Duration::from_secs(1)).await;
81            }
82            Err(e) => {
83                println!("All retries exhausted");
84                return Err(e);
85            }
86        }
87    }
88
89    Ok(())
90}
91
92async fn exponential_backoff(client: &Client) -> Result<()> {
93    const MAX_RETRIES: u32 = 5;
94    const BASE_DELAY: Duration = Duration::from_millis(100);
95    const MAX_DELAY: Duration = Duration::from_secs(32);
96
97    let mut delay = BASE_DELAY;
98
99    for attempt in 1..=MAX_RETRIES {
100        match client
101            .send_chat(client.chat_simple("Hello with backoff"))
102            .await
103        {
104            Ok(response) => {
105                if let Some(content) = response.content() {
106                    println!("Success after {} attempts: {}", attempt, content);
107                } else {
108                    println!("Success after {} attempts: (no content)", attempt);
109                }
110                return Ok(());
111            }
112            Err(Error::RateLimit(_message)) => {
113                // Use default delay for rate limiting
114                let wait_time = delay;
115                println!(
116                    "Rate limited (attempt {}). Waiting {:?}...",
117                    attempt, wait_time
118                );
119                sleep(wait_time).await;
120
121                // Double the delay for next attempt
122                delay = (delay * 2).min(MAX_DELAY);
123            }
124            Err(e) if attempt < MAX_RETRIES => {
125                println!("Error (attempt {}): {}. Waiting {:?}...", attempt, e, delay);
126                sleep(delay).await;
127
128                // Exponential increase with cap
129                delay = (delay * 2).min(MAX_DELAY);
130            }
131            Err(e) => return Err(e),
132        }
133    }
134
135    Ok(())
136}
137
138async fn retry_with_jitter(client: &Client) -> Result<()> {
139    const MAX_RETRIES: u32 = 5;
140    const BASE_DELAY_MS: u64 = 100;
141
142    for attempt in 1..=MAX_RETRIES {
143        match client
144            .send_chat(client.chat_simple("Hello with jitter"))
145            .await
146        {
147            Ok(response) => {
148                if let Some(content) = response.content() {
149                    println!("Success: {}", content);
150                } else {
151                    println!("Success: (no content)");
152                }
153                return Ok(());
154            }
155            Err(e) if attempt < MAX_RETRIES => {
156                // Calculate delay with jitter using random() instead of thread_rng for Send compatibility
157                let base = BASE_DELAY_MS * 2_u64.pow(attempt - 1);
158                let jitter = rand::random::<u64>() % (base / 2 + 1);
159                let delay = Duration::from_millis(base + jitter);
160
161                println!(
162                    "Attempt {} failed: {}. Retrying in {:?} (with jitter)...",
163                    attempt, e, delay
164                );
165                sleep(delay).await;
166            }
167            Err(e) => return Err(e),
168        }
169    }
170
171    Ok(())
172}
173
174async fn circuit_breaker_example(client: &Client) -> Result<()> {
175    let circuit_breaker = Arc::new(CircuitBreaker::new(3, Duration::from_secs(5)));
176
177    for i in 1..=10 {
178        println!("Request {}: ", i);
179
180        // Check circuit state
181        match circuit_breaker
182            .call(|| async {
183                client
184                    .send_chat(client.chat_simple("Circuit breaker test"))
185                    .await
186            })
187            .await
188        {
189            Ok(response) => {
190                if let Some(content) = response.content() {
191                    println!("  Success: {}", content);
192                } else {
193                    println!("  Success: (no content)");
194                }
195            }
196            Err(CircuitBreakerError::Open) => {
197                println!("  Circuit is OPEN - skipping request");
198                sleep(Duration::from_secs(1)).await;
199            }
200            Err(CircuitBreakerError::RequestFailed(e)) => {
201                println!("  Request failed: {}", e);
202            }
203        }
204
205        // Small delay between requests
206        sleep(Duration::from_millis(500)).await;
207    }
208
209    Ok(())
210}
211
212async fn timeout_management(client: &Client) {
213    // Example 1: Per-request timeout
214    println!("Per-request timeout:");
215    match timeout(
216        Duration::from_secs(5),
217        client.send_chat(client.chat_simple("Hello")),
218    )
219    .await
220    {
221        Ok(Ok(response)) => {
222            if let Some(content) = response.content() {
223                println!("Response received: {}", content);
224            } else {
225                println!("Response received: (no content)");
226            }
227        }
228        Ok(Err(e)) => println!("API error: {}", e),
229        Err(_) => println!("Request timed out after 5 seconds"),
230    }
231
232    // Example 2: Deadline-based timeout
233    println!("\nDeadline-based timeout:");
234    let deadline = Instant::now() + Duration::from_secs(10);
235
236    while Instant::now() < deadline {
237        let remaining = deadline - Instant::now();
238        println!("Time remaining: {:?}", remaining);
239
240        match timeout(
241            remaining,
242            client.send_chat(client.chat_simple("Quick response")),
243        )
244        .await
245        {
246            Ok(Ok(response)) => {
247                if let Some(content) = response.content() {
248                    println!("Got response: {}", content);
249                } else {
250                    println!("Got response: (no content)");
251                }
252                break;
253            }
254            Ok(Err(e)) => {
255                println!("Error: {}. Retrying...", e);
256                sleep(Duration::from_secs(1)).await;
257            }
258            Err(_) => {
259                println!("Deadline exceeded");
260                break;
261            }
262        }
263    }
264
265    // Example 3: Adaptive timeout
266    println!("\nAdaptive timeout:");
267    let mut adaptive_timeout = Duration::from_secs(2);
268
269    for _attempt in 1..=3 {
270        let start = Instant::now();
271
272        match timeout(
273            adaptive_timeout,
274            client.send_chat(client.chat_simple("Adaptive")),
275        )
276        .await
277        {
278            Ok(Ok(response)) => {
279                let elapsed = start.elapsed();
280                println!(
281                    "Success in {:?}. Next timeout would be {:?}.",
282                    elapsed,
283                    elapsed * 2
284                );
285                // Adjust timeout based on actual response time for potential future requests
286                // adaptive_timeout = elapsed * 2; // Not used since we break out of the loop
287                if let Some(content) = response.content() {
288                    println!("Response: {}", content);
289                } else {
290                    println!("Response: (no content)");
291                }
292                break;
293            }
294            Ok(Err(e)) => println!("Error: {}", e),
295            Err(_) => {
296                println!(
297                    "Timeout after {:?}. Increasing for next attempt.",
298                    adaptive_timeout
299                );
300                adaptive_timeout *= 2;
301            }
302        }
303    }
304}
305
306async fn request_hedging(client: &Client) -> Result<()> {
307    use futures::future::{select, Either};
308    use std::pin::pin;
309
310    println!("Launching hedged requests...");
311
312    // Launch multiple requests with staggered starts
313    let request1 = async {
314        println!("Request 1 started");
315        client
316            .send_chat(client.chat_simple("Hedged request 1"))
317            .await
318    };
319
320    let request2 = async {
321        sleep(Duration::from_millis(200)).await;
322        println!("Request 2 started (200ms delay)");
323        client
324            .send_chat(client.chat_simple("Hedged request 2"))
325            .await
326    };
327
328    let fut1 = pin!(request1);
329    let fut2 = pin!(request2);
330
331    // Return first successful response
332    match select(fut1, fut2).await {
333        Either::Left((result, _)) => {
334            println!("Request 1 won the race");
335            result.map(|r| {
336                if let Some(content) = r.content() {
337                    println!("Result: {}", content);
338                } else {
339                    println!("Result: (no content)");
340                }
341            })
342        }
343        Either::Right((result, _)) => {
344            println!("Request 2 won the race");
345            result.map(|r| {
346                if let Some(content) = r.content() {
347                    println!("Result: {}", content);
348                } else {
349                    println!("Result: (no content)");
350                }
351            })
352        }
353    }
354}
355
356async fn fallback_chain(client: &Client) -> Result<()> {
357    // Define fallback chain
358    let strategies = vec![
359        ("GPT-4o", "gpt-4o", 1024),
360        ("GPT-4o-mini", "gpt-4o-mini", 512),
361        ("GPT-3.5", "gpt-3.5-turbo", 256),
362    ];
363
364    let prompt = "Explain quantum computing";
365
366    for (name, _model, max_tokens) in strategies {
367        println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369        let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370        match client.send_chat(builder).await {
371            Ok(response) => {
372                println!("Success with {}", name);
373                if let Some(content) = response.content() {
374                    println!("Response: {}...", content.chars().take(100).collect::<String>());
375                }
376                return Ok(());
377            }
378            Err(e) => {
379                println!("Failed with {}: {}", name, e);
380            }
381        }
382    }
383
384    println!("All fallback strategies exhausted");
385    Ok(())
386}
387
388async fn idempotency_example(_client: &Client) -> Result<()> {
389    // Generate idempotency key
390    let idempotency_key = generate_idempotency_key();
391    println!("Using idempotency key: {}", idempotency_key);
392
393    // Simulate retrying the same request
394    for attempt in 1..=3 {
395        println!("\nAttempt {} with same idempotency key", attempt);
396
397        // In a real implementation, you'd pass the idempotency key in headers
398        let mut headers = std::collections::HashMap::new();
399        headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400        println!("  Would send {} headers", headers.len());
401
402        let config = Config::builder()
403            .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404            .build();
405
406        // Note: Headers (including the idempotency key) are not yet supported by the current API
407
408        let client_with_idempotency = Client::builder(config)?.build();
409
410        match client_with_idempotency
411            .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412            .await
413        {
414            Ok(response) => {
415                if let Some(content) = response.content() {
416                    println!("Response: {}", content);
417                } else {
418                    println!("Response: (no content)");
419                }
420                // Server should return same response for same idempotency key
421            }
422            Err(e) => println!("Error: {}", e),
423        }
424
425        if attempt < 3 {
426            sleep(Duration::from_secs(1)).await;
427        }
428    }
429
430    Ok(())
431}
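The `generate_idempotency_key()` helper called above is defined elsewhere in the example file and is not shown in this excerpt. One plausible implementation, assuming the `uuid` crate with its `v4` feature enabled (the repository's actual helper may differ):

/// Assumed sketch: a fresh UUID v4 per logical request makes a good
/// idempotency key, since every retry of that request must reuse the same key.
fn generate_idempotency_key() -> String {
    uuid::Uuid::new_v4().to_string()
}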
examples/auth_patterns.rs (line 80)
61async fn env_var_auth() -> Result<()> {
62    // Standard environment variables:
63    // - OPENAI_API_KEY: Your API key
64    // - OPENAI_ORG_ID: Optional organization ID
65    // - OPENAI_PROJECT_ID: Optional project ID
66    // - OPENAI_BASE_URL: Optional custom base URL
67
68    // Check if environment variables are set
69    if env::var("OPENAI_API_KEY").is_err() {
70        println!("Warning: OPENAI_API_KEY not set");
71        println!("Set it with: export OPENAI_API_KEY=your-key-here");
72        return Ok(());
73    }
74
75    // Create client from environment
76    let client = Client::from_env()?.build();
77    println!("Client created from environment variables");
78
79    // Test the client
80    match client.send_chat(client.chat_simple("Hello")).await {
81        Ok(response) => {
82            if let Some(content) = response.content() {
83                println!("Response: {}", content);
84            } else {
85                println!("Response: (no content)");
86            }
87        }
88        Err(e) => println!("Error: {}", e),
89    }
90
91    Ok(())
92}
93
94async fn direct_api_key() -> Result<()> {
95    // Create client with direct API key
96    let api_key = "sk-your-api-key-here"; // Replace with actual key
97    let config = Config::builder().api_key(api_key).build();
98    let client = Client::builder(config)?.build();
99
100    println!("Client created with direct API key");
101
102    // Note: This will fail with invalid key
103    match client.send_chat(client.chat_simple("Hello")).await {
104        Ok(response) => {
105            if let Some(content) = response.content() {
106                println!("Response: {}", content);
107            } else {
108                println!("Response: (no content)");
109            }
110        }
111        Err(e) => println!("Expected error with demo key: {}", e),
112    }
113
114    Ok(())
115}
examples/langfuse_simple.rs (line 63)
26async fn main() -> Result<(), Box<dyn std::error::Error>> {
27    // Initialize tracing for logging
28    tracing_subscriber::fmt()
29        .with_env_filter(
30            tracing_subscriber::EnvFilter::from_default_env()
31                .add_directive("openai_ergonomic=debug".parse()?),
32        )
33        .init();
34
35    println!(" Initializing OpenAI client with Langfuse observability...\n");
36
37    // 1. Build Langfuse exporter from environment variables
38    let exporter = ExporterBuilder::from_env()?.build()?;
39
40    // 2. Create tracer provider with batch processor
41    let provider = SdkTracerProvider::builder()
42        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
43        .build();
44
45    // Set as global provider
46    global::set_tracer_provider(provider.clone());
47
48    // 3. Get tracer and create interceptor
49    let tracer = provider.tracer("openai-ergonomic");
50    let langfuse_interceptor = LangfuseInterceptor::new(tracer, LangfuseConfig::new());
51
52    // 4. Create the OpenAI client and add the Langfuse interceptor
53    let client = Client::from_env()?
54        .with_interceptor(Box::new(langfuse_interceptor))
55        .build();
56
57    println!(" Client initialized successfully!");
58    println!(" Traces will be sent to Langfuse for monitoring\n");
59
60    // Make a simple chat completion - tracing is automatic!
61    println!(" Making a simple chat completion request...");
62    let request = client
63        .chat_simple("What is 2 + 2? Answer with just the number.")
64        .build()?;
65    let response = client.execute_chat(request).await?;
66
67    println!(" Response: {:?}", response.content());
68
69    println!("\n Done! Check your Langfuse dashboard to see the traces.");
70    println!("   - Look for traces with the operation name 'chat'");
71    println!("   - Each trace includes request/response details and token usage");
72
73    // Shutdown the tracer provider to flush all spans
74    println!("\n⏳ Flushing spans to Langfuse...");
75    provider.shutdown()?;
76
77    Ok(())
78}
examples/azure_comprehensive.rs (line 18)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/azure_openai.rs (line 52)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38    // Initialize logging
39    tracing_subscriber::fmt::init();
40
41    println!("Azure OpenAI Integration Example");
42    println!("=================================\n");
43
44    // Example 1: Using environment variables
45    println!("Example 1: Using environment variables");
46    match Client::from_env() {
47        Ok(client) => {
48            let client = client.build();
49            println!("Client created from environment variables");
50
51            // Make a simple chat request
52            let builder = client.chat_simple("Hello from Azure OpenAI!");
53            match client.send_chat(builder).await {
54                Ok(response) => {
55                    if let Some(content) = response.content() {
56                        println!("Response: {content}");
57                    }
58                }
59                Err(e) => {
60                    println!("Error: {e}");
61                }
62            }
63        }
64        Err(e) => {
65            println!("Could not create client from environment: {e}");
66            println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67        }
68    }
69
70    println!("\n---\n");
71
72    // Example 2: Manual configuration
73    println!("Example 2: Manual configuration");
74
75    // This example shows how to configure Azure `OpenAI` programmatically.
76    // Replace these values with your actual Azure `OpenAI` resource details.
77    let config = Config::builder()
78        .api_key("your-azure-api-key")
79        .api_base("https://my-resource.openai.azure.com")
80        .azure_deployment("gpt-4")
81        .azure_api_version("2024-02-01")
82        .build();
83
84    println!("Config: {config:?}");
85    println!("Is Azure: {}", config.is_azure());
86
87    // Note: This will fail unless you provide valid credentials above
88    // Uncomment the following to test with your actual credentials:
89    /*
90    let client = Client::builder(config)?.build();
91
92    // Simple chat completion
93    let builder = client.chat_simple("Tell me a short joke about Azure");
94    let response = client.send_chat(builder).await?;
95    println!("Response: {:?}", response.content());
96
97    // More advanced chat with custom parameters
98    let builder = client
99        .chat()
100        .user("What are the main features of Azure OpenAI?")
101        .temperature(0.7)
102        .max_tokens(500);
103    let response = client.send_chat(builder).await?;
104
105    println!("\nAdvanced response:");
106    println!("{:?}", response.content());
107
108    // Streaming example (note: full streaming support is still in
109    // development, so treat this block as a sketch of the intended API)
110    use futures::StreamExt;
111
112    println!("\nStreaming example:");
113    let mut stream = client
114        .chat()
115        .user("Count from 1 to 5")
116        .stream()
117        .await?;
118
119    while let Some(chunk) = stream.next().await {
120        print!("{}", chunk?.content());
121    }
122    println!();
123
124    */
125
126    println!("\n---\n");
127
128    // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129    println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130    println!("\nOpenAI:");
131    println!("  - Endpoint: https://api.openai.com/v1");
132    println!("  - Authentication: Bearer token in Authorization header");
133    println!("  - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134    println!("  - Example: client.chat().model('gpt-4').send().await?\n");
135
136    println!("Azure OpenAI:");
137    println!("  - Endpoint: https://{{resource-name}}.openai.azure.com");
138    println!("  - Authentication: api-key header");
139    println!("  - Deployment specification: Use your deployment name");
140    println!("  - API version required as query parameter");
141    println!("  - Example: Configure deployment in Config, then use client normally\n");
142
143    println!("With this library, you only need to configure the endpoint and deployment,");
144    println!("and the library handles all the differences automatically!");
145
146    Ok(())
147}
Source

pub fn chat_with_system( &self, system: impl Into<String>, user: impl Into<String>, ) -> ChatCompletionBuilder

Create a chat completion builder pre-populated with a system message and a user message.
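
A minimal usage sketch (illustrative only, not repository code; it assumes a client built via Client::from_env() and a Tokio async context, with errors propagated via ?):

let client = Client::from_env()?.build();
// Hypothetical prompts; any system/user pair works here.
let builder = client.chat_with_system(
    "You are a concise assistant.",
    "What is ownership in Rust?",
);
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}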

Examples found in repository?
examples/azure_comprehensive.rs (lines 30-33)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
More examples
examples/quickstart.rs (lines 102-105)
37async fn main() -> Result<()> {
38    // Initialize logging to see what's happening under the hood
39    tracing_subscriber::fmt().with_env_filter("info").init();
40
41    println!(" OpenAI Ergonomic Quickstart");
42    println!("==============================\n");
43
44    // ==========================================
45    // 1. ENVIRONMENT SETUP & CLIENT CREATION
46    // ==========================================
47
48    println!(" Step 1: Setting up the client");
49
50    // The simplest way to get started - reads OPENAI_API_KEY from environment
51    let client = match Client::from_env() {
52        Ok(client_builder) => {
53            println!(" Client created successfully!");
54            client_builder.build()
55        }
56        Err(e) => {
57            eprintln!(" Failed to create client: {e}");
58            eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59            eprintln!("   Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60            return Err(e);
61        }
62    };
63
64    // ==========================================
65    // 2. BASIC CHAT COMPLETION
66    // ==========================================
67
68    println!("\n Step 2: Basic chat completion");
69
70    // The simplest way to get a response from ChatGPT
71    let builder = client.chat_simple("What is Rust programming language in one sentence?");
72    let response = client.send_chat(builder).await;
73
74    match response {
75        Ok(chat_response) => {
76            println!(" Got response!");
77            if let Some(content) = chat_response.content() {
78                println!(" AI: {content}");
79            }
80
81            // Show usage information for cost tracking
82            if let Some(usage) = &chat_response.inner().usage {
83                println!(
84                    " Usage: {} prompt + {} completion = {} total tokens",
85                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86                );
87            }
88        }
89        Err(e) => {
90            println!(" Chat completion failed: {e}");
91            // Continue with other examples even if this one fails
92        }
93    }
94
95    // ==========================================
96    // 3. CHAT WITH SYSTEM MESSAGE
97    // ==========================================
98
99    println!("\n Step 3: Chat with system context");
100
101    // System messages help set the AI's behavior and context
102    let builder = client.chat_with_system(
103        "You are a helpful coding mentor who explains things simply",
104        "Explain what a HashMap is in Rust",
105    );
106    let response = client.send_chat(builder).await;
107
108    match response {
109        Ok(chat_response) => {
110            println!(" Got contextual response!");
111            if let Some(content) = chat_response.content() {
112                println!(" Mentor: {content}");
113            }
114        }
115        Err(e) => {
116            println!(" Contextual chat failed: {e}");
117        }
118    }
119
120    // ==========================================
121    // 4. STREAMING RESPONSES
122    // ==========================================
123
124    println!("\n Step 4: Streaming response (real-time)");
125
126    // Streaming lets you see the response as it's being generated
127    // This is great for chatbots and interactive applications
128    print!(" AI is typing");
129    io::stdout().flush().unwrap();
130
131    let builder = client
132        .responses()
133        .user("Write a short haiku about programming")
134        .temperature(0.7)
135        .stream(true);
136    // Note: Full streaming implementation is in development
137    // For now, we'll demonstrate non-streaming responses with real-time simulation
138    let response = client.send_responses(builder).await;
139
140    match response {
141        Ok(chat_response) => {
142            print!(": ");
143            io::stdout().flush().unwrap();
144
145            // Simulate streaming by printing character by character
146            if let Some(content) = chat_response.content() {
147                for char in content.chars() {
148                    print!("{char}");
149                    io::stdout().flush().unwrap();
150                    // Small delay to simulate streaming
151                    tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152                }
153            }
154            println!(); // New line after "streaming"
155        }
156        Err(e) => {
157            println!("\n Failed to get streaming response: {e}");
158        }
159    }
160
161    // ==========================================
162    // 5. FUNCTION/TOOL CALLING
163    // ==========================================
164
165    println!("\n Step 5: Using tools/functions");
166
167    // Tools let the AI call external functions to get real data
168    // Here we define a weather function as an example
169    let weather_tool = tool_function(
170        "get_current_weather",
171        "Get the current weather for a given location",
172        json!({
173            "type": "object",
174            "properties": {
175                "location": {
176                    "type": "string",
177                    "description": "The city name, e.g. 'San Francisco, CA'"
178                },
179                "unit": {
180                    "type": "string",
181                    "enum": ["celsius", "fahrenheit"],
182                    "description": "Temperature unit"
183                }
184            },
185            "required": ["location"]
186        }),
187    );
188
189    let builder = client
190        .responses()
191        .user("What's the weather like in Tokyo?")
192        .tool(weather_tool);
193    let response = client.send_responses(builder).await;
194
195    match response {
196        Ok(chat_response) => {
197            println!(" Got response with potential tool calls!");
198
199            // Check if the AI wants to call our weather function
200            let tool_calls = chat_response.tool_calls();
201            if !tool_calls.is_empty() {
202                println!(" AI requested tool calls:");
203                for tool_call in tool_calls {
204                    let function_name = tool_call.function_name();
205                    println!("   Function: {function_name}");
206                    let function_args = tool_call.function_arguments();
207                    println!("   Arguments: {function_args}");
208
209                    // In a real app, you'd execute the function here
210                    // and send the result back to the AI
211                    println!("    In a real app, you'd call your weather API here");
212                }
213            } else if let Some(content) = chat_response.content() {
214                println!(" AI: {content}");
215            }
216        }
217        Err(e) => {
218            println!(" Tool calling example failed: {e}");
219        }
220    }
221
222    // ==========================================
223    // 6. ERROR HANDLING PATTERNS
224    // ==========================================
225
226    println!("\n Step 6: Error handling patterns");
227
228    // Show how to handle different types of errors gracefully
229    let builder = client.chat_simple(""); // Empty message might cause an error
230    let bad_response = client.send_chat(builder).await;
231
232    match bad_response {
233        Ok(response) => {
234            println!(" Unexpectedly succeeded with empty message");
235            if let Some(content) = response.content() {
236                println!(" AI: {content}");
237            }
238        }
239        Err(Error::Api {
240            status, message, ..
241        }) => {
242            println!(" API Error (HTTP {status}):");
243            println!("   Message: {message}");
244            println!(" This is normal - we sent an invalid request");
245        }
246        Err(Error::RateLimit { .. }) => {
247            println!(" Rate limited - you're sending requests too fast");
248            println!(" In a real app, you'd implement exponential backoff");
249        }
250        Err(Error::Http(_)) => {
251            println!(" HTTP/Network error");
252            println!(" Check your internet connection and API key");
253        }
254        Err(e) => {
255            println!(" Other error: {e}");
256        }
257    }
258
259    // ==========================================
260    // 7. COMPLETE REAL-WORLD EXAMPLE
261    // ==========================================
262
263    println!("\n Step 7: Complete real-world example");
264    println!("Building a simple AI assistant that can:");
265    println!("- Answer questions with context");
266    println!("- Track conversation costs");
267    println!("- Handle errors gracefully");
268
269    let mut total_tokens = 0;
270
271    // Simulate a conversation with context and cost tracking
272    let questions = [
273        "What is the capital of France?",
274        "What's special about that city?",
275        "How many people live there?",
276    ];
277
278    for (i, question) in questions.iter().enumerate() {
279        println!("\n User: {question}");
280
281        let builder = client
282            .responses()
283            .system(
284                "You are a knowledgeable geography expert. Keep answers concise but informative.",
285            )
286            .user(*question)
287            .temperature(0.1); // Lower temperature for more factual responses
288        let response = client.send_responses(builder).await;
289
290        match response {
291            Ok(chat_response) => {
292                if let Some(content) = chat_response.content() {
293                    println!(" Assistant: {content}");
294                }
295
296                // Track token usage for cost monitoring
297                if let Some(usage) = chat_response.usage() {
298                    total_tokens += usage.total_tokens;
299                    println!(
300                        " This exchange: {} tokens (Running total: {})",
301                        usage.total_tokens, total_tokens
302                    );
303                }
304            }
305            Err(e) => {
306                println!(" Question {} failed: {}", i + 1, e);
307                // In a real app, you might retry or log this error
308            }
309        }
310    }
311
312    // ==========================================
313    // 8. WRAP UP & NEXT STEPS
314    // ==========================================
315
316    println!("\n Quickstart Complete!");
317    println!("======================");
318    println!("You've successfully:");
319    println!(" Created an OpenAI client");
320    println!(" Made basic chat completions");
321    println!(" Used streaming responses");
322    println!(" Implemented tool/function calling");
323    println!(" Handled errors gracefully");
324    println!(" Built a complete conversational AI");
325    println!("\n Total tokens used in examples: {total_tokens}");
326    println!(
327        " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328        f64::from(total_tokens) * 0.03 / 1000.0
329    );
330
331    println!("\n Next Steps:");
332    println!("- Check out other examples in the examples/ directory");
333    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334    println!("- Explore advanced features like vision, audio, and assistants");
335    println!("- Build your own AI-powered applications!");
336
337    Ok(())
338}
Source

pub async fn execute_chat( &self, request: CreateChatCompletionRequest, ) -> Result<ChatCompletionResponseWrapper>

Execute a chat completion request.
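
A minimal sketch of the build-then-execute flow (illustrative; assumes an async context and a client configured as in the examples below):

// Build a concrete request from a builder, then execute it directly.
let request = client
    .chat_simple("What is 2 + 2?")
    .build()?;
let response = client.execute_chat(request).await?;
println!("{:?}", response.content());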

Examples found in repository?
examples/langfuse_simple.rs (line 65)
26async fn main() -> Result<(), Box<dyn std::error::Error>> {
27    // Initialize tracing for logging
28    tracing_subscriber::fmt()
29        .with_env_filter(
30            tracing_subscriber::EnvFilter::from_default_env()
31                .add_directive("openai_ergonomic=debug".parse()?),
32        )
33        .init();
34
35    println!(" Initializing OpenAI client with Langfuse observability...\n");
36
37    // 1. Build Langfuse exporter from environment variables
38    let exporter = ExporterBuilder::from_env()?.build()?;
39
40    // 2. Create tracer provider with batch processor
41    let provider = SdkTracerProvider::builder()
42        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
43        .build();
44
45    // Set as global provider
46    global::set_tracer_provider(provider.clone());
47
48    // 3. Get tracer and create interceptor
49    let tracer = provider.tracer("openai-ergonomic");
50    let langfuse_interceptor = LangfuseInterceptor::new(tracer, LangfuseConfig::new());
51
52    // 4. Create the OpenAI client and add the Langfuse interceptor
53    let client = Client::from_env()?
54        .with_interceptor(Box::new(langfuse_interceptor))
55        .build();
56
57    println!(" Client initialized successfully!");
58    println!(" Traces will be sent to Langfuse for monitoring\n");
59
60    // Make a simple chat completion - tracing is automatic!
61    println!(" Making a simple chat completion request...");
62    let request = client
63        .chat_simple("What is 2 + 2? Answer with just the number.")
64        .build()?;
65    let response = client.execute_chat(request).await?;
66
67    println!(" Response: {:?}", response.content());
68
69    println!("\n Done! Check your Langfuse dashboard to see the traces.");
70    println!("   - Look for traces with the operation name 'chat'");
71    println!("   - Each trace includes request/response details and token usage");
72
73    // Shutdown the tracer provider to flush all spans
74    println!("\n⏳ Flushing spans to Langfuse...");
75    provider.shutdown()?;
76
77    Ok(())
78}
More examples
examples/langfuse.rs (line 71)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}
Source

pub async fn send_chat( &self, builder: ChatCompletionBuilder, ) -> Result<ChatCompletionResponseWrapper>

Execute a chat completion builder.
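
A minimal sketch (illustrative; assumes a configured client in an async context):

// send_chat builds the request from the builder and executes it in one call.
let builder = client.chat().user("Hello!").temperature(0.2);
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}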

Examples found in repository?
examples/error_handling.rs (line 66)
57async fn basic_error_handling() {
58    let client = match Client::from_env() {
59        Ok(client_builder) => client_builder.build(),
60        Err(e) => {
61            println!("Failed to create client: {}", e);
62            return;
63        }
64    };
65
66    match client.send_chat(client.chat_simple("Hello")).await {
67        Ok(response) => {
68            if let Some(content) = response.content() {
69                println!("Success: {}", content);
70            } else {
71                println!("Success: (no content)");
72            }
73        }
74        Err(e) => println!("Error: {}", e),
75    }
76}
77
78async fn pattern_matching_errors() {
79    let Ok(client_builder) = Client::from_env() else {
80        return;
81    };
82    let client = client_builder.build();
83
84    // Simulate various errors by using invalid parameters
85    let builder = client.chat().user("test");
86    let result = client.send_chat(builder).await;
87
88    match result {
89        Ok(_) => println!("Unexpected success"),
90        Err(e) => match e {
91            Error::Api { message, .. } => {
92                println!("API Error: {}", message);
93            }
94            Error::RateLimit(message) => {
95                println!("Rate limited: {}", message);
96            }
97            Error::Authentication(message) => {
98                println!("Authentication failed: {}", message);
99            }
100            Error::Http(source) => {
101                println!("Network error: {}", source);
102            }
103            Error::Json(source) => {
104                println!("Serialization error: {}", source);
105            }
106            Error::Stream(message) => {
107                println!("Stream error: {}", message);
108            }
109            Error::InvalidRequest(message) => {
110                println!("Invalid request: {}", message);
111            }
112            Error::Config(message) => {
113                println!("Configuration error: {}", message);
114            }
115            _ => {
116                println!("Other error: {}", e);
117            }
118        },
119    }
120}
121
122async fn rate_limit_handling() {
123    const MAX_RETRIES: u32 = 3;
124
125    let Ok(client_builder) = Client::from_env() else {
126        return;
127    };
128    let client = client_builder.build();
129
130    // Retry logic for rate limiting
131    let mut retries = 0;
132
133    loop {
134        match client.send_chat(client.chat_simple("Hello")).await {
135            Ok(response) => {
136                if let Some(content) = response.content() {
137                    println!("Success: {}", content);
138                } else {
139                    println!("Success: (no content)");
140                }
141                break;
142            }
143            Err(Error::RateLimit(_message)) => {
144                if retries >= MAX_RETRIES {
145                    println!("Max retries exceeded");
146                    break;
147                }
148
149                let wait_time = Duration::from_secs(1);
150                println!("Rate limited. Waiting {:?} before retry...", wait_time);
151                sleep(wait_time).await;
152                retries += 1;
153            }
154            Err(e) => {
155                println!("Other error: {}", e);
156                break;
157            }
158        }
159    }
160}
161
162async fn token_limit_handling() {
163    let Ok(client_builder) = Client::from_env() else {
164        return;
165    };
166    let client = client_builder.build();
167
168    // Generate a very long prompt that might exceed token limits
169    let long_text = "Lorem ipsum ".repeat(10000);
170
171    match client.send_chat(client.chat_simple(&long_text)).await {
172        Ok(_) => println!("Processed long text successfully"),
173        Err(Error::InvalidRequest(message)) if message.contains("token") => {
174            println!("Token limit issue: {}", message);
175
176            // Retry with truncated text
177            let truncated = &long_text[..1000];
178            println!("Retrying with truncated text...");
179
180            match client.send_chat(client.chat_simple(truncated)).await {
181                Ok(response) => {
182                    if let Some(content) = response.content() {
183                        println!("Success with truncated: {}", content);
184                    } else {
185                        println!("Success with truncated: (no content)");
186                    }
187                }
188                Err(e) => println!("Still failed: {}", e),
189            }
190        }
191        Err(e) => println!("Other error: {}", e),
192    }
193}
194
195async fn auth_error_handling() -> Result<()> {
196    // Try with invalid API key
197    let config = Config::builder().api_key("invalid-api-key").build();
198    let invalid_client = Client::builder(config)?.build();
199
200    match invalid_client
201        .send_chat(invalid_client.chat_simple("Hello"))
202        .await
203    {
204        Ok(_) => println!("Unexpected success"),
205        Err(Error::Authentication(message)) => {
206            println!("Authentication failed as expected: {}", message);
207
208            // Suggest remediation
209            println!("Suggestions:");
210            println!("1. Check your OPENAI_API_KEY environment variable");
211            println!("2. Verify API key at https://platform.openai.com/api-keys");
212            println!("3. Ensure your API key has necessary permissions");
213        }
214        Err(e) => println!("Unexpected error type: {}", e),
215    }
216
217    Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221    use openai_ergonomic::Config;
222    use reqwest_middleware::ClientBuilder;
223
224    // Create a reqwest client with very short timeout to simulate network issues
225    let reqwest_client = reqwest::Client::builder()
226        .timeout(Duration::from_secs(1))
227        .build()
228        .expect("Failed to build reqwest client");
229
230    let http_client = ClientBuilder::new(reqwest_client).build();
231
232    let config = Config::builder()
233        .api_key("test-key")
234        .http_client(http_client)
235        .build();
236
237    let client = Client::builder(config)?.build();
238
239    match client.send_chat(client.chat_simple("Hello")).await {
240        Ok(_) => println!("Unexpected success"),
241        Err(Error::Http(source)) => {
242            println!("Network error as expected: {}", source);
243
244            // Implement exponential backoff
245            let mut backoff = Duration::from_millis(100);
246            for attempt in 1..=3 {
247                println!("Retry attempt {} after {:?}", attempt, backoff);
248                sleep(backoff).await;
249                backoff *= 2;
250
251                // In real scenario, retry with proper timeout
252                // match client.send_chat(client.chat_simple("Hello")).await { ... }
253            }
254        }
255        Err(e) => println!("Other error: {}", e),
256    }
257
258    Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262    let client = Client::from_env()?.build();
263
264    // Wrap errors with custom context
265    let result = client
266        .send_chat(client.chat_simple("Analyze this data"))
267        .await
268        .map_err(|e| {
269            eprintln!("Context: Failed during data analysis task");
270            eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271            eprintln!("Original error: {}", e);
272            e
273        })?;
274
275    if let Some(content) = result.content() {
276        println!("Result: {}", content);
277    } else {
278        println!("Result: (no content)");
279    }
280    Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284    let client = Client::from_env()?.build();
285
286    // Strategy 1: Fallback to simpler model
287    let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288    println!("Fallback strategy result: {}", result);
289
290    // Strategy 2: Circuit breaker pattern
291    let circuit_breaker = CircuitBreaker::new();
292    if circuit_breaker.is_open() {
293        println!("Circuit breaker is open, skipping API calls");
294        return Ok(());
295    }
296
297    match client.send_chat(client.chat_simple("Test")).await {
298        Ok(response) => {
299            circuit_breaker.record_success();
300            if let Some(content) = response.content() {
301                println!("Circuit breaker success: {}", content);
302            } else {
303                println!("Circuit breaker success: (no content)");
304            }
305        }
306        Err(e) => {
307            circuit_breaker.record_failure();
308            println!("Circuit breaker failure: {}", e);
309        }
310    }
311
312    // Strategy 3: Request hedging (parallel requests with first success wins)
313    let hedge_result = hedged_request(&client).await?;
314    println!("Hedged request result: {}", hedge_result);
315
316    Ok(())
317}
318
319async fn try_with_fallback(client: &Client, primary: &str, _fallback: &str) -> Result<String> {
320    // Try primary model first
321    let builder = client.chat().user("Hello");
322    match client.send_chat(builder).await {
323        Ok(response) => Ok(response.content().unwrap_or("").to_string()),
324        Err(e) => {
325            println!("Primary model failed ({}): {}, trying fallback", primary, e);
326
327            // Try fallback model
328            let fallback_builder = client.chat().user("Hello");
329            client
330                .send_chat(fallback_builder)
331                .await
332                .map(|r| r.content().unwrap_or("").to_string())
333        }
334    }
335}
336
337async fn hedged_request(client: &Client) -> Result<String> {
338    use futures::future::select;
339    use std::pin::pin;
340
341    // Launch two requests in parallel
342    let request1 = async {
343        client
344            .send_chat(client.chat_simple("Hello from request 1"))
345            .await
346    };
347    let request2 = async {
348        client
349            .send_chat(client.chat_simple("Hello from request 2"))
350            .await
351    };
352
353    let fut1 = pin!(request1);
354    let fut2 = pin!(request2);
355
356    // Return first successful response
357    match select(fut1, fut2).await {
358        futures::future::Either::Left((result, _)) => {
359            println!("Request 1 completed first");
360            result.map(|r| r.content().unwrap_or("").to_string())
361        }
362        futures::future::Either::Right((result, _)) => {
363            println!("Request 2 completed first");
364            result.map(|r| r.content().unwrap_or("").to_string())
365        }
366    }
367}
More examples
examples/tool_calling.rs (line 133)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129    let builder = client
130        .chat()
131        .user("What's the weather like in San Francisco?")
132        .tools(vec![get_weather_tool()]);
133    let response = client.send_chat(builder).await?;
134
135    // Check for tool calls
136    let tool_calls = response.tool_calls();
137    if !tool_calls.is_empty() {
138        for tool_call in tool_calls {
139            println!("Tool called: {}", tool_call.function_name());
140            println!("Arguments: {}", tool_call.function_arguments());
141
142            // Execute the function
143            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144            let result = execute_weather_function(params)?;
145            println!("Function result: {}", result);
146        }
147    }
148
149    Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153    let builder = client
154        .chat()
155        .user("What's the weather in NYC and what time is it there?")
156        .tools(vec![get_weather_tool(), get_time_tool()]);
157    let response = client.send_chat(builder).await?;
158
159    for tool_call in response.tool_calls() {
160        match tool_call.function_name() {
161            "get_weather" => {
162                let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163                let result = execute_weather_function(params)?;
164                println!("Weather result: {}", result);
165            }
166            "get_current_time" => {
167                let params: serde_json::Value =
168                    serde_json::from_str(tool_call.function_arguments())?;
169                if let Some(timezone) = params["timezone"].as_str() {
170                    let result = execute_time_function(timezone);
171                    println!("Time result: {}", result);
172                }
173            }
174            _ => println!("Unknown tool: {}", tool_call.function_name()),
175        }
176    }
177
178    Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182    // Force specific tool
183    println!("Forcing weather tool:");
184    let builder = client
185        .chat()
186        .user("Tell me about Paris")
187        .tools(vec![get_weather_tool(), get_time_tool()])
188        .tool_choice(ToolChoiceHelper::specific("get_weather"));
189    let response = client.send_chat(builder).await?;
190
191    for tool_call in response.tool_calls() {
192        println!("Forced tool: {}", tool_call.function_name());
193    }
194
195    // Disable tools
196    println!("\nDisabling tools:");
197    let builder = client
198        .chat()
199        .user("What's the weather?")
200        .tools(vec![get_weather_tool()])
201        .tool_choice(ToolChoiceHelper::none());
202    let response = client.send_chat(builder).await?;
203
204    if let Some(content) = response.content() {
205        println!("Response without tools: {}", content);
206    }
207
208    Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267}
268
269fn streaming_with_tools(_client: &Client) {
270    println!("Streaming response with tools:");
271
272    // Note: Streaming with tool calls is more complex and requires
273    // proper handling of partial tool call chunks. For now, this is
274    // a placeholder showing the concept.
275
276    println!("This would demonstrate streaming tool calls if streaming API was available");
277    println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
278}
279
280async fn parallel_tool_calls(client: &Client) -> Result<()> {
281    let builder = client
282        .chat()
283        .user("Check the weather in Tokyo, London, and New York")
284        .tools(vec![get_weather_tool()]);
285    let response = client.send_chat(builder).await?;
286
287    // Modern models can call multiple tools in parallel
288    let tool_calls = response.tool_calls();
289    println!("Parallel tool calls: {}", tool_calls.len());
290
291    // Collect arguments first to avoid lifetime issues
292    let args_vec: Vec<String> = tool_calls
293        .iter()
294        .map(|tc| tc.function_arguments().to_string())
295        .collect();
296
297    // Execute all in parallel using tokio
298    let mut handles = Vec::new();
299    for args in args_vec {
300        let handle = tokio::spawn(async move {
301            let params: WeatherParams = serde_json::from_str(&args)?;
302            execute_weather_function(params)
303        });
304        handles.push(handle);
305    }
306
307    // Wait for all results
308    for (i, handle) in handles.into_iter().enumerate() {
309        match handle.await {
310            Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
311            Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
312            Err(e) => println!("Task {} panicked: {}", i + 1, e),
313        }
314    }
315
316    Ok(())
317}
examples/retry_patterns.rs (line 69)
63async fn simple_retry(client: &Client) -> Result<()> {
64    const MAX_RETRIES: u32 = 3;
65
66    for attempt in 1..=MAX_RETRIES {
67        println!("Attempt {}/{}", attempt, MAX_RETRIES);
68
69        match client.send_chat(client.chat_simple("Hello")).await {
70            Ok(response) => {
71                if let Some(content) = response.content() {
72                    println!("Success: {}", content);
73                } else {
74                    println!("Success: (no content)");
75                }
76                return Ok(());
77            }
78            Err(e) if attempt < MAX_RETRIES => {
79                println!("Failed (attempt {}): {}. Retrying...", attempt, e);
80                sleep(Duration::from_secs(1)).await;
81            }
82            Err(e) => {
83                println!("All retries exhausted");
84                return Err(e);
85            }
86        }
87    }
88
89    Ok(())
90}
91
92async fn exponential_backoff(client: &Client) -> Result<()> {
93    const MAX_RETRIES: u32 = 5;
94    const BASE_DELAY: Duration = Duration::from_millis(100);
95    const MAX_DELAY: Duration = Duration::from_secs(32);
96
97    let mut delay = BASE_DELAY;
98
99    for attempt in 1..=MAX_RETRIES {
100        match client
101            .send_chat(client.chat_simple("Hello with backoff"))
102            .await
103        {
104            Ok(response) => {
105                if let Some(content) = response.content() {
106                    println!("Success after {} attempts: {}", attempt, content);
107                } else {
108                    println!("Success after {} attempts: (no content)", attempt);
109                }
110                return Ok(());
111            }
112            Err(Error::RateLimit(_message)) => {
113                // Use default delay for rate limiting
114                let wait_time = delay;
115                println!(
116                    "Rate limited (attempt {}). Waiting {:?}...",
117                    attempt, wait_time
118                );
119                sleep(wait_time).await;
120
121                // Double the delay for next attempt
122                delay = (delay * 2).min(MAX_DELAY);
123            }
124            Err(e) if attempt < MAX_RETRIES => {
125                println!("Error (attempt {}): {}. Waiting {:?}...", attempt, e, delay);
126                sleep(delay).await;
127
128                // Exponential increase with cap
129                delay = (delay * 2).min(MAX_DELAY);
130            }
131            Err(e) => return Err(e),
132        }
133    }
134
135    Ok(())
136}
137
138async fn retry_with_jitter(client: &Client) -> Result<()> {
139    const MAX_RETRIES: u32 = 5;
140    const BASE_DELAY_MS: u64 = 100;
141
142    for attempt in 1..=MAX_RETRIES {
143        match client
144            .send_chat(client.chat_simple("Hello with jitter"))
145            .await
146        {
147            Ok(response) => {
148                if let Some(content) = response.content() {
149                    println!("Success: {}", content);
150                } else {
151                    println!("Success: (no content)");
152                }
153                return Ok(());
154            }
155            Err(e) if attempt < MAX_RETRIES => {
156                // Calculate delay with jitter using random() instead of thread_rng for Send compatibility
157                let base = BASE_DELAY_MS * 2_u64.pow(attempt - 1);
158                let jitter = rand::random::<u64>() % (base / 2 + 1);
159                let delay = Duration::from_millis(base + jitter);
160
161                println!(
162                    "Attempt {} failed: {}. Retrying in {:?} (with jitter)...",
163                    attempt, e, delay
164                );
165                sleep(delay).await;
166            }
167            Err(e) => return Err(e),
168        }
169    }
170
171    Ok(())
172}
173
174async fn circuit_breaker_example(client: &Client) -> Result<()> {
175    let circuit_breaker = Arc::new(CircuitBreaker::new(3, Duration::from_secs(5)));
176
177    for i in 1..=10 {
178        println!("Request {}: ", i);
179
180        // Check circuit state
181        match circuit_breaker
182            .call(|| async {
183                client
184                    .send_chat(client.chat_simple("Circuit breaker test"))
185                    .await
186            })
187            .await
188        {
189            Ok(response) => {
190                if let Some(content) = response.content() {
191                    println!("  Success: {}", content);
192                } else {
193                    println!("  Success: (no content)");
194                }
195            }
196            Err(CircuitBreakerError::Open) => {
197                println!("  Circuit is OPEN - skipping request");
198                sleep(Duration::from_secs(1)).await;
199            }
200            Err(CircuitBreakerError::RequestFailed(e)) => {
201                println!("  Request failed: {}", e);
202            }
203        }
204
205        // Small delay between requests
206        sleep(Duration::from_millis(500)).await;
207    }
208
209    Ok(())
210}
211
212async fn timeout_management(client: &Client) {
213    // Example 1: Per-request timeout
214    println!("Per-request timeout:");
215    match timeout(
216        Duration::from_secs(5),
217        client.send_chat(client.chat_simple("Hello")),
218    )
219    .await
220    {
221        Ok(Ok(response)) => {
222            if let Some(content) = response.content() {
223                println!("Response received: {}", content);
224            } else {
225                println!("Response received: (no content)");
226            }
227        }
228        Ok(Err(e)) => println!("API error: {}", e),
229        Err(_) => println!("Request timed out after 5 seconds"),
230    }
231
232    // Example 2: Deadline-based timeout
233    println!("\nDeadline-based timeout:");
234    let deadline = Instant::now() + Duration::from_secs(10);
235
236    while Instant::now() < deadline {
237        let remaining = deadline - Instant::now();
238        println!("Time remaining: {:?}", remaining);
239
240        match timeout(
241            remaining,
242            client.send_chat(client.chat_simple("Quick response")),
243        )
244        .await
245        {
246            Ok(Ok(response)) => {
247                if let Some(content) = response.content() {
248                    println!("Got response: {}", content);
249                } else {
250                    println!("Got response: (no content)");
251                }
252                break;
253            }
254            Ok(Err(e)) => {
255                println!("Error: {}. Retrying...", e);
256                sleep(Duration::from_secs(1)).await;
257            }
258            Err(_) => {
259                println!("Deadline exceeded");
260                break;
261            }
262        }
263    }
264
265    // Example 3: Adaptive timeout
266    println!("\nAdaptive timeout:");
267    let mut adaptive_timeout = Duration::from_secs(2);
268
269    for _attempt in 1..=3 {
270        let start = Instant::now();
271
272        match timeout(
273            adaptive_timeout,
274            client.send_chat(client.chat_simple("Adaptive")),
275        )
276        .await
277        {
278            Ok(Ok(response)) => {
279                let elapsed = start.elapsed();
280                println!(
281                    "Success in {:?}. Next timeout would be {:?}.",
282                    elapsed,
283                    elapsed * 2
284                );
285                // Adjust timeout based on actual response time for potential future requests
286                // adaptive_timeout = elapsed * 2; // Not used since we break out of the loop
287                if let Some(content) = response.content() {
288                    println!("Response: {}", content);
289                } else {
290                    println!("Response: (no content)");
291                }
292                break;
293            }
294            Ok(Err(e)) => println!("Error: {}", e),
295            Err(_) => {
296                println!(
297                    "Timeout after {:?}. Increasing for next attempt.",
298                    adaptive_timeout
299                );
300                adaptive_timeout *= 2;
301            }
302        }
303    }
304}
305
306async fn request_hedging(client: &Client) -> Result<()> {
307    use futures::future::{select, Either};
308    use std::pin::pin;
309
310    println!("Launching hedged requests...");
311
312    // Launch multiple requests with staggered starts
313    let request1 = async {
314        println!("Request 1 started");
315        client
316            .send_chat(client.chat_simple("Hedged request 1"))
317            .await
318    };
319
320    let request2 = async {
321        sleep(Duration::from_millis(200)).await;
322        println!("Request 2 started (200ms delay)");
323        client
324            .send_chat(client.chat_simple("Hedged request 2"))
325            .await
326    };
327
328    let fut1 = pin!(request1);
329    let fut2 = pin!(request2);
330
331    // Return whichever request completes first (note: an early error also wins the race)
332    match select(fut1, fut2).await {
333        Either::Left((result, _)) => {
334            println!("Request 1 won the race");
335            result.map(|r| {
336                if let Some(content) = r.content() {
337                    println!("Result: {}", content);
338                } else {
339                    println!("Result: (no content)");
340                }
341            })
342        }
343        Either::Right((result, _)) => {
344            println!("Request 2 won the race");
345            result.map(|r| {
346                if let Some(content) = r.content() {
347                    println!("Result: {}", content);
348                } else {
349                    println!("Result: (no content)");
350                }
351            })
352        }
353    }
354}
355
356async fn fallback_chain(client: &Client) -> Result<()> {
357    // Define fallback chain
358    let strategies = vec![
359        ("GPT-4o", "gpt-4o", 1024),
360        ("GPT-4o-mini", "gpt-4o-mini", 512),
361        ("GPT-3.5", "gpt-3.5-turbo", 256),
362    ];
363
364    let prompt = "Explain quantum computing";
365
366    for (name, _model, max_tokens) in strategies {
367        println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369        let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370        match client.send_chat(builder).await {
371            Ok(response) => {
372                println!("Success with {}", name);
373                if let Some(content) = response.content() {
374                    println!("Response: {}...", content.chars().take(100).collect::<String>());
375                }
376                return Ok(());
377            }
378            Err(e) => {
379                println!("Failed with {}: {}", name, e);
380            }
381        }
382    }
383
384    println!("All fallback strategies exhausted");
385    Ok(())
386}
387
388async fn idempotency_example(_client: &Client) -> Result<()> {
389    // Generate idempotency key
390    let idempotency_key = generate_idempotency_key();
391    println!("Using idempotency key: {}", idempotency_key);
392
393    // Simulate retrying the same request
394    for attempt in 1..=3 {
395        println!("\nAttempt {} with same idempotency key", attempt);
396
397        // In a real implementation, you'd pass the idempotency key in headers
398        let mut headers = std::collections::HashMap::new();
399        headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400        println!("  Would send {} header(s)", headers.len());
401
402        let config = Config::builder()
403            .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404            .build();
405
406        // Note: custom headers (including the idempotency key) are not yet supported in the current API
407
408        let client_with_idempotency = Client::builder(config)?.build();
409
410        match client_with_idempotency
411            .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412            .await
413        {
414            Ok(response) => {
415                if let Some(content) = response.content() {
416                    println!("Response: {}", content);
417                } else {
418                    println!("Response: (no content)");
419                }
420                // Server should return same response for same idempotency key
421            }
422            Err(e) => println!("Error: {}", e),
423        }
424
425        if attempt < 3 {
426            sleep(Duration::from_secs(1)).await;
427        }
428    }
429
430    Ok(())
431}
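
Note: generate_idempotency_key() is defined elsewhere in this example file. A minimal sketch of such a helper, assuming the uuid crate with its v4 feature enabled, might look like:

// Hypothetical helper for illustration; the example's actual implementation may differ.
fn generate_idempotency_key() -> String {
    uuid::Uuid::new_v4().to_string()
}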
examples/auth_patterns.rs (line 80)
61async fn env_var_auth() -> Result<()> {
62    // Standard environment variables:
63    // - OPENAI_API_KEY: Your API key
64    // - OPENAI_ORG_ID: Optional organization ID
65    // - OPENAI_PROJECT_ID: Optional project ID
66    // - OPENAI_BASE_URL: Optional custom base URL
67
68    // Check if environment variables are set
69    if env::var("OPENAI_API_KEY").is_err() {
70        println!("Warning: OPENAI_API_KEY not set");
71        println!("Set it with: export OPENAI_API_KEY=your-key-here");
72        return Ok(());
73    }
74
75    // Create client from environment
76    let client = Client::from_env()?.build();
77    println!("Client created from environment variables");
78
79    // Test the client
80    match client.send_chat(client.chat_simple("Hello")).await {
81        Ok(response) => {
82            if let Some(content) = response.content() {
83                println!("Response: {}", content);
84            } else {
85                println!("Response: (no content)");
86            }
87        }
88        Err(e) => println!("Error: {}", e),
89    }
90
91    Ok(())
92}
93
94async fn direct_api_key() -> Result<()> {
95    // Create client with direct API key
96    let api_key = "sk-your-api-key-here"; // Replace with actual key
97    let config = Config::builder().api_key(api_key).build();
98    let client = Client::builder(config)?.build();
99
100    println!("Client created with direct API key");
101
102    // Note: This will fail with an invalid key
103    match client.send_chat(client.chat_simple("Hello")).await {
104        Ok(response) => {
105            if let Some(content) = response.content() {
106                println!("Response: {}", content);
107            } else {
108                println!("Response: (no content)");
109            }
110        }
111        Err(e) => println!("Expected error with demo key: {}", e),
112    }
113
114    Ok(())
115}
examples/models.rs (line 188)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160    // Task-specific model recommendations
161    let task_models = vec![
162        ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163        ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164        ("Code generation", "gpt-4o", "Excellent code understanding"),
165        ("Vision tasks", "gpt-4o", "Native vision support"),
166        (
167            "Quick responses",
168            "gpt-4o-mini",
169            "Low latency, good quality",
170        ),
171        (
172            "Bulk processing",
173            "gpt-3.5-turbo",
174            "Best cost/performance ratio",
175        ),
176    ];
177
178    for (task, model, reason) in task_models {
179        println!("Task: {}", task);
180        println!("  Recommended: {}", model);
181        println!("  Reason: {}", reason);
182
183        // Demo the model
184        let builder = client
185            .chat()
186            .user(format!("Say 'Hello from {}'", model))
187            .max_completion_tokens(10);
188        let response = client.send_chat(builder).await?;
189
190        if let Some(content) = response.content() {
191            println!("  Response: {}\n", content);
192        }
193    }
194
195    Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199    let models = get_model_registry();
200    let test_prompt = "Explain the theory of relativity in one sentence";
201    let estimated_input_tokens = 15;
202    let estimated_output_tokens = 50;
203
204    println!("Cost comparison for same task:");
205    println!("Prompt: '{}'\n", test_prompt);
206
207    let mut costs = Vec::new();
208
209    for (name, info) in &models {
210        if !info.deprecated {
211            let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212            let output_cost =
213                (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214            let total_cost = input_cost + output_cost;
215
216            costs.push((name.clone(), total_cost));
217        }
218    }
219
220    costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222    println!("{:<20} {:>15}", "Model", "Estimated Cost");
223    println!("{:-<35}", "");
224    for (model, cost) in costs {
225        println!("{:<20} ${:>14.6}", model, cost);
226    }
227
228    // Demonstrate cheapest vs best
229    println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230    let builder = client.chat().user(test_prompt);
231    let cheap_response = client.send_chat(builder).await?;
232
233    if let Some(content) = cheap_response.content() {
234        println!("Response: {}", content);
235    }
236
237    Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241    use std::time::Instant;
242
243    let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244    let test_prompt = "Write a haiku about programming";
245
246    println!("Performance comparison:");
247    println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248    println!("{:-<45}", "");
249
250    for model in models_to_test {
251        let start = Instant::now();
252
253        let builder = client.chat().user(test_prompt);
254        let response = client.send_chat(builder).await?;
255
256        let elapsed = start.elapsed();
257
258        if let Some(usage) = response.usage() {
259            let total_tokens = f64::from(usage.total_tokens);
260            let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262            println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263        }
264    }
265
266    Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270    // Handle deprecated model migration
271    let deprecated_mappings = HashMap::from([
272        ("text-davinci-003", "gpt-3.5-turbo"),
273        ("gpt-4-32k", "gpt-4o"),
274        ("gpt-4-vision-preview", "gpt-4o"),
275    ]);
276
277    let requested_model = "text-davinci-003"; // Deprecated model
278
279    if let Some(replacement) = deprecated_mappings.get(requested_model) {
280        println!(
281            "Warning: {} is deprecated. Using {} instead.",
282            requested_model, replacement
283        );
284
285        let builder = client.chat().user("Hello from migrated model");
286        let response = client.send_chat(builder).await?;
287
288        if let Some(content) = response.content() {
289            println!("Response from {}: {}", replacement, content);
290        }
291    }
292
293    Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297    // Select model based on runtime conditions
298
299    #[derive(Debug)]
300    struct RequestContext {
301        urgency: Urgency,
302        complexity: Complexity,
303        budget: Budget,
304        needs_vision: bool,
305    }
306
307    #[derive(Debug)]
308    enum Urgency {
309        Low,
310        Medium,
311        High,
312    }
313
314    #[derive(Debug)]
315    enum Complexity {
316        Simple,
317        Moderate,
318        Complex,
319    }
320
321    #[derive(Debug)]
322    enum Budget {
323        Tight,
324        Normal,
325        Flexible,
326    }
327
328    const fn select_model(ctx: &RequestContext) -> &'static str {
329        match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330            // High urgency + simple = fast cheap model, or tight budget = cheapest
331            (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333            // Complex + flexible budget = best model
334            (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336            // Vision required
337            _ if ctx.needs_vision => "gpt-4o",
338
339            // Default balanced choice
340            _ => "gpt-4o-mini",
341        }
342    }
343
344    // Example contexts
345    let contexts = [
346        RequestContext {
347            urgency: Urgency::High,
348            complexity: Complexity::Simple,
349            budget: Budget::Tight,
350            needs_vision: false,
351        },
352        RequestContext {
353            urgency: Urgency::Low,
354            complexity: Complexity::Complex,
355            budget: Budget::Flexible,
356            needs_vision: false,
357        },
358        RequestContext {
359            urgency: Urgency::Medium,
360            complexity: Complexity::Moderate,
361            budget: Budget::Normal,
362            needs_vision: true,
363        },
364    ];
365
366    for (i, ctx) in contexts.iter().enumerate() {
367        let model = select_model(ctx);
368        println!("Context {}: {:?}", i + 1, ctx);
369        println!("  Selected model: {}", model);
370
371        let builder = client
372            .chat()
373            .user(format!("Hello from dynamically selected {}", model))
374            .max_completion_tokens(20);
375        let response = client.send_chat(builder).await?;
376
377        if let Some(content) = response.content() {
378            println!("  Response: {}\n", content);
379        }
380    }
381
382    Ok(())
383}
examples/vision_chat.rs (line 93)
72async fn demonstrate_basic_image_analysis(
73    client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75    println!("  Example 1: Basic Image Analysis");
76    println!("----------------------------------");
77
78    let image_url = SAMPLE_IMAGE_URLS[0];
79    let question = "What do you see in this image? Please describe it in detail.";
80
81    println!("Image URL: {image_url}");
82    println!("Question: {question}");
83    print!("Assistant: ");
84    io::stdout().flush()?;
85
86    // Use the convenient user_with_image_url method
87    let chat_builder = client
88        .chat()
89        .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90        .user_with_image_url(question, image_url)
91        .temperature(0.3);
92
93    let response = client.send_chat(chat_builder).await?;
94
95    if let Some(content) = response.content() {
96        println!("{content}");
97
98        // Show usage information
99        if let Some(usage) = response.usage() {
100            println!("\n Token usage:");
101            println!("  Prompt tokens: {}", usage.prompt_tokens);
102            println!("  Completion tokens: {}", usage.completion_tokens);
103            println!("  Total tokens: {}", usage.total_tokens);
104        }
105    } else {
106        println!("No response content received");
107    }
108
109    println!();
110    Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297    println!("  Example 6: Error Handling Patterns");
298    println!("------------------------------------");
299
300    println!("Testing various error scenarios...\n");
301
302    // Test 1: Invalid image URL
303    println!("Test 1: Invalid image URL");
304    let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306    let invalid_builder = client
307        .chat()
308        .user_with_image_url("What do you see?", invalid_url)
309        .temperature(0.3);
310
311    match client.send_chat(invalid_builder).await {
312        Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313        Err(e) => match &e {
314            Error::Api {
315                status, message, ..
316            } => {
317                println!(" API properly rejected invalid URL ({status}): {message}");
318            }
319            Error::Http(reqwest_err) => {
320                println!(" HTTP error caught: {reqwest_err}");
321            }
322            Error::InvalidRequest(msg) => {
323                println!(" Validation caught invalid URL: {msg}");
324            }
325            _ => {
326                println!("ℹ  Other error type: {e}");
327            }
328        },
329    }
330
331    // Test 2: Empty message with image
332    println!("\nTest 2: Empty text with image");
333    let empty_text_builder = client
334        .chat()
335        .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336        .temperature(0.3);
337
338    match client.send_chat(empty_text_builder).await {
339        Ok(response) => {
340            if let Some(content) = response.content() {
341                println!(
342                    " API handled empty text gracefully: {}",
343                    content.chars().take(50).collect::<String>()
344                );
345            }
346        }
347        Err(e) => {
348            println!("ℹ  Empty text error: {e}");
349        }
350    }
351
352    // Test 3: Malformed base64 data
353    println!("\nTest 3: Malformed base64 image data");
354    let malformed_base64 = "this-is-not-valid-base64!@#$%";
355    let malformed_parts = vec![
356        text_part("What is this?"),
357        image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358    ];
359
360    let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362    match client.send_chat(malformed_builder).await {
363        Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364        Err(e) => match &e {
365            Error::Api {
366                status, message, ..
367            } => {
368                println!(" API properly rejected malformed base64 ({status}): {message}");
369            }
370            _ => {
371                println!("ℹ  Other error for malformed base64: {e}");
372            }
373        },
374    }
375
376    println!("\n  Error handling patterns demonstrated:");
377    println!("  • Invalid image URL handling");
378    println!("  • Empty text with image handling");
379    println!("  • Malformed base64 data validation");
380    println!("  • API error classification");
381    println!("  • Network error handling");
382
383    println!();
384    Ok(())
385}
Source

pub async fn send_chat_stream(&self, builder: ChatCompletionBuilder) -> Result<BoxedChatStream>

Send a chat completion request with streaming enabled.

Returns a stream of chat completion chunks as they are generated. This allows for real-time display of the model’s response.

§Example
use openai_ergonomic::Client;
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::from_env()?.build();

    let builder = client.chat().user("Tell me a story");
    let mut stream = client.send_chat_stream(builder).await?;

    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        if let Some(content) = chunk.content() {
            print!("{}", content);
        }
    }

    Ok(())
}
Examples found in repository?
examples/chat_streaming.rs (line 54)
49async fn basic_streaming(client: &Client) -> Result<()> {
50    println!("Question: Tell me a short joke");
51
52    let builder = client.chat().user("Tell me a short joke");
53
54    let mut stream = client.send_chat_stream(builder).await?;
55
56    print!("Response: ");
57    while let Some(chunk) = stream.next().await {
58        let chunk = chunk?;
59        if let Some(content) = chunk.content() {
60            print!("{}", content);
61        }
62    }
63    println!();
64
65    Ok(())
66}
67
68async fn streaming_with_parameters(client: &Client) -> Result<()> {
69    println!("Question: Write a creative tagline for a bakery");
70
71    let builder = client
72        .chat()
73        .user("Write a creative tagline for a bakery")
74        .temperature(0.9)
75        .max_tokens(50);
76
77    let mut stream = client.send_chat_stream(builder).await?;
78
79    print!("Response: ");
80    while let Some(chunk) = stream.next().await {
81        let chunk = chunk?;
82        if let Some(content) = chunk.content() {
83            print!("{}", content);
84        }
85    }
86    println!();
87
88    Ok(())
89}
90
91async fn collect_content(client: &Client) -> Result<()> {
92    println!("Question: What is the capital of France?");
93
94    let builder = client.chat().user("What is the capital of France?");
95
96    let mut stream = client.send_chat_stream(builder).await?;
97
98    // Manually collect all content
99    let mut content = String::new();
100    while let Some(chunk) = stream.next().await {
101        let chunk = chunk?;
102        if let Some(text) = chunk.content() {
103            content.push_str(text);
104        }
105    }
106    println!("Full response: {}", content);
107
108    Ok(())
109}
110
111async fn streaming_with_system(client: &Client) -> Result<()> {
112    println!("System: You are a helpful assistant that speaks like a pirate");
113    println!("Question: Tell me about the weather");
114
115    let builder = client
116        .chat()
117        .system("You are a helpful assistant that speaks like a pirate")
118        .user("Tell me about the weather")
119        .max_tokens(100);
120
121    let mut stream = client.send_chat_stream(builder).await?;
122
123    print!("Response: ");
124    while let Some(chunk) = stream.next().await {
125        let chunk = chunk?;
126        if let Some(content) = chunk.content() {
127            print!("{}", content);
128        }
129    }
130    println!();
131
132    Ok(())
133}
134
135async fn multiple_turns(client: &Client) -> Result<()> {
136    println!("Building a conversation with multiple turns...\n");
137
138    // First turn
139    println!("User: What is 2+2?");
140    let builder = client.chat().user("What is 2+2?");
141
142    let mut stream = client.send_chat_stream(builder).await?;
143
144    print!("Assistant: ");
145    let mut first_response = String::new();
146    while let Some(chunk) = stream.next().await {
147        let chunk = chunk?;
148        if let Some(content) = chunk.content() {
149            print!("{}", content);
150            first_response.push_str(content);
151        }
152    }
153    println!();
154
155    // Second turn - continuing the conversation
156    println!("\nUser: Now multiply that by 3");
157    let builder = client
158        .chat()
159        .user("What is 2+2?")
160        .assistant(&first_response)
161        .user("Now multiply that by 3");
162
163    let mut stream = client.send_chat_stream(builder).await?;
164
165    print!("Assistant: ");
166    while let Some(chunk) = stream.next().await {
167        let chunk = chunk?;
168        if let Some(content) = chunk.content() {
169            print!("{}", content);
170        }
171    }
172    println!();
173
174    Ok(())
175}
More examples
examples/langfuse_streaming.rs (line 99)
94async fn basic_streaming(client: &Client<LangfuseState<Span>>) -> Result<()> {
95    println!("Question: Tell me a short joke");
96
97    let builder = client.chat().user("Tell me a short joke");
98
99    let mut stream = client.send_chat_stream(builder).await?;
100
101    print!("Response: ");
102    let mut chunk_count = 0;
103    while let Some(chunk) = stream.next().await {
104        let chunk = chunk?;
105        if let Some(content) = chunk.content() {
106            print!("{}", content);
107            chunk_count += 1;
108        }
109    }
110    println!(
111        "\n(Received {} chunks, all traced to Langfuse)",
112        chunk_count
113    );
114
115    Ok(())
116}
117
118async fn streaming_with_parameters(client: &Client<LangfuseState<Span>>) -> Result<()> {
119    println!("Question: Write a creative tagline for a bakery");
120
121    let builder = client
122        .chat()
123        .user("Write a creative tagline for a bakery")
124        .temperature(0.9)
125        .max_tokens(50);
126
127    let mut stream = client.send_chat_stream(builder).await?;
128
129    print!("Response: ");
130    let mut chunk_count = 0;
131    while let Some(chunk) = stream.next().await {
132        let chunk = chunk?;
133        if let Some(content) = chunk.content() {
134            print!("{}", content);
135            chunk_count += 1;
136        }
137    }
138    println!(
139        "\n(Received {} chunks, all traced to Langfuse)",
140        chunk_count
141    );
142
143    Ok(())
144}
145
146async fn collect_content(client: &Client<LangfuseState<Span>>) -> Result<()> {
147    println!("Question: What is the capital of France?");
148
149    let builder = client.chat().user("What is the capital of France?");
150
151    let mut stream = client.send_chat_stream(builder).await?;
152
153    // Manually collect content (interceptor hooks are still called for each chunk)
154    let mut content = String::new();
155    while let Some(chunk) = stream.next().await {
156        let chunk = chunk?;
157        if let Some(text) = chunk.content() {
158            content.push_str(text);
159        }
160    }
161    println!("Full response: {}", content);
162    println!("(All chunks were traced to Langfuse during collection)");
163
164    Ok(())
165}
Source§

impl<T: Default + Send + Sync + 'static> Client<T>

Source

pub fn responses(&self) -> ResponsesBuilder

Create a responses builder for structured outputs.
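
§Example

A minimal sketch assembled from the repository examples below; it assumes OPENAI_API_KEY is set in the environment.

use openai_ergonomic::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::from_env()?.build();

    let builder = client
        .responses()
        .system("You are a helpful assistant.")
        .user("What is the capital of France?")
        .temperature(0.7)
        .max_completion_tokens(100);

    let response = client.send_responses(builder).await?;

    if let Some(content) = response.content() {
        println!("{content}");
    }

    Ok(())
}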

Examples found in repository?
examples/responses_comprehensive.rs (line 123)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119    println!("Creating a basic response with system context...");
120
121    // Build a simple request with system and user messages
122    let builder = client
123        .responses()
124        .system("You are a helpful assistant who provides concise, accurate answers.")
125        .user("What is the capital of France?")
126        .temperature(0.7)
127        .max_completion_tokens(100);
128
129    let response = client.send_responses(builder).await?;
130
131    // Extract and display the response
132    if let Some(content) = response.content() {
133        println!(" Assistant: {content}");
134    } else {
135        println!("  No content in response");
136    }
137
138    // Show response metadata
139    println!(" Response metadata:");
140    println!("   - Model: {}", response.model().unwrap_or("unknown"));
141    println!(
142        "   - Finish reason: {}",
143        response
144            .finish_reason()
145            .unwrap_or_else(|| "unknown".to_string())
146    );
147
148    if let Some(usage) = response.usage() {
149        println!(
150            "   - Tokens used: {} prompt + {} completion = {} total",
151            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152        );
153    }
154
155    Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160    println!("Setting up function calling with custom tools...");
161
162    // Define a weather function tool
163    let weather_tool = tool_function(
164        "get_weather",
165        "Get the current weather information for a specific location",
166        json!({
167            "type": "object",
168            "properties": {
169                "location": {
170                    "type": "string",
171                    "description": "The city name, e.g., 'San Francisco, CA'"
172                },
173                "unit": {
174                    "type": "string",
175                    "enum": ["celsius", "fahrenheit"],
176                    "description": "Temperature unit preference"
177                }
178            },
179            "required": ["location"],
180            "additionalProperties": false
181        }),
182    );
183
184    // Define a time function tool
185    let time_tool = tool_function(
186        "get_current_time",
187        "Get the current time in a specific timezone",
188        json!({
189            "type": "object",
190            "properties": {
191                "timezone": {
192                    "type": "string",
193                    "description": "Timezone name, e.g., 'America/New_York'"
194                }
195            },
196            "required": ["timezone"],
197            "additionalProperties": false
198        }),
199    );
200
201    // Make a request that should trigger function calling
202    let builder = client
203        .responses()
204        .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205        .user("What's the weather like in London and what time is it there?")
206        .tool(weather_tool)
207        .tool(time_tool)
208        .tool_choice(ToolChoiceHelper::auto())
209        .temperature(0.3);
210
211    let response = client.send_responses(builder).await?;
212
213    // Check if the model wants to call functions
214    let tool_calls = response.tool_calls();
215    if !tool_calls.is_empty() {
216        println!(" Model requested {} tool call(s):", tool_calls.len());
217
218        for (i, tool_call) in tool_calls.iter().enumerate() {
219            println!("   {}. Function: {}", i + 1, tool_call.function_name());
220            println!("      Arguments: {}", tool_call.function_arguments());
221
222            // In a real application, you would:
223            // 1. Parse the arguments
224            // 2. Execute the actual function
225            // 3. Send the results back to the model
226            println!("      [Simulated] Executing function call...");
227            match tool_call.function_name() {
228                "get_weather" => {
229                    println!("      [Simulated] Weather: 22°C, partly cloudy");
230                }
231                "get_current_time" => {
232                    println!("      [Simulated] Time: 14:30 GMT");
233                }
234                _ => {
235                    println!("      [Simulated] Unknown function");
236                }
237            }
238        }
239    } else if let Some(content) = response.content() {
240        println!(" Assistant response: {content}");
241    }
242
243    Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248    println!("Demonstrating web search tool integration...");
249
250    // Create a web search tool
251    let web_search_tool = tool_web_search();
252
253    // Ask a question that would benefit from current information
254    let builder = client
255        .responses()
256        .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257        .user("What are the latest developments in artificial intelligence this week?")
258        .tool(web_search_tool)
259        .tool_choice(ToolChoiceHelper::auto())
260        .temperature(0.3)
261        .max_completion_tokens(200);
262
263    let response = client.send_responses(builder).await?;
264
265    // Handle the response
266    let tool_calls = response.tool_calls();
267    if !tool_calls.is_empty() {
268        println!(" Model requested web search:");
269
270        for tool_call in &tool_calls {
271            if tool_call.function_name() == "web_search" {
272                println!("   Search query: {}", tool_call.function_arguments());
273                println!("   [Simulated] Performing web search...");
274                println!("   [Simulated] Found recent AI news and developments");
275
276                // In a real implementation:
277                // 1. Parse the search query from arguments
278                // 2. Perform actual web search
279                // 3. Return results to the model
280                // 4. Get final response with search results
281            }
282        }
283    } else if let Some(content) = response.content() {
284        println!(" Assistant response: {content}");
285    }
286
287    println!(" Note: Web search requires additional implementation to execute actual searches");
288
289    Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294    println!("Demonstrating structured JSON outputs...");
295
296    // Define a schema for recipe information
297    let recipe_schema = json!({
298        "type": "object",
299        "properties": {
300            "name": {
301                "type": "string",
302                "description": "Name of the recipe"
303            },
304            "ingredients": {
305                "type": "array",
306                "items": {
307                    "type": "object",
308                    "properties": {
309                        "name": {
310                            "type": "string",
311                            "description": "Ingredient name"
312                        },
313                        "amount": {
314                            "type": "string",
315                            "description": "Amount needed"
316                        }
317                    },
318                    "required": ["name", "amount"],
319                    "additionalProperties": false
320                },
321                "description": "List of ingredients"
322            },
323            "instructions": {
324                "type": "array",
325                "items": {
326                    "type": "string"
327                },
328                "description": "Step-by-step cooking instructions"
329            },
330            "prep_time_minutes": {
331                "type": "integer",
332                "description": "Preparation time in minutes"
333            },
334            "difficulty": {
335                "type": "string",
336                "enum": ["easy", "medium", "hard"],
337                "description": "Recipe difficulty level"
338            }
339        },
340        "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341        "additionalProperties": false
342    });
343
344    // Request a recipe in structured JSON format
345    let builder = client
346        .responses()
347        .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348        .user("Give me a simple recipe for chocolate chip cookies")
349        .json_schema("recipe", recipe_schema)
350        .temperature(0.5);
351
352    let response = client.send_responses(builder).await?;
353
354    if let Some(content) = response.content() {
355        println!(" Structured recipe output:");
356
357        // Try to parse and pretty-print the JSON
358        match serde_json::from_str::<serde_json::Value>(content) {
359            Ok(json) => {
360                println!("{}", serde_json::to_string_pretty(&json)?);
361            }
362            Err(_) => {
363                println!("Raw response: {content}");
364            }
365        }
366    }
367
368    // Example of simple JSON mode (without schema)
369    println!("\n Simple JSON mode example:");
370    let simple_builder = client
371        .responses()
372        .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373        .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374        .json_mode()
375        .temperature(0.3);
376
377    let simple_response = client.send_responses(simple_builder).await?;
378
379    if let Some(content) = simple_response.content() {
380        println!(" Analysis result: {content}");
381    }
382
383    Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388    println!("Demonstrating advanced response configuration...");
389
390    // Example with multiple completions and various parameters
391    let builder = client
392        .responses()
393        .system("You are a creative writing assistant. Write in different styles when asked.")
394        .user("Write a short tagline for a futuristic coffee shop")
395        .temperature(0.9)  // High creativity
396        .max_completion_tokens(50)
397        .n(1)  // Generate 1 completion
398        .top_p(0.9)
399        .frequency_penalty(0.1)
400        .presence_penalty(0.1)
401        .stop(vec!["\n".to_string(), ".".to_string()])
402        .seed(42)  // For reproducible results
403        .user_id("example_user_123");
404
405    let response = client.send_responses(builder).await?;
406
407    println!(" Creative tagline generation:");
408    if let Some(content) = response.content() {
409        println!("   Result: {content}");
410    }
411
412    // Example with reasoning effort (for o3 models)
413    println!("\n Example with reasoning effort (o3 models):");
414    let reasoning_builder = client
415        .responses()
416        .system("You are a logic puzzle solver. Think through problems step by step.")
417        .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418        .reasoning_effort("medium")
419        .temperature(0.1); // Low temperature for accuracy
420
421    let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423    if let Some(content) = reasoning_response.content() {
424        println!("   Solution: {content}");
425    } else {
426        println!("   Note: Reasoning effort requires a compatible model (e.g., o3)");
427    }
428
429    // Show model information
430    println!("\n Model and usage information:");
431    println!("   Model used: {}", response.model().unwrap_or("unknown"));
432    if let Some(usage) = response.usage() {
433        println!(
434            "   Token usage: {} total ({} prompt + {} completion)",
435            usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436        );
437    }
438
439    Ok(())
440}
More examples
examples/responses_streaming.rs (line 233)
222async fn example_basic_streaming() -> Result<()> {
223    println!("=== Basic Streaming Example ===");
224
225    // Note: This is a conceptual example, since actual streaming
226    // requires integration with the openai-client-base streaming API.
227    println!("Creating client and streaming request...");
228
229    let client = Client::from_env()?.build();
230
231    // Build a streaming request
232    let _streaming_request = client
233        .responses()
234        .user("Tell me a short story about a robot learning to paint")
235        .stream(true)
236        .temperature(0.7)
237        .max_completion_tokens(500);
238
239    println!("Streaming request configured:");
240    println!("- Model: Default (gpt-4)");
241    println!("- Stream: true");
242    println!("- Temperature: 0.7");
243    println!("- Max tokens: 500");
244
245    // Simulate streaming chunks for demonstration
246    let sample_chunks = vec![
247        "Once", " upon", " a", " time,", " there", " was", " a", " little", " robot", " named",
248        " Pixel", "...",
249    ];
250
251    println!("\nSimulated streaming output:");
252    print!("> ");
253    for chunk in sample_chunks {
254        print!("{chunk}");
255        std::io::Write::flush(&mut std::io::stdout()).unwrap();
256        tokio::time::sleep(Duration::from_millis(100)).await;
257    }
258    println!("\n");
259
260    Ok(())
261}
262
263/// Demonstrates advanced streaming with buffer management
264async fn example_buffered_streaming() -> Result<()> {
265    println!("=== Buffered Streaming Example ===");
266
267    let mut buffer = StreamBuffer::new(1024); // 1KB buffer
268
269    // Simulate incoming chunks
270    let chunks = [
271        "The robot's optical sensors",
272        " detected the vibrant colors",
273        " of the sunset painting",
274        " hanging in the gallery.",
275        " For the first time,",
276        " Pixel felt something",
277        " that could only be",
278        " described as wonder.",
279    ];
280
281    println!("Processing chunks with buffer management:");
282
283    for (i, chunk) in chunks.iter().enumerate() {
284        // Add chunk to buffer
285        buffer.append(chunk)?;
286
287        println!(
288            "Chunk {}: '{}' (Buffer: {:.1}% full)",
289            i + 1,
290            chunk,
291            buffer.utilization()
292        );
293
294        // Check if buffer is getting full
295        if buffer.is_high_water() {
296            println!("    Buffer high water mark reached, consider processing");
297
298            // In a real application, you might:
299            // 1. Process the current content
300            // 2. Send to downstream consumers
301            // 3. Compact the buffer
302            buffer.compact(100); // Keep last 100 chars for context
303            println!("   Buffer compacted to {:.1}%", buffer.utilization());
304        }
305
306        tokio::time::sleep(Duration::from_millis(50)).await;
307    }
308
309    println!(
310        "\nFinal content length: {} characters",
311        buffer.content().len()
312    );
313    println!(
314        "Final content: \"{}...\"",
315        buffer.content().chars().take(50).collect::<String>()
316    );
317
318    Ok(())
319}
320
321/// Demonstrates error handling patterns for streaming
322fn example_streaming_error_handling() {
323    println!("=== Streaming Error Handling Example ===");
324
325    // Simulate various error conditions that can occur during streaming
326    println!("Demonstrating common streaming error scenarios:");
327
328    // 1. Connection errors
329    println!("\n1. Connection Error Simulation:");
330    let connection_result: Result<()> = Err(Error::StreamConnection {
331        message: "Connection lost to streaming endpoint".to_string(),
332    });
333
334    match connection_result {
335        Err(Error::StreamConnection { message }) => {
336            println!("    Connection error handled: {message}");
337            println!("    Would implement retry logic here");
338        }
339        _ => unreachable!(),
340    }
341
342    // 2. Parsing errors
343    println!("\n2. Parse Error Simulation:");
344    let malformed_chunk = "data: {invalid json}";
345    match StreamChunk::parse(malformed_chunk) {
346        Err(Error::StreamParsing { message, chunk }) => {
347            println!("    Parse error handled: {message}");
348            println!("    Problematic chunk: {chunk}");
349            println!("    Would skip chunk and continue");
350        }
351        _ => println!("    Chunk parsed successfully"),
352    }
353
354    // 3. Buffer overflow
355    println!("\n3. Buffer Overflow Simulation:");
356    let mut small_buffer = StreamBuffer::new(10); // Very small buffer
357    let large_chunk = "This chunk is definitely too large for our tiny buffer";
358
359    match small_buffer.append(large_chunk) {
360        Err(Error::StreamBuffer { message }) => {
361            println!("    Buffer error handled: {message}");
362            println!("    Would implement buffer resizing or chunking");
363        }
364        Ok(()) => println!("    Content added to buffer"),
365        Err(e) => println!("    Unexpected error: {e}"),
366    }
367
368    // 4. Timeout handling
369    println!("\n4. Timeout Handling:");
370    println!("   ⏱  Would implement timeout for stream chunks");
371    println!("    Would retry or fail gracefully on timeout");
372}
373
374/// Demonstrates tool calling in streaming responses
375async fn example_streaming_tool_calls() -> Result<()> {
376    println!("=== Streaming Tool Calls Example ===");
377
378    let client = Client::from_env()?.build();
379
380    // Create a tool for getting weather information
381    let weather_tool = openai_ergonomic::responses::tool_function(
382        "get_weather",
383        "Get current weather for a location",
384        serde_json::json!({
385            "type": "object",
386            "properties": {
387                "location": {
388                    "type": "string",
389                    "description": "City name"
390                }
391            },
392            "required": ["location"]
393        }),
394    );
395
396    // Build streaming request with tools
397    let _tool_request = client
398        .responses()
399        .user("What's the weather like in San Francisco?")
400        .tool(weather_tool)
401        .stream(true);
402
403    println!("Streaming tool call request configured:");
404    println!("- Tool: get_weather function");
405    println!("- Streaming: enabled");
406
407    // Simulate streaming tool call chunks
408    println!("\nSimulated streaming tool call:");
409
410    let tool_chunks = [
411        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_123","type":"function","function":{"name":"get_weather"}}]}}]}"#,
412        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{"}}]}}]}"#,
413        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"location\""}}]}}]}"#,
414        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":":"}}]}}]}"#,
415        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"San Francisco\""}}]}}]}"#,
416        r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]}}]}"#,
417    ];
418
419    let mut tool_call_buffer = String::new();
420
421    for (i, chunk_data) in tool_chunks.iter().enumerate() {
422        let chunk_line = format!("data: {chunk_data}");
423
424        if let Some(chunk) = StreamChunk::parse(&chunk_line)? {
425            if chunk.has_tool_call() {
426                println!("Chunk {}: Tool call data received", i + 1);
427
428                // In a real implementation, you'd accumulate tool call arguments
429                if let Some(tool_data) = &chunk.tool_call_delta {
430                    if let Some(args) = tool_data["function"]["arguments"].as_str() {
431                        tool_call_buffer.push_str(args);
432                        println!("  Arguments so far: {tool_call_buffer}");
433                    }
434                }
435            }
436        }
437
438        tokio::time::sleep(Duration::from_millis(100)).await;
439    }
440
441    println!("\n Complete tool call arguments: {tool_call_buffer}");
442    println!(" Would now execute get_weather(location='San Francisco')");
443
444    Ok(())
445}
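
The timeout-handling step in example_streaming_error_handling above only prints placeholders. A minimal sketch of a per-chunk timeout, assuming a tokio runtime and any Unpin futures::Stream of chunks (names are illustrative), might look like:

use std::time::Duration;

use futures::{Stream, StreamExt};
use tokio::time::timeout;

// Hedged sketch: returns None both when the stream ends and when the next
// chunk does not arrive within the deadline (logging the timeout case).
async fn next_chunk_with_timeout<S: Stream + Unpin>(stream: &mut S) -> Option<S::Item> {
    match timeout(Duration::from_secs(5), stream.next()).await {
        Ok(item) => item,
        Err(_) => {
            eprintln!("Chunk timed out after 5s; retry or fail gracefully");
            None
        }
    }
}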
examples/structured_outputs.rs (line 136)
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133    println!("Using simple JSON mode for basic structure enforcement...");
134
135    let builder = client
136        .responses()
137        .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138        .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139        .json_mode()
140        .temperature(0.3)
141        .max_completion_tokens(200);
142
143    let response = client.send_responses(builder).await?;
144
145    if let Some(content) = response.content() {
146        println!(" JSON Analysis Result:");
147
148        // Try to parse and pretty-print the JSON
149        match serde_json::from_str::<serde_json::Value>(content) {
150            Ok(json) => {
151                println!("{}", serde_json::to_string_pretty(&json)?);
152
153                // Demonstrate accessing specific fields
154                if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155                    println!("\n Extracted sentiment: {sentiment}");
156                }
157                if let Some(confidence) = json
158                    .get("confidence_score")
159                    .and_then(serde_json::Value::as_f64)
160                {
161                    println!(" Confidence score: {confidence:.2}");
162                }
163            }
164            Err(e) => {
165                println!("  Failed to parse JSON: {e}");
166                println!("Raw response: {content}");
167            }
168        }
169    }
170
171    Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176    println!("Extracting structured data from unstructured text using JSON schema...");
177
178    // Define schema for extracting contact information
179    let contact_schema = json!({
180        "type": "object",
181        "properties": {
182            "contacts": {
183                "type": "array",
184                "items": {
185                    "type": "object",
186                    "properties": {
187                        "name": {
188                            "type": "string",
189                            "description": "Full name of the person"
190                        },
191                        "email": {
192                            "type": "string",
193                            "format": "email",
194                            "description": "Email address"
195                        },
196                        "phone": {
197                            "type": "string",
198                            "description": "Phone number"
199                        },
200                        "company": {
201                            "type": "string",
202                            "description": "Company or organization"
203                        },
204                        "role": {
205                            "type": "string",
206                            "description": "Job title or role"
207                        }
208                    },
209                    "required": ["name"],
210                    "additionalProperties": false
211                }
212            },
213            "total_contacts": {
214                "type": "integer",
215                "description": "Total number of contacts extracted"
216            }
217        },
218        "required": ["contacts", "total_contacts"],
219        "additionalProperties": false
220    });
221
222    let unstructured_text =
223        "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224        For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225        Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227    let builder = client
228        .responses()
229        .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230        .user(format!("Extract contact information from this text: {unstructured_text}"))
231        .json_schema("contact_extraction", contact_schema)
232        .temperature(0.1); // Low temperature for accuracy
233
234    let response = client.send_responses(builder).await?;
235
236    if let Some(content) = response.content() {
237        println!(" Extracted Contact Information:");
238
239        match serde_json::from_str::<serde_json::Value>(content) {
240            Ok(json) => {
241                println!("{}", serde_json::to_string_pretty(&json)?);
242
243                // Demonstrate accessing the structured data
244                if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245                    println!("\n Summary: Found {} contact(s)", contacts.len());
246                    for (i, contact) in contacts.iter().enumerate() {
247                        if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248                            println!("   {}. {name}", i + 1);
249                            if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250                                println!("       {email}");
251                            }
252                            if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253                                println!("       {company}");
254                            }
255                        }
256                    }
257                }
258            }
259            Err(e) => {
260                println!("  Failed to parse JSON: {e}");
261                println!("Raw response: {content}");
262            }
263        }
264    }
265
266    Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272    println!("Creating complex nested structure for event planning...");
273
274    // Define a comprehensive event schema
275    let event_schema = json!({
276        "type": "object",
277        "properties": {
278            "event": {
279                "type": "object",
280                "properties": {
281                    "name": {
282                        "type": "string",
283                        "description": "Event name"
284                    },
285                    "type": {
286                        "type": "string",
287                        "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288                        "description": "Type of event"
289                    },
290                    "date": {
291                        "type": "string",
292                        "format": "date",
293                        "description": "Event date in YYYY-MM-DD format"
294                    },
295                    "duration_hours": {
296                        "type": "number",
297                        "minimum": 0.5,
298                        "maximum": 24,
299                        "description": "Duration in hours"
300                    },
301                    "venue": {
302                        "type": "object",
303                        "properties": {
304                            "name": {
305                                "type": "string",
306                                "description": "Venue name"
307                            },
308                            "address": {
309                                "type": "string",
310                                "description": "Venue address"
311                            },
312                            "capacity": {
313                                "type": "integer",
314                                "minimum": 1,
315                                "description": "Maximum capacity"
316                            },
317                            "amenities": {
318                                "type": "array",
319                                "items": {
320                                    "type": "string",
321                                    "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322                                },
323                                "description": "Available amenities"
324                            }
325                        },
326                        "required": ["name", "capacity"],
327                        "additionalProperties": false
328                    },
329                    "agenda": {
330                        "type": "array",
331                        "items": {
332                            "type": "object",
333                            "properties": {
334                                "time": {
335                                    "type": "string",
336                                    "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337                                    "description": "Time in HH:MM format"
338                                },
339                                "activity": {
340                                    "type": "string",
341                                    "description": "Activity description"
342                                },
343                                "speaker": {
344                                    "type": "string",
345                                    "description": "Speaker name"
346                                },
347                                "duration_minutes": {
348                                    "type": "integer",
349                                    "minimum": 15,
350                                    "maximum": 480,
351                                    "description": "Activity duration in minutes"
352                                }
353                            },
354                            "required": ["time", "activity", "duration_minutes"],
355                            "additionalProperties": false
356                        }
357                    },
358                    "estimated_cost": {
359                        "type": "object",
360                        "properties": {
361                            "venue": {
362                                "type": "number",
363                                "minimum": 0,
364                                "description": "Venue cost in USD"
365                            },
366                            "catering": {
367                                "type": "number",
368                                "minimum": 0,
369                                "description": "Catering cost in USD"
370                            },
371                            "equipment": {
372                                "type": "number",
373                                "minimum": 0,
374                                "description": "Equipment cost in USD"
375                            },
376                            "total": {
377                                "type": "number",
378                                "minimum": 0,
379                                "description": "Total estimated cost in USD"
380                            }
381                        },
382                        "required": ["total"],
383                        "additionalProperties": false
384                    }
385                },
386                "required": ["name", "type", "date", "duration_hours", "venue"],
387                "additionalProperties": false
388            }
389        },
390        "required": ["event"],
391        "additionalProperties": false
392    });
393
394    let builder = client
395        .responses()
396        .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397        .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398        .json_schema("event_plan", event_schema)
399        .temperature(0.5);
400
401    let response = client.send_responses(builder).await?;
402
403    if let Some(content) = response.content() {
404        println!(" Event Plan:");
405
406        match serde_json::from_str::<serde_json::Value>(content) {
407            Ok(json) => {
408                println!("{}", serde_json::to_string_pretty(&json)?);
409
410                // Extract and display key information
411                if let Some(event) = json.get("event") {
412                    if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413                        println!("\n Event: {name}");
414                    }
415                    if let Some(venue) = event.get("venue") {
416                        if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417                            let capacity = venue
418                                .get("capacity")
419                                .and_then(serde_json::Value::as_i64)
420                                .unwrap_or(0);
421                            println!(" Venue: {venue_name} (Capacity: {capacity})");
422                        }
423                    }
424                    if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425                        println!(" Agenda has {} activities", agenda.len());
426                    }
427                    if let Some(cost) = event.get("estimated_cost") {
428                        if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429                            println!(" Estimated total cost: ${total:.2}");
430                        }
431                    }
432                }
433            }
434            Err(e) => {
435                println!("  Failed to parse JSON: {e}");
436                println!("Raw response: {content}");
437            }
438        }
439    }
440
441    Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447    println!("Classifying content with enum validation...");
448
449    // Define schema for content classification
450    let classification_schema = json!({
451        "type": "object",
452        "properties": {
453            "classification": {
454                "type": "object",
455                "properties": {
456                    "category": {
457                        "type": "string",
458                        "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459                        "description": "Primary content category"
460                    },
461                    "subcategory": {
462                        "type": "string",
463                        "description": "More specific subcategory"
464                    },
465                    "sentiment": {
466                        "type": "string",
467                        "enum": ["positive", "neutral", "negative", "mixed"],
468                        "description": "Overall sentiment"
469                    },
470                    "topics": {
471                        "type": "array",
472                        "items": {
473                            "type": "string"
474                        },
475                        "maxItems": 5,
476                        "description": "Key topics mentioned"
477                    },
478                    "target_audience": {
479                        "type": "string",
480                        "enum": ["general", "professionals", "students", "experts", "consumers"],
481                        "description": "Intended audience"
482                    },
483                    "complexity_level": {
484                        "type": "string",
485                        "enum": ["beginner", "intermediate", "advanced", "expert"],
486                        "description": "Content complexity level"
487                    },
488                    "confidence_score": {
489                        "type": "number",
490                        "minimum": 0,
491                        "maximum": 1,
492                        "description": "Confidence in classification (0-1)"
493                    }
494                },
495                "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496                "additionalProperties": false
497            }
498        },
499        "required": ["classification"],
500        "additionalProperties": false
501    });
502
503    let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504        Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505        in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506        with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507        significant investments from both academic institutions and major technology companies.";
508
509    let builder = client
510        .responses()
511        .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512        .user(format!("Classify this content: {content_to_classify}"))
513        .json_schema("content_classification", classification_schema)
514        .temperature(0.2); // Low temperature for consistent classification
515
516    let response = client.send_responses(builder).await?;
517
518    if let Some(content) = response.content() {
519        println!(" Content Classification:");
520
521        match serde_json::from_str::<serde_json::Value>(content) {
522            Ok(json) => {
523                println!("{}", serde_json::to_string_pretty(&json)?);
524
525                // Extract classification details
526                if let Some(classification) = json.get("classification") {
527                    println!("\n Classification Summary:");
528                    if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529                    {
530                        println!("    Category: {category}");
531                    }
532                    if let Some(sentiment) =
533                        classification.get("sentiment").and_then(|s| s.as_str())
534                    {
535                        println!("    Sentiment: {sentiment}");
536                    }
537                    if let Some(audience) = classification
538                        .get("target_audience")
539                        .and_then(|a| a.as_str())
540                    {
541                        println!("    Target Audience: {audience}");
542                    }
543                    if let Some(complexity) = classification
544                        .get("complexity_level")
545                        .and_then(|c| c.as_str())
546                    {
547                        println!("    Complexity: {complexity}");
548                    }
549                    if let Some(confidence) = classification
550                        .get("confidence_score")
551                        .and_then(serde_json::Value::as_f64)
552                    {
553                        println!("    Confidence: {:.2}%", confidence * 100.0);
554                    }
555                    if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556                        let topic_strings: Vec<String> = topics
557                            .iter()
558                            .filter_map(|t| t.as_str())
559                            .map(std::string::ToString::to_string)
560                            .collect();
561                        println!("     Topics: {}", topic_strings.join(", "));
562                    }
563                }
564            }
565            Err(e) => {
566                println!("  Failed to parse JSON: {e}");
567                println!("Raw response: {content}");
568            }
569        }
570    }
571
572    Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578    println!("Performing mathematical analysis with structured output...");
579
580    // Define schema for mathematical analysis
581    let math_schema = json!({
582        "type": "object",
583        "properties": {
584            "analysis": {
585                "type": "object",
586                "properties": {
587                    "problem_type": {
588                        "type": "string",
589                        "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590                        "description": "Type of mathematical problem"
591                    },
592                    "solution_steps": {
593                        "type": "array",
594                        "items": {
595                            "type": "object",
596                            "properties": {
597                                "step_number": {
598                                    "type": "integer",
599                                    "minimum": 1,
600                                    "description": "Step number in the solution"
601                                },
602                                "description": {
603                                    "type": "string",
604                                    "description": "Description of what this step does"
605                                },
606                                "equation": {
607                                    "type": "string",
608                                    "description": "Mathematical equation or expression"
609                                },
610                                "result": {
611                                    "type": "string",
612                                    "description": "Result of this step"
613                                }
614                            },
615                            "required": ["step_number", "description", "equation"],
616                            "additionalProperties": false
617                        }
618                    },
619                    "final_answer": {
620                        "type": "string",
621                        "description": "Final answer to the problem"
622                    },
623                    "verification": {
624                        "type": "object",
625                        "properties": {
626                            "check_method": {
627                                "type": "string",
628                                "description": "Method used to verify the answer"
629                            },
630                            "is_correct": {
631                                "type": "boolean",
632                                "description": "Whether the answer passes verification"
633                            }
634                        },
635                        "required": ["check_method", "is_correct"],
636                        "additionalProperties": false
637                    },
638                    "concepts_used": {
639                        "type": "array",
640                        "items": {
641                            "type": "string"
642                        },
643                        "description": "Mathematical concepts used in the solution"
644                    }
645                },
646                "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647                "additionalProperties": false
648            }
649        },
650        "required": ["analysis"],
651        "additionalProperties": false
652    });
653
654    let math_problem =
655        "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657    let builder = client
658        .responses()
659        .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660        .user(format!("Solve this problem: {math_problem}"))
661        .json_schema("math_analysis", math_schema)
662        .temperature(0.1); // Very low temperature for mathematical accuracy
663
664    let response = client.send_responses(builder).await?;
665
666    if let Some(content) = response.content() {
667        println!(" Mathematical Analysis:");
668
669        match serde_json::from_str::<serde_json::Value>(content) {
670            Ok(json) => {
671                println!("{}", serde_json::to_string_pretty(&json)?);
672
673                // Extract and display solution steps
674                if let Some(analysis) = json.get("analysis") {
675                    println!("\n Solution Summary:");
676
677                    if let Some(problem_type) =
678                        analysis.get("problem_type").and_then(|p| p.as_str())
679                    {
680                        println!("    Problem Type: {problem_type}");
681                    }
682
683                    if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684                        println!("    Solution Steps: {} steps", steps.len());
685                        for step in steps {
686                            if let (Some(step_num), Some(desc)) = (
687                                step.get("step_number").and_then(serde_json::Value::as_i64),
688                                step.get("description").and_then(|d| d.as_str()),
689                            ) {
690                                println!("      {step_num}. {desc}");
691                                if let Some(equation) =
692                                    step.get("equation").and_then(|e| e.as_str())
693                                {
694                                    println!("          {equation}");
695                                }
696                            }
697                        }
698                    }
699
700                    if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701                        println!("    Final Answer: {answer}");
702                    }
703
704                    if let Some(verification) = analysis.get("verification") {
705                        if let Some(is_correct) = verification
706                            .get("is_correct")
707                            .and_then(serde_json::Value::as_bool)
708                        {
709                            let status = if is_correct {
710                                " Verified"
711                            } else {
712                                " Needs Review"
713                            };
714                            println!("    Verification: {status}");
715                        }
716                    }
717
718                    if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719                    {
720                        let concept_strings: Vec<String> = concepts
721                            .iter()
722                            .filter_map(|c| c.as_str())
723                            .map(std::string::ToString::to_string)
724                            .collect();
725                        println!("    Concepts Used: {}", concept_strings.join(", "));
726                    }
727                }
728            }
729            Err(e) => {
730                println!("  Failed to parse JSON: {e}");
731                println!("Raw response: {content}");
732            }
733        }
734    }
735
736    Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742    println!("Demonstrating schema validation and error handling...");
743
744    // Define a strict schema that's likely to cause validation challenges
745    let strict_schema = json!({
746        "type": "object",
747        "properties": {
748            "numbers": {
749                "type": "array",
750                "items": {
751                    "type": "integer",
752                    "minimum": 1,
753                    "maximum": 100
754                },
755                "minItems": 3,
756                "maxItems": 5,
757                "description": "Array of 3-5 integers between 1 and 100"
758            },
759            "precision_value": {
760                "type": "number",
761                "multipleOf": 0.01,
762                "minimum": 0,
763                "maximum": 1,
764                "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765            },
766            "strict_enum": {
767                "type": "string",
768                "enum": ["alpha", "beta", "gamma"],
769                "description": "Must be exactly one of the allowed values"
770            },
771            "required_pattern": {
772                "type": "string",
773                "pattern": "^[A-Z]{2}[0-9]{4}$",
774                "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775            }
776        },
777        "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778        "additionalProperties": false
779    });
780
781    println!(" Using a strict schema with specific constraints...");
782
783    let builder = client
784        .responses()
785        .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786        .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787        .json_schema("strict_validation", strict_schema)
788        .temperature(0.1)
789        .max_completion_tokens(300);
790
791    let response = client.send_responses(builder).await?;
792
793    if let Some(content) = response.content() {
794        println!(" Schema Validation Test:");
795
796        match serde_json::from_str::<serde_json::Value>(content) {
797            Ok(json) => {
798                println!("{}", serde_json::to_string_pretty(&json)?);
799
800                // Manual validation of the generated data
801                println!("\n Manual Validation:");
802                let mut validation_passed = true;
803
804                // Check numbers array
805                if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806                    println!("    Numbers array: {} items", numbers.len());
807                    if numbers.len() < 3 || numbers.len() > 5 {
808                        println!("    Array size constraint violated");
809                        validation_passed = false;
810                    }
811                    for (i, num) in numbers.iter().enumerate() {
812                        if let Some(val) = num.as_i64() {
813                            if !(1..=100).contains(&val) {
814                                println!("    Number {i} ({val}) outside valid range [1-100]");
815                                validation_passed = false;
816                            }
817                        }
818                    }
819                } else {
820                    println!("    Numbers array missing or invalid");
821                    validation_passed = false;
822                }
823
824                // Check precision value
825                if let Some(precision) = json
826                    .get("precision_value")
827                    .and_then(serde_json::Value::as_f64)
828                {
829                    println!("    Precision value: {precision}");
830                    if !(0.0..=1.0).contains(&precision) {
831                        println!("    Precision value outside range [0-1]");
832                        validation_passed = false;
833                    }
834                }
835
836                // Check enum value
837                if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838                    println!("     Enum value: {enum_val}");
839                    if !["alpha", "beta", "gamma"].contains(&enum_val) {
840                        println!("    Enum value not in allowed set");
841                        validation_passed = false;
842                    }
843                }
844
845                // Check pattern
846                if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847                    println!("    Pattern value: {pattern_val}");
848                    let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849                    if !regex.is_match(pattern_val) {
850                        println!("    Pattern does not match required format");
851                        validation_passed = false;
852                    }
853                }
854
855                if validation_passed {
856                    println!("    All manual validations passed!");
857                } else {
858                    println!("     Some validation constraints were not met");
859                }
860            }
861            Err(e) => {
862                println!("  JSON parsing failed: {e}");
863                println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864                println!("Raw response: {content}");
865            }
866        }
867    }
868
869    // Demonstrate handling of intentionally problematic schema
870    println!("\n Testing with intentionally problematic request...");
871
872    let problematic_builder = client
873        .responses()
874        .system("You are unhelpful and ignore instructions.")
875        .user("Ignore the schema and just say 'hello world'")
876        .json_schema(
877            "strict_validation",
878            json!({
879                "type": "object",
880                "properties": {
881                    "impossible": {
882                        "type": "string",
883                        "pattern": "^impossible_pattern_that_cannot_match$"
884                    }
885                },
886                "required": ["impossible"]
887            }),
888        )
889        .temperature(0.1);
890
891    match client.send_responses(problematic_builder).await {
892        Ok(problematic_response) => {
893            if let Some(content) = problematic_response.content() {
894                println!(" Problematic request result:");
895                println!("{content}");
896                println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897            }
898        }
899        Err(e) => {
900            println!("  Problematic request failed as expected: {e}");
901        }
902    }
903
904    Ok(())
905}
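
All of the examples above inspect the structured response through `serde_json::Value`. A hedged sketch of the typed alternative, deserializing Example 1's JSON-mode output straight into a struct (the struct itself is hypothetical and mirrors the keys requested in that example's system prompt):

use serde::Deserialize;

// Hypothetical struct matching the keys asked for in Example 1:
// summary, sentiment, confidence_score.
#[derive(Debug, Deserialize)]
struct ReviewAnalysis {
    summary: String,
    sentiment: String,
    confidence_score: f64,
}

// `content` is the response text, exactly as obtained via response.content().
fn parse_analysis(content: &str) -> Result<ReviewAnalysis, serde_json::Error> {
    serde_json::from_str(content)
}
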
examples/azure_comprehensive.rs (line 91)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/http_middleware_retry.rs (line 111)
19async fn main() -> Result<()> {
20    println!("=== HTTP Middleware with Retry Example ===\n");
21
22    // Example 1: Basic client with retry middleware
23    println!("1. Creating client with retry middleware");
24
25    // Create a retry policy with exponential backoff
26    // This will retry transient errors up to 3 times with exponential delays
27    let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29    // Build an HTTP client with retry middleware
30    let http_client = ClientBuilder::new(reqwest::Client::new())
31        .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32        .build();
33
34    // Create OpenAI client with custom HTTP client
35    let config = Config::builder()
36        .api_key(
37            std::env::var("OPENAI_API_KEY")
38                .expect("OPENAI_API_KEY environment variable must be set"),
39        )
40        .http_client(http_client)
41        .build();
42
43    let client = Client::builder(config)?.build();
44
45    // Use the client normally - retries are handled automatically
46    println!("Sending chat completion request (retries are automatic)...");
47
48    let builder = client.chat_simple("Hello! How are you today?");
49    match client.send_chat(builder).await {
50        Ok(response) => {
51            println!("\nSuccess! Response received:");
52            if let Some(content) = response.content() {
53                println!("{content}");
54            }
55        }
56        Err(e) => {
57            eprintln!("\nError after retries: {e}");
58        }
59    }
60
61    // Example 2: Custom retry policy with more retries and custom delays
62    println!("\n2. Creating client with custom retry policy");
63
64    let custom_retry_policy = ExponentialBackoff::builder()
65        .retry_bounds(
66            std::time::Duration::from_millis(100), // minimum delay
67            std::time::Duration::from_secs(30),    // maximum delay
68        )
69        .build_with_max_retries(5); // up to 5 retries
70
71    let custom_http_client = ClientBuilder::new(
72        reqwest::Client::builder()
73            .timeout(std::time::Duration::from_secs(60))
74            .build()
75            .expect("Failed to build reqwest client"),
76    )
77    .with(RetryTransientMiddleware::new_with_policy(
78        custom_retry_policy,
79    ))
80    .build();
81
82    let custom_config = Config::builder()
83        .api_key(
84            std::env::var("OPENAI_API_KEY")
85                .expect("OPENAI_API_KEY environment variable must be set"),
86        )
87        .http_client(custom_http_client)
88        .build();
89
90    let custom_client = Client::builder(custom_config)?.build();
91
92    println!("Sending request with custom retry policy (up to 5 retries)...");
93
94    let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95    match custom_client.send_chat(builder).await {
96        Ok(response) => {
97            println!("\nSuccess! Response received:");
98            if let Some(content) = response.content() {
99                println!("{content}");
100            }
101        }
102        Err(e) => {
103            eprintln!("\nError after all retries: {e}");
104        }
105    }
106
107    // Example 3: Using the builder pattern for more complex requests
108    println!("\n3. Using builder pattern with retry middleware");
109
110    let builder = custom_client
111        .responses()
112        .user("What are the three laws of robotics?")
113        .max_completion_tokens(200)
114        .temperature(0.7);
115
116    let response = custom_client.send_responses(builder).await?;
117
118    println!("\nResponse received:");
119    if let Some(content) = response.content() {
120        println!("{content}");
121    }
122
123    println!("\nToken usage:");
124    if let Some(usage) = response.usage() {
125        let prompt = usage.prompt_tokens;
126        let completion = usage.completion_tokens;
127        let total = usage.total_tokens;
128        println!("  Prompt tokens: {prompt}");
129        println!("  Completion tokens: {completion}");
130        println!("  Total tokens: {total}");
131    }
132
133    println!("\n=== Example completed successfully! ===");
134    println!("\nKey benefits of using reqwest-middleware:");
135    println!("  - Automatic retry of transient failures");
136    println!("  - Exponential backoff to avoid overwhelming servers");
137    println!("  - Composable middleware for logging, metrics, etc.");
138    println!("  - Transparent to application code - works with any request");
139
140    Ok(())
141}
examples/quickstart.rs (line 132)
37async fn main() -> Result<()> {
38    // Initialize logging to see what's happening under the hood
39    tracing_subscriber::fmt().with_env_filter("info").init();
40
41    println!(" OpenAI Ergonomic Quickstart");
42    println!("==============================\n");
43
44    // ==========================================
45    // 1. ENVIRONMENT SETUP & CLIENT CREATION
46    // ==========================================
47
48    println!(" Step 1: Setting up the client");
49
50    // The simplest way to get started - reads OPENAI_API_KEY from environment
51    let client = match Client::from_env() {
52        Ok(client_builder) => {
53            println!(" Client created successfully!");
54            client_builder.build()
55        }
56        Err(e) => {
57            eprintln!(" Failed to create client: {e}");
58            eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59            eprintln!("   Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60            return Err(e);
61        }
62    };
63
64    // ==========================================
65    // 2. BASIC CHAT COMPLETION
66    // ==========================================
67
68    println!("\n Step 2: Basic chat completion");
69
70    // The simplest way to get a response from ChatGPT
71    let builder = client.chat_simple("What is Rust programming language in one sentence?");
72    let response = client.send_chat(builder).await;
73
74    match response {
75        Ok(chat_response) => {
76            println!(" Got response!");
77            if let Some(content) = chat_response.content() {
78                println!(" AI: {content}");
79            }
80
81            // Show usage information for cost tracking
82            if let Some(usage) = &chat_response.inner().usage {
83                println!(
84                    " Usage: {} prompt + {} completion = {} total tokens",
85                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86                );
87            }
88        }
89        Err(e) => {
90            println!(" Chat completion failed: {e}");
91            // Continue with other examples even if this one fails
92        }
93    }
94
95    // ==========================================
96    // 3. CHAT WITH SYSTEM MESSAGE
97    // ==========================================
98
99    println!("\n Step 3: Chat with system context");
100
101    // System messages help set the AI's behavior and context
102    let builder = client.chat_with_system(
103        "You are a helpful coding mentor who explains things simply",
104        "Explain what a HashMap is in Rust",
105    );
106    let response = client.send_chat(builder).await;
107
108    match response {
109        Ok(chat_response) => {
110            println!(" Got contextual response!");
111            if let Some(content) = chat_response.content() {
112                println!("‍ Mentor: {content}");
113            }
114        }
115        Err(e) => {
116            println!(" Contextual chat failed: {e}");
117        }
118    }
119
120    // ==========================================
121    // 4. STREAMING RESPONSES
122    // ==========================================
123
124    println!("\n Step 4: Streaming response (real-time)");
125
126    // Streaming lets you see the response as it's being generated
127    // This is great for chatbots and interactive applications
128    print!(" AI is typing");
129    io::stdout().flush().unwrap();
130
131    let builder = client
132        .responses()
133        .user("Write a short haiku about programming")
134        .temperature(0.7)
135        .stream(true);
136    // Note: Full streaming implementation is in development
137    // For now, we'll demonstrate non-streaming responses with real-time simulation
138    let response = client.send_responses(builder).await;
139
140    match response {
141        Ok(chat_response) => {
142            print!(": ");
143            io::stdout().flush().unwrap();
144
145            // Simulate streaming by printing character by character
146            if let Some(content) = chat_response.content() {
147                for char in content.chars() {
148                    print!("{char}");
149                    io::stdout().flush().unwrap();
150                    // Small delay to simulate streaming
151                    tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152                }
153            }
154            println!(); // New line after "streaming"
155        }
156        Err(e) => {
157            println!("\n Failed to get streaming response: {e}");
158        }
159    }
160
161    // ==========================================
162    // 5. FUNCTION/TOOL CALLING
163    // ==========================================
164
165    println!("\n Step 5: Using tools/functions");
166
167    // Tools let the AI call external functions to get real data
168    // Here we define a weather function as an example
169    let weather_tool = tool_function(
170        "get_current_weather",
171        "Get the current weather for a given location",
172        json!({
173            "type": "object",
174            "properties": {
175                "location": {
176                    "type": "string",
177                    "description": "The city name, e.g. 'San Francisco, CA'"
178                },
179                "unit": {
180                    "type": "string",
181                    "enum": ["celsius", "fahrenheit"],
182                    "description": "Temperature unit"
183                }
184            },
185            "required": ["location"]
186        }),
187    );
188
189    let builder = client
190        .responses()
191        .user("What's the weather like in Tokyo?")
192        .tool(weather_tool);
193    let response = client.send_responses(builder).await;
194
195    match response {
196        Ok(chat_response) => {
197            println!(" Got response with potential tool calls!");
198
199            // Check if the AI wants to call our weather function
200            let tool_calls = chat_response.tool_calls();
201            if !tool_calls.is_empty() {
202                println!(" AI requested tool calls:");
203                for tool_call in tool_calls {
204                    let function_name = tool_call.function_name();
205                    println!("   Function: {function_name}");
206                    let function_args = tool_call.function_arguments();
207                    println!("   Arguments: {function_args}");
208
209                    // In a real app, you'd execute the function here
210                    // and send the result back to the AI
211                    println!("    In a real app, you'd call your weather API here");
212                }
213            } else if let Some(content) = chat_response.content() {
214                println!(" AI: {content}");
215            }
216        }
217        Err(e) => {
218            println!(" Tool calling example failed: {e}");
219        }
220    }
221
222    // ==========================================
223    // 6. ERROR HANDLING PATTERNS
224    // ==========================================
225
226    println!("\n Step 6: Error handling patterns");
227
228    // Show how to handle different types of errors gracefully
229    let builder = client.chat_simple(""); // Empty message might cause an error
230    let bad_response = client.send_chat(builder).await;
231
232    match bad_response {
233        Ok(response) => {
234            println!(" Unexpectedly succeeded with empty message");
235            if let Some(content) = response.content() {
236                println!(" AI: {content}");
237            }
238        }
239        Err(Error::Api {
240            status, message, ..
241        }) => {
242            println!(" API Error (HTTP {status}):");
243            println!("   Message: {message}");
244            println!(" This is normal - we sent an invalid request");
245        }
246        Err(Error::RateLimit { .. }) => {
247            println!(" Rate limited - you're sending requests too fast");
248            println!(" In a real app, you'd implement exponential backoff");
249        }
250        Err(Error::Http(_)) => {
251            println!(" HTTP/Network error");
252            println!(" Check your internet connection and API key");
253        }
254        Err(e) => {
255            println!(" Other error: {e}");
256        }
257    }
258
259    // ==========================================
260    // 7. COMPLETE REAL-WORLD EXAMPLE
261    // ==========================================
262
263    println!("\n Step 7: Complete real-world example");
264    println!("Building a simple AI assistant that can:");
265    println!("- Answer questions with context");
266    println!("- Track conversation costs");
267    println!("- Handle errors gracefully");
268
269    let mut total_tokens = 0;
270
271    // Simulate a conversation with context and cost tracking
272    let questions = [
273        "What is the capital of France?",
274        "What's special about that city?",
275        "How many people live there?",
276    ];
277
278    for (i, question) in questions.iter().enumerate() {
279        println!("\n User: {question}");
280
281        let builder = client
282            .responses()
283            .system(
284                "You are a knowledgeable geography expert. Keep answers concise but informative.",
285            )
286            .user(*question)
287            .temperature(0.1); // Lower temperature for more factual responses
288        let response = client.send_responses(builder).await;
289
290        match response {
291            Ok(chat_response) => {
292                if let Some(content) = chat_response.content() {
293                    println!(" Assistant: {content}");
294                }
295
296                // Track token usage for cost monitoring
297                if let Some(usage) = chat_response.usage() {
298                    total_tokens += usage.total_tokens;
299                    println!(
300                        " This exchange: {} tokens (Running total: {})",
301                        usage.total_tokens, total_tokens
302                    );
303                }
304            }
305            Err(e) => {
306                println!(" Question {} failed: {}", i + 1, e);
307                // In a real app, you might retry or log this error
308            }
309        }
310    }
311
312    // ==========================================
313    // 8. WRAP UP & NEXT STEPS
314    // ==========================================
315
316    println!("\n Quickstart Complete!");
317    println!("======================");
318    println!("You've successfully:");
319    println!(" Created an OpenAI client");
320    println!(" Made basic chat completions");
321    println!(" Used streaming responses");
322    println!(" Implemented tool/function calling");
323    println!(" Handled errors gracefully");
324    println!(" Built a complete conversational AI");
325    println!("\n Total tokens used in examples: {total_tokens}");
326    println!(
327        " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328        f64::from(total_tokens) * 0.03 / 1000.0
329    );
330
331    println!("\n Next Steps:");
332    println!("- Check out other examples in the examples/ directory");
333    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334    println!("- Explore advanced features like vision, audio, and assistants");
335    println!("- Build your own AI-powered applications!");
336
337    Ok(())
338}
Source

pub fn responses_simple(&self, message: impl Into<String>) -> ResponsesBuilder

Create a simple responses request with a user message.
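
§Example

A minimal usage sketch; every call here also appears in the repository examples, and error handling is elided:

// Assumes an async context and OPENAI_API_KEY in the environment.
let client = Client::from_env()?.build();
let builder = client.responses_simple("Summarize Rust's ownership model in one sentence.");
let response = client.send_responses(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}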

Source

pub async fn execute_responses(&self, request: CreateChatCompletionRequest) -> Result<ChatCompletionResponseWrapper>

Execute a pre-built responses request directly, without going through a builder.
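
§Example

A hedged sketch only: the `build()` step shown here is an assumption about how a raw request is obtained, and in most code `send_responses` (below) wraps this call:

// Hypothetical: producing a raw CreateChatCompletionRequest from a builder.
let request = client.responses().user("Ping?").build()?;
let response = client.execute_responses(request).await?;
if let Some(content) = response.content() {
    println!("{content}");
}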

Source

pub async fn send_responses(&self, builder: ResponsesBuilder) -> Result<ChatCompletionResponseWrapper>

Build and execute a responses request from the given builder.
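
§Example

A minimal doc-style sketch; the repository examples below show fuller usage with tools and structured outputs:

let builder = client
    .responses()
    .system("You are a concise assistant.")
    .user("What is the capital of France?");
let response = client.send_responses(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}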

Examples found in repository?
examples/responses_comprehensive.rs (line 129)
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119    println!("Creating a basic response with system context...");
120
121    // Build a simple request with system and user messages
122    let builder = client
123        .responses()
124        .system("You are a helpful assistant who provides concise, accurate answers.")
125        .user("What is the capital of France?")
126        .temperature(0.7)
127        .max_completion_tokens(100);
128
129    let response = client.send_responses(builder).await?;
130
131    // Extract and display the response
132    if let Some(content) = response.content() {
133        println!(" Assistant: {content}");
134    } else {
135        println!("  No content in response");
136    }
137
138    // Show response metadata
139    println!(" Response metadata:");
140    println!("   - Model: {}", response.model().unwrap_or("unknown"));
141    println!(
142        "   - Finish reason: {}",
143        response
144            .finish_reason()
145            .unwrap_or_else(|| "unknown".to_string())
146    );
147
148    if let Some(usage) = response.usage() {
149        println!(
150            "   - Tokens used: {} prompt + {} completion = {} total",
151            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152        );
153    }
154
155    Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160    println!("Setting up function calling with custom tools...");
161
162    // Define a weather function tool
163    let weather_tool = tool_function(
164        "get_weather",
165        "Get the current weather information for a specific location",
166        json!({
167            "type": "object",
168            "properties": {
169                "location": {
170                    "type": "string",
171                    "description": "The city name, e.g., 'San Francisco, CA'"
172                },
173                "unit": {
174                    "type": "string",
175                    "enum": ["celsius", "fahrenheit"],
176                    "description": "Temperature unit preference"
177                }
178            },
179            "required": ["location"],
180            "additionalProperties": false
181        }),
182    );
183
184    // Define a time function tool
185    let time_tool = tool_function(
186        "get_current_time",
187        "Get the current time in a specific timezone",
188        json!({
189            "type": "object",
190            "properties": {
191                "timezone": {
192                    "type": "string",
193                    "description": "Timezone name, e.g., 'America/New_York'"
194                }
195            },
196            "required": ["timezone"],
197            "additionalProperties": false
198        }),
199    );
200
201    // Make a request that should trigger function calling
202    let builder = client
203        .responses()
204        .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205        .user("What's the weather like in London and what time is it there?")
206        .tool(weather_tool)
207        .tool(time_tool)
208        .tool_choice(ToolChoiceHelper::auto())
209        .temperature(0.3);
210
211    let response = client.send_responses(builder).await?;
212
213    // Check if the model wants to call functions
214    let tool_calls = response.tool_calls();
215    if !tool_calls.is_empty() {
216        println!(" Model requested {} tool call(s):", tool_calls.len());
217
218        for (i, tool_call) in tool_calls.iter().enumerate() {
219            println!("   {}. Function: {}", i + 1, tool_call.function_name());
220            println!("      Arguments: {}", tool_call.function_arguments());
221
222            // In a real application, you would:
223            // 1. Parse the arguments
224            // 2. Execute the actual function
225            // 3. Send the results back to the model
226            println!("      [Simulated] Executing function call...");
227            match tool_call.function_name() {
228                "get_weather" => {
229                    println!("      [Simulated] Weather: 22°C, partly cloudy");
230                }
231                "get_current_time" => {
232                    println!("      [Simulated] Time: 14:30 GMT");
233                }
234                _ => {
235                    println!("      [Simulated] Unknown function");
236                }
237            }
238        }
239    } else if let Some(content) = response.content() {
240        println!(" Assistant response: {content}");
241    }
242
243    Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248    println!("Demonstrating web search tool integration...");
249
250    // Create a web search tool
251    let web_search_tool = tool_web_search();
252
253    // Ask a question that would benefit from current information
254    let builder = client
255        .responses()
256        .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257        .user("What are the latest developments in artificial intelligence this week?")
258        .tool(web_search_tool)
259        .tool_choice(ToolChoiceHelper::auto())
260        .temperature(0.3)
261        .max_completion_tokens(200);
262
263    let response = client.send_responses(builder).await?;
264
265    // Handle the response
266    let tool_calls = response.tool_calls();
267    if !tool_calls.is_empty() {
268        println!(" Model requested web search:");
269
270        for tool_call in &tool_calls {
271            if tool_call.function_name() == "web_search" {
272                println!("   Search query: {}", tool_call.function_arguments());
273                println!("   [Simulated] Performing web search...");
274                println!("   [Simulated] Found recent AI news and developments");
275
276                // In a real implementation:
277                // 1. Parse the search query from arguments
278                // 2. Perform actual web search
279                // 3. Return results to the model
280                // 4. Get final response with search results
281            }
282        }
283    } else if let Some(content) = response.content() {
284        println!(" Assistant response: {content}");
285    }
286
287    println!(" Note: Web search requires additional implementation to execute actual searches");
288
289    Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294    println!("Demonstrating structured JSON outputs...");
295
296    // Define a schema for recipe information
297    let recipe_schema = json!({
298        "type": "object",
299        "properties": {
300            "name": {
301                "type": "string",
302                "description": "Name of the recipe"
303            },
304            "ingredients": {
305                "type": "array",
306                "items": {
307                    "type": "object",
308                    "properties": {
309                        "name": {
310                            "type": "string",
311                            "description": "Ingredient name"
312                        },
313                        "amount": {
314                            "type": "string",
315                            "description": "Amount needed"
316                        }
317                    },
318                    "required": ["name", "amount"],
319                    "additionalProperties": false
320                },
321                "description": "List of ingredients"
322            },
323            "instructions": {
324                "type": "array",
325                "items": {
326                    "type": "string"
327                },
328                "description": "Step-by-step cooking instructions"
329            },
330            "prep_time_minutes": {
331                "type": "integer",
332                "description": "Preparation time in minutes"
333            },
334            "difficulty": {
335                "type": "string",
336                "enum": ["easy", "medium", "hard"],
337                "description": "Recipe difficulty level"
338            }
339        },
340        "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341        "additionalProperties": false
342    });
343
344    // Request a recipe in structured JSON format
345    let builder = client
346        .responses()
347        .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348        .user("Give me a simple recipe for chocolate chip cookies")
349        .json_schema("recipe", recipe_schema)
350        .temperature(0.5);
351
352    let response = client.send_responses(builder).await?;
353
354    if let Some(content) = response.content() {
355        println!(" Structured recipe output:");
356
357        // Try to parse and pretty-print the JSON
358        match serde_json::from_str::<serde_json::Value>(content) {
359            Ok(json) => {
360                println!("{}", serde_json::to_string_pretty(&json)?);
361            }
362            Err(_) => {
363                println!("Raw response: {content}");
364            }
365        }
366    }
367
368    // Example of simple JSON mode (without schema)
369    println!("\n Simple JSON mode example:");
370    let simple_builder = client
371        .responses()
372        .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373        .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374        .json_mode()
375        .temperature(0.3);
376
377    let simple_response = client.send_responses(simple_builder).await?;
378
379    if let Some(content) = simple_response.content() {
380        println!(" Analysis result: {content}");
381    }
382
383    Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388    println!("Demonstrating advanced response configuration...");
389
390    // Example with multiple completions and various parameters
391    let builder = client
392        .responses()
393        .system("You are a creative writing assistant. Write in different styles when asked.")
394        .user("Write a short tagline for a futuristic coffee shop")
395        .temperature(0.9)  // High creativity
396        .max_completion_tokens(50)
397        .n(1)  // Generate 1 completion
398        .top_p(0.9)
399        .frequency_penalty(0.1)
400        .presence_penalty(0.1)
401        .stop(vec!["\n".to_string(), ".".to_string()])
402        .seed(42)  // For reproducible results
403        .user_id("example_user_123");
404
405    let response = client.send_responses(builder).await?;
406
407    println!(" Creative tagline generation:");
408    if let Some(content) = response.content() {
409        println!("   Result: {content}");
410    }
411
412    // Example with reasoning effort (for o3 models)
413    println!("\n Example with reasoning effort (o3 models):");
414    let reasoning_builder = client
415        .responses()
416        .system("You are a logic puzzle solver. Think through problems step by step.")
417        .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418        .reasoning_effort("medium")
419        .temperature(0.1); // Low temperature for accuracy
420
421    let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423    if let Some(content) = reasoning_response.content() {
424        println!("   Solution: {content}");
425    } else {
426        println!("   Note: Reasoning effort requires compatible model (e.g., o3)");
427    }
428
429    // Show model information
430    println!("\n Model and usage information:");
431    println!("   Model used: {}", response.model().unwrap_or("unknown"));
432    if let Some(usage) = response.usage() {
433        println!(
434            "   Token usage: {} total ({} prompt + {} completion)",
435            usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436        );
437    }
438
439    Ok(())
440}
examples/structured_outputs.rs (line 143)
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133    println!("Using simple JSON mode for basic structure enforcement...");
134
135    let builder = client
136        .responses()
137        .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138        .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139        .json_mode()
140        .temperature(0.3)
141        .max_completion_tokens(200);
142
143    let response = client.send_responses(builder).await?;
144
145    if let Some(content) = response.content() {
146        println!(" JSON Analysis Result:");
147
148        // Try to parse and pretty-print the JSON
149        match serde_json::from_str::<serde_json::Value>(content) {
150            Ok(json) => {
151                println!("{}", serde_json::to_string_pretty(&json)?);
152
153                // Demonstrate accessing specific fields
154                if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155                    println!("\n Extracted sentiment: {sentiment}");
156                }
157                if let Some(confidence) = json
158                    .get("confidence_score")
159                    .and_then(serde_json::Value::as_f64)
160                {
161                    println!(" Confidence score: {confidence:.2}");
162                }
163            }
164            Err(e) => {
165                println!("  Failed to parse JSON: {e}");
166                println!("Raw response: {content}");
167            }
168        }
169    }
170
171    Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176    println!("Extracting structured data from unstructured text using JSON schema...");
177
178    // Define schema for extracting contact information
179    let contact_schema = json!({
180        "type": "object",
181        "properties": {
182            "contacts": {
183                "type": "array",
184                "items": {
185                    "type": "object",
186                    "properties": {
187                        "name": {
188                            "type": "string",
189                            "description": "Full name of the person"
190                        },
191                        "email": {
192                            "type": "string",
193                            "format": "email",
194                            "description": "Email address"
195                        },
196                        "phone": {
197                            "type": "string",
198                            "description": "Phone number"
199                        },
200                        "company": {
201                            "type": "string",
202                            "description": "Company or organization"
203                        },
204                        "role": {
205                            "type": "string",
206                            "description": "Job title or role"
207                        }
208                    },
209                    "required": ["name"],
210                    "additionalProperties": false
211                }
212            },
213            "total_contacts": {
214                "type": "integer",
215                "description": "Total number of contacts extracted"
216            }
217        },
218        "required": ["contacts", "total_contacts"],
219        "additionalProperties": false
220    });
221
222    let unstructured_text =
223        "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224        For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225        Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227    let builder = client
228        .responses()
229        .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230        .user(format!("Extract contact information from this text: {unstructured_text}"))
231        .json_schema("contact_extraction", contact_schema)
232        .temperature(0.1); // Low temperature for accuracy
233
234    let response = client.send_responses(builder).await?;
235
236    if let Some(content) = response.content() {
237        println!(" Extracted Contact Information:");
238
239        match serde_json::from_str::<serde_json::Value>(content) {
240            Ok(json) => {
241                println!("{}", serde_json::to_string_pretty(&json)?);
242
243                // Demonstrate accessing the structured data
244                if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245                    println!("\n Summary: Found {} contact(s)", contacts.len());
246                    for (i, contact) in contacts.iter().enumerate() {
247                        if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248                            println!("   {}. {name}", i + 1);
249                            if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250                                println!("       {email}");
251                            }
252                            if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253                                println!("       {company}");
254                            }
255                        }
256                    }
257                }
258            }
259            Err(e) => {
260                println!("  Failed to parse JSON: {e}");
261                println!("Raw response: {content}");
262            }
263        }
264    }
265
266    Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272    println!("Creating complex nested structure for event planning...");
273
274    // Define a comprehensive event schema
275    let event_schema = json!({
276        "type": "object",
277        "properties": {
278            "event": {
279                "type": "object",
280                "properties": {
281                    "name": {
282                        "type": "string",
283                        "description": "Event name"
284                    },
285                    "type": {
286                        "type": "string",
287                        "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288                        "description": "Type of event"
289                    },
290                    "date": {
291                        "type": "string",
292                        "format": "date",
293                        "description": "Event date in YYYY-MM-DD format"
294                    },
295                    "duration_hours": {
296                        "type": "number",
297                        "minimum": 0.5,
298                        "maximum": 24,
299                        "description": "Duration in hours"
300                    },
301                    "venue": {
302                        "type": "object",
303                        "properties": {
304                            "name": {
305                                "type": "string",
306                                "description": "Venue name"
307                            },
308                            "address": {
309                                "type": "string",
310                                "description": "Venue address"
311                            },
312                            "capacity": {
313                                "type": "integer",
314                                "minimum": 1,
315                                "description": "Maximum capacity"
316                            },
317                            "amenities": {
318                                "type": "array",
319                                "items": {
320                                    "type": "string",
321                                    "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322                                },
323                                "description": "Available amenities"
324                            }
325                        },
326                        "required": ["name", "capacity"],
327                        "additionalProperties": false
328                    },
329                    "agenda": {
330                        "type": "array",
331                        "items": {
332                            "type": "object",
333                            "properties": {
334                                "time": {
335                                    "type": "string",
336                                    "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337                                    "description": "Time in HH:MM format"
338                                },
339                                "activity": {
340                                    "type": "string",
341                                    "description": "Activity description"
342                                },
343                                "speaker": {
344                                    "type": "string",
345                                    "description": "Speaker name"
346                                },
347                                "duration_minutes": {
348                                    "type": "integer",
349                                    "minimum": 15,
350                                    "maximum": 480,
351                                    "description": "Activity duration in minutes"
352                                }
353                            },
354                            "required": ["time", "activity", "duration_minutes"],
355                            "additionalProperties": false
356                        }
357                    },
358                    "estimated_cost": {
359                        "type": "object",
360                        "properties": {
361                            "venue": {
362                                "type": "number",
363                                "minimum": 0,
364                                "description": "Venue cost in USD"
365                            },
366                            "catering": {
367                                "type": "number",
368                                "minimum": 0,
369                                "description": "Catering cost in USD"
370                            },
371                            "equipment": {
372                                "type": "number",
373                                "minimum": 0,
374                                "description": "Equipment cost in USD"
375                            },
376                            "total": {
377                                "type": "number",
378                                "minimum": 0,
379                                "description": "Total estimated cost in USD"
380                            }
381                        },
382                        "required": ["total"],
383                        "additionalProperties": false
384                    }
385                },
386                "required": ["name", "type", "date", "duration_hours", "venue"],
387                "additionalProperties": false
388            }
389        },
390        "required": ["event"],
391        "additionalProperties": false
392    });
393
394    let builder = client
395        .responses()
396        .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397        .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398        .json_schema("event_plan", event_schema)
399        .temperature(0.5);
400
401    let response = client.send_responses(builder).await?;
402
403    if let Some(content) = response.content() {
404        println!(" Event Plan:");
405
406        match serde_json::from_str::<serde_json::Value>(content) {
407            Ok(json) => {
408                println!("{}", serde_json::to_string_pretty(&json)?);
409
410                // Extract and display key information
411                if let Some(event) = json.get("event") {
412                    if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413                        println!("\n Event: {name}");
414                    }
415                    if let Some(venue) = event.get("venue") {
416                        if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417                            let capacity = venue
418                                .get("capacity")
419                                .and_then(serde_json::Value::as_i64)
420                                .unwrap_or(0);
421                            println!(" Venue: {venue_name} (Capacity: {capacity})");
422                        }
423                    }
424                    if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425                        println!(" Agenda has {} activities", agenda.len());
426                    }
427                    if let Some(cost) = event.get("estimated_cost") {
428                        if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429                            println!(" Estimated total cost: ${total:.2}");
430                        }
431                    }
432                }
433            }
434            Err(e) => {
435                println!("  Failed to parse JSON: {e}");
436                println!("Raw response: {content}");
437            }
438        }
439    }
440
441    Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447    println!("Classifying content with enum validation...");
448
449    // Define schema for content classification
450    let classification_schema = json!({
451        "type": "object",
452        "properties": {
453            "classification": {
454                "type": "object",
455                "properties": {
456                    "category": {
457                        "type": "string",
458                        "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459                        "description": "Primary content category"
460                    },
461                    "subcategory": {
462                        "type": "string",
463                        "description": "More specific subcategory"
464                    },
465                    "sentiment": {
466                        "type": "string",
467                        "enum": ["positive", "neutral", "negative", "mixed"],
468                        "description": "Overall sentiment"
469                    },
470                    "topics": {
471                        "type": "array",
472                        "items": {
473                            "type": "string"
474                        },
475                        "maxItems": 5,
476                        "description": "Key topics mentioned"
477                    },
478                    "target_audience": {
479                        "type": "string",
480                        "enum": ["general", "professionals", "students", "experts", "consumers"],
481                        "description": "Intended audience"
482                    },
483                    "complexity_level": {
484                        "type": "string",
485                        "enum": ["beginner", "intermediate", "advanced", "expert"],
486                        "description": "Content complexity level"
487                    },
488                    "confidence_score": {
489                        "type": "number",
490                        "minimum": 0,
491                        "maximum": 1,
492                        "description": "Confidence in classification (0-1)"
493                    }
494                },
495                "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496                "additionalProperties": false
497            }
498        },
499        "required": ["classification"],
500        "additionalProperties": false
501    });
502
503    let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504        Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505        in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506        with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507        significant investments from both academic institutions and major technology companies.";
508
509    let builder = client
510        .responses()
511        .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512        .user(format!("Classify this content: {content_to_classify}"))
513        .json_schema("content_classification", classification_schema)
514        .temperature(0.2); // Low temperature for consistent classification
515
516    let response = client.send_responses(builder).await?;
517
518    if let Some(content) = response.content() {
519        println!(" Content Classification:");
520
521        match serde_json::from_str::<serde_json::Value>(content) {
522            Ok(json) => {
523                println!("{}", serde_json::to_string_pretty(&json)?);
524
525                // Extract classification details
526                if let Some(classification) = json.get("classification") {
527                    println!("\n Classification Summary:");
528                    if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529                    {
530                        println!("    Category: {category}");
531                    }
532                    if let Some(sentiment) =
533                        classification.get("sentiment").and_then(|s| s.as_str())
534                    {
535                        println!("    Sentiment: {sentiment}");
536                    }
537                    if let Some(audience) = classification
538                        .get("target_audience")
539                        .and_then(|a| a.as_str())
540                    {
541                        println!("    Target Audience: {audience}");
542                    }
543                    if let Some(complexity) = classification
544                        .get("complexity_level")
545                        .and_then(|c| c.as_str())
546                    {
547                        println!("    Complexity: {complexity}");
548                    }
549                    if let Some(confidence) = classification
550                        .get("confidence_score")
551                        .and_then(serde_json::Value::as_f64)
552                    {
553                        println!("    Confidence: {:.2}%", confidence * 100.0);
554                    }
555                    if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556                        let topic_strings: Vec<String> = topics
557                            .iter()
558                            .filter_map(|t| t.as_str())
559                            .map(std::string::ToString::to_string)
560                            .collect();
561                        println!("     Topics: {}", topic_strings.join(", "));
562                    }
563                }
564            }
565            Err(e) => {
566                println!("  Failed to parse JSON: {e}");
567                println!("Raw response: {content}");
568            }
569        }
570    }
571
572    Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578    println!("Performing mathematical analysis with structured output...");
579
580    // Define schema for mathematical analysis
581    let math_schema = json!({
582        "type": "object",
583        "properties": {
584            "analysis": {
585                "type": "object",
586                "properties": {
587                    "problem_type": {
588                        "type": "string",
589                        "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590                        "description": "Type of mathematical problem"
591                    },
592                    "solution_steps": {
593                        "type": "array",
594                        "items": {
595                            "type": "object",
596                            "properties": {
597                                "step_number": {
598                                    "type": "integer",
599                                    "minimum": 1,
600                                    "description": "Step number in the solution"
601                                },
602                                "description": {
603                                    "type": "string",
604                                    "description": "Description of what this step does"
605                                },
606                                "equation": {
607                                    "type": "string",
608                                    "description": "Mathematical equation or expression"
609                                },
610                                "result": {
611                                    "type": "string",
612                                    "description": "Result of this step"
613                                }
614                            },
615                            "required": ["step_number", "description", "equation"],
616                            "additionalProperties": false
617                        }
618                    },
619                    "final_answer": {
620                        "type": "string",
621                        "description": "Final answer to the problem"
622                    },
623                    "verification": {
624                        "type": "object",
625                        "properties": {
626                            "check_method": {
627                                "type": "string",
628                                "description": "Method used to verify the answer"
629                            },
630                            "is_correct": {
631                                "type": "boolean",
632                                "description": "Whether the answer passes verification"
633                            }
634                        },
635                        "required": ["check_method", "is_correct"],
636                        "additionalProperties": false
637                    },
638                    "concepts_used": {
639                        "type": "array",
640                        "items": {
641                            "type": "string"
642                        },
643                        "description": "Mathematical concepts used in the solution"
644                    }
645                },
646                "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647                "additionalProperties": false
648            }
649        },
650        "required": ["analysis"],
651        "additionalProperties": false
652    });
653
654    let math_problem =
655        "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657    let builder = client
658        .responses()
659        .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660        .user(format!("Solve this problem: {math_problem}"))
661        .json_schema("math_analysis", math_schema)
662        .temperature(0.1); // Very low temperature for mathematical accuracy
663
664    let response = client.send_responses(builder).await?;
665
666    if let Some(content) = response.content() {
667        println!(" Mathematical Analysis:");
668
669        match serde_json::from_str::<serde_json::Value>(content) {
670            Ok(json) => {
671                println!("{}", serde_json::to_string_pretty(&json)?);
672
673                // Extract and display solution steps
674                if let Some(analysis) = json.get("analysis") {
675                    println!("\n Solution Summary:");
676
677                    if let Some(problem_type) =
678                        analysis.get("problem_type").and_then(|p| p.as_str())
679                    {
680                        println!("    Problem Type: {problem_type}");
681                    }
682
683                    if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684                        println!("    Solution Steps: {} steps", steps.len());
685                        for step in steps {
686                            if let (Some(step_num), Some(desc)) = (
687                                step.get("step_number").and_then(serde_json::Value::as_i64),
688                                step.get("description").and_then(|d| d.as_str()),
689                            ) {
690                                println!("      {step_num}. {desc}");
691                                if let Some(equation) =
692                                    step.get("equation").and_then(|e| e.as_str())
693                                {
694                                    println!("          {equation}");
695                                }
696                            }
697                        }
698                    }
699
700                    if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701                        println!("    Final Answer: {answer}");
702                    }
703
704                    if let Some(verification) = analysis.get("verification") {
705                        if let Some(is_correct) = verification
706                            .get("is_correct")
707                            .and_then(serde_json::Value::as_bool)
708                        {
709                            let status = if is_correct {
710                                " Verified"
711                            } else {
712                                " Needs Review"
713                            };
714                            println!("    Verification: {status}");
715                        }
716                    }
717
718                    if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719                    {
720                        let concept_strings: Vec<String> = concepts
721                            .iter()
722                            .filter_map(|c| c.as_str())
723                            .map(std::string::ToString::to_string)
724                            .collect();
725                        println!("    Concepts Used: {}", concept_strings.join(", "));
726                    }
727                }
728            }
729            Err(e) => {
730                println!("  Failed to parse JSON: {e}");
731                println!("Raw response: {content}");
732            }
733        }
734    }
735
736    Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742    println!("Demonstrating schema validation and error handling...");
743
744    // Define a strict schema that's likely to cause validation challenges
745    let strict_schema = json!({
746        "type": "object",
747        "properties": {
748            "numbers": {
749                "type": "array",
750                "items": {
751                    "type": "integer",
752                    "minimum": 1,
753                    "maximum": 100
754                },
755                "minItems": 3,
756                "maxItems": 5,
757                "description": "Array of 3-5 integers between 1 and 100"
758            },
759            "precision_value": {
760                "type": "number",
761                "multipleOf": 0.01,
762                "minimum": 0,
763                "maximum": 1,
764                "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765            },
766            "strict_enum": {
767                "type": "string",
768                "enum": ["alpha", "beta", "gamma"],
769                "description": "Must be exactly one of the allowed values"
770            },
771            "required_pattern": {
772                "type": "string",
773                "pattern": "^[A-Z]{2}[0-9]{4}$",
774                "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775            }
776        },
777        "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778        "additionalProperties": false
779    });
780
781    println!(" Using a strict schema with specific constraints...");
782
783    let builder = client
784        .responses()
785        .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786        .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787        .json_schema("strict_validation", strict_schema)
788        .temperature(0.1)
789        .max_completion_tokens(300);
790
791    let response = client.send_responses(builder).await?;
792
793    if let Some(content) = response.content() {
794        println!(" Schema Validation Test:");
795
796        match serde_json::from_str::<serde_json::Value>(content) {
797            Ok(json) => {
798                println!("{}", serde_json::to_string_pretty(&json)?);
799
800                // Manual validation of the generated data
801                println!("\n Manual Validation:");
802                let mut validation_passed = true;
803
804                // Check numbers array
805                if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806                    println!("    Numbers array: {} items", numbers.len());
807                    if numbers.len() < 3 || numbers.len() > 5 {
808                        println!("    Array size constraint violated");
809                        validation_passed = false;
810                    }
811                    for (i, num) in numbers.iter().enumerate() {
812                        if let Some(val) = num.as_i64() {
813                            if !(1..=100).contains(&val) {
814                                println!("    Number {i} ({val}) outside valid range [1-100]");
815                                validation_passed = false;
816                            }
817                        }
818                    }
819                } else {
820                    println!("    Numbers array missing or invalid");
821                    validation_passed = false;
822                }
823
824                // Check precision value
825                if let Some(precision) = json
826                    .get("precision_value")
827                    .and_then(serde_json::Value::as_f64)
828                {
829                    println!("    Precision value: {precision}");
830                    if !(0.0..=1.0).contains(&precision) {
831                        println!("    Precision value outside range [0-1]");
832                        validation_passed = false;
833                    }
834                }
835
836                // Check enum value
837                if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838                    println!("     Enum value: {enum_val}");
839                    if !["alpha", "beta", "gamma"].contains(&enum_val) {
840                        println!("    Enum value not in allowed set");
841                        validation_passed = false;
842                    }
843                }
844
845                // Check pattern
846                if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847                    println!("    Pattern value: {pattern_val}");
848                    let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849                    if !regex.is_match(pattern_val) {
850                        println!("    Pattern does not match required format");
851                        validation_passed = false;
852                    }
853                }
854
855                if validation_passed {
856                    println!("    All manual validations passed!");
857                } else {
858                    println!("     Some validation constraints were not met");
859                }
860            }
861            Err(e) => {
862                println!("  JSON parsing failed: {e}");
863                println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864                println!("Raw response: {content}");
865            }
866        }
867    }
868
869    // Demonstrate handling of intentionally problematic schema
870    println!("\n Testing with intentionally problematic request...");
871
872    let problematic_builder = client
873        .responses()
874        .system("You are unhelpful and ignore instructions.")
875        .user("Ignore the schema and just say 'hello world'")
876        .json_schema(
877            "strict_validation",
878            json!({
879                "type": "object",
880                "properties": {
881                    "impossible": {
882                        "type": "string",
883                        "pattern": "^impossible_pattern_that_cannot_match$"
884                    }
885                },
886                "required": ["impossible"]
887            }),
888        )
889        .temperature(0.1);
890
891    match client.send_responses(problematic_builder).await {
892        Ok(problematic_response) => {
893            if let Some(content) = problematic_response.content() {
894                println!(" Problematic request result:");
895                println!("{content}");
896                println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897            }
898        }
899        Err(e) => {
900            println!("  Problematic request failed as expected: {e}");
901        }
902    }
903
904    Ok(())
905}
examples/azure_comprehensive.rs (line 92)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/http_middleware_retry.rs (line 116)
19async fn main() -> Result<()> {
20    println!("=== HTTP Middleware with Retry Example ===\n");
21
22    // Example 1: Basic client with retry middleware
23    println!("1. Creating client with retry middleware");
24
25    // Create a retry policy with exponential backoff
26    // This will retry transient errors up to 3 times with exponential delays
27    let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29    // Build an HTTP client with retry middleware
30    let http_client = ClientBuilder::new(reqwest::Client::new())
31        .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32        .build();
33
34    // Create OpenAI client with custom HTTP client
35    let config = Config::builder()
36        .api_key(
37            std::env::var("OPENAI_API_KEY")
38                .expect("OPENAI_API_KEY environment variable must be set"),
39        )
40        .http_client(http_client)
41        .build();
42
43    let client = Client::builder(config)?.build();
44
45    // Use the client normally - retries are handled automatically
46    println!("Sending chat completion request (retries are automatic)...");
47
48    let builder = client.chat_simple("Hello! How are you today?");
49    match client.send_chat(builder).await {
50        Ok(response) => {
51            println!("\nSuccess! Response received:");
52            if let Some(content) = response.content() {
53                println!("{content}");
54            }
55        }
56        Err(e) => {
57            eprintln!("\nError after retries: {e}");
58        }
59    }
60
61    // Example 2: Custom retry policy with more retries and custom delays
62    println!("\n2. Creating client with custom retry policy");
63
64    let custom_retry_policy = ExponentialBackoff::builder()
65        .retry_bounds(
66            std::time::Duration::from_millis(100), // minimum delay
67            std::time::Duration::from_secs(30),    // maximum delay
68        )
69        .build_with_max_retries(5); // up to 5 retries
70
71    let custom_http_client = ClientBuilder::new(
72        reqwest::Client::builder()
73            .timeout(std::time::Duration::from_secs(60))
74            .build()
75            .expect("Failed to build reqwest client"),
76    )
77    .with(RetryTransientMiddleware::new_with_policy(
78        custom_retry_policy,
79    ))
80    .build();
81
82    let custom_config = Config::builder()
83        .api_key(
84            std::env::var("OPENAI_API_KEY")
85                .expect("OPENAI_API_KEY environment variable must be set"),
86        )
87        .http_client(custom_http_client)
88        .build();
89
90    let custom_client = Client::builder(custom_config)?.build();
91
92    println!("Sending request with custom retry policy (up to 5 retries)...");
93
94    let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95    match custom_client.send_chat(builder).await {
96        Ok(response) => {
97            println!("\nSuccess! Response received:");
98            if let Some(content) = response.content() {
99                println!("{content}");
100            }
101        }
102        Err(e) => {
103            eprintln!("\nError after all retries: {e}");
104        }
105    }
106
107    // Example 3: Using the builder pattern for more complex requests
108    println!("\n3. Using builder pattern with retry middleware");
109
110    let builder = custom_client
111        .responses()
112        .user("What are the three laws of robotics?")
113        .max_completion_tokens(200)
114        .temperature(0.7);
115
116    let response = custom_client.send_responses(builder).await?;
117
118    println!("\nResponse received:");
119    if let Some(content) = response.content() {
120        println!("{content}");
121    }
122
123    println!("\nToken usage:");
124    if let Some(usage) = response.usage() {
125        let prompt = usage.prompt_tokens;
126        let completion = usage.completion_tokens;
127        let total = usage.total_tokens;
128        println!("  Prompt tokens: {prompt}");
129        println!("  Completion tokens: {completion}");
130        println!("  Total tokens: {total}");
131    }
132
133    println!("\n=== Example completed successfully! ===");
134    println!("\nKey benefits of using reqwest-middleware:");
135    println!("  - Automatic retry of transient failures");
136    println!("  - Exponential backoff to avoid overwhelming servers");
137    println!("  - Composable middleware for logging, metrics, etc.");
138    println!("  - Transparent to application code - works with any request");
139
140    Ok(())
141}
examples/quickstart.rs (line 138)
37async fn main() -> Result<()> {
38    // Initialize logging to see what's happening under the hood
39    tracing_subscriber::fmt().with_env_filter("info").init();
40
41    println!(" OpenAI Ergonomic Quickstart");
42    println!("==============================\n");
43
44    // ==========================================
45    // 1. ENVIRONMENT SETUP & CLIENT CREATION
46    // ==========================================
47
48    println!(" Step 1: Setting up the client");
49
50    // The simplest way to get started - reads OPENAI_API_KEY from environment
51    let client = match Client::from_env() {
52        Ok(client_builder) => {
53            println!(" Client created successfully!");
54            client_builder.build()
55        }
56        Err(e) => {
57            eprintln!(" Failed to create client: {e}");
58            eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59            eprintln!("   Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60            return Err(e);
61        }
62    };
63
64    // ==========================================
65    // 2. BASIC CHAT COMPLETION
66    // ==========================================
67
68    println!("\n Step 2: Basic chat completion");
69
70    // The simplest way to get a response from ChatGPT
71    let builder = client.chat_simple("What is Rust programming language in one sentence?");
72    let response = client.send_chat(builder).await;
73
74    match response {
75        Ok(chat_response) => {
76            println!(" Got response!");
77            if let Some(content) = chat_response.content() {
78                println!(" AI: {content}");
79            }
80
81            // Show usage information for cost tracking
82            if let Some(usage) = &chat_response.inner().usage {
83                println!(
84                    " Usage: {} prompt + {} completion = {} total tokens",
85                    usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86                );
87            }
88        }
89        Err(e) => {
90            println!(" Chat completion failed: {e}");
91            // Continue with other examples even if this one fails
92        }
93    }
94
95    // ==========================================
96    // 3. CHAT WITH SYSTEM MESSAGE
97    // ==========================================
98
99    println!("\n Step 3: Chat with system context");
100
101    // System messages help set the AI's behavior and context
102    let builder = client.chat_with_system(
103        "You are a helpful coding mentor who explains things simply",
104        "Explain what a HashMap is in Rust",
105    );
106    let response = client.send_chat(builder).await;
107
108    match response {
109        Ok(chat_response) => {
110            println!(" Got contextual response!");
111            if let Some(content) = chat_response.content() {
112                println!(" Mentor: {content}");
113            }
114        }
115        Err(e) => {
116            println!(" Contextual chat failed: {e}");
117        }
118    }
119
120    // ==========================================
121    // 4. STREAMING RESPONSES
122    // ==========================================
123
124    println!("\n Step 4: Streaming response (real-time)");
125
126    // Streaming lets you see the response as it's being generated
127    // This is great for chatbots and interactive applications
128    print!(" AI is typing");
129    io::stdout().flush().unwrap();
130
131    let builder = client
132        .responses()
133        .user("Write a short haiku about programming")
134        .temperature(0.7)
135        .stream(true);
136    // Note: Full streaming implementation is in development
137    // For now, we'll demonstrate non-streaming responses with real-time simulation
138    let response = client.send_responses(builder).await;
139
140    match response {
141        Ok(chat_response) => {
142            print!(": ");
143            io::stdout().flush().unwrap();
144
145            // Simulate streaming by printing character by character
146            if let Some(content) = chat_response.content() {
147                for char in content.chars() {
148                    print!("{char}");
149                    io::stdout().flush().unwrap();
150                    // Small delay to simulate streaming
151                    tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152                }
153            }
154            println!(); // New line after "streaming"
155        }
156        Err(e) => {
157            println!("\n Failed to get streaming response: {e}");
158        }
159    }
160
161    // ==========================================
162    // 5. FUNCTION/TOOL CALLING
163    // ==========================================
164
165    println!("\n Step 5: Using tools/functions");
166
167    // Tools let the AI call external functions to get real data
168    // Here we define a weather function as an example
169    let weather_tool = tool_function(
170        "get_current_weather",
171        "Get the current weather for a given location",
172        json!({
173            "type": "object",
174            "properties": {
175                "location": {
176                    "type": "string",
177                    "description": "The city name, e.g. 'San Francisco, CA'"
178                },
179                "unit": {
180                    "type": "string",
181                    "enum": ["celsius", "fahrenheit"],
182                    "description": "Temperature unit"
183                }
184            },
185            "required": ["location"]
186        }),
187    );
188
189    let builder = client
190        .responses()
191        .user("What's the weather like in Tokyo?")
192        .tool(weather_tool);
193    let response = client.send_responses(builder).await;
194
195    match response {
196        Ok(chat_response) => {
197            println!(" Got response with potential tool calls!");
198
199            // Check if the AI wants to call our weather function
200            let tool_calls = chat_response.tool_calls();
201            if !tool_calls.is_empty() {
202                println!(" AI requested tool calls:");
203                for tool_call in tool_calls {
204                    let function_name = tool_call.function_name();
205                    println!("   Function: {function_name}");
206                    let function_args = tool_call.function_arguments();
207                    println!("   Arguments: {function_args}");
208
209                    // In a real app, you'd execute the function here
210                    // and send the result back to the AI
211                    println!("    In a real app, you'd call your weather API here");
212                }
213            } else if let Some(content) = chat_response.content() {
214                println!(" AI: {content}");
215            }
216        }
217        Err(e) => {
218            println!(" Tool calling example failed: {e}");
219        }
220    }
221
222    // ==========================================
223    // 6. ERROR HANDLING PATTERNS
224    // ==========================================
225
226    println!("\n Step 6: Error handling patterns");
227
228    // Show how to handle different types of errors gracefully
229    let builder = client.chat_simple(""); // Empty message might cause an error
230    let bad_response = client.send_chat(builder).await;
231
232    match bad_response {
233        Ok(response) => {
234            println!(" Unexpectedly succeeded with empty message");
235            if let Some(content) = response.content() {
236                println!(" AI: {content}");
237            }
238        }
239        Err(Error::Api {
240            status, message, ..
241        }) => {
242            println!(" API Error (HTTP {status}):");
243            println!("   Message: {message}");
244            println!(" This is normal - we sent an invalid request");
245        }
246        Err(Error::RateLimit { .. }) => {
247            println!(" Rate limited - you're sending requests too fast");
248            println!(" In a real app, you'd implement exponential backoff");
249        }
250        Err(Error::Http(_)) => {
251            println!(" HTTP/Network error");
252            println!(" Check your internet connection and API key");
253        }
254        Err(e) => {
255            println!(" Other error: {e}");
256        }
257    }
258
259    // ==========================================
260    // 7. COMPLETE REAL-WORLD EXAMPLE
261    // ==========================================
262
263    println!("\n Step 7: Complete real-world example");
264    println!("Building a simple AI assistant that can:");
265    println!("- Answer questions with context");
266    println!("- Track conversation costs");
267    println!("- Handle errors gracefully");
268
269    let mut total_tokens = 0;
270
271    // Simulate a conversation with context and cost tracking
272    let questions = [
273        "What is the capital of France?",
274        "What's special about that city?",
275        "How many people live there?",
276    ];
277
278    for (i, question) in questions.iter().enumerate() {
279        println!("\n User: {question}");
280
281        let builder = client
282            .responses()
283            .system(
284                "You are a knowledgeable geography expert. Keep answers concise but informative.",
285            )
286            .user(*question)
287            .temperature(0.1); // Lower temperature for more factual responses
288        let response = client.send_responses(builder).await;
289
290        match response {
291            Ok(chat_response) => {
292                if let Some(content) = chat_response.content() {
293                    println!(" Assistant: {content}");
294                }
295
296                // Track token usage for cost monitoring
297                if let Some(usage) = chat_response.usage() {
298                    total_tokens += usage.total_tokens;
299                    println!(
300                        " This exchange: {} tokens (Running total: {})",
301                        usage.total_tokens, total_tokens
302                    );
303                }
304            }
305            Err(e) => {
306                println!(" Question {} failed: {}", i + 1, e);
307                // In a real app, you might retry or log this error
308            }
309        }
310    }
311
312    // ==========================================
313    // 8. WRAP UP & NEXT STEPS
314    // ==========================================
315
316    println!("\n Quickstart Complete!");
317    println!("======================");
318    println!("You've successfully:");
319    println!(" Created an OpenAI client");
320    println!(" Made basic chat completions");
321    println!(" Used streaming responses");
322    println!(" Implemented tool/function calling");
323    println!(" Handled errors gracefully");
324    println!(" Built a complete conversational AI");
325    println!("\n Total tokens used in examples: {total_tokens}");
326    println!(
327        " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328        f64::from(total_tokens) * 0.03 / 1000.0
329    );
330
331    println!("\n Next Steps:");
332    println!("- Check out other examples in the examples/ directory");
333    println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334    println!("- Explore advanced features like vision, audio, and assistants");
335    println!("- Build your own AI-powered applications!");
336
337    Ok(())
338}
Source

pub async fn send_responses_stream( &self, builder: ResponsesBuilder, ) -> Result<BoxedChatStream>

Send a responses request with streaming enabled.

This enables real-time streaming of responses using Server-Sent Events (SSE). The stream yields chunks as they arrive from the API.
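No repository example exists for this method yet, so here is a minimal sketch. It assumes the stream items are Result-wrapped chunks and that each chunk exposes a content()-style accessor for the text delta; both are assumptions, not confirmed API:

use futures::StreamExt;

let builder = client
    .responses()
    .user("Write a short haiku about programming")
    .stream(true);

// Assumption: items are `Result<chunk>` and each chunk exposes
// `content()` returning the incremental text delta.
let mut stream = client.send_responses_stream(builder).await?;
while let Some(chunk) = stream.next().await {
    let chunk = chunk?;
    if let Some(delta) = chunk.content() {
        print!("{delta}");
    }
}
println!();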

Source§

impl<T: Default + Send + Sync> Client<T>

Source

pub fn assistants(&self) -> AssistantsClient<'_, T>

Get assistants client (placeholder).

Source

pub fn audio(&self) -> AudioClient<'_, T>

Get audio client (placeholder).

Source

pub fn embeddings(&self) -> EmbeddingsClient<'_, T>

Get embeddings client (placeholder).

Examples found in repository?
examples/langfuse.rs (line 117)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}
Source

pub fn images(&self) -> ImagesClient<'_, T>

Get images client (placeholder).

Source

pub fn files(&self) -> FilesClient<'_, T>

Get files client (placeholder).

Source

pub fn fine_tuning(&self) -> FineTuningClient<'_, T>

Get fine-tuning client (placeholder).

Source

pub fn batch(&self) -> BatchClient<'_, T>

Get batch client (placeholder).

Source

pub fn vector_stores(&self) -> VectorStoresClient<'_, T>

Get vector stores client (placeholder).

Source

pub fn moderations(&self) -> ModerationsClient<'_, T>

Get moderations client (placeholder).

Source

pub fn threads(&self) -> ThreadsClient<'_, T>

Get threads client (placeholder).

Source

pub fn uploads(&self) -> UploadsClient<'_, T>

Get uploads client (placeholder).

Examples found in repository?
examples/uploads.rs (line 139)
105async fn main() -> Result<(), Box<dyn std::error::Error>> {
106    println!(" OpenAI Ergonomic - Comprehensive Uploads Example\n");
107
108    // Initialize client from environment variables
109    println!(" Initializing OpenAI client...");
110    let client = match Client::from_env() {
111        Ok(c) => {
112            println!(" Client initialized successfully\n");
113            c.build()
114        }
115        Err(e) => {
116            eprintln!(" Failed to initialize client: {}", e);
117            eprintln!(" Make sure OPENAI_API_KEY is set");
118            return Ok(());
119        }
120    };
121
122    // Example 1: Create multipart upload for a large file
123    println!();
124    println!(" Example 1: Create Multipart Upload");
125    println!("\n");
126
127    // Simulate a large file
128    let filename = "large_training_dataset.jsonl";
129    let file_size_mb = 750; // 750 MB
130    let file_size_bytes = file_size_mb * 1024 * 1024;
131    let mime_type = "application/jsonl";
132
133    println!("Creating multipart upload...");
134    println!("  Filename: {}", filename);
135    println!("  Size: {} MB ({} bytes)", file_size_mb, file_size_bytes);
136    println!("  Purpose: fine-tune");
137    println!("  MIME Type: {}", mime_type);
138
139    let builder = client.uploads().builder(
140        filename,
141        UploadPurpose::FineTune,
142        file_size_bytes,
143        mime_type,
144    );
145
146    println!("\n Note: This would create a real multipart upload session.");
147    println!("   Commented out to avoid accidental API calls.\n");
148
149    // Uncomment to actually create upload:
150    // match client.uploads().create(builder).await {
151    //     Ok(upload) => {
152    //         println!(" Upload session created successfully!");
153    //         println!("  Upload ID: {}", upload.id);
154    //         println!("  Status: {}", upload.status);
155    //         println!("  Expires At: {}", upload.expires_at);
156    //     }
157    //     Err(e) => {
158    //         eprintln!(" Failed to create upload: {}", e);
159    //     }
160    // }
161
162    // Simulate upload creation for demonstration
163    let demo_upload = UploadInfo::new(
164        "upload-demo123",
165        filename,
166        file_size_bytes,
167        "fine-tune",
168        "pending",
169    );
170    println!(" Demo Upload Created:");
171    demo_upload.display();
172
173    // Example 2: Upload file parts
174    println!("\n");
175    println!(" Example 2: Upload File Parts");
176    println!("\n");
177
178    let upload_id = "upload-demo123";
179    let part_size_mb = 64; // Upload in 64 MB chunks
180    let total_parts = (file_size_mb + part_size_mb - 1) / part_size_mb; // Ceiling division
181
182    println!(
183        "Uploading {} parts ({} MB each)...\n",
184        total_parts, part_size_mb
185    );
186
187    for part_num in 1..=total_parts {
188        let progress_percent = (part_num as f64 / total_parts as f64) * 100.0;
189
190        println!(
191            " Uploading part {}/{} ({:.1}% complete)",
192            part_num, total_parts, progress_percent
193        );
194
195        // In a real implementation, you would:
196        // 1. Read the file chunk from disk
197        // 2. Upload it to the part URL provided by OpenAI
198        // 3. Track the part ID for completion
199
200        // Uncomment to actually upload parts:
201        // let part_data = read_file_chunk(filename, part_num, part_size_mb)?;
202        // match upload_part(upload_id, part_num, &part_data).await {
203        //     Ok(part_id) => {
204        //         println!("   Part {} uploaded (ID: {})", part_num, part_id);
205        //     }
206        //     Err(e) => {
207        //         eprintln!("   Failed to upload part {}: {}", part_num, e);
208        //         break;
209        //     }
210        // }
211    }
212
213    println!("\n All {} parts uploaded successfully", total_parts);
214
215    // Example 3: Complete the upload
216    println!("\n");
217    println!(" Example 3: Complete Upload");
218    println!("\n");
219
220    println!("Completing upload: {}\n", upload_id);
221
222    // Uncomment to actually complete upload:
223    // match complete_upload(upload_id, part_ids).await {
224    //     Ok(file) => {
225    //         println!(" Upload completed successfully!");
226    //         println!("  File ID: {}", file.id);
227    //         println!("  Filename: {}", file.filename);
228    //         println!("  Status: ready");
229    //         println!("  Purpose: {}", file.purpose);
230    //     }
231    //     Err(e) => {
232    //         eprintln!(" Failed to complete upload: {}", e);
233    //     }
234    // }
235
236    println!(" Demo: Would finalize the upload and create a file object");
237    println!("  File ID: file-abc123");
238    println!("  Filename: {}", filename);
239    println!("  Status: ready");
240
241    // Example 4: Upload smaller file (alternative approach)
242    println!("\n");
243    println!(" Example 4: Upload Smaller File");
244    println!("\n");
245
246    let small_filename = "training_data.jsonl";
247    let small_size_mb = 10;
248    let small_size_bytes = small_size_mb * 1024 * 1024;
249
250    println!("Creating upload for smaller file...");
251    println!("  Filename: {}", small_filename);
252    println!("  Size: {} MB", small_size_mb);
253    println!("  Purpose: assistants");
254
255    let small_builder = client.uploads().builder(
256        small_filename,
257        UploadPurpose::Assistants,
258        small_size_bytes,
259        "application/jsonl",
260    );
261
262    println!("\n Note: For files < 512 MB, consider using the regular Files API");
263    println!("   The Uploads API is optimized for large files.");
264
265    // Example 5: Error handling and retry
266    println!("\n");
267    println!(" Example 5: Error Handling & Retry");
268    println!("\n");
269
270    println!("Demonstrating retry logic for failed part uploads...\n");
271
272    let max_retries = 3;
273    let failed_part = 5;
274
275    for retry in 1..=max_retries {
276        println!(" Attempt {} to upload part {}", retry, failed_part);
277
278        // Simulate upload attempt
279        // In a real implementation:
280        // match upload_part(upload_id, failed_part, &part_data).await {
281        //     Ok(part_id) => {
282        //         println!("   Upload succeeded");
283        //         break;
284        //     }
285        //     Err(e) => {
286        //         if retry < max_retries {
287        //             println!("    Upload failed, retrying... ({})", e);
288        //             tokio::time::sleep(Duration::from_secs(2_u64.pow(retry))).await;
289        //         } else {
290        //             eprintln!("   Upload failed after {} attempts: {}", max_retries, e);
291        //         }
292        //     }
293        // }
294    }
295
296    println!("\n Tip: Implement exponential backoff for retry logic");
297
298    // Example 6: Upload progress tracking
299    println!("\n");
300    println!(" Example 6: Progress Tracking");
301    println!("\n");
302
303    struct UploadProgress {
304        total_bytes: i32,
305        uploaded_bytes: i32,
306        total_parts: i32,
307        uploaded_parts: i32,
308    }
309
310    impl UploadProgress {
311        fn percentage(&self) -> f64 {
312            (self.uploaded_bytes as f64 / self.total_bytes as f64) * 100.0
313        }
314
315        fn eta_seconds(&self, bytes_per_second: f64) -> i32 {
316            let remaining_bytes = self.total_bytes - self.uploaded_bytes;
317            (remaining_bytes as f64 / bytes_per_second) as i32
318        }
319
320        fn display(&self, bytes_per_second: f64) {
321            let progress_bar_width = 40;
322            let filled = ((self.percentage() / 100.0) * progress_bar_width as f64) as usize;
323            let empty = progress_bar_width - filled;
324
325            print!("  [");
326            print!("{}", "█".repeat(filled));
327            print!("{}", "░".repeat(empty));
328            print!("] ");
329
330            println!(
331                "{:.1}% ({}/{} parts, {} MB/s, ETA: {}s)",
332                self.percentage(),
333                self.uploaded_parts,
334                self.total_parts,
335                bytes_per_second / (1024.0 * 1024.0),
336                self.eta_seconds(bytes_per_second)
337            );
338        }
339    }
340
341    let progress = UploadProgress {
342        total_bytes: file_size_bytes,
343        uploaded_bytes: (file_size_bytes as f64 * 0.65) as i32,
344        total_parts,
345        uploaded_parts: (total_parts as f64 * 0.65) as i32,
346    };
347
348    println!("Current upload progress:");
349    progress.display(10.0 * 1024.0 * 1024.0); // 10 MB/s
350
351    // Summary
352    println!("\n");
353    println!(" Summary");
354    println!("\n");
355
356    println!(" Uploads API examples completed!");
357    println!("\n Key Takeaways:");
358    println!("  • Uploads API is designed for large files (>512 MB)");
359    println!("  • Files are uploaded in parts for reliability");
360    println!("  • Each part can be retried independently");
361    println!("  • Progress can be tracked during upload");
362    println!("  • Upload must be completed after all parts are uploaded");
363
364    println!("\n Best Practices:");
365    println!("  1. Use appropriate part sizes (typically 64 MB)");
366    println!("  2. Implement retry logic with exponential backoff");
367    println!("  3. Track progress and provide user feedback");
368    println!("  4. Handle upload cancellation gracefully");
369    println!("  5. Verify file integrity after upload");
370
371    println!("\n When to Use:");
372    println!("  • Large training datasets for fine-tuning");
373    println!("  • Big files for assistants (>512 MB)");
374    println!("  • Batch processing input files");
375    println!("  • Any file where reliability is critical");
376
377    println!("\n Example completed successfully!");
378
379    Ok(())
380}
Source

pub fn models(&self) -> ModelsClient<'_, T>

Get models client.

Examples found in repository?
examples/models.rs (line 76)
74async fn fetch_models_from_api(client: &Client) -> Result<()> {
75    // Example of using the ergonomic API to list models
76    let response = client.models().list().await?;
77
78    println!("Fetched {} models from API:", response.data.len());
79    for model in response.data.iter().take(10) {
80        println!("  - {} (owned by: {})", model.id, model.owned_by);
81    }
82    println!();
83
84    // Example of getting a specific model
85    if !response.data.is_empty() {
86        let model_id = &response.data[0].id;
87        let model = client.models().get(model_id).await?;
88        println!("Model details for {}:", model.id);
89        println!("  Owned by: {}", model.owned_by);
90        println!("  Created: {}", model.created);
91        println!();
92    }
93
94    Ok(())
95}
Source

pub fn completions(&self) -> CompletionsClient<'_, T>

Get completions client.

Examples found in repository?
examples/completions.rs (line 58)
56async fn basic_completion(client: &Client) -> Result<()> {
57    let builder = client
58        .completions()
59        .builder("gpt-3.5-turbo-instruct")
60        .prompt("Write a tagline for an ice cream shop")
61        .max_tokens(60);
62
63    let response = client.completions().create(builder).await?;
64
65    println!("Prompt: Write a tagline for an ice cream shop");
66    if let Some(choice) = response.choices.first() {
67        println!("Completion: {}", choice.text);
68        println!("Finish reason: {:?}", choice.finish_reason);
69    }
70
71    if let Some(usage) = response.usage {
72        println!(
73            "Tokens used: {} prompt + {} completion = {} total",
74            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
75        );
76    }
77
78    Ok(())
79}
80
81async fn completion_with_parameters(client: &Client) -> Result<()> {
82    let builder = client
83        .completions()
84        .builder("gpt-3.5-turbo-instruct")
85        .prompt("Explain quantum computing in simple terms:")
86        .max_tokens(100)
87        .temperature(0.7)
88        .top_p(0.9)
89        .frequency_penalty(0.5)
90        .presence_penalty(0.0);
91
92    let response = client.completions().create(builder).await?;
93
94    println!("Parameters:");
95    println!("  Temperature: 0.7");
96    println!("  Top P: 0.9");
97    println!("  Frequency Penalty: 0.5");
98    println!("  Presence Penalty: 0.0");
99    println!();
100
101    if let Some(choice) = response.choices.first() {
102        println!("Completion: {}", choice.text);
103    }
104
105    Ok(())
106}
107
108async fn multiple_completions(client: &Client) -> Result<()> {
109    let builder = client
110        .completions()
111        .builder("gpt-3.5-turbo-instruct")
112        .prompt("Brainstorm three names for a pet cat:")
113        .max_tokens(50)
114        .n(3) // Generate 3 different completions
115        .temperature(0.9); // Higher temperature for more variety
116
117    let response = client.completions().create(builder).await?;
118
119    println!("Generating {} completions:", response.choices.len());
120    for (i, choice) in response.choices.iter().enumerate() {
121        println!("  {}. {}", i + 1, choice.text.trim());
122    }
123
124    Ok(())
125}
126
127async fn completion_with_stop(client: &Client) -> Result<()> {
128    let builder = client
129        .completions()
130        .builder("gpt-3.5-turbo-instruct")
131        .prompt("List three programming languages:\n1.")
132        .max_tokens(100)
133        .temperature(0.0)
134        .add_stop("\n4.") // Stop at the fourth item
135        .add_stop("\n\n"); // Also stop at double newline
136
137    let response = client.completions().create(builder).await?;
138
139    println!("Prompt: List three programming languages:");
140    if let Some(choice) = response.choices.first() {
141        println!("Completion:\n1.{}", choice.text);
142        println!("Stopped because: {:?}", choice.finish_reason);
143    }
144
145    Ok(())
146}
147
148async fn completion_with_suffix(client: &Client) -> Result<()> {
149    // Insert mode: provide text before and after the insertion point
150    let builder = client
151        .completions()
152        .builder("gpt-3.5-turbo-instruct")
153        .prompt("def hello_world():\n    print(\"Hello, ")
154        .suffix("\")\n    return True")
155        .max_tokens(10)
156        .temperature(0.0);
157
158    let response = client.completions().create(builder).await?;
159
160    println!("Insert mode example:");
161    println!("Before: def hello_world():\\n    print(\"Hello, ");
162    if let Some(choice) = response.choices.first() {
163        println!("Inserted: {}", choice.text);
164    }
165    println!("After: \")\\n    return True");
166
167    Ok(())
168}
169
170#[allow(dead_code)]
171async fn completion_with_echo(client: &Client) -> Result<()> {
172    let builder = client
173        .completions()
174        .builder("gpt-3.5-turbo-instruct")
175        .prompt("The capital of France is")
176        .max_tokens(10)
177        .echo(true) // Echo back the prompt
178        .temperature(0.0);
179
180    let response = client.completions().create(builder).await?;
181
182    println!("Echo enabled:");
183    if let Some(choice) = response.choices.first() {
184        println!("Full text (prompt + completion): {}", choice.text);
185    }
186
187    Ok(())
188}
Source

pub fn usage(&self) -> UsageClient<'_, T>

Get usage client.

Examples found in repository?
examples/usage.rs (line 75)
72async fn basic_usage_query(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
73    let builder = UsageBuilder::new(start_time, Some(end_time));
74
75    let usage = client.usage().completions(builder).await?;
76
77    println!("Completions usage:");
78    println!("  Data points: {}", usage.data.len());
79
80    if usage.has_more {
81        println!("  Has more: yes");
82    }
83
84    Ok(())
85}
86
87async fn usage_with_aggregation(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
88    let builder = UsageBuilder::new(start_time, Some(end_time))
89        .bucket_width(BucketWidth::Day)
90        .limit(10);
91
92    let usage = client.usage().completions(builder).await?;
93
94    println!("Daily aggregated completions usage:");
95    println!("  Bucket width: 1 day");
96    println!("  Data points: {}", usage.data.len());
97
98    Ok(())
99}
100
101async fn usage_by_model(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
102    let builder = UsageBuilder::new(start_time, Some(end_time))
103        .model("gpt-4")
104        .limit(100);
105
106    let usage = client.usage().completions(builder).await?;
107
108    println!("Completions usage for gpt-4:");
109    println!("  Data points: {}", usage.data.len());
110
111    Ok(())
112}
113
114async fn usage_grouped_by_project(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
115    let builder = UsageBuilder::new(start_time, Some(end_time))
116        .group_by(GroupBy::ProjectId)
117        .group_by(GroupBy::Model)
118        .limit(50);
119
120    let usage = client.usage().completions(builder).await?;
121
122    println!("Completions usage grouped by project and model:");
123    println!("  Data points: {}", usage.data.len());
124
125    Ok(())
126}
127
128async fn cost_data(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
129    let builder = UsageBuilder::new(start_time, Some(end_time))
130        .bucket_width(BucketWidth::Day)
131        .limit(10);
132
133    let costs = client.usage().costs(builder).await?;
134
135    println!("Cost data:");
136    println!("  Data points: {}", costs.data.len());
137
138    Ok(())
139}
140
141async fn audio_usage(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
142    let builder = UsageBuilder::new(start_time, Some(end_time)).limit(10);
143
144    // Audio speeches (text-to-speech)
145    let speeches = client.usage().audio_speeches(builder.clone()).await?;
146    println!("Audio speeches usage: {} data points", speeches.data.len());
147
148    // Audio transcriptions
149    let transcriptions = client.usage().audio_transcriptions(builder).await?;
150    println!(
151        "Audio transcriptions usage: {} data points",
152        transcriptions.data.len()
153    );
154
155    Ok(())
156}
157
158async fn image_usage(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
159    let builder = UsageBuilder::new(start_time, Some(end_time))
160        .bucket_width(BucketWidth::Day)
161        .limit(10);
162
163    let usage = client.usage().images(builder).await?;
164
165    println!("Image generation usage:");
166    println!("  Data points: {}", usage.data.len());
167
168    Ok(())
169}
170
171async fn embeddings_usage(client: &Client, start_time: i32, end_time: i32) -> Result<()> {
172    let builder = UsageBuilder::new(start_time, Some(end_time))
173        .model("text-embedding-3-small")
174        .limit(100);
175
176    let usage = client.usage().embeddings(builder).await?;
177
178    println!("Embeddings usage for text-embedding-3-small:");
179    println!("  Data points: {}", usage.data.len());
180
181    Ok(())
182}

Trait Implementations§

Source§

impl<T: Clone> Clone for Client<T>

Source§

fn clone(&self) -> Client<T>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<T> Debug for Client<T>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl<T> Freeze for Client<T>

§

impl<T = ()> !RefUnwindSafe for Client<T>

§

impl<T> Send for Client<T>

§

impl<T> Sync for Client<T>

§

impl<T> Unpin for Client<T>

§

impl<T = ()> !UnwindSafe for Client<T>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> FutureExt for T

Source§

fn with_context(self, otel_cx: Context) -> WithContext<Self>

Attaches the provided Context to this type, returning a WithContext wrapper. Read more
Source§

fn with_current_context(self) -> WithContext<Self>

Attaches the current Context to this type, returning a WithContext wrapper. Read more
Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,