Struct ClientBuilder

pub struct ClientBuilder<T = ()> { /* private fields */ }

Builder for creating a Client with interceptors.

The builder pattern allows you to configure interceptors before the client is created. Once built, the interceptors are immutable, eliminating the need for runtime locking.

§Example

// `my_interceptor` is any value implementing the `Interceptor` trait.
let client = Client::from_env()?
    .with_interceptor(Box::new(my_interceptor))
    .build();

Implementations§

impl ClientBuilder

pub fn new(config: Config) -> Result<Self>

Create a new client builder with the given configuration.
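
A minimal sketch, assuming ClientBuilder is imported from the crate root and using the Config builder shown in the repository examples (the API key below is a placeholder):

use openai_ergonomic::{ClientBuilder, Config};

// Placeholder key; real code would load credentials from secure configuration.
let config = Config::builder()
    .api_key("sk-example")
    .build();
let client = ClientBuilder::new(config)?.build();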

pub fn from_env() -> Result<Self>

Create a new client builder with default configuration from environment variables.
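
For instance (a sketch that assumes OPENAI_API_KEY is set in the environment, as the repository examples require):

// Reads the API key and related settings from environment variables.
let client = ClientBuilder::from_env()?.build();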

impl<T> ClientBuilder<T>

pub fn with_interceptor<U>(self, interceptor: Box<dyn Interceptor<U>>) -> ClientBuilder<U>

Add an interceptor to the builder.

Creates a new builder with the interceptor’s state type. The interceptor provides hooks into the request/response lifecycle for observability, logging, and custom processing.

Note: This method changes the builder's state type parameter, so it is intended to be called once per builder; calling it again replaces the previously configured interceptor chain. To register multiple interceptors that share a state type, use a composite interceptor or chain them with add_interceptor.

§Examples

Simple interceptor (no state):

use openai_ergonomic::{Client, Interceptor, BeforeRequestContext, Result};

struct LoggingInterceptor;

#[async_trait::async_trait]
impl Interceptor for LoggingInterceptor {
    async fn before_request(&self, ctx: &mut BeforeRequestContext<'_>) -> Result<()> {
        println!("Calling {}", ctx.operation);
        Ok(())
    }
}

let client = Client::from_env()?
    .with_interceptor(Box::new(LoggingInterceptor))
    .build();

Interceptor with custom state:

use openai_ergonomic::{Client, LangfuseInterceptor, LangfuseState};

// `tracer` is an OpenTelemetry tracer and `config` a LangfuseConfig (see the repository example below).
let interceptor = LangfuseInterceptor::new(tracer, config);
let client: Client<LangfuseState<_>> = Client::from_env()?
    .with_interceptor(Box::new(interceptor))
    .build();
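
The note above mentions composite interceptors. The sketch below is hypothetical (CompositeInterceptor is not part of this crate) and forwards only the before_request hook from the first example; a real composite would forward every hook it needs:

use openai_ergonomic::{BeforeRequestContext, Interceptor, Result};

struct CompositeInterceptor {
    inner: Vec<Box<dyn Interceptor>>,
}

#[async_trait::async_trait]
impl Interceptor for CompositeInterceptor {
    async fn before_request(&self, ctx: &mut BeforeRequestContext<'_>) -> Result<()> {
        // Run each wrapped interceptor in registration order, stopping at the first error.
        for interceptor in &self.inner {
            interceptor.before_request(ctx).await?;
        }
        Ok(())
    }
}
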
Examples found in repository
examples/langfuse_simple.rs (line 54)
26async fn main() -> Result<(), Box<dyn std::error::Error>> {
27    // Initialize tracing for logging
28    tracing_subscriber::fmt()
29        .with_env_filter(
30            tracing_subscriber::EnvFilter::from_default_env()
31                .add_directive("openai_ergonomic=debug".parse()?),
32        )
33        .init();
34
35    println!(" Initializing OpenAI client with Langfuse observability...\n");
36
37    // 1. Build Langfuse exporter from environment variables
38    let exporter = ExporterBuilder::from_env()?.build()?;
39
40    // 2. Create tracer provider with batch processor
41    let provider = SdkTracerProvider::builder()
42        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
43        .build();
44
45    // Set as global provider
46    global::set_tracer_provider(provider.clone());
47
48    // 3. Get tracer and create interceptor
49    let tracer = provider.tracer("openai-ergonomic");
50    let langfuse_interceptor = LangfuseInterceptor::new(tracer, LangfuseConfig::new());
51
52    // 4. Create the OpenAI client and add the Langfuse interceptor
53    let client = Client::from_env()?
54        .with_interceptor(Box::new(langfuse_interceptor))
55        .build();
56
57    println!(" Client initialized successfully!");
58    println!(" Traces will be sent to Langfuse for monitoring\n");
59
60    // Make a simple chat completion - tracing is automatic!
61    println!(" Making a simple chat completion request...");
62    let request = client
63        .chat_simple("What is 2 + 2? Answer with just the number.")
64        .build()?;
65    let response = client.execute_chat(request).await?;
66
67    println!(" Response: {:?}", response.content());
68
69    println!("\n Done! Check your Langfuse dashboard to see the traces.");
70    println!("   - Look for traces with the operation name 'chat'");
71    println!("   - Each trace includes request/response details and token usage");
72
73    // Shutdown the tracer provider to flush all spans
74    println!("\n⏳ Flushing spans to Langfuse...");
75    provider.shutdown()?;
76
77    Ok(())
78}
More examples
examples/langfuse_streaming.rs (line 59)
31async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    println!("🚀 Initializing OpenAI client with Langfuse streaming observability...\n");
41
42    // 1. Build Langfuse exporter from environment variables
43    let exporter = ExporterBuilder::from_env()?.build()?;
44
45    // 2. Create tracer provider with batch processor
46    let provider = SdkTracerProvider::builder()
47        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
48        .build();
49
50    // Set as global provider
51    global::set_tracer_provider(provider.clone());
52
53    // 3. Get tracer and create interceptor
54    let tracer = provider.tracer("openai-ergonomic");
55    let langfuse_interceptor = LangfuseInterceptor::new(tracer, LangfuseConfig::new());
56
57    // 4. Create the OpenAI client and add the Langfuse interceptor
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor))
60        .build();
61
62    println!("✅ Client initialized successfully!");
63    println!("📊 Streaming traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Basic streaming with tracing
66    println!("=== Example 1: Basic Streaming ===");
67    basic_streaming(&client).await?;
68
69    // Example 2: Streaming with parameters
70    println!("\n=== Example 2: Streaming with Parameters ===");
71    streaming_with_parameters(&client).await?;
72
73    // Example 3: Collect full content
74    println!("\n=== Example 3: Collect Full Content ===");
75    collect_content(&client).await?;
76
77    println!("\n✅ Done! Check your Langfuse dashboard to see the streaming traces.");
78    println!("   - Look for traces with operation names 'chat' or 'responses'");
79    println!("   - Each trace includes:");
80    println!("     • before_request: Initial request details");
81    println!("     • on_stream_chunk: Each chunk as it arrives (real-time)");
82    println!("     • on_stream_end: Final token usage and duration");
83
84    // Give spawned interceptor tasks time to complete
85    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
86
87    // Shutdown the tracer provider to flush all spans
88    println!("\n⏳ Flushing spans to Langfuse...");
89    provider.shutdown()?;
90
91    Ok(())
92}
examples/langfuse.rs (line 59)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}

pub fn add_interceptor(self, interceptor: Box<dyn Interceptor<T>>) -> Self

Add an interceptor that uses the same state type.

This allows chaining multiple interceptors with the same state type without type transformation.

§Example

// `logger` and `metrics` are interceptors sharing the builder's current state type.
let client = Client::from_env()?
    .add_interceptor(Box::new(logger))
    .add_interceptor(Box::new(metrics))
    .build();

pub fn build(self) -> Client<T>

Build the client with the configured interceptors.

After building, the interceptors are immutable, eliminating runtime locking overhead.
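
A minimal sketch of the whole flow (equivalent to the repository examples below):

// from_env() returns a ClientBuilder; build() consumes it and yields an immutable Client.
let client = Client::from_env()?.build();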

Examples found in repository
examples/testing_patterns.rs (line 144)
138    fn client(&self) -> Result<Client> {
139        let config = Config::builder()
140            .api_key("test-api-key")
141            .api_base(&self.base_url())
142            .build();
143
144        Ok(Client::builder(config)?.build())
145    }
146
147    /// Configure error simulation
148    fn configure_errors(&self, config: ErrorSimulationConfig) {
149        *self.error_config.lock().unwrap() = config;
150    }
151
152    /// Mock a chat completion response
153    async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154        let mock_response = serde_json::json!({
155            "id": "chatcmpl-123",
156            "object": "chat.completion",
157            "created": 1677652288,
158            "model": "gpt-3.5-turbo",
159            "choices": [{
160                "index": 0,
161                "message": {
162                    "role": "assistant",
163                    "content": response_text
164                },
165                "finish_reason": "stop"
166            }],
167            "usage": {
168                "prompt_tokens": 50,
169                "completion_tokens": 20,
170                "total_tokens": 70
171            }
172        });
173
174        self.server
175            .mock("POST", "/v1/chat/completions")
176            .match_body(mockito::Matcher::JsonString(
177                serde_json::json!({
178                    "model": "gpt-3.5-turbo",
179                    "messages": [{"role": "user", "content": expected_prompt}]
180                })
181                .to_string(),
182            ))
183            .with_status(200)
184            .with_header("content-type", "application/json")
185            .with_body(mock_response.to_string())
186            .create_async()
187            .await;
188    }
189
190    /// Mock a streaming chat completion response
191    async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192        let mut sse_data = String::new();
193
194        for (i, chunk) in response_chunks.iter().enumerate() {
195            let chunk_response = serde_json::json!({
196                "id": "chatcmpl-123",
197                "object": "chat.completion.chunk",
198                "created": 1677652288,
199                "model": "gpt-3.5-turbo",
200                "choices": [{
201                    "index": 0,
202                    "delta": {
203                        "content": chunk
204                    },
205                    "finish_reason": if i == response_chunks.len() - 1 { "stop" } else { "null" }
206                }]
207            });
208
209            sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210        }
211
212        sse_data.push_str("data: [DONE]\n\n");
213
214        self.server
215            .mock("POST", "/v1/chat/completions")
216            .match_header("accept", "text/event-stream")
217            .with_status(200)
218            .with_header("content-type", "text/event-stream")
219            .with_body(sse_data)
220            .create_async()
221            .await;
222    }
223
224    /// Mock an error response (rate limit, server error, etc.)
225    async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226        let (status, body) = match error_type {
227            ErrorType::RateLimit => (
228                429,
229                serde_json::json!({
230                    "error": {
231                        "type": "rate_limit_exceeded",
232                        "message": "Rate limit exceeded, please try again later"
233                    }
234                })
235                .to_string(),
236            ),
237            ErrorType::ServerError => (
238                500,
239                serde_json::json!({
240                    "error": {
241                        "type": "server_error",
242                        "message": "Internal server error"
243                    }
244                })
245                .to_string(),
246            ),
247            ErrorType::InvalidRequest => (
248                400,
249                serde_json::json!({
250                    "error": {
251                        "type": "invalid_request_error",
252                        "message": "Invalid request parameters"
253                    }
254                })
255                .to_string(),
256            ),
257            ErrorType::Unauthorized => (
258                401,
259                serde_json::json!({
260                    "error": {
261                        "type": "invalid_request_error",
262                        "message": "Incorrect API key provided"
263                    }
264                })
265                .to_string(),
266            ),
267        };
268
269        self.server
270            .mock("POST", endpoint)
271            .with_status(status)
272            .with_header("content-type", "application/json")
273            .with_body(body)
274            .create_async()
275            .await;
276    }
277
278    /// Get logged requests for verification
279    fn get_request_log(&self) -> Vec<MockRequest> {
280        self.request_log.lock().unwrap().clone()
281    }
282
283    /// Clear request log
284    fn clear_request_log(&self) {
285        self.request_log.lock().unwrap().clear();
286    }
287
288    /// Verify that a specific request was made
289    fn verify_request(&self, method: &str, path: &str) -> bool {
290        let log = self.request_log.lock().unwrap();
291        log.iter()
292            .any(|req| req.method == method && req.path == path)
293    }
294}
295
296/// Types of errors to simulate in testing
297#[derive(Debug, Clone)]
298enum ErrorType {
299    RateLimit,
300    ServerError,
301    InvalidRequest,
302    Unauthorized,
303}
304
305/// Test utilities for OpenAI API testing
306struct TestUtils;
307
308impl TestUtils {
309    /// Create a test client with mock configuration
310    fn create_test_client() -> Result<Client> {
311        let config = Config::builder()
312            .api_key("test-api-key")
313            .api_base("http://localhost:1234") // Mock server URL
314            .max_retries(2)
315            .build();
316
317        Ok(Client::builder(config)?.build())
318    }
319
320    /// Assert that a response contains expected content
321    fn assert_response_content(response: &str, expected_content: &str) {
322        assert!(
323            response.contains(expected_content),
324            "Response '{}' does not contain expected content '{}'",
325            response,
326            expected_content
327        );
328    }
329
330    /// Assert token usage is within expected bounds
331    fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
332        assert!(
333            usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
334            "Token usage {} is outside expected range {}-{}",
335            usage.total_tokens,
336            min_tokens,
337            max_tokens
338        );
339    }
340
341    /// Create test data for batch testing
342    fn create_test_prompts(count: usize) -> Vec<String> {
343        (0..count)
344            .map(|i| format!("Test prompt number {}", i + 1))
345            .collect()
346    }
347
348    /// Measure execution time of an async operation
349    async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
350    where
351        F: std::future::Future<Output = std::result::Result<T, E>>,
352    {
353        let start = Instant::now();
354        let result = operation.await;
355        let duration = start.elapsed();
356        (result, duration)
357    }
358
359    /// Create a mock response with custom token usage
360    fn create_mock_response_with_usage(
361        content: &str,
362        prompt_tokens: i32,
363        completion_tokens: i32,
364    ) -> String {
365        serde_json::json!({
366            "id": "chatcmpl-test",
367            "object": "chat.completion",
368            "created": 1677652288,
369            "model": "gpt-3.5-turbo",
370            "choices": [{
371                "index": 0,
372                "message": {
373                    "role": "assistant",
374                    "content": content
375                },
376                "finish_reason": "stop"
377            }],
378            "usage": {
379                "prompt_tokens": prompt_tokens,
380                "completion_tokens": completion_tokens,
381                "total_tokens": prompt_tokens + completion_tokens
382            }
383        })
384        .to_string()
385    }
386}
387
388/// Token usage information for testing
389#[derive(Debug, Clone, Serialize, Deserialize)]
390struct TokenUsage {
391    prompt_tokens: i32,
392    completion_tokens: i32,
393    total_tokens: i32,
394}
395
396/// Integration test runner for live API testing
397struct IntegrationTestRunner {
398    client: Client,
399    test_results: Vec<IntegrationTestResult>,
400}
401
402/// Result of an integration test
403#[derive(Debug, Clone)]
404struct IntegrationTestResult {
405    test_name: String,
406    success: bool,
407    duration: Duration,
408    error_message: Option<String>,
409    response_data: Option<String>,
410}
411
412impl IntegrationTestRunner {
413    /// Create a new integration test runner
414    fn new(client: Client) -> Self {
415        Self {
416            client,
417            test_results: Vec::new(),
418        }
419    }
420
421    /// Run a basic chat completion test
422    async fn test_basic_chat_completion(&mut self) -> Result<()> {
423        let test_name = "basic_chat_completion";
424        info!("Running integration test: {}", test_name);
425
426        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
427            // Note: This would use real API in integration tests
428            // self.client.chat_simple("Hello, world!").await
429
430            // For demonstration, we'll simulate a successful response
431            Ok("Hello! How can I help you today?".to_string())
432        })
433        .await;
434
435        let test_result = match result {
436            Ok(response) => {
437                info!(" Basic chat completion test passed in {:?}", duration);
438                IntegrationTestResult {
439                    test_name: test_name.to_string(),
440                    success: true,
441                    duration,
442                    error_message: None,
443                    response_data: Some(response),
444                }
445            }
446            Err(e) => {
447                error!(" Basic chat completion test failed: {}", e);
448                IntegrationTestResult {
449                    test_name: test_name.to_string(),
450                    success: false,
451                    duration,
452                    error_message: Some(e.to_string()),
453                    response_data: None,
454                }
455            }
456        };
457
458        self.test_results.push(test_result);
459        Ok(())
460    }
461
462    /// Test streaming functionality
463    async fn test_streaming_completion(&mut self) -> Result<()> {
464        let test_name = "streaming_completion";
465        info!("Running integration test: {}", test_name);
466
467        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
468            // Note: This would use real streaming API in integration tests
469            // let mut stream = self.client.chat().user("Tell me a story").stream().await?;
470            // let mut chunks = Vec::new();
471            // while let Some(chunk) = stream.next().await {
472            //     chunks.push(chunk?.content());
473            // }
474            // Ok(chunks.join(""))
475
476            // For demonstration, simulate streaming chunks
477            let chunks = vec!["Once", " upon", " a", " time..."];
478            Ok(chunks.join(""))
479        })
480        .await;
481
482        let test_result = match result {
483            Ok(response) => {
484                info!(" Streaming completion test passed in {:?}", duration);
485                IntegrationTestResult {
486                    test_name: test_name.to_string(),
487                    success: true,
488                    duration,
489                    error_message: None,
490                    response_data: Some(response),
491                }
492            }
493            Err(e) => {
494                error!(" Streaming completion test failed: {}", e);
495                IntegrationTestResult {
496                    test_name: test_name.to_string(),
497                    success: false,
498                    duration,
499                    error_message: Some(e.to_string()),
500                    response_data: None,
501                }
502            }
503        };
504
505        self.test_results.push(test_result);
506        Ok(())
507    }
508
509    /// Test error handling
510    async fn test_error_handling(&mut self) -> Result<()> {
511        let test_name = "error_handling";
512        info!("Running integration test: {}", test_name);
513
514        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
515            // Test with invalid API key to trigger authentication error
516            let bad_config = Config::builder().api_key("invalid-key").build();
517
518            let _bad_client = Client::builder(bad_config)?.build();
519
520            // This should fail with an authentication error
521            // bad_client.chat_simple("Test").await
522
523            // For demonstration, simulate an auth error
524            Err(Error::InvalidRequest("Authentication failed".to_string()))
525        })
526        .await;
527
528        let test_result = match result {
529            Ok(_) => {
530                warn!("Error handling test unexpectedly succeeded");
531                IntegrationTestResult {
532                    test_name: test_name.to_string(),
533                    success: false,
534                    duration,
535                    error_message: Some(
536                        "Expected authentication error but request succeeded".to_string(),
537                    ),
538                    response_data: None,
539                }
540            }
541            Err(e) => {
542                info!(
543                    " Error handling test passed (correctly failed) in {:?}",
544                    duration
545                );
546                IntegrationTestResult {
547                    test_name: test_name.to_string(),
548                    success: true,
549                    duration,
550                    error_message: None,
551                    response_data: Some(format!("Expected error: {}", e)),
552                }
553            }
554        };
555
556        self.test_results.push(test_result);
557        Ok(())
558    }
More examples
examples/error_handling.rs (line 59)
57async fn basic_error_handling() {
58    let client = match Client::from_env() {
59        Ok(client_builder) => client_builder.build(),
60        Err(e) => {
61            println!("Failed to create client: {}", e);
62            return;
63        }
64    };
65
66    match client.send_chat(client.chat_simple("Hello")).await {
67        Ok(response) => {
68            if let Some(content) = response.content() {
69                println!("Success: {}", content);
70            } else {
71                println!("Success: (no content)");
72            }
73        }
74        Err(e) => println!("Error: {}", e),
75    }
76}
77
78async fn pattern_matching_errors() {
79    let Ok(client_builder) = Client::from_env() else {
80        return;
81    };
82    let client = client_builder.build();
83
84    // Simulate various errors by using invalid parameters
85    let builder = client.chat().user("test");
86    let result = client.send_chat(builder).await;
87
88    match result {
89        Ok(_) => println!("Unexpected success"),
90        Err(e) => match e {
91            Error::Api { message, .. } => {
92                println!("API Error: {}", message);
93            }
94            Error::RateLimit(message) => {
95                println!("Rate limited: {}", message);
96            }
97            Error::Authentication(message) => {
98                println!("Authentication failed: {}", message);
99            }
100            Error::Http(source) => {
101                println!("Network error: {}", source);
102            }
103            Error::Json(source) => {
104                println!("Serialization error: {}", source);
105            }
106            Error::Stream(message) => {
107                println!("Stream error: {}", message);
108            }
109            Error::InvalidRequest(message) => {
110                println!("Invalid request: {}", message);
111            }
112            Error::Config(message) => {
113                println!("Configuration error: {}", message);
114            }
115            _ => {
116                println!("Other error: {}", e);
117            }
118        },
119    }
120}
121
122async fn rate_limit_handling() {
123    const MAX_RETRIES: u32 = 3;
124
125    let Ok(client_builder) = Client::from_env() else {
126        return;
127    };
128    let client = client_builder.build();
129
130    // Retry logic for rate limiting
131    let mut retries = 0;
132
133    loop {
134        match client.send_chat(client.chat_simple("Hello")).await {
135            Ok(response) => {
136                if let Some(content) = response.content() {
137                    println!("Success: {}", content);
138                } else {
139                    println!("Success: (no content)");
140                }
141                break;
142            }
143            Err(Error::RateLimit(_message)) => {
144                if retries >= MAX_RETRIES {
145                    println!("Max retries exceeded");
146                    break;
147                }
148
149                let wait_time = Duration::from_secs(1);
150                println!("Rate limited. Waiting {:?} before retry...", wait_time);
151                sleep(wait_time).await;
152                retries += 1;
153            }
154            Err(e) => {
155                println!("Other error: {}", e);
156                break;
157            }
158        }
159    }
160}
161
162async fn token_limit_handling() {
163    let Ok(client_builder) = Client::from_env() else {
164        return;
165    };
166    let client = client_builder.build();
167
168    // Generate a very long prompt that might exceed token limits
169    let long_text = "Lorem ipsum ".repeat(10000);
170
171    match client.send_chat(client.chat_simple(&long_text)).await {
172        Ok(_) => println!("Processed long text successfully"),
173        Err(Error::InvalidRequest(message)) if message.contains("token") => {
174            println!("Token limit issue: {}", message);
175
176            // Retry with truncated text
177            let truncated = &long_text[..1000];
178            println!("Retrying with truncated text...");
179
180            match client.send_chat(client.chat_simple(truncated)).await {
181                Ok(response) => {
182                    if let Some(content) = response.content() {
183                        println!("Success with truncated: {}", content);
184                    } else {
185                        println!("Success with truncated: (no content)");
186                    }
187                }
188                Err(e) => println!("Still failed: {}", e),
189            }
190        }
191        Err(e) => println!("Other error: {}", e),
192    }
193}
194
195async fn auth_error_handling() -> Result<()> {
196    // Try with invalid API key
197    let config = Config::builder().api_key("invalid-api-key").build();
198    let invalid_client = Client::builder(config)?.build();
199
200    match invalid_client
201        .send_chat(invalid_client.chat_simple("Hello"))
202        .await
203    {
204        Ok(_) => println!("Unexpected success"),
205        Err(Error::Authentication(message)) => {
206            println!("Authentication failed as expected: {}", message);
207
208            // Suggest remediation
209            println!("Suggestions:");
210            println!("1. Check your OPENAI_API_KEY environment variable");
211            println!("2. Verify API key at https://platform.openai.com/api-keys");
212            println!("3. Ensure your API key has necessary permissions");
213        }
214        Err(e) => println!("Unexpected error type: {}", e),
215    }
216
217    Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221    use openai_ergonomic::Config;
222    use reqwest_middleware::ClientBuilder;
223
224    // Create a reqwest client with very short timeout to simulate network issues
225    let reqwest_client = reqwest::Client::builder()
226        .timeout(Duration::from_secs(1))
227        .build()
228        .expect("Failed to build reqwest client");
229
230    let http_client = ClientBuilder::new(reqwest_client).build();
231
232    let config = Config::builder()
233        .api_key("test-key")
234        .http_client(http_client)
235        .build();
236
237    let client = Client::builder(config)?.build();
238
239    match client.send_chat(client.chat_simple("Hello")).await {
240        Ok(_) => println!("Unexpected success"),
241        Err(Error::Http(source)) => {
242            println!("Network error as expected: {}", source);
243
244            // Implement exponential backoff
245            let mut backoff = Duration::from_millis(100);
246            for attempt in 1..=3 {
247                println!("Retry attempt {} after {:?}", attempt, backoff);
248                sleep(backoff).await;
249                backoff *= 2;
250
251                // In real scenario, retry with proper timeout
252                // match client.send_chat(client.chat_simple("Hello")).await { ... }
253            }
254        }
255        Err(e) => println!("Other error: {}", e),
256    }
257
258    Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262    let client = Client::from_env()?.build();
263
264    // Wrap errors with custom context
265    let result = client
266        .send_chat(client.chat_simple("Analyze this data"))
267        .await
268        .map_err(|e| {
269            eprintln!("Context: Failed during data analysis task");
270            eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271            eprintln!("Original error: {}", e);
272            e
273        })?;
274
275    if let Some(content) = result.content() {
276        println!("Result: {}", content);
277    } else {
278        println!("Result: (no content)");
279    }
280    Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284    let client = Client::from_env()?.build();
285
286    // Strategy 1: Fallback to simpler model
287    let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288    println!("Fallback strategy result: {}", result);
289
290    // Strategy 2: Circuit breaker pattern
291    let circuit_breaker = CircuitBreaker::new();
292    if circuit_breaker.is_open() {
293        println!("Circuit breaker is open, skipping API calls");
294        return Ok(());
295    }
296
297    match client.send_chat(client.chat_simple("Test")).await {
298        Ok(response) => {
299            circuit_breaker.record_success();
300            if let Some(content) = response.content() {
301                println!("Circuit breaker success: {}", content);
302            } else {
303                println!("Circuit breaker success: (no content)");
304            }
305        }
306        Err(e) => {
307            circuit_breaker.record_failure();
308            println!("Circuit breaker failure: {}", e);
309        }
310    }
311
312    // Strategy 3: Request hedging (parallel requests with first success wins)
313    let hedge_result = hedged_request(&client).await?;
314    println!("Hedged request result: {}", hedge_result);
315
316    Ok(())
317}
examples/chat_streaming.rs (line 22)
18async fn main() -> Result<()> {
19    println!("=== Chat Streaming Examples ===\n");
20
21    // Initialize client
22    let client = Client::from_env()?.build();
23
24    // Example 1: Basic streaming
25    println!("1. Basic Streaming:");
26    basic_streaming(&client).await?;
27
28    // Example 2: Streaming with parameters
29    println!("\n2. Streaming with Parameters:");
30    streaming_with_parameters(&client).await?;
31
32    // Example 3: Collect full content
33    println!("\n3. Collect Full Content:");
34    collect_content(&client).await?;
35
36    // Example 4: Stream with system message
37    println!("\n4. Stream with System Message:");
38    streaming_with_system(&client).await?;
39
40    // Example 5: Multiple user turns
41    println!("\n5. Multiple User Turns:");
42    multiple_turns(&client).await?;
43
44    println!("\n=== All examples completed successfully ===");
45
46    Ok(())
47}
examples/assistants_code_interpreter.rs (line 51)
44async fn main() -> Result<(), Box<dyn std::error::Error>> {
45    println!(" OpenAI Ergonomic - Code Interpreter Assistant Example\n");
46
47    // Initialize client from environment variables
48    let _client = match Client::from_env() {
49        Ok(client_builder) => {
50            println!(" Client initialized successfully");
51            client_builder.build()
52        }
53        Err(e) => {
54            eprintln!(" Failed to initialize client: {e}");
55            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
56            return Err(e.into());
57        }
58    };
59
60    // Demonstrate different code interpreter use cases
61    run_data_analysis_example()?;
62    run_mathematical_computation_example()?;
63    run_visualization_example()?;
64    run_file_processing_example()?;
65
66    println!("\n Code Interpreter examples completed successfully!");
67    Ok(())
68}
examples/assistants_file_search.rs (line 61)
54async fn main() -> Result<(), Box<dyn std::error::Error>> {
55    println!(" OpenAI Ergonomic - File Search Assistant Example (RAG)\n");
56
57    // Initialize client from environment variables
58    let _client = match Client::from_env() {
59        Ok(client_builder) => {
60            println!(" Client initialized successfully");
61            client_builder.build()
62        }
63        Err(e) => {
64            eprintln!(" Failed to initialize client: {e}");
65            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
66            return Err(e.into());
67        }
68    };
69
70    // Demonstrate different RAG use cases
71    run_knowledge_base_example()?;
72    run_document_qa_example()?;
73    run_research_assistant_example()?;
74    run_citation_example()?;
75    run_multi_document_analysis_example()?;
76
77    println!("\n File Search RAG examples completed successfully!");
78    Ok(())
79}
examples/vector_stores.rs (line 60)
53async fn main() -> Result<(), Box<dyn std::error::Error>> {
54    println!(" OpenAI Ergonomic - Vector Stores Example\n");
55
56    // Initialize client from environment variables
57    let _client = match Client::from_env() {
58        Ok(client_builder) => {
59            println!(" Client initialized successfully");
60            client_builder.build()
61        }
62        Err(e) => {
63            eprintln!(" Failed to initialize client: {e}");
64            eprintln!(" Make sure OPENAI_API_KEY is set in your environment");
65            return Err(e.into());
66        }
67    };
68
69    // Demonstrate different vector store use cases
70    run_basic_vector_store_example()?;
71    run_document_management_example()?;
72    run_semantic_search_example()?;
73    run_enterprise_knowledge_base_example()?;
74    run_vector_store_lifecycle_example()?;
75    run_advanced_search_patterns_example()?;
76
77    println!("\n Vector Stores examples completed successfully!");
78    Ok(())
79}

Auto Trait Implementations§

impl<T> Freeze for ClientBuilder<T>

impl<T = ()> !RefUnwindSafe for ClientBuilder<T>

impl<T> Send for ClientBuilder<T>

impl<T> Sync for ClientBuilder<T>

impl<T> Unpin for ClientBuilder<T>

impl<T = ()> !UnwindSafe for ClientBuilder<T>

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T> FutureExt for T

fn with_context(self, otel_cx: Context) -> WithContext<Self>

Attaches the provided Context to this type, returning a WithContext wrapper.

fn with_current_context(self) -> WithContext<Self>

Attaches the current Context to this type, returning a WithContext wrapper.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> PolicyExt for T
where T: ?Sized,

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow.

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V

impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.

impl<T> ErasedDestructor for T
where T: 'static,