pub struct ConfigBuilder { /* private fields */ }

Expand description
Builder for creating OpenAI client configuration.
Implementations§
Source§impl ConfigBuilder
impl ConfigBuilder
Source
pub fn api_key(self, api_key: impl Into<String>) -> Self
pub fn api_key(self, api_key: impl Into<String>) -> Self
Set the API key.
Examples found in repository
examples/testing_patterns.rs (line 140)
138 fn client(&self) -> Result<Client> {
139 let config = Config::builder()
140 .api_key("test-api-key")
141 .api_base(&self.base_url())
142 .build();
143
144 Ok(Client::builder(config)?.build())
145 }
146
147 /// Configure error simulation
148 fn configure_errors(&self, config: ErrorSimulationConfig) {
149 *self.error_config.lock().unwrap() = config;
150 }
151
152 /// Mock a chat completion response
153 async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154 let mock_response = serde_json::json!({
155 "id": "chatcmpl-123",
156 "object": "chat.completion",
157 "created": 1677652288,
158 "model": "gpt-3.5-turbo",
159 "choices": [{
160 "index": 0,
161 "message": {
162 "role": "assistant",
163 "content": response_text
164 },
165 "finish_reason": "stop"
166 }],
167 "usage": {
168 "prompt_tokens": 50,
169 "completion_tokens": 20,
170 "total_tokens": 70
171 }
172 });
173
174 self.server
175 .mock("POST", "/v1/chat/completions")
176 .match_body(mockito::Matcher::JsonString(
177 serde_json::json!({
178 "model": "gpt-3.5-turbo",
179 "messages": [{"role": "user", "content": expected_prompt}]
180 })
181 .to_string(),
182 ))
183 .with_status(200)
184 .with_header("content-type", "application/json")
185 .with_body(mock_response.to_string())
186 .create_async()
187 .await;
188 }
189
190 /// Mock a streaming chat completion response
191 async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192 let mut sse_data = String::new();
193
194 for (i, chunk) in response_chunks.iter().enumerate() {
195 let chunk_response = serde_json::json!({
196 "id": "chatcmpl-123",
197 "object": "chat.completion.chunk",
198 "created": 1677652288,
199 "model": "gpt-3.5-turbo",
200 "choices": [{
201 "index": 0,
202 "delta": {
203 "content": chunk
204 },
205 "finish_reason": if i == response_chunks.len() - 1 { "stop" } else { "null" }
206 }]
207 });
208
209 sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210 }
211
212 sse_data.push_str("data: [DONE]\n\n");
213
214 self.server
215 .mock("POST", "/v1/chat/completions")
216 .match_header("accept", "text/event-stream")
217 .with_status(200)
218 .with_header("content-type", "text/event-stream")
219 .with_body(sse_data)
220 .create_async()
221 .await;
222 }
223
224 /// Mock an error response (rate limit, server error, etc.)
225 async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226 let (status, body) = match error_type {
227 ErrorType::RateLimit => (
228 429,
229 serde_json::json!({
230 "error": {
231 "type": "rate_limit_exceeded",
232 "message": "Rate limit exceeded, please try again later"
233 }
234 })
235 .to_string(),
236 ),
237 ErrorType::ServerError => (
238 500,
239 serde_json::json!({
240 "error": {
241 "type": "server_error",
242 "message": "Internal server error"
243 }
244 })
245 .to_string(),
246 ),
247 ErrorType::InvalidRequest => (
248 400,
249 serde_json::json!({
250 "error": {
251 "type": "invalid_request_error",
252 "message": "Invalid request parameters"
253 }
254 })
255 .to_string(),
256 ),
257 ErrorType::Unauthorized => (
258 401,
259 serde_json::json!({
260 "error": {
261 "type": "invalid_request_error",
262 "message": "Incorrect API key provided"
263 }
264 })
265 .to_string(),
266 ),
267 };
268
269 self.server
270 .mock("POST", endpoint)
271 .with_status(status)
272 .with_header("content-type", "application/json")
273 .with_body(body)
274 .create_async()
275 .await;
276 }
277
278 /// Get logged requests for verification
279 fn get_request_log(&self) -> Vec<MockRequest> {
280 self.request_log.lock().unwrap().clone()
281 }
282
283 /// Clear request log
284 fn clear_request_log(&self) {
285 self.request_log.lock().unwrap().clear();
286 }
287
288 /// Verify that a specific request was made
289 fn verify_request(&self, method: &str, path: &str) -> bool {
290 let log = self.request_log.lock().unwrap();
291 log.iter()
292 .any(|req| req.method == method && req.path == path)
293 }
294}
295
296/// Types of errors to simulate in testing
297#[derive(Debug, Clone)]
298enum ErrorType {
299 RateLimit,
300 ServerError,
301 InvalidRequest,
302 Unauthorized,
303}
304
305/// Test utilities for OpenAI API testing
306struct TestUtils;
307
308impl TestUtils {
309 /// Create a test client with mock configuration
310 fn create_test_client() -> Result<Client> {
311 let config = Config::builder()
312 .api_key("test-api-key")
313 .api_base("http://localhost:1234") // Mock server URL
314 .max_retries(2)
315 .build();
316
317 Ok(Client::builder(config)?.build())
318 }
319
320 /// Assert that a response contains expected content
321 fn assert_response_content(response: &str, expected_content: &str) {
322 assert!(
323 response.contains(expected_content),
324 "Response '{}' does not contain expected content '{}'",
325 response,
326 expected_content
327 );
328 }
329
330 /// Assert token usage is within expected bounds
331 fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
332 assert!(
333 usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
334 "Token usage {} is outside expected range {}-{}",
335 usage.total_tokens,
336 min_tokens,
337 max_tokens
338 );
339 }
340
341 /// Create test data for batch testing
342 fn create_test_prompts(count: usize) -> Vec<String> {
343 (0..count)
344 .map(|i| format!("Test prompt number {}", i + 1))
345 .collect()
346 }
347
348 /// Measure execution time of an async operation
349 async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
350 where
351 F: std::future::Future<Output = std::result::Result<T, E>>,
352 {
353 let start = Instant::now();
354 let result = operation.await;
355 let duration = start.elapsed();
356 (result, duration)
357 }
358
359 /// Create a mock response with custom token usage
360 fn create_mock_response_with_usage(
361 content: &str,
362 prompt_tokens: i32,
363 completion_tokens: i32,
364 ) -> String {
365 serde_json::json!({
366 "id": "chatcmpl-test",
367 "object": "chat.completion",
368 "created": 1677652288,
369 "model": "gpt-3.5-turbo",
370 "choices": [{
371 "index": 0,
372 "message": {
373 "role": "assistant",
374 "content": content
375 },
376 "finish_reason": "stop"
377 }],
378 "usage": {
379 "prompt_tokens": prompt_tokens,
380 "completion_tokens": completion_tokens,
381 "total_tokens": prompt_tokens + completion_tokens
382 }
383 })
384 .to_string()
385 }
386}
387
388/// Token usage information for testing
389#[derive(Debug, Clone, Serialize, Deserialize)]
390struct TokenUsage {
391 prompt_tokens: i32,
392 completion_tokens: i32,
393 total_tokens: i32,
394}
395
396/// Integration test runner for live API testing
397struct IntegrationTestRunner {
398 client: Client,
399 test_results: Vec<IntegrationTestResult>,
400}
401
402/// Result of an integration test
403#[derive(Debug, Clone)]
404struct IntegrationTestResult {
405 test_name: String,
406 success: bool,
407 duration: Duration,
408 error_message: Option<String>,
409 response_data: Option<String>,
410}
411
412impl IntegrationTestRunner {
413 /// Create a new integration test runner
414 fn new(client: Client) -> Self {
415 Self {
416 client,
417 test_results: Vec::new(),
418 }
419 }
420
421 /// Run a basic chat completion test
422 async fn test_basic_chat_completion(&mut self) -> Result<()> {
423 let test_name = "basic_chat_completion";
424 info!("Running integration test: {}", test_name);
425
426 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
427 // Note: This would use real API in integration tests
428 // self.client.chat_simple("Hello, world!").await
429
430 // For demonstration, we'll simulate a successful response
431 Ok("Hello! How can I help you today?".to_string())
432 })
433 .await;
434
435 let test_result = match result {
436 Ok(response) => {
437 info!(" Basic chat completion test passed in {:?}", duration);
438 IntegrationTestResult {
439 test_name: test_name.to_string(),
440 success: true,
441 duration,
442 error_message: None,
443 response_data: Some(response),
444 }
445 }
446 Err(e) => {
447 error!(" Basic chat completion test failed: {}", e);
448 IntegrationTestResult {
449 test_name: test_name.to_string(),
450 success: false,
451 duration,
452 error_message: Some(e.to_string()),
453 response_data: None,
454 }
455 }
456 };
457
458 self.test_results.push(test_result);
459 Ok(())
460 }
461
462 /// Test streaming functionality
463 async fn test_streaming_completion(&mut self) -> Result<()> {
464 let test_name = "streaming_completion";
465 info!("Running integration test: {}", test_name);
466
467 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
468 // Note: This would use real streaming API in integration tests
469 // let mut stream = self.client.chat().user("Tell me a story").stream().await?;
470 // let mut chunks = Vec::new();
471 // while let Some(chunk) = stream.next().await {
472 // chunks.push(chunk?.content());
473 // }
474 // Ok(chunks.join(""))
475
476 // For demonstration, simulate streaming chunks
477 let chunks = vec!["Once", " upon", " a", " time..."];
478 Ok(chunks.join(""))
479 })
480 .await;
481
482 let test_result = match result {
483 Ok(response) => {
484 info!(" Streaming completion test passed in {:?}", duration);
485 IntegrationTestResult {
486 test_name: test_name.to_string(),
487 success: true,
488 duration,
489 error_message: None,
490 response_data: Some(response),
491 }
492 }
493 Err(e) => {
494 error!(" Streaming completion test failed: {}", e);
495 IntegrationTestResult {
496 test_name: test_name.to_string(),
497 success: false,
498 duration,
499 error_message: Some(e.to_string()),
500 response_data: None,
501 }
502 }
503 };
504
505 self.test_results.push(test_result);
506 Ok(())
507 }
508
509 /// Test error handling
510 async fn test_error_handling(&mut self) -> Result<()> {
511 let test_name = "error_handling";
512 info!("Running integration test: {}", test_name);
513
514 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
515 // Test with invalid API key to trigger authentication error
516 let bad_config = Config::builder().api_key("invalid-key").build();
517
518 let _bad_client = Client::builder(bad_config)?.build();
519
520 // This should fail with an authentication error
521 // bad_client.chat_simple("Test").await
522
523 // For demonstration, simulate an auth error
524 Err(Error::InvalidRequest("Authentication failed".to_string()))
525 })
526 .await;
527
528 let test_result = match result {
529 Ok(_) => {
530 warn!("Error handling test unexpectedly succeeded");
531 IntegrationTestResult {
532 test_name: test_name.to_string(),
533 success: false,
534 duration,
535 error_message: Some(
536 "Expected authentication error but request succeeded".to_string(),
537 ),
538 response_data: None,
539 }
540 }
541 Err(e) => {
542 info!(
543 " Error handling test passed (correctly failed) in {:?}",
544 duration
545 );
546 IntegrationTestResult {
547 test_name: test_name.to_string(),
548 success: true,
549 duration,
550 error_message: None,
551 response_data: Some(format!("Expected error: {}", e)),
552 }
553 }
554 };
555
556 self.test_results.push(test_result);
557 Ok(())
558 }
More examples
examples/auth_patterns.rs (line 97)
94async fn direct_api_key() -> Result<()> {
95 // Create client with direct API key
96 let api_key = "sk-your-api-key-here"; // Replace with actual key
97 let config = Config::builder().api_key(api_key).build();
98 let client = Client::builder(config)?.build();
99
100 println!("Client created with direct API key");
101
102 // Note: This will fail with invalid key
103 match client.send_chat(client.chat_simple("Hello")).await {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Response: {}", content);
107 } else {
108 println!("Response: (no content)");
109 }
110 }
111 Err(e) => println!("Expected error with demo key: {}", e),
112 }
113
114 Ok(())
115}
116
117fn organization_config() -> Result<()> {
118 // Configure client with organization ID
119 let config = Config::builder()
120 .api_key("your-api-key")
121 .organization("org-123456789")
122 .build();
123
124 let _client = Client::builder(config)?.build();
125 println!("Client configured with organization ID");
126
127 // Organization ID is sent in headers with all requests
128 // Useful for:
129 // - Usage tracking per organization
130 // - Access control
131 // - Billing segregation
132
133 Ok(())
134}
135
136fn project_config() -> Result<()> {
137 // Configure client with project ID
138 let config = Config::builder()
139 .api_key("your-api-key")
140 .project("proj-abc123")
141 .build();
142
143 let _client = Client::builder(config)?.build();
144 println!("Client configured with project ID");
145
146 // Project ID helps with:
147 // - Fine-grained usage tracking
148 // - Project-specific rate limits
149 // - Cost allocation
150
151 Ok(())
152}
153
154fn custom_headers() -> Result<()> {
155 // Note: Custom headers are not yet supported in the current API
156 // This would typically be used for:
157 // - Request tracing
158 // - A/B testing
159 // - Custom routing
160
161 let config = Config::builder().api_key("your-api-key").build();
162
163 let _client = Client::builder(config)?.build();
164 println!("Client configured (custom headers not yet supported)");
165
166 // TODO: Add support for custom headers in the future
167 println!("Custom headers feature planned for future implementation");
168
169 Ok(())
170}
171
172fn proxy_config() -> Result<()> {
173 // Note: Proxy configuration is not yet supported in the current API
174 // This would typically be used for:
175 // - Enterprise security policies
176 // - Request monitoring
177 // - Network isolation
178
179 let config = Config::builder().api_key("your-api-key").build();
180
181 let _client = Client::builder(config)?.build();
182 println!("Client configured (proxy support not yet available)");
183
184 // TODO: Add proxy support in the future
185 println!("Proxy configuration feature planned for future implementation");
186
187 Ok(())
188}
189
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
259}
260
261fn config_validation() -> Result<()> {
262 // Validate configuration before use
263
264 fn validate_api_key(key: &str) -> bool {
265 // OpenAI API keys typically start with "sk-"
266 key.starts_with("sk-") && key.len() > 20
267 }
268
269 fn validate_org_id(org: &str) -> bool {
270 // Organization IDs typically start with "org-"
271 org.starts_with("org-") && org.len() > 4
272 }
273
274 let api_key = "sk-test-key-123456789";
275 let org_id = "org-12345";
276
277 if !validate_api_key(api_key) {
278 println!("Warning: API key format appears invalid");
279 }
280
281 if !validate_org_id(org_id) {
282 println!("Warning: Organization ID format appears invalid");
283 }
284
285 // Build config only if validation passes
286 if validate_api_key(api_key) {
287 let config = Config::builder()
288 .api_key(api_key)
289 .organization(org_id)
290 .build();
291
292 let _client = Client::builder(config)?.build();
293 println!("Configuration validated and client created");
294 }
295
296 Ok(())
297 }
examples/error_handling.rs (line 197)
195async fn auth_error_handling() -> Result<()> {
196 // Try with invalid API key
197 let config = Config::builder().api_key("invalid-api-key").build();
198 let invalid_client = Client::builder(config)?.build();
199
200 match invalid_client
201 .send_chat(invalid_client.chat_simple("Hello"))
202 .await
203 {
204 Ok(_) => println!("Unexpected success"),
205 Err(Error::Authentication(message)) => {
206 println!("Authentication failed as expected: {}", message);
207
208 // Suggest remediation
209 println!("Suggestions:");
210 println!("1. Check your OPENAI_API_KEY environment variable");
211 println!("2. Verify API key at https://platform.openai.com/api-keys");
212 println!("3. Ensure your API key has necessary permissions");
213 }
214 Err(e) => println!("Unexpected error type: {}", e),
215 }
216
217 Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
259 }
examples/retry_patterns.rs (line 403)
388async fn idempotency_example(_client: &Client) -> Result<()> {
389 // Generate idempotency key
390 let idempotency_key = generate_idempotency_key();
391 println!("Using idempotency key: {}", idempotency_key);
392
393 // Simulate retrying the same request
394 for attempt in 1..=3 {
395 println!("\nAttempt {} with same idempotency key", attempt);
396
397 // In a real implementation, you'd pass the idempotency key in headers
398 let mut headers = std::collections::HashMap::new();
399 headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400 println!(" Would send {} headers", headers.len());
401
402 let config = Config::builder()
403 .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404 .build();
405
406 // Note: Headers (including idempotency key) are not yet supported in current API
407
408 let client_with_idempotency = Client::builder(config)?.build();
409
410 match client_with_idempotency
411 .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412 .await
413 {
414 Ok(response) => {
415 if let Some(content) = response.content() {
416 println!("Response: {}", content);
417 } else {
418 println!("Response: (no content)");
419 }
420 // Server should return same response for same idempotency key
421 }
422 Err(e) => println!("Error: {}", e),
423 }
424
425 if attempt < 3 {
426 sleep(Duration::from_secs(1)).await;
427 }
428 }
429
430 Ok(())
431 }
examples/moderations.rs (line 52)
34async fn main() -> Result<()> {
35 use openai_ergonomic::Config;
36
37 println!("=== Content Moderation Example ===\n");
38
39 // Initialize client
40 let client = if let Ok(c) = Client::from_env() {
41 c.build()
42 } else {
43 println!("Note: OPENAI_API_KEY not found. Running in demo mode.");
44 println!("Set OPENAI_API_KEY to test real API calls.\n");
45 println!("To use the Moderations API:");
46 println!(" let client = Client::from_env()?.build();");
47 println!(" let builder = client.moderations().check(\"text to moderate\");");
48 println!(" let response = client.moderations().create(builder).await?;");
49 println!();
50 println!("Running demonstration examples...\n");
51 // Create a dummy client for demo purposes
52 Client::builder(Config::builder().api_key("demo-key").build())?.build()
53 };
54
55 // Example 1: Basic moderation
56 println!("1. Basic Moderation:");
57 basic_moderation(&client);
58
59 // Example 2: Category detection
60 println!("\n2. Category Detection:");
61 category_detection(&client);
62
63 // Example 3: Custom thresholds
64 println!("\n3. Custom Thresholds:");
65 custom_thresholds(&client);
66
67 // Example 4: Multi-language moderation
68 println!("\n4. Multi-language Moderation:");
69 multilingual_moderation(&client);
70
71 // Example 5: Batch moderation
72 println!("\n5. Batch Moderation:");
73 batch_moderation(&client);
74
75 // Example 6: Response filtering
76 println!("\n6. Response Filtering:");
77 response_filtering(&client).await?;
78
79 // Example 7: Policy enforcement
80 println!("\n7. Policy Enforcement:");
81 policy_enforcement(&client);
82
83 // Example 8: Moderation pipeline
84 println!("\n8. Moderation Pipeline:");
85 moderation_pipeline(&client).await?;
86
87 Ok(())
88 }
examples/azure_openai.rs (line 78)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 // Initialize logging
39 tracing_subscriber::fmt::init();
40
41 println!("Azure OpenAI Integration Example");
42 println!("=================================\n");
43
44 // Example 1: Using environment variables
45 println!("Example 1: Using environment variables");
46 match Client::from_env() {
47 Ok(client) => {
48 let client = client.build();
49 println!("Client created from environment variables");
50
51 // Make a simple chat request
52 let builder = client.chat_simple("Hello from Azure OpenAI!");
53 match client.send_chat(builder).await {
54 Ok(response) => {
55 if let Some(content) = response.content() {
56 println!("Response: {content}");
57 }
58 }
59 Err(e) => {
60 println!("Error: {e}");
61 }
62 }
63 }
64 Err(e) => {
65 println!("Could not create client from environment: {e}");
66 println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67 }
68 }
69
70 println!("\n---\n");
71
72 // Example 2: Manual configuration
73 println!("Example 2: Manual configuration");
74
75 // This example shows how to configure Azure `OpenAI` programmatically.
76 // Replace these values with your actual Azure `OpenAI` resource details.
77 let config = Config::builder()
78 .api_key("your-azure-api-key")
79 .api_base("https://my-resource.openai.azure.com")
80 .azure_deployment("gpt-4")
81 .azure_api_version("2024-02-01")
82 .build();
83
84 println!("Config: {config:?}");
85 println!("Is Azure: {}", config.is_azure());
86
87 // Note: This will fail unless you provide valid credentials above
88 // Uncomment the following to test with your actual credentials:
89 /*
90 let client = Client::builder(config)?.build();
91
92 // Simple chat completion
93 let response = client
94 .chat_simple("Tell me a short joke about Azure")
95 .await?;
96 println!("Response: {}", response);
97
98 // More advanced chat with custom parameters
99 let response = client
100 .chat()
101 .user("What are the main features of Azure OpenAI?")
102 .temperature(0.7)
103 .max_tokens(500)
104 .send()
105 .await?;
106
107 println!("\nAdvanced response:");
108 println!("{}", response.content());
109
110 // Streaming example
111 use futures::StreamExt;
112
113 println!("\nStreaming example:");
114 let mut stream = client
115 .chat()
116 .user("Count from 1 to 5")
117 .stream()
118 .await?;
119
120 while let Some(chunk) = stream.next().await {
121 print!("{}", chunk?.content());
122 }
123 println!();
124 */
125
126 println!("\n---\n");
127
128 // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129 println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130 println!("\nOpenAI:");
131 println!(" - Endpoint: https://api.openai.com/v1");
132 println!(" - Authentication: Bearer token in Authorization header");
133 println!(" - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134 println!(" - Example: client.chat().model('gpt-4').send().await?\n");
135
136 println!("Azure OpenAI:");
137 println!(" - Endpoint: https://{{{{resource-name}}}}.openai.azure.com");
138 println!(" - Authentication: api-key header");
139 println!(" - Deployment specification: Use your deployment name");
140 println!(" - API version required as query parameter");
141 println!(" - Example: Configure deployment in Config, then use client normally\n");
142
143 println!("With this library, you only need to configure the endpoint and deployment,");
144 println!("and the library handles all the differences automatically!");
145
146 Ok(())
147}
Additional examples can be found in:
Source
pub fn api_base(self, api_base: impl Into<String>) -> Self
pub fn api_base(self, api_base: impl Into<String>) -> Self
Set the API base URL.
Examples found in repository
examples/testing_patterns.rs (line 141)
138 fn client(&self) -> Result<Client> {
139 let config = Config::builder()
140 .api_key("test-api-key")
141 .api_base(&self.base_url())
142 .build();
143
144 Ok(Client::builder(config)?.build())
145 }
146
147 /// Configure error simulation
148 fn configure_errors(&self, config: ErrorSimulationConfig) {
149 *self.error_config.lock().unwrap() = config;
150 }
151
152 /// Mock a chat completion response
153 async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154 let mock_response = serde_json::json!({
155 "id": "chatcmpl-123",
156 "object": "chat.completion",
157 "created": 1677652288,
158 "model": "gpt-3.5-turbo",
159 "choices": [{
160 "index": 0,
161 "message": {
162 "role": "assistant",
163 "content": response_text
164 },
165 "finish_reason": "stop"
166 }],
167 "usage": {
168 "prompt_tokens": 50,
169 "completion_tokens": 20,
170 "total_tokens": 70
171 }
172 });
173
174 self.server
175 .mock("POST", "/v1/chat/completions")
176 .match_body(mockito::Matcher::JsonString(
177 serde_json::json!({
178 "model": "gpt-3.5-turbo",
179 "messages": [{"role": "user", "content": expected_prompt}]
180 })
181 .to_string(),
182 ))
183 .with_status(200)
184 .with_header("content-type", "application/json")
185 .with_body(mock_response.to_string())
186 .create_async()
187 .await;
188 }
189
190 /// Mock a streaming chat completion response
191 async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192 let mut sse_data = String::new();
193
194 for (i, chunk) in response_chunks.iter().enumerate() {
195 let chunk_response = serde_json::json!({
196 "id": "chatcmpl-123",
197 "object": "chat.completion.chunk",
198 "created": 1677652288,
199 "model": "gpt-3.5-turbo",
200 "choices": [{
201 "index": 0,
202 "delta": {
203 "content": chunk
204 },
205 "finish_reason": if i == response_chunks.len() - 1 { "stop" } else { "null" }
206 }]
207 });
208
209 sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210 }
211
212 sse_data.push_str("data: [DONE]\n\n");
213
214 self.server
215 .mock("POST", "/v1/chat/completions")
216 .match_header("accept", "text/event-stream")
217 .with_status(200)
218 .with_header("content-type", "text/event-stream")
219 .with_body(sse_data)
220 .create_async()
221 .await;
222 }
223
224 /// Mock an error response (rate limit, server error, etc.)
225 async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226 let (status, body) = match error_type {
227 ErrorType::RateLimit => (
228 429,
229 serde_json::json!({
230 "error": {
231 "type": "rate_limit_exceeded",
232 "message": "Rate limit exceeded, please try again later"
233 }
234 })
235 .to_string(),
236 ),
237 ErrorType::ServerError => (
238 500,
239 serde_json::json!({
240 "error": {
241 "type": "server_error",
242 "message": "Internal server error"
243 }
244 })
245 .to_string(),
246 ),
247 ErrorType::InvalidRequest => (
248 400,
249 serde_json::json!({
250 "error": {
251 "type": "invalid_request_error",
252 "message": "Invalid request parameters"
253 }
254 })
255 .to_string(),
256 ),
257 ErrorType::Unauthorized => (
258 401,
259 serde_json::json!({
260 "error": {
261 "type": "invalid_request_error",
262 "message": "Incorrect API key provided"
263 }
264 })
265 .to_string(),
266 ),
267 };
268
269 self.server
270 .mock("POST", endpoint)
271 .with_status(status)
272 .with_header("content-type", "application/json")
273 .with_body(body)
274 .create_async()
275 .await;
276 }
277
278 /// Get logged requests for verification
279 fn get_request_log(&self) -> Vec<MockRequest> {
280 self.request_log.lock().unwrap().clone()
281 }
282
283 /// Clear request log
284 fn clear_request_log(&self) {
285 self.request_log.lock().unwrap().clear();
286 }
287
288 /// Verify that a specific request was made
289 fn verify_request(&self, method: &str, path: &str) -> bool {
290 let log = self.request_log.lock().unwrap();
291 log.iter()
292 .any(|req| req.method == method && req.path == path)
293 }
294}
295
296/// Types of errors to simulate in testing
297#[derive(Debug, Clone)]
298enum ErrorType {
299 RateLimit,
300 ServerError,
301 InvalidRequest,
302 Unauthorized,
303}
304
305/// Test utilities for OpenAI API testing
306struct TestUtils;
307
308impl TestUtils {
309 /// Create a test client with mock configuration
310 fn create_test_client() -> Result<Client> {
311 let config = Config::builder()
312 .api_key("test-api-key")
313 .api_base("http://localhost:1234") // Mock server URL
314 .max_retries(2)
315 .build();
316
317 Ok(Client::builder(config)?.build())
318 }
More examples
examples/auth_patterns.rs (line 225)
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
259}
examples/azure_openai.rs (line 79)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 // Initialize logging
39 tracing_subscriber::fmt::init();
40
41 println!("Azure OpenAI Integration Example");
42 println!("=================================\n");
43
44 // Example 1: Using environment variables
45 println!("Example 1: Using environment variables");
46 match Client::from_env() {
47 Ok(client) => {
48 let client = client.build();
49 println!("Client created from environment variables");
50
51 // Make a simple chat request
52 let builder = client.chat_simple("Hello from Azure OpenAI!");
53 match client.send_chat(builder).await {
54 Ok(response) => {
55 if let Some(content) = response.content() {
56 println!("Response: {content}");
57 }
58 }
59 Err(e) => {
60 println!("Error: {e}");
61 }
62 }
63 }
64 Err(e) => {
65 println!("Could not create client from environment: {e}");
66 println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67 }
68 }
69
70 println!("\n---\n");
71
72 // Example 2: Manual configuration
73 println!("Example 2: Manual configuration");
74
75 // This example shows how to configure Azure `OpenAI` programmatically.
76 // Replace these values with your actual Azure `OpenAI` resource details.
77 let config = Config::builder()
78 .api_key("your-azure-api-key")
79 .api_base("https://my-resource.openai.azure.com")
80 .azure_deployment("gpt-4")
81 .azure_api_version("2024-02-01")
82 .build();
83
84 println!("Config: {config:?}");
85 println!("Is Azure: {}", config.is_azure());
86
87 // Note: This will fail unless you provide valid credentials above
88 // Uncomment the following to test with your actual credentials:
89 /*
90 let client = Client::builder(config)?.build();
91
92 // Simple chat completion
93 let response = client
94 .chat_simple("Tell me a short joke about Azure")
95 .await?;
96 println!("Response: {}", response);
97
98 // More advanced chat with custom parameters
99 let response = client
100 .chat()
101 .user("What are the main features of Azure OpenAI?")
102 .temperature(0.7)
103 .max_tokens(500)
104 .send()
105 .await?;
106
107 println!("\nAdvanced response:");
108 println!("{}", response.content());
109
110 // Streaming example
111 use futures::StreamExt;
112
113 println!("\nStreaming example:");
114 let mut stream = client
115 .chat()
116 .user("Count from 1 to 5")
117 .stream()
118 .await?;
119
120 while let Some(chunk) = stream.next().await {
121 print!("{}", chunk?.content());
122 }
123 println!();
124 */
125
126 println!("\n---\n");
127
128 // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129 println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130 println!("\nOpenAI:");
131 println!(" - Endpoint: https://api.openai.com/v1");
132 println!(" - Authentication: Bearer token in Authorization header");
133 println!(" - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134 println!(" - Example: client.chat().model('gpt-4').send().await?\n");
135
136 println!("Azure OpenAI:");
137 println!(" - Endpoint: https://{{{{resource-name}}}}.openai.azure.com");
138 println!(" - Authentication: api-key header");
139 println!(" - Deployment specification: Use your deployment name");
140 println!(" - API version required as query parameter");
141 println!(" - Example: Configure deployment in Config, then use client normally\n");
142
143 println!("With this library, you only need to configure the endpoint and deployment,");
144 println!("and the library handles all the differences automatically!");
145
146 Ok(())
147}
Source
pub fn organization(self, organization: impl Into<String>) -> Self
pub fn organization(self, organization: impl Into<String>) -> Self
Set the organization ID.
Examples found in repository?
examples/auth_patterns.rs (line 121)
117fn organization_config() -> Result<()> {
118 // Configure client with organization ID
119 let config = Config::builder()
120 .api_key("your-api-key")
121 .organization("org-123456789")
122 .build();
123
124 let _client = Client::builder(config)?.build();
125 println!("Client configured with organization ID");
126
127 // Organization ID is sent in headers with all requests
128 // Useful for:
129 // - Usage tracking per organization
130 // - Access control
131 // - Billing segregation
132
133 Ok(())
134}
135
136fn project_config() -> Result<()> {
137 // Configure client with project ID
138 let config = Config::builder()
139 .api_key("your-api-key")
140 .project("proj-abc123")
141 .build();
142
143 let _client = Client::builder(config)?.build();
144 println!("Client configured with project ID");
145
146 // Project ID helps with:
147 // - Fine-grained usage tracking
148 // - Project-specific rate limits
149 // - Cost allocation
150
151 Ok(())
152}
153
154fn custom_headers() -> Result<()> {
155 // Note: Custom headers are not yet supported in the current API
156 // This would typically be used for:
157 // - Request tracing
158 // - A/B testing
159 // - Custom routing
160
161 let config = Config::builder().api_key("your-api-key").build();
162
163 let _client = Client::builder(config)?.build();
164 println!("Client configured (custom headers not yet supported)");
165
166 // TODO: Add support for custom headers in the future
167 println!("Custom headers feature planned for future implementation");
168
169 Ok(())
170}
171
172fn proxy_config() -> Result<()> {
173 // Note: Proxy configuration is not yet supported in the current API
174 // This would typically be used for:
175 // - Enterprise security policies
176 // - Request monitoring
177 // - Network isolation
178
179 let config = Config::builder().api_key("your-api-key").build();
180
181 let _client = Client::builder(config)?.build();
182 println!("Client configured (proxy support not yet available)");
183
184 // TODO: Add proxy support in the future
185 println!("Proxy configuration feature planned for future implementation");
186
187 Ok(())
188}
189
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
259}
260
261fn config_validation() -> Result<()> {
262 // Validate configuration before use
263
264 fn validate_api_key(key: &str) -> bool {
265 // OpenAI API keys typically start with "sk-"
266 key.starts_with("sk-") && key.len() > 20
267 }
268
269 fn validate_org_id(org: &str) -> bool {
270 // Organization IDs typically start with "org-"
271 org.starts_with("org-") && org.len() > 4
272 }
273
274 let api_key = "sk-test-key-123456789";
275 let org_id = "org-12345";
276
277 if !validate_api_key(api_key) {
278 println!("Warning: API key format appears invalid");
279 }
280
281 if !validate_org_id(org_id) {
282 println!("Warning: Organization ID format appears invalid");
283 }
284
285 // Build config only if validation passes
286 if validate_api_key(api_key) {
287 let config = Config::builder()
288 .api_key(api_key)
289 .organization(org_id)
290 .build();
291
292 let _client = Client::builder(config)?.build();
293 println!("Configuration validated and client created");
294 }
295
296 Ok(())
152}
Source
pub fn project(self, project: impl Into<String>) -> Self
pub fn project(self, project: impl Into<String>) -> Self
Set the project ID.
Examples found in repository?
examples/auth_patterns.rs (line 140)
136fn project_config() -> Result<()> {
137 // Configure client with project ID
138 let config = Config::builder()
139 .api_key("your-api-key")
140 .project("proj-abc123")
141 .build();
142
143 let _client = Client::builder(config)?.build();
144 println!("Client configured with project ID");
145
146 // Project ID helps with:
147 // - Fine-grained usage tracking
148 // - Project-specific rate limits
149 // - Cost allocation
150
151 Ok(())
152}
Source
pub fn max_retries(self, max_retries: u32) -> Self
pub fn max_retries(self, max_retries: u32) -> Self
Set the maximum number of retries.
Examples found in repository?
More examples
examples/auth_patterns.rs (line 209)
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
259}
Source
pub fn default_model(self, default_model: impl Into<String>) -> Self
pub fn default_model(self, default_model: impl Into<String>) -> Self
Set the default model to use.
Source
pub fn http_client(self, client: ClientWithMiddleware) -> Self
pub fn http_client(self, client: ClientWithMiddleware) -> Self
Set a custom HTTP client.
This allows you to provide a pre-configured ClientWithMiddleware with
custom settings like retry policies, connection pooling, proxies, etc.
§Example
ⓘ
use reqwest_middleware::ClientBuilder;
use reqwest_retry::{RetryTransientMiddleware, policies::ExponentialBackoff};
let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
let client = ClientBuilder::new(reqwest::Client::new())
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
.build();
let config = Config::builder()
.api_key("sk-...")
.http_client(client)
    .build();
Examples found in repository?
examples/error_handling.rs (line 234)
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
259}
More examples
examples/auth_patterns.rs (line 208)
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
141}
examples/http_middleware_retry.rs (line 40)
19async fn main() -> Result<()> {
20 println!("=== HTTP Middleware with Retry Example ===\n");
21
22 // Example 1: Basic client with retry middleware
23 println!("1. Creating client with retry middleware");
24
25 // Create a retry policy with exponential backoff
26 // This will retry transient errors up to 3 times with exponential delays
27 let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29 // Build an HTTP client with retry middleware
30 let http_client = ClientBuilder::new(reqwest::Client::new())
31 .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32 .build();
33
34 // Create OpenAI client with custom HTTP client
35 let config = Config::builder()
36 .api_key(
37 std::env::var("OPENAI_API_KEY")
38 .expect("OPENAI_API_KEY environment variable must be set"),
39 )
40 .http_client(http_client)
41 .build();
42
43 let client = Client::builder(config)?.build();
44
45 // Use the client normally - retries are handled automatically
46 println!("Sending chat completion request (retries are automatic)...");
47
48 let builder = client.chat_simple("Hello! How are you today?");
49 match client.send_chat(builder).await {
50 Ok(response) => {
51 println!("\nSuccess! Response received:");
52 if let Some(content) = response.content() {
53 println!("{content}");
54 }
55 }
56 Err(e) => {
57 eprintln!("\nError after retries: {e}");
58 }
59 }
60
61 // Example 2: Custom retry policy with more retries and custom delays
62 println!("\n2. Creating client with custom retry policy");
63
64 let custom_retry_policy = ExponentialBackoff::builder()
65 .retry_bounds(
66 std::time::Duration::from_millis(100), // minimum delay
67 std::time::Duration::from_secs(30), // maximum delay
68 )
69 .build_with_max_retries(5); // up to 5 retries
70
71 let custom_http_client = ClientBuilder::new(
72 reqwest::Client::builder()
73 .timeout(std::time::Duration::from_secs(60))
74 .build()
75 .expect("Failed to build reqwest client"),
76 )
77 .with(RetryTransientMiddleware::new_with_policy(
78 custom_retry_policy,
79 ))
80 .build();
81
82 let custom_config = Config::builder()
83 .api_key(
84 std::env::var("OPENAI_API_KEY")
85 .expect("OPENAI_API_KEY environment variable must be set"),
86 )
87 .http_client(custom_http_client)
88 .build();
89
90 let custom_client = Client::builder(custom_config)?.build();
91
92 println!("Sending request with custom retry policy (up to 5 retries)...");
93
94 let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95 match custom_client.send_chat(builder).await {
96 Ok(response) => {
97 println!("\nSuccess! Response received:");
98 if let Some(content) = response.content() {
99 println!("{content}");
100 }
101 }
102 Err(e) => {
103 eprintln!("\nError after all retries: {e}");
104 }
105 }
106
107 // Example 3: Using the builder pattern for more complex requests
108 println!("\n3. Using builder pattern with retry middleware");
109
110 let builder = custom_client
111 .responses()
112 .user("What are the three laws of robotics?")
113 .max_completion_tokens(200)
114 .temperature(0.7);
115
116 let response = custom_client.send_responses(builder).await?;
117
118 println!("\nResponse received:");
119 if let Some(content) = response.content() {
120 println!("{content}");
121 }
122
123 println!("\nToken usage:");
124 if let Some(usage) = response.usage() {
125 let prompt = usage.prompt_tokens;
126 let completion = usage.completion_tokens;
127 let total = usage.total_tokens;
128 println!(" Prompt tokens: {prompt}");
129 println!(" Completion tokens: {completion}");
130 println!(" Total tokens: {total}");
131 }
132
133 println!("\n=== Example completed successfully! ===");
134 println!("\nKey benefits of using reqwest-middleware:");
135 println!(" - Automatic retry of transient failures");
136 println!(" - Exponential backoff to avoid overwhelming servers");
137 println!(" - Composable middleware for logging, metrics, etc.");
138 println!(" - Transparent to application code - works with any request");
139
140 Ok(())
141}
Source
pub fn azure_deployment(self, deployment: impl Into<String>) -> Self
pub fn azure_deployment(self, deployment: impl Into<String>) -> Self
Set the Azure deployment name.
Required when using Azure OpenAI.
Examples found in repository?
examples/azure_openai.rs (line 80)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 // Initialize logging
39 tracing_subscriber::fmt::init();
40
41 println!("Azure OpenAI Integration Example");
42 println!("=================================\n");
43
44 // Example 1: Using environment variables
45 println!("Example 1: Using environment variables");
46 match Client::from_env() {
47 Ok(client) => {
48 let client = client.build();
49 println!("Client created from environment variables");
50
51 // Make a simple chat request
52 let builder = client.chat_simple("Hello from Azure OpenAI!");
53 match client.send_chat(builder).await {
54 Ok(response) => {
55 if let Some(content) = response.content() {
56 println!("Response: {content}");
57 }
58 }
59 Err(e) => {
60 println!("Error: {e}");
61 }
62 }
63 }
64 Err(e) => {
65 println!("Could not create client from environment: {e}");
66 println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67 }
68 }
69
70 println!("\n---\n");
71
72 // Example 2: Manual configuration
73 println!("Example 2: Manual configuration");
74
75 // This example shows how to configure Azure `OpenAI` programmatically.
76 // Replace these values with your actual Azure `OpenAI` resource details.
77 let config = Config::builder()
78 .api_key("your-azure-api-key")
79 .api_base("https://my-resource.openai.azure.com")
80 .azure_deployment("gpt-4")
81 .azure_api_version("2024-02-01")
82 .build();
83
84 println!("Config: {config:?}");
85 println!("Is Azure: {}", config.is_azure());
86
87 // Note: This will fail unless you provide valid credentials above
88 // Uncomment the following to test with your actual credentials:
89 /*
90 let client = Client::builder(config)?.build();
91
92 // Simple chat completion
93 let response = client
94 .chat_simple("Tell me a short joke about Azure")
95 .await?;
96 println!("Response: {}", response);
97
98 // More advanced chat with custom parameters
99 let response = client
100 .chat()
101 .user("What are the main features of Azure OpenAI?")
102 .temperature(0.7)
103 .max_tokens(500)
104 .send()
105 .await?;
106
107 println!("\nAdvanced response:");
108 println!("{}", response.content());
109
110 // Streaming example
111 use futures::StreamExt;
112
113 println!("\nStreaming example:");
114 let mut stream = client
115 .chat()
116 .user("Count from 1 to 5")
117 .stream()
118 .await?;
119
120 while let Some(chunk) = stream.next().await {
121 print!("{}", chunk?.content());
122 }
123 println!();
124 */
125
126 println!("\n---\n");
127
128 // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129 println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130 println!("\nOpenAI:");
131 println!(" - Endpoint: https://api.openai.com/v1");
132 println!(" - Authentication: Bearer token in Authorization header");
133 println!(" - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134 println!(" - Example: client.chat().model('gpt-4').send().await?\n");
135
136 println!("Azure OpenAI:");
137 println!(" - Endpoint: https://{{{{resource-name}}}}.openai.azure.com");
138 println!(" - Authentication: api-key header");
139 println!(" - Deployment specification: Use your deployment name");
140 println!(" - API version required as query parameter");
141 println!(" - Example: Configure deployment in Config, then use client normally\n");
142
143 println!("With this library, you only need to configure the endpoint and deployment,");
144 println!("and the library handles all the differences automatically!");
145
146 Ok(())
147}
Source
pub fn azure_api_version(self, version: impl Into<String>) -> Self
pub fn azure_api_version(self, version: impl Into<String>) -> Self
Set the Azure API version.
Defaults to “2024-02-01” if not specified.
Examples found in repository?
examples/azure_openai.rs (line 81)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 // Initialize logging
39 tracing_subscriber::fmt::init();
40
41 println!("Azure OpenAI Integration Example");
42 println!("=================================\n");
43
44 // Example 1: Using environment variables
45 println!("Example 1: Using environment variables");
46 match Client::from_env() {
47 Ok(client) => {
48 let client = client.build();
49 println!("Client created from environment variables");
50
51 // Make a simple chat request
52 let builder = client.chat_simple("Hello from Azure OpenAI!");
53 match client.send_chat(builder).await {
54 Ok(response) => {
55 if let Some(content) = response.content() {
56 println!("Response: {content}");
57 }
58 }
59 Err(e) => {
60 println!("Error: {e}");
61 }
62 }
63 }
64 Err(e) => {
65 println!("Could not create client from environment: {e}");
66 println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67 }
68 }
69
70 println!("\n---\n");
71
72 // Example 2: Manual configuration
73 println!("Example 2: Manual configuration");
74
75 // This example shows how to configure Azure `OpenAI` programmatically.
76 // Replace these values with your actual Azure `OpenAI` resource details.
77 let config = Config::builder()
78 .api_key("your-azure-api-key")
79 .api_base("https://my-resource.openai.azure.com")
80 .azure_deployment("gpt-4")
81 .azure_api_version("2024-02-01")
82 .build();
83
84 println!("Config: {config:?}");
85 println!("Is Azure: {}", config.is_azure());
86
87 // Note: This will fail unless you provide valid credentials above
88 // Uncomment the following to test with your actual credentials:
89 /*
90 let client = Client::builder(config)?.build();
91
92 // Simple chat completion
93 let response = client
94 .chat_simple("Tell me a short joke about Azure")
95 .await?;
96 println!("Response: {}", response);
97
98 // More advanced chat with custom parameters
99 let response = client
100 .chat()
101 .user("What are the main features of Azure OpenAI?")
102 .temperature(0.7)
103 .max_tokens(500)
104 .send()
105 .await?;
106
107 println!("\nAdvanced response:");
108 println!("{}", response.content());
109
110 // Streaming example
111 use futures::StreamExt;
112
113 println!("\nStreaming example:");
114 let mut stream = client
115 .chat()
116 .user("Count from 1 to 5")
117 .stream()
118 .await?;
119
120 while let Some(chunk) = stream.next().await {
121 print!("{}", chunk?.content());
122 }
123 println!();
124 */
125
126 println!("\n---\n");
127
128 // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129 println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130 println!("\nOpenAI:");
131 println!(" - Endpoint: https://api.openai.com/v1");
132 println!(" - Authentication: Bearer token in Authorization header");
133 println!(" - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134 println!(" - Example: client.chat().model('gpt-4').send().await?\n");
135
136 println!("Azure OpenAI:");
137 println!(" - Endpoint: https://{{{{resource-name}}}}.openai.azure.com");
138 println!(" - Authentication: api-key header");
139 println!(" - Deployment specification: Use your deployment name");
140 println!(" - API version required as query parameter");
141 println!(" - Example: Configure deployment in Config, then use client normally\n");
142
143 println!("With this library, you only need to configure the endpoint and deployment,");
144 println!("and the library handles all the differences automatically!");
145
146 Ok(())
147}
Source
pub fn build(self) -> Config
pub fn build(self) -> Config
Build the configuration.
Examples found in repository?
examples/testing_patterns.rs (line 142)
138 fn client(&self) -> Result<Client> {
139 let config = Config::builder()
140 .api_key("test-api-key")
141 .api_base(&self.base_url())
142 .build();
143
144 Ok(Client::builder(config)?.build())
145 }
146
147 /// Configure error simulation
148 fn configure_errors(&self, config: ErrorSimulationConfig) {
149 *self.error_config.lock().unwrap() = config;
150 }
151
152 /// Mock a chat completion response
153 async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154 let mock_response = serde_json::json!({
155 "id": "chatcmpl-123",
156 "object": "chat.completion",
157 "created": 1677652288,
158 "model": "gpt-3.5-turbo",
159 "choices": [{
160 "index": 0,
161 "message": {
162 "role": "assistant",
163 "content": response_text
164 },
165 "finish_reason": "stop"
166 }],
167 "usage": {
168 "prompt_tokens": 50,
169 "completion_tokens": 20,
170 "total_tokens": 70
171 }
172 });
173
174 self.server
175 .mock("POST", "/v1/chat/completions")
176 .match_body(mockito::Matcher::JsonString(
177 serde_json::json!({
178 "model": "gpt-3.5-turbo",
179 "messages": [{"role": "user", "content": expected_prompt}]
180 })
181 .to_string(),
182 ))
183 .with_status(200)
184 .with_header("content-type", "application/json")
185 .with_body(mock_response.to_string())
186 .create_async()
187 .await;
188 }
189
190 /// Mock a streaming chat completion response
191 async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192 let mut sse_data = String::new();
193
194 for (i, chunk) in response_chunks.iter().enumerate() {
195 let chunk_response = serde_json::json!({
196 "id": "chatcmpl-123",
197 "object": "chat.completion.chunk",
198 "created": 1677652288,
199 "model": "gpt-3.5-turbo",
200 "choices": [{
201 "index": 0,
202 "delta": {
203 "content": chunk
204 },
205 "finish_reason": if i == response_chunks.len() - 1 { "stop" } else { "null" }
206 }]
207 });
208
209 sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210 }
211
212 sse_data.push_str("data: [DONE]\n\n");
213
214 self.server
215 .mock("POST", "/v1/chat/completions")
216 .match_header("accept", "text/event-stream")
217 .with_status(200)
218 .with_header("content-type", "text/event-stream")
219 .with_body(sse_data)
220 .create_async()
221 .await;
222 }
223
224 /// Mock an error response (rate limit, server error, etc.)
225 async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226 let (status, body) = match error_type {
227 ErrorType::RateLimit => (
228 429,
229 serde_json::json!({
230 "error": {
231 "type": "rate_limit_exceeded",
232 "message": "Rate limit exceeded, please try again later"
233 }
234 })
235 .to_string(),
236 ),
237 ErrorType::ServerError => (
238 500,
239 serde_json::json!({
240 "error": {
241 "type": "server_error",
242 "message": "Internal server error"
243 }
244 })
245 .to_string(),
246 ),
247 ErrorType::InvalidRequest => (
248 400,
249 serde_json::json!({
250 "error": {
251 "type": "invalid_request_error",
252 "message": "Invalid request parameters"
253 }
254 })
255 .to_string(),
256 ),
257 ErrorType::Unauthorized => (
258 401,
259 serde_json::json!({
260 "error": {
261 "type": "invalid_request_error",
262 "message": "Incorrect API key provided"
263 }
264 })
265 .to_string(),
266 ),
267 };
268
269 self.server
270 .mock("POST", endpoint)
271 .with_status(status)
272 .with_header("content-type", "application/json")
273 .with_body(body)
274 .create_async()
275 .await;
276 }
277
278 /// Get logged requests for verification
279 fn get_request_log(&self) -> Vec<MockRequest> {
280 self.request_log.lock().unwrap().clone()
281 }
282
283 /// Clear request log
284 fn clear_request_log(&self) {
285 self.request_log.lock().unwrap().clear();
286 }
287
288 /// Verify that a specific request was made
289 fn verify_request(&self, method: &str, path: &str) -> bool {
290 let log = self.request_log.lock().unwrap();
291 log.iter()
292 .any(|req| req.method == method && req.path == path)
293 }
294}
295
296/// Types of errors to simulate in testing
297#[derive(Debug, Clone)]
298enum ErrorType {
299 RateLimit,
300 ServerError,
301 InvalidRequest,
302 Unauthorized,
303}
304
305/// Test utilities for OpenAI API testing
306struct TestUtils;
307
308impl TestUtils {
309 /// Create a test client with mock configuration
310 fn create_test_client() -> Result<Client> {
311 let config = Config::builder()
312 .api_key("test-api-key")
313 .api_base("http://localhost:1234") // Mock server URL
314 .max_retries(2)
315 .build();
316
317 Ok(Client::builder(config)?.build())
318 }
319
320 /// Assert that a response contains expected content
321 fn assert_response_content(response: &str, expected_content: &str) {
322 assert!(
323 response.contains(expected_content),
324 "Response '{}' does not contain expected content '{}'",
325 response,
326 expected_content
327 );
328 }
329
330 /// Assert token usage is within expected bounds
331 fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
332 assert!(
333 usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
334 "Token usage {} is outside expected range {}-{}",
335 usage.total_tokens,
336 min_tokens,
337 max_tokens
338 );
339 }
340
341 /// Create test data for batch testing
342 fn create_test_prompts(count: usize) -> Vec<String> {
343 (0..count)
344 .map(|i| format!("Test prompt number {}", i + 1))
345 .collect()
346 }
347
348 /// Measure execution time of an async operation
349 async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
350 where
351 F: std::future::Future<Output = std::result::Result<T, E>>,
352 {
353 let start = Instant::now();
354 let result = operation.await;
355 let duration = start.elapsed();
356 (result, duration)
357 }
358
359 /// Create a mock response with custom token usage
360 fn create_mock_response_with_usage(
361 content: &str,
362 prompt_tokens: i32,
363 completion_tokens: i32,
364 ) -> String {
365 serde_json::json!({
366 "id": "chatcmpl-test",
367 "object": "chat.completion",
368 "created": 1677652288,
369 "model": "gpt-3.5-turbo",
370 "choices": [{
371 "index": 0,
372 "message": {
373 "role": "assistant",
374 "content": content
375 },
376 "finish_reason": "stop"
377 }],
378 "usage": {
379 "prompt_tokens": prompt_tokens,
380 "completion_tokens": completion_tokens,
381 "total_tokens": prompt_tokens + completion_tokens
382 }
383 })
384 .to_string()
385 }
386}
387
388/// Token usage information for testing
389#[derive(Debug, Clone, Serialize, Deserialize)]
390struct TokenUsage {
391 prompt_tokens: i32,
392 completion_tokens: i32,
393 total_tokens: i32,
394}
395
396/// Integration test runner for live API testing
397struct IntegrationTestRunner {
398 client: Client,
399 test_results: Vec<IntegrationTestResult>,
400}
401
402/// Result of an integration test
403#[derive(Debug, Clone)]
404struct IntegrationTestResult {
405 test_name: String,
406 success: bool,
407 duration: Duration,
408 error_message: Option<String>,
409 response_data: Option<String>,
410}
411
412impl IntegrationTestRunner {
413 /// Create a new integration test runner
414 fn new(client: Client) -> Self {
415 Self {
416 client,
417 test_results: Vec::new(),
418 }
419 }
420
421 /// Run a basic chat completion test
422 async fn test_basic_chat_completion(&mut self) -> Result<()> {
423 let test_name = "basic_chat_completion";
424 info!("Running integration test: {}", test_name);
425
426 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
427 // Note: This would use real API in integration tests
428 // self.client.chat_simple("Hello, world!").await
429
430 // For demonstration, we'll simulate a successful response
431 Ok("Hello! How can I help you today?".to_string())
432 })
433 .await;
434
435 let test_result = match result {
436 Ok(response) => {
437 info!(" Basic chat completion test passed in {:?}", duration);
438 IntegrationTestResult {
439 test_name: test_name.to_string(),
440 success: true,
441 duration,
442 error_message: None,
443 response_data: Some(response),
444 }
445 }
446 Err(e) => {
447 error!(" Basic chat completion test failed: {}", e);
448 IntegrationTestResult {
449 test_name: test_name.to_string(),
450 success: false,
451 duration,
452 error_message: Some(e.to_string()),
453 response_data: None,
454 }
455 }
456 };
457
458 self.test_results.push(test_result);
459 Ok(())
460 }
461
462 /// Test streaming functionality
463 async fn test_streaming_completion(&mut self) -> Result<()> {
464 let test_name = "streaming_completion";
465 info!("Running integration test: {}", test_name);
466
467 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
468 // Note: This would use real streaming API in integration tests
469 // let mut stream = self.client.chat().user("Tell me a story").stream().await?;
470 // let mut chunks = Vec::new();
471 // while let Some(chunk) = stream.next().await {
472 // chunks.push(chunk?.content());
473 // }
474 // Ok(chunks.join(""))
475
476 // For demonstration, simulate streaming chunks
477 let chunks = vec!["Once", " upon", " a", " time..."];
478 Ok(chunks.join(""))
479 })
480 .await;
481
482 let test_result = match result {
483 Ok(response) => {
484 info!(" Streaming completion test passed in {:?}", duration);
485 IntegrationTestResult {
486 test_name: test_name.to_string(),
487 success: true,
488 duration,
489 error_message: None,
490 response_data: Some(response),
491 }
492 }
493 Err(e) => {
494 error!(" Streaming completion test failed: {}", e);
495 IntegrationTestResult {
496 test_name: test_name.to_string(),
497 success: false,
498 duration,
499 error_message: Some(e.to_string()),
500 response_data: None,
501 }
502 }
503 };
504
505 self.test_results.push(test_result);
506 Ok(())
507 }
508
509 /// Test error handling
510 async fn test_error_handling(&mut self) -> Result<()> {
511 let test_name = "error_handling";
512 info!("Running integration test: {}", test_name);
513
514 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
515 // Test with invalid API key to trigger authentication error
516 let bad_config = Config::builder().api_key("invalid-key").build();
517
518 let _bad_client = Client::builder(bad_config)?.build();
519
520 // This should fail with an authentication error
521 // bad_client.chat_simple("Test").await
522
523 // For demonstration, simulate an auth error
524 Err(Error::InvalidRequest("Authentication failed".to_string()))
525 })
526 .await;
527
528 let test_result = match result {
529 Ok(_) => {
530 warn!("Error handling test unexpectedly succeeded");
531 IntegrationTestResult {
532 test_name: test_name.to_string(),
533 success: false,
534 duration,
535 error_message: Some(
536 "Expected authentication error but request succeeded".to_string(),
537 ),
538 response_data: None,
539 }
540 }
541 Err(e) => {
542 info!(
543 " Error handling test passed (correctly failed) in {:?}",
544 duration
545 );
546 IntegrationTestResult {
547 test_name: test_name.to_string(),
548 success: true,
549 duration,
550 error_message: None,
551 response_data: Some(format!("Expected error: {}", e)),
552 }
553 }
554 };
555
556 self.test_results.push(test_result);
557 Ok(())
 558 }
More examples
examples/auth_patterns.rs (line 97)
94async fn direct_api_key() -> Result<()> {
95 // Create client with direct API key
96 let api_key = "sk-your-api-key-here"; // Replace with actual key
97 let config = Config::builder().api_key(api_key).build();
98 let client = Client::builder(config)?.build();
99
100 println!("Client created with direct API key");
101
102 // Note: This will fail with invalid key
103 match client.send_chat(client.chat_simple("Hello")).await {
104 Ok(response) => {
105 if let Some(content) = response.content() {
106 println!("Response: {}", content);
107 } else {
108 println!("Response: (no content)");
109 }
110 }
111 Err(e) => println!("Expected error with demo key: {}", e),
112 }
113
114 Ok(())
115}
116
117fn organization_config() -> Result<()> {
118 // Configure client with organization ID
119 let config = Config::builder()
120 .api_key("your-api-key")
121 .organization("org-123456789")
122 .build();
123
124 let _client = Client::builder(config)?.build();
125 println!("Client configured with organization ID");
126
127 // Organization ID is sent in headers with all requests
128 // Useful for:
129 // - Usage tracking per organization
130 // - Access control
131 // - Billing segregation
132
133 Ok(())
134}
135
136fn project_config() -> Result<()> {
137 // Configure client with project ID
138 let config = Config::builder()
139 .api_key("your-api-key")
140 .project("proj-abc123")
141 .build();
142
143 let _client = Client::builder(config)?.build();
144 println!("Client configured with project ID");
145
146 // Project ID helps with:
147 // - Fine-grained usage tracking
148 // - Project-specific rate limits
149 // - Cost allocation
150
151 Ok(())
152}
153
154fn custom_headers() -> Result<()> {
155 // Note: Custom headers are not yet supported in the current API
156 // This would typically be used for:
157 // - Request tracing
158 // - A/B testing
159 // - Custom routing
160
161 let config = Config::builder().api_key("your-api-key").build();
162
163 let _client = Client::builder(config)?.build();
164 println!("Client configured (custom headers not yet supported)");
165
166 // TODO: Add support for custom headers in the future
167 println!("Custom headers feature planned for future implementation");
168
169 Ok(())
170}
171
172fn proxy_config() -> Result<()> {
173 // Note: Proxy configuration is not yet supported in the current API
174 // This would typically be used for:
175 // - Enterprise security policies
176 // - Request monitoring
177 // - Network isolation
178
179 let config = Config::builder().api_key("your-api-key").build();
180
181 let _client = Client::builder(config)?.build();
182 println!("Client configured (proxy support not yet available)");
183
184 // TODO: Add proxy support in the future
185 println!("Proxy configuration feature planned for future implementation");
186
187 Ok(())
188}
189
190fn multiple_clients() -> Result<()> {
191 use reqwest_middleware::ClientBuilder;
192 use std::time::Duration;
193
194 // Create multiple clients for different use cases
195
196 // Production client with retries and longer timeout
197 let prod_http_client = ClientBuilder::new(
198 reqwest::Client::builder()
199 .timeout(Duration::from_secs(60))
200 .build()
201 .expect("Failed to build reqwest client"),
202 )
203 .build();
204
205 let prod_config = Config::builder()
206 .api_key("prod-api-key")
207 .organization("org-prod")
208 .http_client(prod_http_client)
209 .max_retries(5)
210 .build();
211 let prod_client = Client::builder(prod_config)?.build();
212
213 // Development client with debug logging and shorter timeout
214 let dev_http_client = ClientBuilder::new(
215 reqwest::Client::builder()
216 .timeout(Duration::from_secs(10))
217 .build()
218 .expect("Failed to build reqwest client"),
219 )
220 .build();
221
222 let dev_config = Config::builder()
223 .api_key("dev-api-key")
224 .organization("org-dev")
225 .api_base("https://api.openai-dev.com") // Custom endpoint
226 .http_client(dev_http_client)
227 .build();
228 let dev_client = Client::builder(dev_config)?.build();
229
230 // Test client with mocked responses
231 let test_config = Config::builder()
232 .api_key("test-api-key")
233 .api_base("http://localhost:8080") // Local mock server
234 .build();
235 let _test_client = Client::builder(test_config)?.build();
236
237 println!("Created multiple clients:");
238 println!("- Production client with retries");
239 println!("- Development client with custom endpoint");
240 println!("- Test client with mock server");
241
242 // Use appropriate client based on context
243 let _client = if cfg!(debug_assertions) {
244 &dev_client
245 } else {
246 &prod_client
247 };
248
249 println!(
250 "Using {} client",
251 if cfg!(debug_assertions) {
252 "dev"
253 } else {
254 "prod"
255 }
256 );
257
258 Ok(())
259}
260
261fn config_validation() -> Result<()> {
262 // Validate configuration before use
263
264 fn validate_api_key(key: &str) -> bool {
265 // OpenAI API keys typically start with "sk-"
266 key.starts_with("sk-") && key.len() > 20
267 }
268
269 fn validate_org_id(org: &str) -> bool {
270 // Organization IDs typically start with "org-"
271 org.starts_with("org-") && org.len() > 4
272 }
273
274 let api_key = "sk-test-key-123456789";
275 let org_id = "org-12345";
276
277 if !validate_api_key(api_key) {
278 println!("Warning: API key format appears invalid");
279 }
280
281 if !validate_org_id(org_id) {
282 println!("Warning: Organization ID format appears invalid");
283 }
284
285 // Build config only if validation passes
286 if validate_api_key(api_key) {
287 let config = Config::builder()
288 .api_key(api_key)
289 .organization(org_id)
290 .build();
291
292 let _client = Client::builder(config)?.build();
293 println!("Configuration validated and client created");
294 }
295
296 Ok(())
 297}
examples/error_handling.rs (line 197)
195async fn auth_error_handling() -> Result<()> {
196 // Try with invalid API key
197 let config = Config::builder().api_key("invalid-api-key").build();
198 let invalid_client = Client::builder(config)?.build();
199
200 match invalid_client
201 .send_chat(invalid_client.chat_simple("Hello"))
202 .await
203 {
204 Ok(_) => println!("Unexpected success"),
205 Err(Error::Authentication(message)) => {
206 println!("Authentication failed as expected: {}", message);
207
208 // Suggest remediation
209 println!("Suggestions:");
210 println!("1. Check your OPENAI_API_KEY environment variable");
211 println!("2. Verify API key at https://platform.openai.com/api-keys");
212 println!("3. Ensure your API key has necessary permissions");
213 }
214 Err(e) => println!("Unexpected error type: {}", e),
215 }
216
217 Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
 259}
examples/retry_patterns.rs (line 404)
388async fn idempotency_example(_client: &Client) -> Result<()> {
389 // Generate idempotency key
390 let idempotency_key = generate_idempotency_key();
391 println!("Using idempotency key: {}", idempotency_key);
392
393 // Simulate retrying the same request
394 for attempt in 1..=3 {
395 println!("\nAttempt {} with same idempotency key", attempt);
396
397 // In a real implementation, you'd pass the idempotency key in headers
398 let mut headers = std::collections::HashMap::new();
399 headers.insert("Idempotency-Key".to_string(), idempotency_key.clone());
400 println!(" Would send {} headers", headers.len());
401
402 let config = Config::builder()
403 .api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
404 .build();
405
406 // Note: Headers (including idempotency key) are not yet supported in current API
407
408 let client_with_idempotency = Client::builder(config)?.build();
409
410 match client_with_idempotency
411 .send_chat(client_with_idempotency.chat_simple("Idempotent request"))
412 .await
413 {
414 Ok(response) => {
415 if let Some(content) = response.content() {
416 println!("Response: {}", content);
417 } else {
418 println!("Response: (no content)");
419 }
420 // Server should return same response for same idempotency key
421 }
422 Err(e) => println!("Error: {}", e),
423 }
424
425 if attempt < 3 {
426 sleep(Duration::from_secs(1)).await;
427 }
428 }
429
430 Ok(())
 431}
examples/moderations.rs (line 52)
34async fn main() -> Result<()> {
35 use openai_ergonomic::Config;
36
37 println!("=== Content Moderation Example ===\n");
38
39 // Initialize client
40 let client = if let Ok(c) = Client::from_env() {
41 c.build()
42 } else {
43 println!("Note: OPENAI_API_KEY not found. Running in demo mode.");
44 println!("Set OPENAI_API_KEY to test real API calls.\n");
45 println!("To use the Moderations API:");
46 println!(" let client = Client::from_env()?.build();");
47 println!(" let builder = client.moderations().check(\"text to moderate\");");
48 println!(" let response = client.moderations().create(builder).await?;");
49 println!();
50 println!("Running demonstration examples...\n");
51 // Create a dummy client for demo purposes
52 Client::builder(Config::builder().api_key("demo-key").build())?.build()
53 };
54
55 // Example 1: Basic moderation
56 println!("1. Basic Moderation:");
57 basic_moderation(&client);
58
59 // Example 2: Category detection
60 println!("\n2. Category Detection:");
61 category_detection(&client);
62
63 // Example 3: Custom thresholds
64 println!("\n3. Custom Thresholds:");
65 custom_thresholds(&client);
66
67 // Example 4: Multi-language moderation
68 println!("\n4. Multi-language Moderation:");
69 multilingual_moderation(&client);
70
71 // Example 5: Batch moderation
72 println!("\n5. Batch Moderation:");
73 batch_moderation(&client);
74
75 // Example 6: Response filtering
76 println!("\n6. Response Filtering:");
77 response_filtering(&client).await?;
78
79 // Example 7: Policy enforcement
80 println!("\n7. Policy Enforcement:");
81 policy_enforcement(&client);
82
83 // Example 8: Moderation pipeline
84 println!("\n8. Moderation Pipeline:");
85 moderation_pipeline(&client).await?;
86
87 Ok(())
 88}
examples/azure_openai.rs (line 82)
37async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 // Initialize logging
39 tracing_subscriber::fmt::init();
40
41 println!("Azure OpenAI Integration Example");
42 println!("=================================\n");
43
44 // Example 1: Using environment variables
45 println!("Example 1: Using environment variables");
46 match Client::from_env() {
47 Ok(client) => {
48 let client = client.build();
49 println!("Client created from environment variables");
50
51 // Make a simple chat request
52 let builder = client.chat_simple("Hello from Azure OpenAI!");
53 match client.send_chat(builder).await {
54 Ok(response) => {
55 if let Some(content) = response.content() {
56 println!("Response: {content}");
57 }
58 }
59 Err(e) => {
60 println!("Error: {e}");
61 }
62 }
63 }
64 Err(e) => {
65 println!("Could not create client from environment: {e}");
66 println!("Make sure to set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT");
67 }
68 }
69
70 println!("\n---\n");
71
72 // Example 2: Manual configuration
73 println!("Example 2: Manual configuration");
74
75 // This example shows how to configure Azure `OpenAI` programmatically.
76 // Replace these values with your actual Azure `OpenAI` resource details.
77 let config = Config::builder()
78 .api_key("your-azure-api-key")
79 .api_base("https://my-resource.openai.azure.com")
80 .azure_deployment("gpt-4")
81 .azure_api_version("2024-02-01")
82 .build();
83
84 println!("Config: {config:?}");
85 println!("Is Azure: {}", config.is_azure());
86
87 // Note: This will fail unless you provide valid credentials above
88 // Uncomment the following to test with your actual credentials:
89 /*
90 let client = Client::builder(config)?.build();
91
92 // Simple chat completion
93 let response = client
94 .chat_simple("Tell me a short joke about Azure")
95 .await?;
96 println!("Response: {}", response);
97
98 // More advanced chat with custom parameters
99 let response = client
100 .chat()
101 .user("What are the main features of Azure OpenAI?")
102 .temperature(0.7)
103 .max_tokens(500)
104 .send()
105 .await?;
106
107 println!("\nAdvanced response:");
108 println!("{}", response.content());
109
110 // Streaming example
111 use futures::StreamExt;
112
113 println!("\nStreaming example:");
114 let mut stream = client
115 .chat()
116 .user("Count from 1 to 5")
117 .stream()
118 .await?;
119
120 while let Some(chunk) = stream.next().await {
121 print!("{}", chunk?.content());
122 }
123 println!();
124 */
125
126 println!("\n---\n");
127
128 // Example 3: Key differences between `OpenAI` and Azure `OpenAI`
129 println!("Example 3: Key differences between OpenAI and Azure OpenAI");
130 println!("\nOpenAI:");
131 println!(" - Endpoint: https://api.openai.com/v1");
132 println!(" - Authentication: Bearer token in Authorization header");
133 println!(" - Model specification: Use model names like 'gpt-4', 'gpt-3.5-turbo'");
134 println!(" - Example: client.chat().model('gpt-4').send().await?\n");
135
136 println!("Azure OpenAI:");
137 println!(" - Endpoint: https://{{{{resource-name}}}}.openai.azure.com");
138 println!(" - Authentication: api-key header");
139 println!(" - Deployment specification: Use your deployment name");
140 println!(" - API version required as query parameter");
141 println!(" - Example: Configure deployment in Config, then use client normally\n");
142
143 println!("With this library, you only need to configure the endpoint and deployment,");
144 println!("and the library handles all the differences automatically!");
145
146 Ok(())
 147}
Additional examples can be found in:
Trait Implementations§
Source§impl Clone for ConfigBuilder
impl Clone for ConfigBuilder
Source§fn clone(&self) -> ConfigBuilder
fn clone(&self) -> ConfigBuilder
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from
source. Read more
Source§impl Default for ConfigBuilder
impl Default for ConfigBuilder
Source§fn default() -> ConfigBuilder
fn default() -> ConfigBuilder
Returns the “default value” for a type. Read more
Auto Trait Implementations§
impl Freeze for ConfigBuilder
impl !RefUnwindSafe for ConfigBuilder
impl Send for ConfigBuilder
impl Sync for ConfigBuilder
impl Unpin for ConfigBuilder
impl !UnwindSafe for ConfigBuilder
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more