testing_patterns.rs

//! Testing patterns and strategies for `OpenAI` API integration.
//!
//! This example demonstrates comprehensive testing approaches including:
//! - Mock server setup with mockito for unit testing
//! - Integration testing with real API endpoints
//! - Test utilities and helper functions
//! - Response validation and assertion patterns
//! - Error handling and edge case testing
//! - Performance testing and benchmarking
//! - Contract testing and API compatibility validation
//!
//! Testing is crucial for AI-powered applications to ensure:
//! - Consistent behavior across different API responses
//! - Proper error handling for rate limits and failures
//! - Performance characteristics under load
//! - Cost management and usage tracking accuracy
//!
//! Run with: `cargo run --example testing_patterns`

#![allow(dead_code)]
#![allow(clippy::too_many_lines)]
#![allow(clippy::missing_const_for_fn)]
#![allow(clippy::unreadable_literal)]
#![allow(clippy::doc_markdown)]
#![allow(clippy::needless_borrows_for_generic_args)]
#![allow(clippy::format_push_string)]
#![allow(clippy::uninlined_format_args)]
#![allow(clippy::struct_field_names)]
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_precision_loss)]
#![allow(clippy::useless_vec)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::or_fun_call)]
#![allow(clippy::unused_self)]
#![allow(clippy::needless_pass_by_value)]
#![allow(clippy::if_not_else)]
#![allow(clippy::option_if_let_else)]
#![allow(clippy::significant_drop_tightening)]
#![allow(clippy::struct_excessive_bools)]

use openai_ergonomic::{Client, Config, Error, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use tokio::time::sleep;
use tracing::{error, info, warn};

/// Mock OpenAI server for testing purposes
struct MockOpenAIServer {
    /// Mock server instance
    server: mockito::ServerGuard,
    /// Predefined responses for different endpoints
    responses: Arc<Mutex<HashMap<String, MockResponse>>>,
    /// Request tracking for verification
    request_log: Arc<Mutex<Vec<MockRequest>>>,
    /// Error simulation settings
    error_config: Arc<Mutex<ErrorSimulationConfig>>,
}

/// Configuration for error simulation in tests
#[derive(Debug, Clone)]
struct ErrorSimulationConfig {
    /// Whether to simulate rate limits
    simulate_rate_limits: bool,
    /// Rate limit delay in seconds
    rate_limit_delay: u64,
    /// Whether to simulate server errors
    simulate_server_errors: bool,
    /// Error probability (0.0 to 1.0)
    error_probability: f64,
    /// Network timeout simulation
    simulate_timeouts: bool,
    /// Timeout delay in seconds
    timeout_delay: u64,
}

impl Default for ErrorSimulationConfig {
    fn default() -> Self {
        Self {
            simulate_rate_limits: false,
            rate_limit_delay: 60,
            simulate_server_errors: false,
            error_probability: 0.1,
            simulate_timeouts: false,
            timeout_delay: 30,
        }
    }
}
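
// A minimal sketch of how `error_probability` could drive deterministic error
// injection without pulling in a `rand` dependency: hash a request counter and
// compare the result against the configured probability. The method name and
// approach are illustrative, not part of the real crate.
impl ErrorSimulationConfig {
    /// Decide whether request number `request_index` should fail, using a
    /// cheap hash as a deterministic stand-in for a random draw.
    fn should_simulate_error(&self, request_index: u64) -> bool {
        use std::hash::{Hash, Hasher};
        if !self.simulate_server_errors {
            return false;
        }
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        request_index.hash(&mut hasher);
        // Map the hash onto [0, 1) and compare with the error probability.
        let draw = (hasher.finish() % 1000) as f64 / 1000.0;
        draw < self.error_probability
    }
}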

/// Mock response configuration
#[derive(Debug, Clone)]
struct MockResponse {
    /// HTTP status code
    status: u16,
    /// Response body
    body: String,
    /// Response headers
    headers: HashMap<String, String>,
    /// Simulated delay
    delay: Option<Duration>,
}

/// Logged request for verification
#[derive(Debug, Clone)]
struct MockRequest {
    /// HTTP method
    method: String,
    /// Request path
    path: String,
    /// Request headers
    headers: HashMap<String, String>,
    /// Request body
    body: String,
    /// Timestamp
    timestamp: Instant,
}

impl MockOpenAIServer {
    /// Create a new mock server with default configuration
    async fn new() -> Self {
        let server = mockito::Server::new_async().await;

        Self {
            server,
            responses: Arc::new(Mutex::new(HashMap::new())),
            request_log: Arc::new(Mutex::new(Vec::new())),
            error_config: Arc::new(Mutex::new(ErrorSimulationConfig::default())),
        }
    }

    /// Get the base URL for the mock server
    fn base_url(&self) -> String {
        self.server.url()
    }

    /// Create a client configured to use this mock server
    fn client(&self) -> Result<Client> {
        let config = Config::builder()
            .api_key("test-api-key")
            .api_base(&self.base_url())
            .build();

        Ok(Client::builder(config)?.build())
    }

    /// Configure error simulation
    fn configure_errors(&self, config: ErrorSimulationConfig) {
        *self.error_config.lock().unwrap() = config;
    }
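
    /// Record a request in the verification log.
    ///
    /// Note: `mockito` does not expose a request-inspection hook in this
    /// sketch, so test code is expected to call `record_request` alongside
    /// each call it makes; the log then backs `verify_request` below. This
    /// helper is illustrative and not part of the real crate.
    fn record_request(&self, method: &str, path: &str, body: &str) {
        self.request_log.lock().unwrap().push(MockRequest {
            method: method.to_string(),
            path: path.to_string(),
            headers: HashMap::new(),
            body: body.to_string(),
            timestamp: Instant::now(),
        });
    }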

    /// Mock a chat completion response
    async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
        let mock_response = serde_json::json!({
            "id": "chatcmpl-123",
            "object": "chat.completion",
            "created": 1677652288,
            "model": "gpt-3.5-turbo",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": response_text
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 50,
                "completion_tokens": 20,
                "total_tokens": 70
            }
        });

        self.server
            .mock("POST", "/v1/chat/completions")
            .match_body(mockito::Matcher::JsonString(
                serde_json::json!({
                    "model": "gpt-3.5-turbo",
                    "messages": [{"role": "user", "content": expected_prompt}]
                })
                .to_string(),
            ))
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(mock_response.to_string())
            .create_async()
            .await;
    }

    /// Mock a streaming chat completion response
    async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
        let mut sse_data = String::new();

        for (i, chunk) in response_chunks.iter().enumerate() {
            let chunk_response = serde_json::json!({
                "id": "chatcmpl-123",
                "object": "chat.completion.chunk",
                "created": 1677652288,
                "model": "gpt-3.5-turbo",
                "choices": [{
                    "index": 0,
                    "delta": {
                        "content": chunk
                    },
                    // The final chunk carries "stop"; earlier chunks carry a
                    // JSON null (not the string "null").
                    "finish_reason": if i == response_chunks.len() - 1 {
                        serde_json::Value::String("stop".to_string())
                    } else {
                        serde_json::Value::Null
                    }
                }]
            });

            sse_data.push_str(&format!("data: {}\n\n", chunk_response));
        }

        sse_data.push_str("data: [DONE]\n\n");

        self.server
            .mock("POST", "/v1/chat/completions")
            .match_header("accept", "text/event-stream")
            .with_status(200)
            .with_header("content-type", "text/event-stream")
            .with_body(sse_data)
            .create_async()
            .await;
    }
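
    /// Parse the SSE body produced by `mock_streaming_chat` back into content
    /// chunks. This companion helper is illustrative: it lets assertions check
    /// the streamed text without going through a high-level streaming client.
    fn parse_sse_content(sse_body: &str) -> Vec<String> {
        sse_body
            .lines()
            .filter_map(|line| line.strip_prefix("data: "))
            .filter(|payload| *payload != "[DONE]")
            .filter_map(|payload| serde_json::from_str::<serde_json::Value>(payload).ok())
            .filter_map(|value| {
                value["choices"][0]["delta"]["content"]
                    .as_str()
                    .map(ToString::to_string)
            })
            .collect()
    }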

    /// Mock an error response (rate limit, server error, etc.)
    async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
        let (status, body) = match error_type {
            ErrorType::RateLimit => (
                429,
                serde_json::json!({
                    "error": {
                        "type": "rate_limit_exceeded",
                        "message": "Rate limit exceeded, please try again later"
                    }
                })
                .to_string(),
            ),
            ErrorType::ServerError => (
                500,
                serde_json::json!({
                    "error": {
                        "type": "server_error",
                        "message": "Internal server error"
                    }
                })
                .to_string(),
            ),
            ErrorType::InvalidRequest => (
                400,
                serde_json::json!({
                    "error": {
                        "type": "invalid_request_error",
                        "message": "Invalid request parameters"
                    }
                })
                .to_string(),
            ),
            ErrorType::Unauthorized => (
                401,
                serde_json::json!({
                    "error": {
                        "type": "invalid_request_error",
                        "message": "Incorrect API key provided"
                    }
                })
                .to_string(),
            ),
        };

        self.server
            .mock("POST", endpoint)
            .with_status(status)
            .with_header("content-type", "application/json")
            .with_body(body)
            .create_async()
            .await;
    }

    /// Get logged requests for verification
    fn get_request_log(&self) -> Vec<MockRequest> {
        self.request_log.lock().unwrap().clone()
    }

    /// Clear request log
    fn clear_request_log(&self) {
        self.request_log.lock().unwrap().clear();
    }

    /// Verify that a specific request was made
    fn verify_request(&self, method: &str, path: &str) -> bool {
        let log = self.request_log.lock().unwrap();
        log.iter()
            .any(|req| req.method == method && req.path == path)
    }
}
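
// A minimal self-contained check of the mock endpoint, assuming nothing about
// the high-level `Client` API: it speaks raw HTTP/1.1 over a `TcpStream`, so
// the test exercises only `mockito` and the standard library. In a real test
// suite this request would instead go through `client.chat_simple(...)` as
// sketched in the comments elsewhere in this file.
#[cfg(test)]
mod mock_server_tests {
    use super::*;
    use std::io::{Read, Write};
    use std::net::TcpStream;

    #[tokio::test]
    async fn chat_completion_mock_returns_canned_body() {
        let mut server = MockOpenAIServer::new().await;
        server
            .mock_chat_completion("Hello, world!", "Hi there! How can I help you?")
            .await;

        // Build the exact JSON body the mock matcher expects.
        let body = serde_json::json!({
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hello, world!"}]
        })
        .to_string();
        let addr = server.base_url().trim_start_matches("http://").to_string();
        let request = format!(
            "POST /v1/chat/completions HTTP/1.1\r\nHost: {addr}\r\n\
             Content-Type: application/json\r\nContent-Length: {}\r\n\
             Connection: close\r\n\r\n{body}",
            body.len()
        );

        let mut stream = TcpStream::connect(&addr).unwrap();
        stream.write_all(request.as_bytes()).unwrap();
        let mut response = String::new();
        stream.read_to_string(&mut response).unwrap();
        assert!(response.contains("Hi there! How can I help you?"));
    }
}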

/// Types of errors to simulate in testing
#[derive(Debug, Clone)]
enum ErrorType {
    RateLimit,
    ServerError,
    InvalidRequest,
    Unauthorized,
}

/// Test utilities for OpenAI API testing
struct TestUtils;

impl TestUtils {
    /// Create a test client with mock configuration
    fn create_test_client() -> Result<Client> {
        let config = Config::builder()
            .api_key("test-api-key")
            .api_base("http://localhost:1234") // Mock server URL
            .max_retries(2)
            .build();

        Ok(Client::builder(config)?.build())
    }

    /// Assert that a response contains expected content
    fn assert_response_content(response: &str, expected_content: &str) {
        assert!(
            response.contains(expected_content),
            "Response '{}' does not contain expected content '{}'",
            response,
            expected_content
        );
    }

    /// Assert token usage is within expected bounds
    fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
        assert!(
            usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
            "Token usage {} is outside expected range {}-{}",
            usage.total_tokens,
            min_tokens,
            max_tokens
        );
    }

    /// Create test data for batch testing
    fn create_test_prompts(count: usize) -> Vec<String> {
        (0..count)
            .map(|i| format!("Test prompt number {}", i + 1))
            .collect()
    }

    /// Measure execution time of an async operation
    async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
    where
        F: std::future::Future<Output = std::result::Result<T, E>>,
    {
        let start = Instant::now();
        let result = operation.await;
        let duration = start.elapsed();
        (result, duration)
    }

    /// Create a mock response with custom token usage
    fn create_mock_response_with_usage(
        content: &str,
        prompt_tokens: i32,
        completion_tokens: i32,
    ) -> String {
        serde_json::json!({
            "id": "chatcmpl-test",
            "object": "chat.completion",
            "created": 1677652288,
            "model": "gpt-3.5-turbo",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": content
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens
            }
        })
        .to_string()
    }
}
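
// Illustrative usage of `TestUtils::time_async_operation`: wrap any fallible
// future to get back both its result and the elapsed wall-clock time. The
// sleep below is a stand-in for real network latency; the function name is
// an example only.
async fn example_timed_request() {
    let (result, elapsed) = TestUtils::time_async_operation::<_, String, Error>(async {
        sleep(Duration::from_millis(50)).await; // simulated latency
        Ok("simulated response".to_string())
    })
    .await;
    assert!(result.is_ok());
    assert!(elapsed >= Duration::from_millis(50));
}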

/// Token usage information for testing
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TokenUsage {
    prompt_tokens: i32,
    completion_tokens: i32,
    total_tokens: i32,
}
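
// Illustrative pairing of `TokenUsage` with `TestUtils::assert_token_usage`,
// mirroring the canned usage numbers in the chat-completion mock above.
fn example_token_usage_assertion() {
    let usage = TokenUsage {
        prompt_tokens: 50,
        completion_tokens: 20,
        total_tokens: 70,
    };
    TestUtils::assert_token_usage(&usage, 1, 100);
}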

/// Integration test runner for live API testing
struct IntegrationTestRunner {
    client: Client,
    test_results: Vec<IntegrationTestResult>,
}

/// Result of an integration test
#[derive(Debug, Clone)]
struct IntegrationTestResult {
    test_name: String,
    success: bool,
    duration: Duration,
    error_message: Option<String>,
    response_data: Option<String>,
}

impl IntegrationTestRunner {
    /// Create a new integration test runner
    fn new(client: Client) -> Self {
        Self {
            client,
            test_results: Vec::new(),
        }
    }

    /// Run a basic chat completion test
    async fn test_basic_chat_completion(&mut self) -> Result<()> {
        let test_name = "basic_chat_completion";
        info!("Running integration test: {}", test_name);

        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
            // Note: This would use real API in integration tests
            // self.client.chat_simple("Hello, world!").await

            // For demonstration, we'll simulate a successful response
            Ok("Hello! How can I help you today?".to_string())
        })
        .await;

        let test_result = match result {
            Ok(response) => {
                info!("Basic chat completion test passed in {:?}", duration);
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: true,
                    duration,
                    error_message: None,
                    response_data: Some(response),
                }
            }
            Err(e) => {
                error!("Basic chat completion test failed: {}", e);
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: false,
                    duration,
                    error_message: Some(e.to_string()),
                    response_data: None,
                }
            }
        };

        self.test_results.push(test_result);
        Ok(())
    }

    /// Test streaming functionality
    async fn test_streaming_completion(&mut self) -> Result<()> {
        let test_name = "streaming_completion";
        info!("Running integration test: {}", test_name);

        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
            // Note: This would use real streaming API in integration tests
            // let mut stream = self.client.chat().user("Tell me a story").stream().await?;
            // let mut chunks = Vec::new();
            // while let Some(chunk) = stream.next().await {
            //     chunks.push(chunk?.content());
            // }
            // Ok(chunks.join(""))

            // For demonstration, simulate streaming chunks
            let chunks = vec!["Once", " upon", " a", " time..."];
            Ok(chunks.join(""))
        })
        .await;

        let test_result = match result {
            Ok(response) => {
                info!("Streaming completion test passed in {:?}", duration);
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: true,
                    duration,
                    error_message: None,
                    response_data: Some(response),
                }
            }
            Err(e) => {
                error!("Streaming completion test failed: {}", e);
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: false,
                    duration,
                    error_message: Some(e.to_string()),
                    response_data: None,
                }
            }
        };

        self.test_results.push(test_result);
        Ok(())
    }

    /// Test error handling
    async fn test_error_handling(&mut self) -> Result<()> {
        let test_name = "error_handling";
        info!("Running integration test: {}", test_name);

        let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
            // Test with invalid API key to trigger authentication error
            let bad_config = Config::builder().api_key("invalid-key").build();

            let _bad_client = Client::builder(bad_config)?.build();

            // This should fail with an authentication error
            // bad_client.chat_simple("Test").await

            // For demonstration, simulate an auth error
            Err(Error::InvalidRequest("Authentication failed".to_string()))
        })
        .await;

        let test_result = match result {
            Ok(_) => {
                warn!("Error handling test unexpectedly succeeded");
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: false,
                    duration,
                    error_message: Some(
                        "Expected authentication error but request succeeded".to_string(),
                    ),
                    response_data: None,
                }
            }
            Err(e) => {
                info!(
                    "Error handling test passed (correctly failed) in {:?}",
                    duration
                );
                IntegrationTestResult {
                    test_name: test_name.to_string(),
                    success: true,
                    duration,
                    error_message: None,
                    response_data: Some(format!("Expected error: {}", e)),
                }
            }
        };

        self.test_results.push(test_result);
        Ok(())
    }

    /// Generate test report
    fn generate_report(&self) -> TestReport {
        let total_tests = self.test_results.len();
        let passed_tests = self.test_results.iter().filter(|r| r.success).count();
        let failed_tests = total_tests - passed_tests;

        let total_duration: Duration = self.test_results.iter().map(|r| r.duration).sum();

        let average_duration = if total_tests > 0 {
            total_duration / total_tests as u32
        } else {
            Duration::ZERO
        };

        TestReport {
            total_tests,
            passed_tests,
            failed_tests,
            total_duration,
            average_duration,
            test_results: self.test_results.clone(),
        }
    }
}

/// Comprehensive test report
#[derive(Debug)]
struct TestReport {
    total_tests: usize,
    passed_tests: usize,
    failed_tests: usize,
    total_duration: Duration,
    average_duration: Duration,
    test_results: Vec<IntegrationTestResult>,
}

impl TestReport {
    /// Print a formatted test report
    fn print_report(&self) {
        info!("=== Test Report ===");
        info!("Total tests: {}", self.total_tests);
        info!("Passed: {}", self.passed_tests);
        info!("Failed: {}", self.failed_tests);
        info!(
            "Success rate: {:.1}%",
            (self.passed_tests as f64 / self.total_tests as f64) * 100.0
        );
        info!("Total duration: {:?}", self.total_duration);
        info!("Average duration: {:?}", self.average_duration);

        if self.failed_tests > 0 {
            error!("Failed tests:");
            for result in &self.test_results {
                if !result.success {
                    error!(
                        "  - {}: {}",
                        result.test_name,
                        result
                            .error_message
                            .as_ref()
                            .unwrap_or(&"Unknown error".to_string())
                    );
                }
            }
        }
    }
}

/// Performance testing utilities
struct PerformanceTestRunner {
    client: Client,
}

impl PerformanceTestRunner {
    fn new(client: Client) -> Self {
        Self { client }
    }

    /// Run concurrent requests to test throughput
    async fn test_concurrent_requests(
        &self,
        concurrency: usize,
        requests_per_worker: usize,
    ) -> PerformanceResults {
        info!(
            "Running performance test with {} concurrent workers, {} requests each",
            concurrency, requests_per_worker
        );

        let start_time = Instant::now();
        let mut handles = Vec::new();

        for worker_id in 0..concurrency {
            let _client = self.client.clone(); // Assume Client implements Clone
            let handle = tokio::spawn(async move {
                let mut worker_results = Vec::new();

                for request_id in 0..requests_per_worker {
                    let request_start = Instant::now();

                    // Simulate API request
                    // let result = client.chat_simple(&format!("Request {} from worker {}", request_id, worker_id)).await;
                    let result: Result<String> =
                        Ok(format!("Response {} from worker {}", request_id, worker_id));

                    let request_duration = request_start.elapsed();

                    worker_results.push(RequestResult {
                        worker_id,
                        request_id,
                        duration: request_duration,
                        success: result.is_ok(),
                        error: result.err().map(|e| e.to_string()),
                    });

                    // Small delay to avoid overwhelming the API
                    sleep(Duration::from_millis(100)).await;
                }

                worker_results
            });

            handles.push(handle);
        }

        let mut all_results = Vec::new();
        for handle in handles {
            let worker_results = handle.await.unwrap();
            all_results.extend(worker_results);
        }

        let total_duration = start_time.elapsed();
        self.analyze_performance_results(all_results, total_duration)
    }

    fn analyze_performance_results(
        &self,
        results: Vec<RequestResult>,
        total_duration: Duration,
    ) -> PerformanceResults {
        let total_requests = results.len();
        let successful_requests = results.iter().filter(|r| r.success).count();
        let failed_requests = total_requests - successful_requests;

        let request_durations: Vec<Duration> = results.iter().map(|r| r.duration).collect();

        let min_duration = request_durations
            .iter()
            .min()
            .copied()
            .unwrap_or(Duration::ZERO);
        let max_duration = request_durations
            .iter()
            .max()
            .copied()
            .unwrap_or(Duration::ZERO);
        let avg_duration = if total_requests > 0 {
            request_durations.iter().sum::<Duration>() / total_requests as u32
        } else {
            Duration::ZERO
        };

        // Calculate percentiles
        let mut sorted_durations = request_durations;
        sorted_durations.sort();

        let p50 = if !sorted_durations.is_empty() {
            sorted_durations[sorted_durations.len() / 2]
        } else {
            Duration::ZERO
        };

        let p95 = if !sorted_durations.is_empty() {
            sorted_durations[(sorted_durations.len() * 95) / 100]
        } else {
            Duration::ZERO
        };

        // Use fractional seconds so sub-second runs don't report zero RPS.
        let requests_per_second = if total_duration.as_secs_f64() > 0.0 {
            total_requests as f64 / total_duration.as_secs_f64()
        } else {
            0.0
        };

        // Guard against division by zero when no requests were recorded.
        let error_rate = if total_requests > 0 {
            (failed_requests as f64 / total_requests as f64) * 100.0
        } else {
            0.0
        };

        PerformanceResults {
            total_requests,
            successful_requests,
            failed_requests,
            total_duration,
            min_duration,
            max_duration,
            avg_duration,
            p50_duration: p50,
            p95_duration: p95,
            requests_per_second,
            error_rate,
        }
    }
}
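
// A nearest-rank percentile helper equivalent to the inline indexing in
// `analyze_performance_results` above, shown separately to make the
// convention explicit: no interpolation, index = len * p / 100, clamped.
fn percentile(sorted: &[Duration], p: usize) -> Duration {
    if sorted.is_empty() {
        return Duration::ZERO;
    }
    let idx = (sorted.len() * p / 100).min(sorted.len() - 1);
    sorted[idx]
}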

/// Result of a single performance test request
#[derive(Debug)]
struct RequestResult {
    worker_id: usize,
    request_id: usize,
    duration: Duration,
    success: bool,
    error: Option<String>,
}

/// Performance test results
#[derive(Debug)]
struct PerformanceResults {
    total_requests: usize,
    successful_requests: usize,
    failed_requests: usize,
    total_duration: Duration,
    min_duration: Duration,
    max_duration: Duration,
    avg_duration: Duration,
    p50_duration: Duration,
    p95_duration: Duration,
    requests_per_second: f64,
    error_rate: f64,
}

impl PerformanceResults {
    fn print_results(&self) {
        info!("=== Performance Test Results ===");
        info!("Total requests: {}", self.total_requests);
        info!("Successful: {}", self.successful_requests);
        info!("Failed: {}", self.failed_requests);
        info!("Error rate: {:.2}%", self.error_rate);
        info!("Total duration: {:?}", self.total_duration);
        info!("Requests per second: {:.2}", self.requests_per_second);
        info!("Response times:");
        info!("  Min: {:?}", self.min_duration);
        info!("  Max: {:?}", self.max_duration);
        info!("  Average: {:?}", self.avg_duration);
        info!("  50th percentile: {:?}", self.p50_duration);
        info!("  95th percentile: {:?}", self.p95_duration);
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    // Initialize logging
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();

    info!("Starting testing patterns example");

    // Example 1: Unit testing with mock server
    info!("=== Example 1: Unit Testing with Mock Server ===");

    let mut mock_server = MockOpenAIServer::new().await;

    // Configure mock responses
    mock_server
        .mock_chat_completion("Hello, world!", "Hi there! How can I help you?")
        .await;

    let _client = mock_server.client()?;

    // Test basic functionality (this would be in a real unit test)
    info!("Testing basic chat completion with mock server");
    // Note: This would work with real implementation
    // let response = client.chat_simple("Hello, world!").await?;
    // TestUtils::assert_response_content(&response, "Hi there!");
    info!("Mock server test would pass with real implementation");

    // Test streaming responses
    info!("Setting up streaming mock");
    mock_server
        .mock_streaming_chat(vec!["Hello", " there", "! How", " can I", " help?"])
        .await;

    // Test error scenarios
    info!("Testing error scenarios");
    mock_server
        .mock_error_response("/v1/chat/completions", ErrorType::RateLimit)
        .await;

    // Verify request logging
    info!("Requests logged: {}", mock_server.get_request_log().len());

    // Example 2: Integration testing with real API
    info!("\n=== Example 2: Integration Testing ===");

    // Note: In a real scenario, this would use actual API credentials
    // For demonstration, we'll use a test client
    let integration_client = TestUtils::create_test_client()?;
    let mut integration_runner = IntegrationTestRunner::new(integration_client);

    // Run integration tests
    integration_runner.test_basic_chat_completion().await?;
    integration_runner.test_streaming_completion().await?;
    integration_runner.test_error_handling().await?;

    // Generate and display test report
    let report = integration_runner.generate_report();
    report.print_report();

    // Example 3: Performance testing
    info!("\n=== Example 3: Performance Testing ===");

    let perf_client = TestUtils::create_test_client()?;
    let perf_runner = PerformanceTestRunner::new(perf_client);

    // Run performance tests with different concurrency levels
    for concurrency in [1, 5, 10] {
        info!("Testing with {} concurrent workers", concurrency);
        let results = perf_runner.test_concurrent_requests(concurrency, 5).await;
        results.print_results();
    }

    // Example 4: Contract testing
    info!("\n=== Example 4: Contract Testing ===");

    // Test response schema validation
    let sample_response = TestUtils::create_mock_response_with_usage(
        "Test response content",
        25, // prompt tokens
        15, // completion tokens
    );

    // Parse and validate response structure
    if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(&sample_response) {
        // Validate required fields exist
        assert!(parsed["id"].is_string(), "Response must have id field");
        assert!(
            parsed["choices"].is_array(),
            "Response must have choices array"
        );
        assert!(
            parsed["usage"]["total_tokens"].is_number(),
            "Response must have token usage"
        );
        info!("Contract validation passed for response schema");
    }

    // Example 5: Test data generation and validation
    info!("\n=== Example 5: Test Data Generation ===");

    let test_prompts = TestUtils::create_test_prompts(5);
    info!("Generated {} test prompts", test_prompts.len());

    for (i, prompt) in test_prompts.iter().enumerate() {
        info!("  Prompt {}: {}", i + 1, prompt);
    }

    // Example 6: Stress testing and edge cases
    info!("\n=== Example 6: Edge Case Testing ===");

    // Test with very long input
    let long_input = "word ".repeat(1000); // ~5000 characters
    info!("Testing with long input ({} chars)", long_input.len());

    // Test with empty input
    info!("Testing with empty input");

    // Test with special characters
    let special_chars = "Testing with émojis 🎉 and spëcial çharacters!";
    info!("Testing with special characters: {}", special_chars);

    // Test with very large batch
    info!("Testing batch size limits");
    let large_batch = TestUtils::create_test_prompts(1000);
    info!("Created batch with {} prompts", large_batch.len());

    // Example 7: Mock configuration for different scenarios
    info!("\n=== Example 7: Advanced Mock Scenarios ===");

    let advanced_mock = MockOpenAIServer::new().await;

    // Configure error simulation
    advanced_mock.configure_errors(ErrorSimulationConfig {
        simulate_rate_limits: true,
        rate_limit_delay: 5,
        simulate_server_errors: true,
        error_probability: 0.2, // 20% error rate
        simulate_timeouts: true,
        timeout_delay: 10,
    });

    info!("Configured advanced error simulation");

    // Test retry logic (would be implemented in real client)
    info!("Testing retry mechanisms with simulated errors");

    info!("Testing patterns example completed successfully!");
    Ok(())
}

/// Additional test utilities
impl TestUtils {
    /// Validate JSON schema compliance
    fn validate_json_schema(json_str: &str, expected_fields: &[&str]) -> bool {
        if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(json_str) {
            expected_fields
                .iter()
                .all(|field| parsed.get(field).is_some())
        } else {
            false
        }
    }

    /// Generate test data with specific token characteristics
    fn generate_test_data_with_tokens(target_tokens: usize) -> String {
        // Rough approximation: 1 token ≈ 4 characters for English text
        let target_chars = target_tokens * 4;
        let base_text = "This is a test prompt that will be used for token counting validation. ";
        let repetitions = (target_chars / base_text.len()) + 1;

        base_text
            .repeat(repetitions)
            .chars()
            .take(target_chars)
            .collect()
    }

    /// Create a comprehensive test suite configuration
    fn create_test_suite_config() -> TestSuiteConfig {
        TestSuiteConfig {
            include_unit_tests: true,
            include_integration_tests: true,
            include_performance_tests: true,
            include_contract_tests: true,
            max_test_duration: Duration::from_secs(30 * 60),
            performance_test_concurrency: vec![1, 5, 10, 20],
            error_simulation_enabled: true,
            test_data_variants: vec![
                "short_text".to_string(),
                "long_text".to_string(),
                "special_characters".to_string(),
                "multilingual".to_string(),
            ],
        }
    }
}
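
// Illustrative contract check built on `TestUtils::validate_json_schema`;
// the field list mirrors the top-level keys asserted in Example 4 of `main`.
fn example_contract_check() {
    let sample = TestUtils::create_mock_response_with_usage("ok", 10, 5);
    assert!(TestUtils::validate_json_schema(
        &sample,
        &["id", "object", "created", "model", "choices", "usage"],
    ));
}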

/// Configuration for comprehensive test suites
#[derive(Debug)]
struct TestSuiteConfig {
    include_unit_tests: bool,
    include_integration_tests: bool,
    include_performance_tests: bool,
    include_contract_tests: bool,
    max_test_duration: Duration,
    performance_test_concurrency: Vec<usize>,
    error_simulation_enabled: bool,
    test_data_variants: Vec<String>,
}