1#![allow(dead_code)]
3#![allow(clippy::too_many_lines)]
4#![allow(clippy::missing_const_for_fn)]
5#![allow(clippy::unreadable_literal)]
6#![allow(clippy::doc_markdown)]
7#![allow(clippy::needless_borrows_for_generic_args)]
8#![allow(clippy::format_push_string)]
9#![allow(clippy::uninlined_format_args)]
10#![allow(clippy::struct_field_names)]
11#![allow(clippy::cast_possible_truncation)]
12#![allow(clippy::cast_precision_loss)]
13#![allow(clippy::useless_vec)]
14#![allow(clippy::module_name_repetitions)]
15#![allow(clippy::too_many_arguments)]
16#![allow(clippy::or_fun_call)]
17#![allow(clippy::unused_self)]
18#![allow(clippy::needless_pass_by_value)]
19#![allow(clippy::if_not_else)]
20#![allow(clippy::option_if_let_else)]
21#![allow(clippy::significant_drop_tightening)]
22#![allow(clippy::struct_excessive_bools)]
23use openai_ergonomic::{Client, Config, Error, Result};
42use serde::{Deserialize, Serialize};
43use std::collections::HashMap;
44use std::sync::{Arc, Mutex};
45use std::time::{Duration, Instant};
46use tokio::time::sleep;
47use tracing::{error, info, warn};
48
/// In-process mock of the OpenAI HTTP API, backed by a `mockito` server.
struct MockOpenAIServer {
    /// Handle to the underlying mockito server instance.
    server: mockito::ServerGuard,
    /// Canned responses keyed by endpoint path.
    /// NOTE(review): never read or written anywhere in this file — appears to
    /// be scaffolding for a future implementation.
    responses: Arc<Mutex<HashMap<String, MockResponse>>>,
    /// Log of requests seen by the server.
    /// NOTE(review): nothing in this file pushes into it, so `get_request_log`
    /// and `verify_request` always observe an empty log.
    request_log: Arc<Mutex<Vec<MockRequest>>>,
    /// Error-injection settings, replaced via `configure_errors`.
    error_config: Arc<Mutex<ErrorSimulationConfig>>,
}
60
/// Knobs for injecting synthetic failures into the mock server.
///
/// NOTE(review): these fields are stored but not consulted by any mock in this
/// file yet; they document intended behavior for a fuller implementation.
#[derive(Debug, Clone)]
struct ErrorSimulationConfig {
    /// When true, respond with HTTP 429 rate-limit errors.
    simulate_rate_limits: bool,
    /// Rate-limit retry delay in seconds (presumably a Retry-After value — confirm).
    rate_limit_delay: u64,
    /// When true, occasionally respond with HTTP 5xx errors.
    simulate_server_errors: bool,
    /// Probability in [0, 1] that a request fails when error simulation is on.
    error_probability: f64,
    /// When true, delay responses long enough to trigger client timeouts.
    simulate_timeouts: bool,
    /// Timeout delay in seconds.
    timeout_delay: u64,
}
77
78impl Default for ErrorSimulationConfig {
79 fn default() -> Self {
80 Self {
81 simulate_rate_limits: false,
82 rate_limit_delay: 60,
83 simulate_server_errors: false,
84 error_probability: 0.1,
85 simulate_timeouts: false,
86 timeout_delay: 30,
87 }
88 }
89}
90
/// A canned HTTP response the mock server can serve.
#[derive(Debug, Clone)]
struct MockResponse {
    /// HTTP status code to return.
    status: u16,
    /// Response body (typically a JSON document).
    body: String,
    /// Extra response headers, name -> value.
    headers: HashMap<String, String>,
    /// Optional artificial delay before responding (e.g. to simulate latency).
    delay: Option<Duration>,
}
103
/// A record of one request observed by the mock server.
#[derive(Debug, Clone)]
struct MockRequest {
    /// HTTP method, e.g. "POST".
    method: String,
    /// Request path, e.g. "/v1/chat/completions".
    path: String,
    /// Request headers, name -> value.
    headers: HashMap<String, String>,
    /// Raw request body.
    body: String,
    /// When the request was received (monotonic clock).
    timestamp: Instant,
}
118
119impl MockOpenAIServer {
120 async fn new() -> Self {
122 let server = mockito::Server::new_async().await;
123
124 Self {
125 server,
126 responses: Arc::new(Mutex::new(HashMap::new())),
127 request_log: Arc::new(Mutex::new(Vec::new())),
128 error_config: Arc::new(Mutex::new(ErrorSimulationConfig::default())),
129 }
130 }
131
132 fn base_url(&self) -> String {
134 self.server.url()
135 }
136
137 fn client(&self) -> Result<Client> {
139 let config = Config::builder()
140 .api_key("test-api-key")
141 .api_base(&self.base_url())
142 .build();
143
144 Ok(Client::builder(config)?.build())
145 }
146
147 fn configure_errors(&self, config: ErrorSimulationConfig) {
149 *self.error_config.lock().unwrap() = config;
150 }
151
152 async fn mock_chat_completion(&mut self, expected_prompt: &str, response_text: &str) {
154 let mock_response = serde_json::json!({
155 "id": "chatcmpl-123",
156 "object": "chat.completion",
157 "created": 1677652288,
158 "model": "gpt-3.5-turbo",
159 "choices": [{
160 "index": 0,
161 "message": {
162 "role": "assistant",
163 "content": response_text
164 },
165 "finish_reason": "stop"
166 }],
167 "usage": {
168 "prompt_tokens": 50,
169 "completion_tokens": 20,
170 "total_tokens": 70
171 }
172 });
173
174 self.server
175 .mock("POST", "/v1/chat/completions")
176 .match_body(mockito::Matcher::JsonString(
177 serde_json::json!({
178 "model": "gpt-3.5-turbo",
179 "messages": [{"role": "user", "content": expected_prompt}]
180 })
181 .to_string(),
182 ))
183 .with_status(200)
184 .with_header("content-type", "application/json")
185 .with_body(mock_response.to_string())
186 .create_async()
187 .await;
188 }
189
190 async fn mock_streaming_chat(&mut self, response_chunks: Vec<&str>) {
192 let mut sse_data = String::new();
193
194 for (i, chunk) in response_chunks.iter().enumerate() {
195 let chunk_response = serde_json::json!({
196 "id": "chatcmpl-123",
197 "object": "chat.completion.chunk",
198 "created": 1677652288,
199 "model": "gpt-3.5-turbo",
200 "choices": [{
201 "index": 0,
202 "delta": {
203 "content": chunk
204 },
205 "finish_reason": if i == response_chunks.len() - 1 { "stop" } else { "null" }
206 }]
207 });
208
209 sse_data.push_str(&format!("data: {}\n\n", chunk_response));
210 }
211
212 sse_data.push_str("data: [DONE]\n\n");
213
214 self.server
215 .mock("POST", "/v1/chat/completions")
216 .match_header("accept", "text/event-stream")
217 .with_status(200)
218 .with_header("content-type", "text/event-stream")
219 .with_body(sse_data)
220 .create_async()
221 .await;
222 }
223
224 async fn mock_error_response(&mut self, endpoint: &str, error_type: ErrorType) {
226 let (status, body) = match error_type {
227 ErrorType::RateLimit => (
228 429,
229 serde_json::json!({
230 "error": {
231 "type": "rate_limit_exceeded",
232 "message": "Rate limit exceeded, please try again later"
233 }
234 })
235 .to_string(),
236 ),
237 ErrorType::ServerError => (
238 500,
239 serde_json::json!({
240 "error": {
241 "type": "server_error",
242 "message": "Internal server error"
243 }
244 })
245 .to_string(),
246 ),
247 ErrorType::InvalidRequest => (
248 400,
249 serde_json::json!({
250 "error": {
251 "type": "invalid_request_error",
252 "message": "Invalid request parameters"
253 }
254 })
255 .to_string(),
256 ),
257 ErrorType::Unauthorized => (
258 401,
259 serde_json::json!({
260 "error": {
261 "type": "invalid_request_error",
262 "message": "Incorrect API key provided"
263 }
264 })
265 .to_string(),
266 ),
267 };
268
269 self.server
270 .mock("POST", endpoint)
271 .with_status(status)
272 .with_header("content-type", "application/json")
273 .with_body(body)
274 .create_async()
275 .await;
276 }
277
278 fn get_request_log(&self) -> Vec<MockRequest> {
280 self.request_log.lock().unwrap().clone()
281 }
282
283 fn clear_request_log(&self) {
285 self.request_log.lock().unwrap().clear();
286 }
287
288 fn verify_request(&self, method: &str, path: &str) -> bool {
290 let log = self.request_log.lock().unwrap();
291 log.iter()
292 .any(|req| req.method == method && req.path == path)
293 }
294}
295
/// Error categories that `mock_error_response` can serve.
#[derive(Debug, Clone)]
enum ErrorType {
    /// HTTP 429 with an OpenAI-style `rate_limit_exceeded` body.
    RateLimit,
    /// HTTP 500 with a `server_error` body.
    ServerError,
    /// HTTP 400 with an `invalid_request_error` body.
    InvalidRequest,
    /// HTTP 401 with an `invalid_request_error` (bad API key) body.
    Unauthorized,
}
304
305struct TestUtils;
307
308impl TestUtils {
309 fn create_test_client() -> Result<Client> {
311 let config = Config::builder()
312 .api_key("test-api-key")
313 .api_base("http://localhost:1234") .max_retries(2)
315 .build();
316
317 Ok(Client::builder(config)?.build())
318 }
319
320 fn assert_response_content(response: &str, expected_content: &str) {
322 assert!(
323 response.contains(expected_content),
324 "Response '{}' does not contain expected content '{}'",
325 response,
326 expected_content
327 );
328 }
329
330 fn assert_token_usage(usage: &TokenUsage, min_tokens: i32, max_tokens: i32) {
332 assert!(
333 usage.total_tokens >= min_tokens && usage.total_tokens <= max_tokens,
334 "Token usage {} is outside expected range {}-{}",
335 usage.total_tokens,
336 min_tokens,
337 max_tokens
338 );
339 }
340
341 fn create_test_prompts(count: usize) -> Vec<String> {
343 (0..count)
344 .map(|i| format!("Test prompt number {}", i + 1))
345 .collect()
346 }
347
348 async fn time_async_operation<F, T, E>(operation: F) -> (std::result::Result<T, E>, Duration)
350 where
351 F: std::future::Future<Output = std::result::Result<T, E>>,
352 {
353 let start = Instant::now();
354 let result = operation.await;
355 let duration = start.elapsed();
356 (result, duration)
357 }
358
359 fn create_mock_response_with_usage(
361 content: &str,
362 prompt_tokens: i32,
363 completion_tokens: i32,
364 ) -> String {
365 serde_json::json!({
366 "id": "chatcmpl-test",
367 "object": "chat.completion",
368 "created": 1677652288,
369 "model": "gpt-3.5-turbo",
370 "choices": [{
371 "index": 0,
372 "message": {
373 "role": "assistant",
374 "content": content
375 },
376 "finish_reason": "stop"
377 }],
378 "usage": {
379 "prompt_tokens": prompt_tokens,
380 "completion_tokens": completion_tokens,
381 "total_tokens": prompt_tokens + completion_tokens
382 }
383 })
384 .to_string()
385 }
386}
387
/// Token accounting mirroring the OpenAI `usage` response object.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TokenUsage {
    /// Tokens consumed by the prompt.
    prompt_tokens: i32,
    /// Tokens generated in the completion.
    completion_tokens: i32,
    /// Sum of prompt and completion tokens.
    total_tokens: i32,
}
395
/// Runs a sequence of integration-style tests and collects their results.
struct IntegrationTestRunner {
    /// Client under test (currently unused by the simulated test bodies).
    client: Client,
    /// Accumulated results, one per test executed.
    test_results: Vec<IntegrationTestResult>,
}
401
/// Outcome of a single integration test run.
#[derive(Debug, Clone)]
struct IntegrationTestResult {
    /// Identifier of the test, e.g. "basic_chat_completion".
    test_name: String,
    /// Whether the test met its expectation (for error tests, "expected failure" counts as success).
    success: bool,
    /// Wall-clock duration of the test body.
    duration: Duration,
    /// Failure description, populated only when `success` is false.
    error_message: Option<String>,
    /// Response payload or diagnostic text, populated only on success.
    response_data: Option<String>,
}
411
412impl IntegrationTestRunner {
413 fn new(client: Client) -> Self {
415 Self {
416 client,
417 test_results: Vec::new(),
418 }
419 }
420
421 async fn test_basic_chat_completion(&mut self) -> Result<()> {
423 let test_name = "basic_chat_completion";
424 info!("Running integration test: {}", test_name);
425
426 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
427 Ok("Hello! How can I help you today?".to_string())
432 })
433 .await;
434
435 let test_result = match result {
436 Ok(response) => {
437 info!(" Basic chat completion test passed in {:?}", duration);
438 IntegrationTestResult {
439 test_name: test_name.to_string(),
440 success: true,
441 duration,
442 error_message: None,
443 response_data: Some(response),
444 }
445 }
446 Err(e) => {
447 error!(" Basic chat completion test failed: {}", e);
448 IntegrationTestResult {
449 test_name: test_name.to_string(),
450 success: false,
451 duration,
452 error_message: Some(e.to_string()),
453 response_data: None,
454 }
455 }
456 };
457
458 self.test_results.push(test_result);
459 Ok(())
460 }
461
462 async fn test_streaming_completion(&mut self) -> Result<()> {
464 let test_name = "streaming_completion";
465 info!("Running integration test: {}", test_name);
466
467 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
468 let chunks = vec!["Once", " upon", " a", " time..."];
478 Ok(chunks.join(""))
479 })
480 .await;
481
482 let test_result = match result {
483 Ok(response) => {
484 info!(" Streaming completion test passed in {:?}", duration);
485 IntegrationTestResult {
486 test_name: test_name.to_string(),
487 success: true,
488 duration,
489 error_message: None,
490 response_data: Some(response),
491 }
492 }
493 Err(e) => {
494 error!(" Streaming completion test failed: {}", e);
495 IntegrationTestResult {
496 test_name: test_name.to_string(),
497 success: false,
498 duration,
499 error_message: Some(e.to_string()),
500 response_data: None,
501 }
502 }
503 };
504
505 self.test_results.push(test_result);
506 Ok(())
507 }
508
509 async fn test_error_handling(&mut self) -> Result<()> {
511 let test_name = "error_handling";
512 info!("Running integration test: {}", test_name);
513
514 let (result, duration) = TestUtils::time_async_operation::<_, String, Error>(async {
515 let bad_config = Config::builder().api_key("invalid-key").build();
517
518 let _bad_client = Client::builder(bad_config)?.build();
519
520 Err(Error::InvalidRequest("Authentication failed".to_string()))
525 })
526 .await;
527
528 let test_result = match result {
529 Ok(_) => {
530 warn!("Error handling test unexpectedly succeeded");
531 IntegrationTestResult {
532 test_name: test_name.to_string(),
533 success: false,
534 duration,
535 error_message: Some(
536 "Expected authentication error but request succeeded".to_string(),
537 ),
538 response_data: None,
539 }
540 }
541 Err(e) => {
542 info!(
543 " Error handling test passed (correctly failed) in {:?}",
544 duration
545 );
546 IntegrationTestResult {
547 test_name: test_name.to_string(),
548 success: true,
549 duration,
550 error_message: None,
551 response_data: Some(format!("Expected error: {}", e)),
552 }
553 }
554 };
555
556 self.test_results.push(test_result);
557 Ok(())
558 }
559
560 fn generate_report(&self) -> TestReport {
562 let total_tests = self.test_results.len();
563 let passed_tests = self.test_results.iter().filter(|r| r.success).count();
564 let failed_tests = total_tests - passed_tests;
565
566 let total_duration: Duration = self.test_results.iter().map(|r| r.duration).sum();
567
568 let average_duration = if total_tests > 0 {
569 total_duration / total_tests as u32
570 } else {
571 Duration::ZERO
572 };
573
574 TestReport {
575 total_tests,
576 passed_tests,
577 failed_tests,
578 total_duration,
579 average_duration,
580 test_results: self.test_results.clone(),
581 }
582 }
583}
584
/// Aggregated summary of an integration test run.
#[derive(Debug)]
struct TestReport {
    /// Number of tests executed.
    total_tests: usize,
    /// Tests whose `success` flag was true.
    passed_tests: usize,
    /// `total_tests - passed_tests`.
    failed_tests: usize,
    /// Sum of all per-test durations.
    total_duration: Duration,
    /// `total_duration / total_tests` (zero when no tests ran).
    average_duration: Duration,
    /// Per-test detail rows.
    test_results: Vec<IntegrationTestResult>,
}
595
596impl TestReport {
597 fn print_report(&self) {
599 info!("=== Test Report ===");
600 info!("Total tests: {}", self.total_tests);
601 info!("Passed: {}", self.passed_tests);
602 info!("Failed: {}", self.failed_tests);
603 info!(
604 "Success rate: {:.1}%",
605 (self.passed_tests as f64 / self.total_tests as f64) * 100.0
606 );
607 info!("Total duration: {:?}", self.total_duration);
608 info!("Average duration: {:?}", self.average_duration);
609
610 if self.failed_tests > 0 {
611 error!("Failed tests:");
612 for result in &self.test_results {
613 if !result.success {
614 error!(
615 " - {}: {}",
616 result.test_name,
617 result
618 .error_message
619 .as_ref()
620 .unwrap_or(&"Unknown error".to_string())
621 );
622 }
623 }
624 }
625 }
626}
627
/// Drives simulated concurrent-load tests against a client.
struct PerformanceTestRunner {
    /// Client under test (cloned per worker; requests are currently simulated).
    client: Client,
}
632
633impl PerformanceTestRunner {
634 fn new(client: Client) -> Self {
635 Self { client }
636 }
637
638 async fn test_concurrent_requests(
640 &self,
641 concurrency: usize,
642 requests_per_worker: usize,
643 ) -> PerformanceResults {
644 info!(
645 "Running performance test with {} concurrent workers, {} requests each",
646 concurrency, requests_per_worker
647 );
648
649 let start_time = Instant::now();
650 let mut handles = Vec::new();
651
652 for worker_id in 0..concurrency {
653 let _client = self.client.clone(); let handle = tokio::spawn(async move {
655 let mut worker_results = Vec::new();
656
657 for request_id in 0..requests_per_worker {
658 let request_start = Instant::now();
659
660 let result: Result<String> =
663 Ok(format!("Response {} from worker {}", request_id, worker_id));
664
665 let request_duration = request_start.elapsed();
666
667 worker_results.push(RequestResult {
668 worker_id,
669 request_id,
670 duration: request_duration,
671 success: result.is_ok(),
672 error: result.err().map(|e| e.to_string()),
673 });
674
675 sleep(Duration::from_millis(100)).await;
677 }
678
679 worker_results
680 });
681
682 handles.push(handle);
683 }
684
685 let mut all_results = Vec::new();
686 for handle in handles {
687 let worker_results = handle.await.unwrap();
688 all_results.extend(worker_results);
689 }
690
691 let total_duration = start_time.elapsed();
692 self.analyze_performance_results(all_results, total_duration)
693 }
694
695 fn analyze_performance_results(
696 &self,
697 results: Vec<RequestResult>,
698 total_duration: Duration,
699 ) -> PerformanceResults {
700 let total_requests = results.len();
701 let successful_requests = results.iter().filter(|r| r.success).count();
702 let failed_requests = total_requests - successful_requests;
703
704 let request_durations: Vec<Duration> = results.iter().map(|r| r.duration).collect();
705
706 let min_duration = request_durations
707 .iter()
708 .min()
709 .copied()
710 .unwrap_or(Duration::ZERO);
711 let max_duration = request_durations
712 .iter()
713 .max()
714 .copied()
715 .unwrap_or(Duration::ZERO);
716 let avg_duration = if total_requests > 0 {
717 request_durations.iter().sum::<Duration>() / total_requests as u32
718 } else {
719 Duration::ZERO
720 };
721
722 let mut sorted_durations = request_durations;
724 sorted_durations.sort();
725
726 let p50 = if !sorted_durations.is_empty() {
727 sorted_durations[sorted_durations.len() / 2]
728 } else {
729 Duration::ZERO
730 };
731
732 let p95 = if !sorted_durations.is_empty() {
733 sorted_durations[(sorted_durations.len() * 95) / 100]
734 } else {
735 Duration::ZERO
736 };
737
738 let requests_per_second = if total_duration.as_secs() > 0 {
739 total_requests as f64 / total_duration.as_secs_f64()
740 } else {
741 0.0
742 };
743
744 PerformanceResults {
745 total_requests,
746 successful_requests,
747 failed_requests,
748 total_duration,
749 min_duration,
750 max_duration,
751 avg_duration,
752 p50_duration: p50,
753 p95_duration: p95,
754 requests_per_second,
755 error_rate: (failed_requests as f64 / total_requests as f64) * 100.0,
756 }
757 }
758}
759
/// Timing and outcome of one simulated request.
#[derive(Debug)]
struct RequestResult {
    /// Index of the worker that issued the request.
    worker_id: usize,
    /// Index of the request within its worker.
    request_id: usize,
    /// Wall-clock latency of the request.
    duration: Duration,
    /// Whether the request succeeded.
    success: bool,
    /// Error description when `success` is false.
    error: Option<String>,
}
769
/// Aggregated latency/throughput statistics for a performance run.
#[derive(Debug)]
struct PerformanceResults {
    /// Total requests issued across all workers.
    total_requests: usize,
    /// Requests that succeeded.
    successful_requests: usize,
    /// Requests that failed.
    failed_requests: usize,
    /// Wall-clock duration of the whole run.
    total_duration: Duration,
    /// Fastest single request.
    min_duration: Duration,
    /// Slowest single request.
    max_duration: Duration,
    /// Mean request latency.
    avg_duration: Duration,
    /// Median (50th percentile) latency.
    p50_duration: Duration,
    /// 95th percentile latency.
    p95_duration: Duration,
    /// Throughput over the whole run.
    requests_per_second: f64,
    /// Failed requests as a percentage of total.
    error_rate: f64,
}
785
impl PerformanceResults {
    /// Log every statistic at `info` level in a fixed, human-readable layout.
    fn print_results(&self) {
        info!("=== Performance Test Results ===");
        info!("Total requests: {}", self.total_requests);
        info!("Successful: {}", self.successful_requests);
        info!("Failed: {}", self.failed_requests);
        info!("Error rate: {:.2}%", self.error_rate);
        info!("Total duration: {:?}", self.total_duration);
        info!("Requests per second: {:.2}", self.requests_per_second);
        info!("Response times:");
        info!(" Min: {:?}", self.min_duration);
        info!(" Max: {:?}", self.max_duration);
        info!(" Average: {:?}", self.avg_duration);
        info!(" 50th percentile: {:?}", self.p50_duration);
        info!(" 95th percentile: {:?}", self.p95_duration);
    }
}
803
#[tokio::main]
async fn main() -> Result<()> {
    // Honor RUST_LOG if set; otherwise default to info-level logging.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();

    info!("Starting testing patterns example");

    info!("=== Example 1: Unit Testing with Mock Server ===");

    // Mock-server-backed unit testing: register canned endpoints, then point
    // a client at the server's local URL.
    let mut mock_server = MockOpenAIServer::new().await;

    mock_server
        .mock_chat_completion("Hello, world!", "Hi there! How can I help you?")
        .await;

    let _client = mock_server.client()?;

    info!("Testing basic chat completion with mock server");
    info!(" Mock server test would pass with real implementation");

    info!("Setting up streaming mock");
    mock_server
        .mock_streaming_chat(vec!["Hello", " there", "! How", " can I", " help?"])
        .await;

    info!("Testing error scenarios");
    mock_server
        .mock_error_response("/v1/chat/completions", ErrorType::RateLimit)
        .await;

    // NOTE(review): the request log is never populated in this file, so this
    // always prints 0.
    info!("Requests logged: {}", mock_server.get_request_log().len());

    info!("\n=== Example 2: Integration Testing ===");

    // Run the (simulated) integration suite and print its summary report.
    let integration_client = TestUtils::create_test_client()?;
    let mut integration_runner = IntegrationTestRunner::new(integration_client);

    integration_runner.test_basic_chat_completion().await?;
    integration_runner.test_streaming_completion().await?;
    integration_runner.test_error_handling().await?;

    let report = integration_runner.generate_report();
    report.print_report();

    info!("\n=== Example 3: Performance Testing ===");

    // Sweep a few concurrency levels, 5 simulated requests per worker each.
    let perf_client = TestUtils::create_test_client()?;
    let perf_runner = PerformanceTestRunner::new(perf_client);

    for concurrency in [1, 5, 10] {
        info!("Testing with {} concurrent workers", concurrency);
        let results = perf_runner.test_concurrent_requests(concurrency, 5).await;
        results.print_results();
    }

    info!("\n=== Example 4: Contract Testing ===");

    // Contract testing: validate a canned response against the expected
    // schema shape (id / choices / usage.total_tokens).
    let sample_response = TestUtils::create_mock_response_with_usage(
        "Test response content",
        25, 15, );

    if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(&sample_response) {
        assert!(parsed["id"].is_string(), "Response must have id field");
        assert!(
            parsed["choices"].is_array(),
            "Response must have choices array"
        );
        assert!(
            parsed["usage"]["total_tokens"].is_number(),
            "Response must have token usage"
        );
        info!(" Contract validation passed for response schema");
    }

    info!("\n=== Example 5: Test Data Generation ===");

    let test_prompts = TestUtils::create_test_prompts(5);
    info!("Generated {} test prompts", test_prompts.len());

    for (i, prompt) in test_prompts.iter().enumerate() {
        info!(" Prompt {}: {}", i + 1, prompt);
    }

    info!("\n=== Example 6: Edge Case Testing ===");

    // Edge cases: oversized input, empty input, non-ASCII text, large batches.
    let long_input = "word ".repeat(1000); info!("Testing with long input ({} chars)", long_input.len());

    info!("Testing with empty input");

    let special_chars = "Testing with émojis and spëcial çharacters!";
    info!("Testing with special characters: {}", special_chars);

    info!("Testing batch size limits");
    let large_batch = TestUtils::create_test_prompts(1000);
    info!("Created batch with {} prompts", large_batch.len());

    info!("\n=== Example 7: Advanced Mock Scenarios ===");

    // Configure (but do not yet exercise) failure injection on a fresh server.
    let advanced_mock = MockOpenAIServer::new().await;

    advanced_mock.configure_errors(ErrorSimulationConfig {
        simulate_rate_limits: true,
        rate_limit_delay: 5,
        simulate_server_errors: true,
        error_probability: 0.2, simulate_timeouts: true,
        timeout_delay: 10,
    });

    info!("Configured advanced error simulation");

    info!("Testing retry mechanisms with simulated errors");

    info!("Testing patterns example completed successfully!");
    Ok(())
}
957
958impl TestUtils {
960 fn validate_json_schema(json_str: &str, expected_fields: &[&str]) -> bool {
962 if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(json_str) {
963 expected_fields
964 .iter()
965 .all(|field| parsed.get(field).is_some())
966 } else {
967 false
968 }
969 }
970
971 fn generate_test_data_with_tokens(target_tokens: usize) -> String {
973 let target_chars = target_tokens * 4;
975 let base_text = "This is a test prompt that will be used for token counting validation. ";
976 let repetitions = (target_chars / base_text.len()) + 1;
977
978 base_text
979 .repeat(repetitions)
980 .chars()
981 .take(target_chars)
982 .collect()
983 }
984
985 fn create_test_suite_config() -> TestSuiteConfig {
987 TestSuiteConfig {
988 include_unit_tests: true,
989 include_integration_tests: true,
990 include_performance_tests: true,
991 include_contract_tests: true,
992 max_test_duration: Duration::from_secs(30 * 60),
993 performance_test_concurrency: vec![1, 5, 10, 20],
994 error_simulation_enabled: true,
995 test_data_variants: vec![
996 "short_text".to_string(),
997 "long_text".to_string(),
998 "special_characters".to_string(),
999 "multilingual".to_string(),
1000 ],
1001 }
1002 }
1003}
1004
/// Toggles and limits describing which test categories a suite should run.
#[derive(Debug)]
struct TestSuiteConfig {
    /// Run mock-server unit tests.
    include_unit_tests: bool,
    /// Run integration tests.
    include_integration_tests: bool,
    /// Run concurrent performance tests.
    include_performance_tests: bool,
    /// Run response-schema contract tests.
    include_contract_tests: bool,
    /// Overall time budget for the suite.
    max_test_duration: Duration,
    /// Worker counts to sweep during performance testing.
    performance_test_concurrency: Vec<usize>,
    /// Whether to enable injected-failure scenarios.
    error_simulation_enabled: bool,
    /// Named input variants to generate test data for.
    test_data_variants: Vec<String>,
}