#![ allow( clippy::unreadable_literal ) ]
#![ allow( clippy::uninlined_format_args ) ]
#![ allow( clippy::std_instead_of_core ) ]
#![ allow( clippy::useless_vec ) ]
#![ allow( clippy::unused_async ) ]
#![ allow( clippy::must_use_candidate ) ]
#![ allow( clippy::missing_panics_doc ) ]
#![ allow( clippy::missing_errors_doc ) ]
#![ allow( clippy::doc_markdown ) ]
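//! Tests for the `performance_monitoring` utilities: request overhead,
//! measurement consistency, concurrency, memory usage, regression
//! detection, and throughput under load.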
#[ cfg( test ) ]
mod performance_monitoring_tests
{
use api_openai::
{
Client,
environment::OpenaiEnvironmentImpl,
secret::Secret,
performance_monitoring::*,
};
use std::time::Duration;
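/// Builds a client with a placeholder key. The tests below only exercise the
/// performance-monitoring API, so the client is never used to send requests.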
fn create_test_client() -> Result< Client< OpenaiEnvironmentImpl >, Box< dyn std::error::Error > >
{
let secret = Secret::new_unchecked("sk-test_key_12345".to_string());
let env = OpenaiEnvironmentImpl::build
(
secret,
None,
None,
api_openai::environment::OpenAIRecommended::base_url().to_string(),
api_openai::environment::OpenAIRecommended::realtime_base_url().to_string(),
)
.expect("Failed to create environment");
Ok(Client::build(env)?)
}
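/// A single overhead sample should succeed and stay under the 10ms budget.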
#[ tokio::test ]
async fn test_request_overhead_measurement_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
let result = measure_request_overhead().await;
assert!(result.is_ok(), "Request overhead measurement should succeed with implementation");
let overhead = result.unwrap();
assert!(overhead.as_millis() < 10, "Request overhead should be less than 10ms");
}
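/// Ten consecutive overhead samples should each stay under 10ms once monitoring is configured.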
#[ tokio::test ]
async fn test_multiple_request_overhead_consistency_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
configure_performance_monitoring( PerformanceConfig
{
max_request_overhead_ms : 50,
enable_memory_monitoring : true,
enable_regression_detection : false,
baseline_performance : None,
regression_threshold_percent : 20.0,
overhead_consistency_threshold : 5.0,
});
let result = measure_overhead_consistency(10).await;
assert!(result.is_ok(), "Overhead consistency measurement should succeed with implementation");
let measurements = result.unwrap();
assert_eq!(measurements.len(), 10);
for measurement in measurements
{
assert!(measurement.as_millis() < 10, "Each overhead measurement should be less than 10ms");
}
}
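/// Twenty concurrent measurements should each complete in under 100ms.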
#[ tokio::test ]
async fn test_concurrent_request_performance_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
let result = measure_concurrent_performance(20).await;
assert!(result.is_ok(), "Concurrent performance measurement should succeed with implementation");
let results = result.unwrap();
assert_eq!(results.len(), 20);
for duration in results
{
assert!(duration.as_millis() < 100, "Each concurrent request should complete in under 100ms");
}
}
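/// Memory monitoring should report positive usage, with peak at least the initial reading.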
#[ tokio::test ]
async fn test_memory_usage_monitoring_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
let result = monitor_memory_usage().await;
assert!(result.is_ok(), "Memory usage monitoring should succeed with implementation");
let report = result.unwrap();
assert!(report.initial_usage > 0, "Initial memory usage should be positive");
assert!(report.peak_usage >= report.initial_usage, "Peak usage should be at least initial usage");
assert!(report.final_usage > 0, "Final memory usage should be positive");
}
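/// Regression detection should compare the current measurement against a configured 5ms baseline.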
#[ tokio::test ]
async fn test_performance_regression_detection_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
let config = PerformanceConfig
{
enable_regression_detection : true,
baseline_performance : Some(Duration::from_millis(5)),
regression_threshold_percent : 50.0,
..Default::default()
};
configure_performance_monitoring(config);
let result = detect_performance_regression().await;
assert!(result.is_ok(), "Performance regression detection should succeed with implementation");
let report = result.unwrap();
assert_eq!(report.baseline_performance, Duration::from_millis(5));
assert!(report.current_performance.as_millis() > 0, "Current performance should be measured");
}
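/// Throughput under load should report positive request rate, success count, and latency.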
#[ tokio::test ]
async fn test_throughput_measurement_under_load_succeeds()
{
let _client = create_test_client().expect("Failed to create client");
let result = measure_throughput_under_load(10, Duration::from_secs(1)).await;
assert!(result.is_ok(), "Throughput measurement should succeed with implementation");
let metrics = result.unwrap();
assert!(metrics.requests_per_second > 0.0, "Should measure positive requests per second");
assert!(metrics.successful_requests > 0, "Should have some successful requests");
assert!(metrics.average_latency.as_millis() > 0, "Should measure positive average latency");
}
}