use crate::{
AmbisonicsProcessor, BinauralRenderer, Error, Position3D, Result, SpeakerConfiguration,
};
use scirs2_core::ndarray::{Array1, Array2};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
/// Tunable parameters for a performance benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceConfig {
    /// Number of timed iterations each individual test executes.
    pub iterations: usize,
    /// Intended wall-clock budget for a run.
    /// NOTE(review): not read anywhere in this file — confirm callers use it.
    pub test_duration: Duration,
    /// Number of simultaneous audio sources for multi-source tests.
    pub source_count: usize,
    /// Audio sample rate in Hz.
    /// NOTE(review): only asserted in tests here — confirm it drives real processing elsewhere.
    pub sample_rate: u32,
    /// Per-iteration audio buffer length in samples.
    pub buffer_size: usize,
    /// Whether memory usage should be tracked.
    /// NOTE(review): not consulted by ResourceMonitor in this file — monitoring always runs.
    pub track_memory: bool,
    /// Whether CPU usage should be tracked.
    /// NOTE(review): not consulted by ResourceMonitor in this file — monitoring always runs.
    pub track_cpu: bool,
    /// Latency targets as (VR, gaming, general) — see `PerformanceMetrics::meets_targets`.
    pub latency_thresholds: (Duration, Duration, Duration),
}
impl Default for PerformanceConfig {
    /// Defaults sized for a thorough but quick benchmark: 1000 iterations,
    /// 8 sources, CD-quality sample rate, and 20/30/50 ms latency tiers.
    fn default() -> Self {
        let vr_target = Duration::from_millis(20);
        let gaming_target = Duration::from_millis(30);
        let general_target = Duration::from_millis(50);
        Self {
            iterations: 1000,
            test_duration: Duration::from_secs(60),
            source_count: 8,
            sample_rate: 44100,
            buffer_size: 512,
            track_memory: true,
            track_cpu: true,
            latency_thresholds: (vr_target, gaming_target, general_target),
        }
    }
}
/// Aggregated measurements produced by one benchmark test.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Human-readable test name, e.g. "Binaural Rendering".
    pub test_name: String,
    /// Mean per-iteration latency.
    pub avg_latency: Duration,
    /// Fastest observed iteration.
    pub min_latency: Duration,
    /// Slowest observed iteration.
    pub max_latency: Duration,
    /// 95th-percentile latency (used for target checks).
    pub p95_latency: Duration,
    /// 99th-percentile latency.
    pub p99_latency: Duration,
    /// Mean CPU usage in percent over the test window.
    pub avg_cpu_usage: f32,
    /// Highest sampled CPU usage in percent.
    pub peak_cpu_usage: f32,
    /// Mean memory usage in bytes.
    pub avg_memory_usage: usize,
    /// Highest sampled memory usage in bytes.
    pub peak_memory_usage: usize,
    /// Samples processed per second.
    pub throughput: f64,
    /// Number of iterations executed.
    pub iterations: usize,
    /// Fraction (0.0..=1.0) of iterations meeting the test's latency bound.
    pub success_rate: f32,
    /// Test-specific extras keyed by name (e.g. "vr_success_rate").
    pub custom_metrics: HashMap<String, f64>,
}
impl PerformanceMetrics {
    /// Create an empty metrics record for the named test.
    ///
    /// `min_latency` starts at `Duration::MAX` and `max_latency` at zero so
    /// the first recorded latency replaces both.
    pub fn new(test_name: String) -> Self {
        Self {
            test_name,
            min_latency: Duration::MAX,
            max_latency: Duration::ZERO,
            avg_latency: Duration::ZERO,
            p95_latency: Duration::ZERO,
            p99_latency: Duration::ZERO,
            avg_cpu_usage: 0.0,
            peak_cpu_usage: 0.0,
            avg_memory_usage: 0,
            peak_memory_usage: 0,
            throughput: 0.0,
            iterations: 0,
            success_rate: 0.0,
            custom_metrics: HashMap::new(),
        }
    }

    /// Evaluate these metrics against the configured latency tiers plus
    /// fixed CPU (<25%) and success-rate (>=95%) thresholds.
    ///
    /// Only VR-latency, CPU, and success-rate failures produce issue
    /// strings; the gaming/general flags are reported without messages.
    pub fn meets_targets(&self, config: &PerformanceConfig) -> PerformanceTargetResult {
        let (vr_limit, gaming_limit, general_limit) = config.latency_thresholds;
        let vr_latency_met = self.p95_latency <= vr_limit;
        let gaming_latency_met = self.p95_latency <= gaming_limit;
        let general_latency_met = self.p95_latency <= general_limit;
        let cpu_usage_acceptable = self.avg_cpu_usage < 25.0;
        let success_rate_acceptable = self.success_rate >= 0.95;

        let mut issues = Vec::new();
        if !vr_latency_met {
            issues.push(format!(
                "VR latency target not met: {}ms > {}ms",
                self.p95_latency.as_millis(),
                vr_limit.as_millis()
            ));
        }
        if !cpu_usage_acceptable {
            issues.push(format!(
                "CPU usage too high: {:.1}% > 25.0%",
                self.avg_cpu_usage
            ));
        }
        if !success_rate_acceptable {
            issues.push(format!(
                "Success rate too low: {:.1}% < 95.0%",
                self.success_rate * 100.0
            ));
        }

        PerformanceTargetResult {
            vr_latency_met,
            gaming_latency_met,
            general_latency_met,
            cpu_usage_acceptable,
            success_rate_acceptable,
            issues,
        }
    }
}
/// Outcome of checking one test's metrics against the performance targets.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceTargetResult {
    /// p95 latency within the VR threshold (first tuple element of the config).
    pub vr_latency_met: bool,
    /// p95 latency within the gaming threshold (second tuple element).
    pub gaming_latency_met: bool,
    /// p95 latency within the general threshold (third tuple element).
    pub general_latency_met: bool,
    /// Average CPU usage below the fixed 25% limit.
    pub cpu_usage_acceptable: bool,
    /// Success rate at or above the fixed 95% limit.
    pub success_rate_acceptable: bool,
    /// Human-readable descriptions of the failed checks (VR/CPU/success only).
    pub issues: Vec<String>,
}
/// Samples CPU and memory usage on a background thread until stopped.
pub struct ResourceMonitor {
    /// When monitoring began; used to compute the observation duration.
    start_time: Instant,
    /// Samples collected so far, shared with the sampler thread.
    samples: Arc<Mutex<Vec<ResourceSample>>>,
    /// Set to true by `stop()` to ask the sampler thread to exit.
    stop_flag: Arc<Mutex<bool>>,
}
/// One point-in-time resource reading taken by the monitor thread.
#[derive(Debug, Clone)]
struct ResourceSample {
    /// When the sample was taken.
    timestamp: Instant,
    /// CPU usage in percent (simulated in this build).
    cpu_usage: f32,
    /// Memory usage in bytes (simulated in this build).
    memory_usage: usize,
}
impl ResourceMonitor {
pub fn start() -> Self {
let samples = Arc::new(Mutex::new(Vec::new()));
let stop_flag = Arc::new(Mutex::new(false));
let samples_clone = samples.clone();
let stop_clone = stop_flag.clone();
thread::spawn(move || {
while !*stop_clone
.lock()
.expect("Failed to acquire lock on stop flag in monitor thread")
{
let sample = ResourceSample {
timestamp: Instant::now(),
cpu_usage: Self::get_cpu_usage(),
memory_usage: Self::get_memory_usage(),
};
samples_clone
.lock()
.expect("Failed to acquire lock on samples in monitor thread")
.push(sample);
thread::sleep(Duration::from_millis(100)); }
});
Self {
start_time: Instant::now(),
samples,
stop_flag,
}
}
pub fn stop(self) -> ResourceStatistics {
*self
.stop_flag
.lock()
.expect("Failed to acquire lock on stop flag") = true;
thread::sleep(Duration::from_millis(200));
let samples = self
.samples
.lock()
.expect("Failed to acquire lock on samples")
.clone();
ResourceStatistics::from_samples(samples, self.start_time)
}
fn get_cpu_usage() -> f32 {
fastrand::f32() * 10.0 }
fn get_memory_usage() -> usize {
100_000_000 + (fastrand::usize(..50_000_000)) }
}
/// Summary of the samples collected by a `ResourceMonitor` run.
#[derive(Debug, Clone)]
pub struct ResourceStatistics {
    /// Mean CPU usage in percent across all samples.
    pub avg_cpu_usage: f32,
    /// Highest sampled CPU usage in percent.
    pub peak_cpu_usage: f32,
    /// Mean memory usage in bytes.
    pub avg_memory_usage: usize,
    /// Highest sampled memory usage in bytes.
    pub peak_memory_usage: usize,
    /// Time from monitor start to the last sample.
    pub duration: Duration,
    /// Number of samples collected.
    pub sample_count: usize,
}
impl ResourceStatistics {
    /// Aggregate raw samples into summary statistics.
    ///
    /// An empty sample set yields an all-zero result; otherwise averages,
    /// peaks, and the span from `start_time` to the last sample are computed.
    fn from_samples(samples: Vec<ResourceSample>, start_time: Instant) -> Self {
        let count = samples.len();
        if count == 0 {
            return Self {
                avg_cpu_usage: 0.0,
                peak_cpu_usage: 0.0,
                avg_memory_usage: 0,
                peak_memory_usage: 0,
                duration: Duration::ZERO,
                sample_count: 0,
            };
        }

        // Single pass over the samples accumulating sums and peaks.
        let mut cpu_sum = 0.0_f32;
        let mut peak_cpu = 0.0_f32;
        let mut mem_sum = 0_usize;
        let mut peak_mem = 0_usize;
        for s in &samples {
            cpu_sum += s.cpu_usage;
            peak_cpu = peak_cpu.max(s.cpu_usage);
            mem_sum += s.memory_usage;
            peak_mem = peak_mem.max(s.memory_usage);
        }

        let duration = samples
            .last()
            .expect("Samples should not be empty at this point")
            .timestamp
            - start_time;

        Self {
            avg_cpu_usage: cpu_sum / count as f32,
            peak_cpu_usage: peak_cpu,
            avg_memory_usage: mem_sum / count,
            peak_memory_usage: peak_mem,
            duration,
            sample_count: count,
        }
    }
}
/// Runs the full set of spatial-audio performance benchmarks.
pub struct PerformanceTestSuite {
    /// Parameters shared by every test in the suite.
    config: PerformanceConfig,
    /// Metrics accumulated by `run_all_tests`, in execution order.
    results: Vec<PerformanceMetrics>,
}
impl PerformanceTestSuite {
pub fn new(config: PerformanceConfig) -> Self {
Self {
config,
results: Vec::new(),
}
}
pub fn run_all_tests(&mut self) -> Result<Vec<PerformanceMetrics>> {
tracing::info!("Starting comprehensive performance test suite");
self.test_binaural_rendering()?;
self.test_ambisonics_processing()?;
self.test_multi_source_processing()?;
self.test_real_time_latency()?;
self.test_memory_efficiency()?;
self.test_throughput_scaling()?;
tracing::info!(
"Performance test suite completed: {} tests",
self.results.len()
);
Ok(self.results.clone())
}
fn test_binaural_rendering(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Binaural Rendering".to_string());
let mut latencies = Vec::new();
let mut successes = 0;
let monitor = ResourceMonitor::start();
let audio_samples = Array1::from_vec(vec![0.1; self.config.buffer_size]);
let position = Position3D::new(1.0, 0.5, 0.0);
for i in 0..self.config.iterations {
let start = Instant::now();
let _ = self.simulate_binaural_processing(&audio_samples, &position);
let latency = start.elapsed();
latencies.push(latency);
if latency <= self.config.latency_thresholds.2 {
successes += 1;
}
if i % 100 == 0 {
tracing::debug!("Binaural test progress: {}/{}", i, self.config.iterations);
}
}
let resource_stats = monitor.stop();
latencies.sort();
metrics.avg_latency = Duration::from_nanos(
(latencies.iter().map(|d| d.as_nanos()).sum::<u128>() / latencies.len() as u128) as u64,
);
metrics.min_latency = latencies[0];
metrics.max_latency = latencies[latencies.len() - 1];
metrics.p95_latency = latencies[(latencies.len() as f32 * 0.95) as usize];
metrics.p99_latency = latencies[(latencies.len() as f32 * 0.99) as usize];
metrics.avg_cpu_usage = resource_stats.avg_cpu_usage;
metrics.peak_cpu_usage = resource_stats.peak_cpu_usage;
metrics.avg_memory_usage = resource_stats.avg_memory_usage;
metrics.peak_memory_usage = resource_stats.peak_memory_usage;
metrics.iterations = self.config.iterations;
metrics.success_rate = successes as f32 / self.config.iterations as f32;
metrics.throughput = (self.config.iterations * self.config.buffer_size) as f64
/ resource_stats.duration.as_secs_f64();
self.results.push(metrics);
Ok(())
}
fn test_ambisonics_processing(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Ambisonics Processing".to_string());
let mut latencies = Vec::new();
let mut successes = 0;
let monitor = ResourceMonitor::start();
let audio_samples = Array1::from_vec(vec![0.1; self.config.buffer_size]);
let position = Position3D::new(1.0, 0.5, 0.0);
for i in 0..self.config.iterations {
let start = Instant::now();
let _ = self.simulate_ambisonics_processing(&audio_samples, &position);
let latency = start.elapsed();
latencies.push(latency);
if latency <= self.config.latency_thresholds.2 {
successes += 1;
}
if i % 100 == 0 {
tracing::debug!("Ambisonics test progress: {}/{}", i, self.config.iterations);
}
}
let resource_stats = monitor.stop();
latencies.sort();
metrics.avg_latency = Duration::from_nanos(
(latencies.iter().map(|d| d.as_nanos()).sum::<u128>() / latencies.len() as u128) as u64,
);
metrics.min_latency = latencies[0];
metrics.max_latency = latencies[latencies.len() - 1];
metrics.p95_latency = latencies[(latencies.len() as f32 * 0.95) as usize];
metrics.p99_latency = latencies[(latencies.len() as f32 * 0.99) as usize];
metrics.avg_cpu_usage = resource_stats.avg_cpu_usage;
metrics.peak_cpu_usage = resource_stats.peak_cpu_usage;
metrics.avg_memory_usage = resource_stats.avg_memory_usage;
metrics.peak_memory_usage = resource_stats.peak_memory_usage;
metrics.iterations = self.config.iterations;
metrics.success_rate = successes as f32 / self.config.iterations as f32;
metrics.throughput = (self.config.iterations * self.config.buffer_size) as f64
/ resource_stats.duration.as_secs_f64();
self.results.push(metrics);
Ok(())
}
fn test_multi_source_processing(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Multi-Source Processing".to_string());
let mut latencies = Vec::new();
let mut successes = 0;
let monitor = ResourceMonitor::start();
let audio_data = Array2::from_shape_vec(
(self.config.source_count, self.config.buffer_size),
vec![0.1; self.config.source_count * self.config.buffer_size],
)
.map_err(|e| Error::LegacyProcessing(format!("Failed to create test audio data: {e}")))?;
let positions: Vec<Position3D> = (0..self.config.source_count)
.map(|i| {
let angle =
(i as f32 / self.config.source_count as f32) * 2.0 * std::f32::consts::PI;
Position3D::new(angle.cos(), angle.sin(), 0.0)
})
.collect();
for i in 0..self.config.iterations {
let start = Instant::now();
let _ = self.simulate_multi_source_processing(&audio_data, &positions);
let latency = start.elapsed();
latencies.push(latency);
if latency <= Duration::from_millis(100) {
successes += 1;
}
if i % 100 == 0 {
tracing::debug!(
"Multi-source test progress: {}/{}",
i,
self.config.iterations
);
}
}
let resource_stats = monitor.stop();
latencies.sort();
metrics.avg_latency = Duration::from_nanos(
(latencies.iter().map(|d| d.as_nanos()).sum::<u128>() / latencies.len() as u128) as u64,
);
metrics.min_latency = latencies[0];
metrics.max_latency = latencies[latencies.len() - 1];
metrics.p95_latency = latencies[(latencies.len() as f32 * 0.95) as usize];
metrics.p99_latency = latencies[(latencies.len() as f32 * 0.99) as usize];
metrics.avg_cpu_usage = resource_stats.avg_cpu_usage;
metrics.peak_cpu_usage = resource_stats.peak_cpu_usage;
metrics.avg_memory_usage = resource_stats.avg_memory_usage;
metrics.peak_memory_usage = resource_stats.peak_memory_usage;
metrics.iterations = self.config.iterations;
metrics.success_rate = successes as f32 / self.config.iterations as f32;
metrics.throughput =
(self.config.iterations * self.config.source_count * self.config.buffer_size) as f64
/ resource_stats.duration.as_secs_f64();
metrics.custom_metrics.insert(
"sources_per_second".to_string(),
(self.config.iterations * self.config.source_count) as f64
/ resource_stats.duration.as_secs_f64(),
);
self.results.push(metrics);
Ok(())
}
fn test_real_time_latency(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Real-Time Latency".to_string());
let mut latencies = Vec::new();
let mut vr_successes = 0;
let mut gaming_successes = 0;
let mut general_successes = 0;
let audio_samples = Array1::from_vec(vec![0.1; self.config.buffer_size]);
let position = Position3D::new(1.0, 0.0, 0.0);
for i in 0..self.config.iterations {
let start = Instant::now();
let _ = self.simulate_full_pipeline(&audio_samples, &position);
let latency = start.elapsed();
latencies.push(latency);
if latency <= self.config.latency_thresholds.0 {
vr_successes += 1;
}
if latency <= self.config.latency_thresholds.1 {
gaming_successes += 1;
}
if latency <= self.config.latency_thresholds.2 {
general_successes += 1;
}
if i % 100 == 0 {
tracing::debug!("Latency test progress: {}/{}", i, self.config.iterations);
}
}
latencies.sort();
metrics.avg_latency = Duration::from_nanos(
(latencies.iter().map(|d| d.as_nanos()).sum::<u128>() / latencies.len() as u128) as u64,
);
metrics.min_latency = latencies[0];
metrics.max_latency = latencies[latencies.len() - 1];
metrics.p95_latency = latencies[(latencies.len() as f32 * 0.95) as usize];
metrics.p99_latency = latencies[(latencies.len() as f32 * 0.99) as usize];
metrics.iterations = self.config.iterations;
metrics.success_rate = general_successes as f32 / self.config.iterations as f32;
metrics.custom_metrics.insert(
"vr_success_rate".to_string(),
vr_successes as f64 / self.config.iterations as f64,
);
metrics.custom_metrics.insert(
"gaming_success_rate".to_string(),
gaming_successes as f64 / self.config.iterations as f64,
);
metrics.custom_metrics.insert(
"general_success_rate".to_string(),
general_successes as f64 / self.config.iterations as f64,
);
self.results.push(metrics);
Ok(())
}
fn test_memory_efficiency(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Memory Efficiency".to_string());
let monitor = ResourceMonitor::start();
let mut audio_buffers = Vec::new();
let mut processors: Vec<i32> = Vec::new();
for i in 0..(self.config.source_count * 10) {
let buffer = Array1::from_vec(vec![0.1; self.config.buffer_size]);
audio_buffers.push(buffer);
if i % 10 == 0 {
thread::sleep(Duration::from_millis(10));
}
}
thread::sleep(Duration::from_millis(1000));
drop(audio_buffers);
drop(processors);
let resource_stats = monitor.stop();
metrics.avg_memory_usage = resource_stats.avg_memory_usage;
metrics.peak_memory_usage = resource_stats.peak_memory_usage;
metrics.avg_cpu_usage = resource_stats.avg_cpu_usage;
metrics.peak_cpu_usage = resource_stats.peak_cpu_usage;
metrics.iterations = 1;
metrics.success_rate = 1.0;
metrics.custom_metrics.insert(
"memory_per_source_mb".to_string(),
(resource_stats.peak_memory_usage as f64 / self.config.source_count as f64)
/ 1_000_000.0,
);
self.results.push(metrics);
Ok(())
}
fn test_throughput_scaling(&mut self) -> Result<()> {
let mut metrics = PerformanceMetrics::new("Throughput Scaling".to_string());
let mut throughputs = Vec::new();
let audio_samples = Array1::from_vec(vec![0.1; self.config.buffer_size]);
for source_count in [1, 2, 4, 8, 16, 32] {
if source_count > self.config.source_count * 4 {
break;
}
let positions: Vec<Position3D> = (0..source_count)
.map(|i| {
let angle = (i as f32 / source_count as f32) * 2.0 * std::f32::consts::PI;
Position3D::new(angle.cos(), angle.sin(), 0.0)
})
.collect();
let start = Instant::now();
for _ in 0..100 {
for pos in &positions {
let _ = self.simulate_binaural_processing(&audio_samples, pos);
}
}
let duration = start.elapsed();
let throughput =
(100 * source_count * self.config.buffer_size) as f64 / duration.as_secs_f64();
throughputs.push(throughput);
metrics
.custom_metrics
.insert(format!("throughput_{source_count}_sources"), throughput);
}
metrics.throughput = throughputs.iter().copied().fold(0.0, f64::max);
metrics.iterations = 100;
metrics.success_rate = 1.0;
self.results.push(metrics);
Ok(())
}
fn simulate_binaural_processing(
&self,
_audio: &Array1<f32>,
_position: &Position3D,
) -> Array2<f32> {
thread::sleep(Duration::from_micros(50));
Array2::zeros((2, self.config.buffer_size))
}
fn simulate_ambisonics_processing(
&self,
_audio: &Array1<f32>,
_position: &Position3D,
) -> Array2<f32> {
thread::sleep(Duration::from_micros(75));
Array2::zeros((4, self.config.buffer_size))
}
fn simulate_multi_source_processing(
&self,
_audio: &Array2<f32>,
_positions: &[Position3D],
) -> Array2<f32> {
thread::sleep(Duration::from_micros(25 * self.config.source_count as u64));
Array2::zeros((2, self.config.buffer_size))
}
fn simulate_full_pipeline(&self, _audio: &Array1<f32>, _position: &Position3D) -> Array2<f32> {
thread::sleep(Duration::from_micros(100));
Array2::zeros((2, self.config.buffer_size))
}
pub fn generate_report(&self) -> PerformanceReport {
PerformanceReport::new(&self.results, &self.config)
}
}
/// Full benchmark report: raw results, target evaluations, and a summary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceReport {
    /// Configuration the suite ran with.
    pub config: PerformanceConfig,
    /// Per-test metrics in execution order.
    pub results: Vec<PerformanceMetrics>,
    /// Derived overview (counts, rating, bottleneck).
    pub summary: PerformanceSummary,
    /// One target evaluation per entry in `results`, same order.
    pub target_results: Vec<PerformanceTargetResult>,
    /// Suggested follow-up actions for any missed targets.
    pub recommendations: Vec<String>,
}
/// High-level rollup of a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummary {
    /// Total number of tests that produced metrics.
    pub total_tests: usize,
    /// Tests whose p95 latency met the VR threshold.
    pub vr_compatible_tests: usize,
    /// Tests whose p95 latency met the gaming threshold.
    pub gaming_compatible_tests: usize,
    /// Latency-based score on a 10-point scale.
    pub overall_rating: f32,
    /// Coarse label for the dominant limiting factor.
    pub bottleneck: String,
}
impl PerformanceReport {
    /// Build a report from per-test metrics: evaluate each result against
    /// the configured targets, derive recommendations, a latency-based
    /// rating, and a coarse bottleneck label.
    fn new(results: &[PerformanceMetrics], config: &PerformanceConfig) -> Self {
        let target_results: Vec<PerformanceTargetResult> =
            results.iter().map(|r| r.meets_targets(config)).collect();
        let vr_compatible = target_results.iter().filter(|r| r.vr_latency_met).count();
        let gaming_compatible = target_results
            .iter()
            .filter(|r| r.gaming_latency_met)
            .count();
        let mut recommendations = Vec::new();
        if vr_compatible < results.len() {
            recommendations
                .push("Consider optimizing for lower latency to meet VR requirements".to_string());
        }
        if target_results.iter().any(|r| !r.cpu_usage_acceptable) {
            recommendations.push(
                "CPU usage is high - consider GPU acceleration or algorithmic optimization"
                    .to_string(),
            );
        }
        // Rate on a 10-point scale by mean average latency. The empty case
        // is guarded explicitly: dividing by results.len() == 0 would yield
        // NaN, which silently fell through to the lowest non-empty rating.
        let overall_rating = if results.is_empty() {
            0.0
        } else {
            let avg_latency_ms: f32 = results
                .iter()
                .map(|r| r.avg_latency.as_millis() as f32)
                .sum::<f32>()
                / results.len() as f32;
            if avg_latency_ms < 20.0 {
                10.0
            } else if avg_latency_ms < 30.0 {
                8.0
            } else if avg_latency_ms < 50.0 {
                6.0
            } else {
                4.0
            }
        };
        // Coarse bottleneck heuristic: CPU above 50% wins, then peak memory
        // above 500 MB, otherwise blame the algorithm.
        let bottleneck = if results.iter().any(|r| r.avg_cpu_usage > 50.0) {
            "CPU Processing".to_string()
        } else if results.iter().any(|r| r.peak_memory_usage > 500_000_000) {
            "Memory Usage".to_string()
        } else {
            "Algorithm Efficiency".to_string()
        };
        Self {
            config: config.clone(),
            results: results.to_vec(),
            summary: PerformanceSummary {
                total_tests: results.len(),
                vr_compatible_tests: vr_compatible,
                gaming_compatible_tests: gaming_compatible,
                overall_rating,
                bottleneck,
            },
            target_results,
            recommendations,
        }
    }

    /// Print a human-readable summary of the report to stdout.
    pub fn print_summary(&self) {
        println!("\n=== Spatial Audio Performance Report ===");
        println!("Total tests: {}", self.summary.total_tests);
        println!(
            "VR-compatible: {}/{}",
            self.summary.vr_compatible_tests, self.summary.total_tests
        );
        println!(
            "Gaming-compatible: {}/{}",
            self.summary.gaming_compatible_tests, self.summary.total_tests
        );
        println!("Overall rating: {:.1}/10", self.summary.overall_rating);
        println!("Primary bottleneck: {}", self.summary.bottleneck);
        println!("\n--- Test Results ---");
        for result in &self.results {
            println!(
                "{}: avg={:.1}ms, p95={:.1}ms, cpu={:.1}%",
                result.test_name,
                result.avg_latency.as_millis(),
                result.p95_latency.as_millis(),
                result.avg_cpu_usage
            );
        }
        if !self.recommendations.is_empty() {
            println!("\n--- Recommendations ---");
            for rec in &self.recommendations {
                println!("• {rec}");
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Default config carries the documented benchmark defaults.
    #[test]
    fn test_performance_config_default() {
        let config = PerformanceConfig::default();
        assert_eq!(config.iterations, 1000);
        assert_eq!(config.source_count, 8);
        assert_eq!(config.sample_rate, 44100);
        assert_eq!(config.buffer_size, 512);
    }

    // A fresh metrics record is zeroed apart from its name.
    #[test]
    fn test_performance_metrics_creation() {
        let metrics = PerformanceMetrics::new("Test".to_string());
        assert_eq!(metrics.test_name, "Test");
        assert_eq!(metrics.iterations, 0);
        assert_eq!(metrics.success_rate, 0.0);
    }

    // The monitor samples at ~100ms intervals, so a 200ms window should
    // yield at least one sample and a measurable duration.
    #[test]
    fn test_resource_monitor() {
        let monitor = ResourceMonitor::start();
        thread::sleep(Duration::from_millis(200));
        let stats = monitor.stop();
        assert!(stats.sample_count > 0);
        assert!(stats.duration > Duration::from_millis(100));
    }

    // Metrics well inside every threshold pass all three checks.
    #[test]
    fn test_performance_targets() {
        let config = PerformanceConfig::default();
        let mut metrics = PerformanceMetrics::new("Test".to_string());
        metrics.p95_latency = Duration::from_millis(15);
        metrics.avg_cpu_usage = 20.0;
        metrics.success_rate = 0.98;
        let result = metrics.meets_targets(&config);
        assert!(result.vr_latency_met);
        assert!(result.cpu_usage_acceptable);
        assert!(result.success_rate_acceptable);
    }

    // The suite stores its config and starts with no results.
    #[test]
    fn test_performance_test_suite_creation() {
        let config = PerformanceConfig {
            iterations: 10,
            ..Default::default()
        };
        let suite = PerformanceTestSuite::new(config);
        assert_eq!(suite.config.iterations, 10);
        assert_eq!(suite.results.len(), 0);
    }

    // The binaural simulator always returns a stereo buffer of buffer_size.
    #[test]
    fn test_simulation_functions() {
        let config = PerformanceConfig::default();
        let suite = PerformanceTestSuite::new(config);
        let audio = Array1::zeros(512);
        let position = Position3D::new(1.0, 0.0, 0.0);
        let result = suite.simulate_binaural_processing(&audio, &position);
        assert_eq!(result.shape(), [2, 512]);
    }

    // p95 of 35ms misses both the 20ms VR and 30ms gaming thresholds, so
    // neither compatibility counter should register the test.
    #[test]
    fn test_performance_report_generation() {
        let config = PerformanceConfig::default();
        let mut metrics = PerformanceMetrics::new("Test".to_string());
        metrics.avg_latency = Duration::from_millis(25);
        metrics.p95_latency = Duration::from_millis(35);
        metrics.avg_cpu_usage = 15.0;
        metrics.success_rate = 0.96;
        let report = PerformanceReport::new(&[metrics], &config);
        assert_eq!(report.summary.total_tests, 1);
        assert_eq!(report.summary.vr_compatible_tests, 0);
        assert_eq!(report.summary.gaming_compatible_tests, 0);
    }

    // Sanity check of the samples-per-second arithmetic.
    #[test]
    fn test_throughput_calculation() {
        let mut metrics = PerformanceMetrics::new("Throughput Test".to_string());
        metrics.iterations = 1000;
        metrics.throughput = (1000.0 * 512.0) / 1.0;
        assert_eq!(metrics.throughput, 512_000.0);
    }
}