Skip to main content

voirs_spatial/
validation.rs

1//! Perceptual Validation Testing Suite
2//!
3//! This module provides comprehensive testing for spatial audio perception,
4//! including localization accuracy, distance perception, immersion quality,
5//! and HRTF validation across different populations and use cases.
6
7use crate::hrtf::HrtfProcessor;
8use crate::position::{Listener, SoundSource};
9use crate::types::{AudioChannel, Position3D};
10use crate::{Error, Result};
11use serde::{Deserialize, Serialize};
12use std::collections::HashMap;
13use std::time::{Duration, Instant};
14
/// Perceptual validation test suite
///
/// Central driver for perceptual experiments: holds the registered test
/// configurations and subjects, runs every config against every subject,
/// and accumulates the results for report generation.
pub struct PerceptualTestSuite {
    /// Test configurations; each is executed once per registered subject
    configs: Vec<ValidationTestConfig>,
    /// Results storage for completed test runs
    results: Vec<ValidationTestResult>,
    /// Test subjects data
    subjects: Vec<TestSubject>,
    /// HRTF processor for testing (not yet wired into the stubbed renderer)
    hrtf_processor: HrtfProcessor,
}
26
/// Validation test configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ValidationTestConfig {
    /// Human-readable test name
    pub name: String,
    /// Test type (selects stimulus-generation strategy)
    pub test_type: ValidationTestType,
    /// Test parameters (positions, frequencies, levels, environment)
    pub parameters: TestParameters,
    /// Success criteria the results are judged against
    pub success_criteria: SuccessCriteria,
    /// Number of trials to run per subject
    pub trial_count: u32,
    /// Test duration
    pub duration: Duration,
}
43
/// Types of validation tests
///
/// Used both to select the stimulus-generation strategy for a trial and as
/// the key for grouping results in the final report.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ValidationTestType {
    /// Sound localization accuracy test
    LocalizationAccuracy,
    /// Distance perception test
    DistancePerception,
    /// Elevation perception test
    ElevationPerception,
    /// Front/back discrimination test
    FrontBackDiscrimination,
    /// Immersion quality assessment
    ImmersionQuality,
    /// HRTF validation across populations
    HrtfValidation,
    /// Motion tracking accuracy
    MotionTracking,
    /// Doppler effect accuracy
    DopplerAccuracy,
    /// Room acoustics validation
    RoomAcoustics,
    /// Binaural rendering quality
    BinauralQuality,
}
68
/// Test parameters for different validation tests
///
/// The candidate lists are cycled through by trial number during stimulus
/// generation; an empty list falls back to a built-in default.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestParameters {
    /// Source positions to test (empty = randomized per test type)
    pub source_positions: Vec<Position3D>,
    /// Listener positions (empty = default head-height position)
    pub listener_positions: Vec<Position3D>,
    /// Audio frequencies to test (Hz; empty = 1 kHz default)
    pub test_frequencies: Vec<f32>,
    /// Sound levels (dB; empty = 70 dB default)
    pub sound_levels: Vec<f32>,
    /// Environment parameters
    pub environment: EnvironmentParameters,
    /// Test-specific parameters, keyed by free-form name
    pub specific_params: HashMap<String, f32>,
}
85
/// Environment parameters for testing
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvironmentParameters {
    /// Room size (length, width, height) — presumably meters; confirm with callers
    pub room_size: (f32, f32, f32),
    /// Reverberation time (RT60) — presumably seconds; confirm with callers
    pub reverberation_time: f32,
    /// Background noise level — presumably dB, matching `sound_levels`
    pub noise_level: f32,
    /// Temperature (affects air absorption)
    pub temperature: f32,
    /// Humidity (affects air absorption)
    pub humidity: f32,
}
100
/// Success criteria for tests
///
/// A result passes when its mean accuracy and overall quality meet the
/// `min_accuracy` / `min_mos` thresholds (see report generation).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SuccessCriteria {
    /// Minimum accuracy percentage
    pub min_accuracy: f32,
    /// Maximum error tolerance
    pub max_error: f32,
    /// Minimum Mean Opinion Score (MOS)
    pub min_mos: f32,
    /// Maximum acceptable latency (ms)
    pub max_latency_ms: f32,
}
113
/// Test subject information
///
/// Demographic and anthropometric data; expertise and hearing ability also
/// drive the simulated-response model during automated testing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestSubject {
    /// Unique subject ID
    pub id: String,
    /// Age in years
    pub age: u32,
    /// Gender
    pub gender: Gender,
    /// Hearing ability
    pub hearing_ability: HearingAbility,
    /// Head measurements
    pub head_measurements: HeadMeasurements,
    /// Previous VR/AR experience level
    pub experience_level: ExperienceLevel,
    /// Audio expertise level
    pub audio_expertise: AudioExpertise,
}
132
/// Gender enumeration
///
/// Used as a `HashMap` key in population analysis, hence `Eq` + `Hash`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Gender {
    /// Male gender
    Male,
    /// Female gender
    Female,
    /// Other gender identity
    Other,
    /// Prefer not to specify gender
    PreferNotToSay,
}
145
/// Hearing ability levels
///
/// Also scales the simulated localization accuracy in automated runs.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum HearingAbility {
    /// Normal hearing ability
    Normal,
    /// Mild hearing loss
    MildLoss,
    /// Moderate hearing loss
    ModerateLoss,
    /// Severe hearing loss
    SevereLoss,
    /// Complete hearing loss
    Deaf,
}
160
/// Head measurements for HRTF validation
///
/// All linear dimensions are in centimeters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HeadMeasurements {
    /// Head width (cm)
    pub head_width: f32,
    /// Head depth (cm)
    pub head_depth: f32,
    /// Inter-aural distance (cm)
    pub interaural_distance: f32,
    /// Shoulder width (cm)
    pub shoulder_width: f32,
    /// Pinna measurements
    pub pinna: PinnaMeasurements,
}
175
/// Pinna (ear) measurements
///
/// All dimensions are in centimeters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PinnaMeasurements {
    /// Pinna height (cm)
    pub height: f32,
    /// Pinna width (cm)
    pub width: f32,
    /// Concha depth (cm)
    pub concha_depth: f32,
}
186
/// Experience levels with VR/AR
///
/// Ordered from least to most experienced; used as a `HashMap` key in
/// population analysis, hence `Eq` + `Hash`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ExperienceLevel {
    /// No prior experience with VR/AR
    Novice,
    /// Limited experience with VR/AR
    Beginner,
    /// Some experience with VR/AR
    Intermediate,
    /// Substantial experience with VR/AR
    Advanced,
    /// Extensive professional experience with VR/AR
    Expert,
}
201
202/// Audio expertise levels
203#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
204pub enum AudioExpertise {
205    /// General consumer with basic audio knowledge
206    General,
207    /// Enthusiast with high-end audio equipment and trained ear
208    Audiophile,
209    /// Professional music producer with studio experience
210    MusicProducer,
211    /// Professional audio engineer with technical expertise
212    AudioEngineer,
213    /// Academic or industry researcher in audio technology
214    Researcher,
215}
216
/// Validation test result
///
/// One record per (config, subject) run. NOTE(review): not `Serialize` —
/// presumably because `timestamp` is an `Instant`, which serde cannot
/// serialize; confirm before persisting results.
#[derive(Debug, Clone)]
pub struct ValidationTestResult {
    /// Test configuration used
    pub test_config: ValidationTestConfig,
    /// Subject who performed the test
    pub subject: TestSubject,
    /// Test outcomes, one per trial
    pub outcomes: Vec<TestOutcome>,
    /// Overall statistics aggregated over the outcomes
    pub statistics: TestStatistics,
    /// Subjective ratings
    pub subjective_ratings: SubjectiveRatings,
    /// Test timestamp
    pub timestamp: Instant,
}
233
/// Individual test outcome
///
/// Pairs the presented stimulus with the subject's response and the derived
/// accuracy metrics for a single trial.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestOutcome {
    /// Trial number (zero-based)
    pub trial_number: u32,
    /// Presented stimulus
    pub stimulus: StimulusData,
    /// Subject response
    pub response: ResponseData,
    /// Accuracy metrics
    pub accuracy: AccuracyMetrics,
    /// Response time
    pub response_time: Duration,
}
248
/// Stimulus data for a test trial
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StimulusData {
    /// Sound source position
    pub source_position: Position3D,
    /// Listener position
    pub listener_position: Position3D,
    /// Audio frequency (Hz)
    pub frequency: f32,
    /// Sound level (dB)
    pub level: f32,
    /// Duration (seconds)
    pub duration: f32,
    /// Additional stimulus properties, keyed by free-form name
    pub properties: HashMap<String, f32>,
}
265
/// Subject response data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseData {
    /// Perceived source position
    pub perceived_position: Position3D,
    /// Confidence rating (1-7 scale)
    pub confidence: u32,
    /// Additional response data, keyed by free-form name
    pub additional_data: HashMap<String, f32>,
}
276
/// Accuracy metrics for a trial
///
/// Derived by comparing the perceived position against the true source
/// position.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccuracyMetrics {
    /// Angular error on the horizontal plane (degrees)
    pub angular_error: f32,
    /// Distance error (meters)
    pub distance_error: f32,
    /// Elevation error (degrees)
    pub elevation_error: f32,
    /// Front/back confusion (true when perceived position crossed the z=0 plane)
    pub front_back_confusion: bool,
    /// Overall accuracy score (0-1, higher is better)
    pub overall_accuracy: f32,
}
291
/// Test statistics summary
///
/// Aggregated over all trial outcomes of a single test run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestStatistics {
    /// Total trials completed
    pub total_trials: u32,
    /// Mean accuracy (0-1)
    pub mean_accuracy: f32,
    /// Standard deviation of accuracy
    pub accuracy_std_dev: f32,
    /// Mean angular error (degrees)
    pub mean_angular_error: f32,
    /// Mean distance error (meters)
    pub mean_distance_error: f32,
    /// Front/back confusion rate (0-1 fraction of trials)
    pub front_back_confusion_rate: f32,
    /// Response time statistics
    pub response_time_stats: ResponseTimeStats,
}
310
/// Response time statistics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseTimeStats {
    /// Mean response time
    pub mean: Duration,
    /// Median response time
    pub median: Duration,
    /// Standard deviation
    pub std_dev: Duration,
    /// 95th percentile
    pub p95: Duration,
}
323
/// Subjective quality ratings
///
/// All numeric ratings use a 1-5 Mean Opinion Score style scale.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubjectiveRatings {
    /// Overall quality (1-5 MOS scale)
    pub overall_quality: f32,
    /// Localization naturalness (1-5)
    pub localization_naturalness: f32,
    /// Immersion level (1-5)
    pub immersion_level: f32,
    /// Comfort level (1-5)
    pub comfort_level: f32,
    /// Presence feeling (1-5)
    pub presence: f32,
    /// Any reported artifacts or issues (free-form descriptions)
    pub artifacts: Vec<String>,
}
340
/// Perceptual validation report
///
/// Final output of a full suite run. NOTE(review): not `Serialize` —
/// presumably because `generated_at` is an `Instant`; confirm before
/// persisting reports.
#[derive(Debug, Clone)]
pub struct ValidationReport {
    /// Test suite summary
    pub summary: ValidationSummary,
    /// Results grouped by test type
    pub results_by_type: HashMap<ValidationTestType, Vec<ValidationTestResult>>,
    /// Population analysis
    pub population_analysis: PopulationAnalysis,
    /// Recommendations
    pub recommendations: Vec<String>,
    /// Report timestamp
    pub generated_at: Instant,
}
355
/// Summary of validation test suite
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ValidationSummary {
    /// Total subjects tested
    pub total_subjects: u32,
    /// Total trials completed
    pub total_trials: u32,
    /// Overall pass rate (fraction of test types passing, 0-1)
    pub overall_pass_rate: f32,
    /// Mean accuracy across all tests
    pub mean_accuracy: f32,
    /// Mean MOS score
    pub mean_mos: f32,
    /// Tests that passed success criteria (debug-formatted test-type names)
    pub passing_tests: Vec<String>,
    /// Tests that failed success criteria (debug-formatted test-type names)
    pub failing_tests: Vec<String>,
}
374
/// Population-based analysis
///
/// Per-group statistics keyed by each demographic dimension.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PopulationAnalysis {
    /// Results by age group (string-labelled buckets)
    pub by_age_group: HashMap<String, PopulationStats>,
    /// Results by gender
    pub by_gender: HashMap<Gender, PopulationStats>,
    /// Results by hearing ability
    pub by_hearing_ability: HashMap<HearingAbility, PopulationStats>,
    /// Results by experience level
    pub by_experience_level: HashMap<ExperienceLevel, PopulationStats>,
}
387
/// Statistics for a population group
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PopulationStats {
    /// Number of subjects in the group
    pub subject_count: u32,
    /// Mean accuracy (0-1)
    pub mean_accuracy: f32,
    /// Standard deviation of accuracy
    pub accuracy_std_dev: f32,
    /// Mean MOS score (1-5)
    pub mean_mos: f32,
    /// Pass rate (0-1)
    pub pass_rate: f32,
}
402
403impl PerceptualTestSuite {
404    /// Create new perceptual test suite
405    pub fn new(hrtf_processor: HrtfProcessor) -> Self {
406        Self {
407            configs: Vec::new(),
408            results: Vec::new(),
409            subjects: Vec::new(),
410            hrtf_processor,
411        }
412    }
413
    /// Add test configuration
    ///
    /// Every registered config is executed once per subject by `run_all_tests`.
    pub fn add_test_config(&mut self, config: ValidationTestConfig) {
        self.configs.push(config);
    }
418
    /// Add test subject
    ///
    /// Every registered subject runs every registered config in `run_all_tests`.
    pub fn add_subject(&mut self, subject: TestSubject) {
        self.subjects.push(subject);
    }
423
424    /// Run all validation tests
425    pub async fn run_all_tests(&mut self) -> Result<ValidationReport> {
426        tracing::info!("Starting perceptual validation test suite");
427
428        for config in self.configs.clone() {
429            for subject in self.subjects.clone() {
430                let result = self.run_test(&config, &subject).await?;
431                self.results.push(result);
432            }
433        }
434
435        let report = self.generate_report().await?;
436        tracing::info!("Completed perceptual validation test suite");
437        Ok(report)
438    }
439
440    /// Run a specific test for a subject
441    pub async fn run_test(
442        &self,
443        config: &ValidationTestConfig,
444        subject: &TestSubject,
445    ) -> Result<ValidationTestResult> {
446        tracing::info!(
447            "Running test '{}' for subject '{}'",
448            config.name,
449            subject.id
450        );
451
452        let mut outcomes = Vec::new();
453
454        for trial_num in 0..config.trial_count {
455            let outcome = self.run_trial(config, subject, trial_num).await?;
456            outcomes.push(outcome);
457        }
458
459        let statistics = self.calculate_statistics(&outcomes);
460        let subjective_ratings = self.collect_subjective_ratings(config, subject).await?;
461
462        Ok(ValidationTestResult {
463            test_config: config.clone(),
464            subject: subject.clone(),
465            outcomes,
466            statistics,
467            subjective_ratings,
468            timestamp: Instant::now(),
469        })
470    }
471
472    /// Run a single trial
473    async fn run_trial(
474        &self,
475        config: &ValidationTestConfig,
476        subject: &TestSubject,
477        trial_num: u32,
478    ) -> Result<TestOutcome> {
479        // Select test parameters for this trial
480        let stimulus = self.generate_stimulus(config, trial_num)?;
481
482        let start_time = Instant::now();
483
484        // Simulate spatial audio processing
485        let processed_audio = self.process_spatial_audio(&stimulus, subject).await?;
486
487        // Simulate subject response (in real implementation, this would be user input)
488        let response = self.simulate_subject_response(&stimulus, &processed_audio, subject)?;
489
490        let response_time = start_time.elapsed();
491
492        // Calculate accuracy metrics
493        let accuracy = self.calculate_accuracy(&stimulus, &response)?;
494
495        Ok(TestOutcome {
496            trial_number: trial_num,
497            stimulus,
498            response,
499            accuracy,
500            response_time,
501        })
502    }
503
504    /// Generate stimulus for a trial
505    fn generate_stimulus(
506        &self,
507        config: &ValidationTestConfig,
508        trial_num: u32,
509    ) -> Result<StimulusData> {
510        let params = &config.parameters;
511
512        // Select position based on trial number and test type
513        let source_position = if params.source_positions.is_empty() {
514            self.generate_random_position(config.test_type)?
515        } else {
516            params.source_positions[trial_num as usize % params.source_positions.len()]
517        };
518
519        let listener_position = if params.listener_positions.is_empty() {
520            Position3D::new(0.0, 1.7, 0.0) // Default head height
521        } else {
522            params.listener_positions[trial_num as usize % params.listener_positions.len()]
523        };
524
525        let frequency = if params.test_frequencies.is_empty() {
526            1000.0 // Default 1kHz
527        } else {
528            params.test_frequencies[trial_num as usize % params.test_frequencies.len()]
529        };
530
531        let level = if params.sound_levels.is_empty() {
532            70.0 // Default 70dB
533        } else {
534            params.sound_levels[trial_num as usize % params.sound_levels.len()]
535        };
536
537        Ok(StimulusData {
538            source_position,
539            listener_position,
540            frequency,
541            level,
542            duration: 2.0, // 2 second duration
543            properties: HashMap::new(),
544        })
545    }
546
547    /// Generate random position based on test type
548    fn generate_random_position(&self, test_type: ValidationTestType) -> Result<Position3D> {
549        use fastrand;
550
551        match test_type {
552            ValidationTestType::LocalizationAccuracy => {
553                // Random positions on sphere around listener
554                let azimuth = fastrand::f32() * 2.0 * std::f32::consts::PI;
555                let elevation = (fastrand::f32() - 0.5) * std::f32::consts::PI;
556                let distance = 2.0 + fastrand::f32() * 3.0; // 2-5 meters
557
558                Ok(Position3D::new(
559                    distance * elevation.cos() * azimuth.cos(),
560                    distance * elevation.sin(),
561                    distance * elevation.cos() * azimuth.sin(),
562                ))
563            }
564            ValidationTestType::DistancePerception => {
565                // Fixed angle, varying distance
566                let distance = 0.5 + fastrand::f32() * 19.5; // 0.5-20 meters
567                Ok(Position3D::new(distance, 1.7, 0.0))
568            }
569            ValidationTestType::ElevationPerception => {
570                // Fixed horizontal plane, varying elevation
571                let elevation = (fastrand::f32() - 0.5) * std::f32::consts::PI * 0.8; // ±72 degrees
572                let distance = 3.0;
573                Ok(Position3D::new(
574                    0.0,
575                    distance * elevation.sin(),
576                    distance * elevation.cos(),
577                ))
578            }
579            ValidationTestType::FrontBackDiscrimination => {
580                // Front or back positions
581                let is_front = fastrand::bool();
582                let angle_offset = (fastrand::f32() - 0.5) * 0.7; // ±20 degrees
583                let base_angle = if is_front { 0.0 } else { std::f32::consts::PI };
584                let angle = base_angle + angle_offset;
585                let distance = 2.0;
586
587                Ok(Position3D::new(
588                    distance * angle.sin(),
589                    1.7,
590                    distance * angle.cos(),
591                ))
592            }
593            _ => {
594                // Default random position
595                Ok(Position3D::new(
596                    (fastrand::f32() - 0.5) * 10.0,
597                    fastrand::f32() * 3.0,
598                    (fastrand::f32() - 0.5) * 10.0,
599                ))
600            }
601        }
602    }
603
604    /// Process spatial audio for stimulus
605    async fn process_spatial_audio(
606        &self,
607        stimulus: &StimulusData,
608        subject: &TestSubject,
609    ) -> Result<Vec<f32>> {
610        // Create listener and source
611        let mut listener = Listener::new();
612        listener.set_position(stimulus.listener_position);
613        let source = SoundSource::new_point("test_source".to_string(), stimulus.source_position);
614
615        // Generate test signal
616        let sample_rate = 44100;
617        let duration_samples = (stimulus.duration * sample_rate as f32) as usize;
618        let mut audio_signal = vec![0.0f32; duration_samples];
619
620        // Generate pure tone
621        for (i, sample) in audio_signal.iter_mut().enumerate().take(duration_samples) {
622            let t = i as f32 / sample_rate as f32;
623            *sample = (2.0 * std::f32::consts::PI * stimulus.frequency * t).sin() * 0.1;
624            // Reduce amplitude
625        }
626
627        // Apply HRTF processing (simplified)
628        // In a real implementation, this would use the full HRTF processor
629        // For now, just return the original signal
630        let processed_signal = audio_signal.clone();
631
632        Ok(processed_signal)
633    }
634
635    /// Simulate subject response (in real test, this would be user input)
636    fn simulate_subject_response(
637        &self,
638        stimulus: &StimulusData,
639        _processed_audio: &[f32],
640        subject: &TestSubject,
641    ) -> Result<ResponseData> {
642        use fastrand;
643
644        // Simulate human localization accuracy based on subject characteristics
645        let base_accuracy = match subject.audio_expertise {
646            AudioExpertise::General => 0.7,
647            AudioExpertise::Audiophile => 0.8,
648            AudioExpertise::MusicProducer => 0.85,
649            AudioExpertise::AudioEngineer => 0.9,
650            AudioExpertise::Researcher => 0.95,
651        };
652
653        // Add noise based on hearing ability
654        let hearing_factor = match subject.hearing_ability {
655            HearingAbility::Normal => 1.0,
656            HearingAbility::MildLoss => 0.9,
657            HearingAbility::ModerateLoss => 0.7,
658            HearingAbility::SevereLoss => 0.5,
659            HearingAbility::Deaf => 0.1,
660        };
661
662        let accuracy = base_accuracy * hearing_factor;
663
664        // Add random error
665        let error_scale = (1.0 - accuracy) * 2.0; // Higher error for lower accuracy
666        let position_error = Position3D::new(
667            (fastrand::f32() - 0.5) * error_scale,
668            (fastrand::f32() - 0.5) * error_scale,
669            (fastrand::f32() - 0.5) * error_scale,
670        );
671
672        let perceived_position = Position3D::new(
673            stimulus.source_position.x + position_error.x,
674            stimulus.source_position.y + position_error.y,
675            stimulus.source_position.z + position_error.z,
676        );
677
678        // Confidence correlates with accuracy
679        let confidence = ((accuracy * 5.0) as u32).clamp(1, 7);
680
681        Ok(ResponseData {
682            perceived_position,
683            confidence,
684            additional_data: HashMap::new(),
685        })
686    }
687
688    /// Calculate accuracy metrics
689    fn calculate_accuracy(
690        &self,
691        stimulus: &StimulusData,
692        response: &ResponseData,
693    ) -> Result<AccuracyMetrics> {
694        let true_pos = &stimulus.source_position;
695        let perceived_pos = &response.perceived_position;
696
697        // Calculate angular error
698        let true_vec = Position3D::new(true_pos.x, 0.0, true_pos.z).normalized();
699        let perceived_vec = Position3D::new(perceived_pos.x, 0.0, perceived_pos.z).normalized();
700        let angular_error = true_vec.dot(&perceived_vec).acos() * 180.0 / std::f32::consts::PI;
701
702        // Calculate distance error
703        let true_distance = true_pos.magnitude();
704        let perceived_distance = perceived_pos.magnitude();
705        let distance_error = (true_distance - perceived_distance).abs();
706
707        // Calculate elevation error
708        let true_elevation =
709            (true_pos.y / true_pos.magnitude()).asin() * 180.0 / std::f32::consts::PI;
710        let perceived_elevation =
711            (perceived_pos.y / perceived_pos.magnitude()).asin() * 180.0 / std::f32::consts::PI;
712        let elevation_error = (true_elevation - perceived_elevation).abs();
713
714        // Front/back confusion check
715        let front_back_confusion = (true_pos.z > 0.0) != (perceived_pos.z > 0.0);
716
717        // Overall accuracy score (inverse of normalized error)
718        let overall_accuracy =
719            1.0 / (1.0 + angular_error / 180.0 + distance_error / 10.0 + elevation_error / 90.0);
720
721        Ok(AccuracyMetrics {
722            angular_error,
723            distance_error,
724            elevation_error,
725            front_back_confusion,
726            overall_accuracy,
727        })
728    }
729
730    /// Calculate statistics for test outcomes
731    fn calculate_statistics(&self, outcomes: &[TestOutcome]) -> TestStatistics {
732        if outcomes.is_empty() {
733            return TestStatistics {
734                total_trials: 0,
735                mean_accuracy: 0.0,
736                accuracy_std_dev: 0.0,
737                mean_angular_error: 0.0,
738                mean_distance_error: 0.0,
739                front_back_confusion_rate: 0.0,
740                response_time_stats: ResponseTimeStats {
741                    mean: Duration::from_secs(0),
742                    median: Duration::from_secs(0),
743                    std_dev: Duration::from_secs(0),
744                    p95: Duration::from_secs(0),
745                },
746            };
747        }
748
749        let total_trials = outcomes.len() as u32;
750
751        // Accuracy statistics
752        let accuracies: Vec<f32> = outcomes
753            .iter()
754            .map(|o| o.accuracy.overall_accuracy)
755            .collect();
756        let mean_accuracy = accuracies.iter().sum::<f32>() / accuracies.len() as f32;
757        let accuracy_variance = accuracies
758            .iter()
759            .map(|&x| (x - mean_accuracy).powi(2))
760            .sum::<f32>()
761            / accuracies.len() as f32;
762        let accuracy_std_dev = accuracy_variance.sqrt();
763
764        // Error statistics
765        let angular_errors: Vec<f32> = outcomes.iter().map(|o| o.accuracy.angular_error).collect();
766        let mean_angular_error = angular_errors.iter().sum::<f32>() / angular_errors.len() as f32;
767
768        let distance_errors: Vec<f32> =
769            outcomes.iter().map(|o| o.accuracy.distance_error).collect();
770        let mean_distance_error =
771            distance_errors.iter().sum::<f32>() / distance_errors.len() as f32;
772
773        // Front/back confusion rate
774        let confusion_count = outcomes
775            .iter()
776            .filter(|o| o.accuracy.front_back_confusion)
777            .count();
778        let front_back_confusion_rate = confusion_count as f32 / total_trials as f32;
779
780        // Response time statistics
781        let mut response_times: Vec<Duration> = outcomes.iter().map(|o| o.response_time).collect();
782        response_times.sort();
783
784        let mean_response_time = Duration::from_nanos(
785            (response_times.iter().map(|d| d.as_nanos()).sum::<u128>()
786                / response_times.len() as u128) as u64,
787        );
788        let median_response_time = response_times[response_times.len() / 2];
789        let p95_index = (response_times.len() as f32 * 0.95) as usize;
790        let p95_response_time = response_times[p95_index.min(response_times.len() - 1)];
791
792        // Response time standard deviation
793        let mean_nanos = mean_response_time.as_nanos() as f64;
794        let variance = response_times
795            .iter()
796            .map(|d| (d.as_nanos() as f64 - mean_nanos).powi(2))
797            .sum::<f64>()
798            / response_times.len() as f64;
799        let std_dev_response_time = Duration::from_nanos(variance.sqrt() as u64);
800
801        TestStatistics {
802            total_trials,
803            mean_accuracy,
804            accuracy_std_dev,
805            mean_angular_error,
806            mean_distance_error,
807            front_back_confusion_rate,
808            response_time_stats: ResponseTimeStats {
809                mean: mean_response_time,
810                median: median_response_time,
811                std_dev: std_dev_response_time,
812                p95: p95_response_time,
813            },
814        }
815    }
816
817    /// Collect subjective ratings (simulated for testing)
818    async fn collect_subjective_ratings(
819        &self,
820        _config: &ValidationTestConfig,
821        subject: &TestSubject,
822    ) -> Result<SubjectiveRatings> {
823        use fastrand;
824
825        // Simulate ratings based on subject characteristics
826        let base_quality = match subject.audio_expertise {
827            AudioExpertise::General => 3.0,
828            AudioExpertise::Audiophile => 3.5,
829            AudioExpertise::MusicProducer => 4.0,
830            AudioExpertise::AudioEngineer => 4.2,
831            AudioExpertise::Researcher => 4.5,
832        };
833
834        let noise = (fastrand::f32() - 0.5) * 0.5; // ±0.25
835        let quality = (base_quality + noise).clamp(1.0, 5.0);
836
837        Ok(SubjectiveRatings {
838            overall_quality: quality,
839            localization_naturalness: quality + (fastrand::f32() - 0.5) * 0.3,
840            immersion_level: quality + (fastrand::f32() - 0.5) * 0.4,
841            comfort_level: quality + (fastrand::f32() - 0.5) * 0.2,
842            presence: quality + (fastrand::f32() - 0.5) * 0.6,
843            artifacts: Vec::new(),
844        })
845    }
846
847    /// Generate validation report
848    async fn generate_report(&self) -> Result<ValidationReport> {
849        let mut results_by_type: HashMap<ValidationTestType, Vec<ValidationTestResult>> =
850            HashMap::new();
851
852        for result in &self.results {
853            results_by_type
854                .entry(result.test_config.test_type)
855                .or_default()
856                .push(result.clone());
857        }
858
859        let summary = self.generate_summary(&results_by_type);
860        let population_analysis = self.generate_population_analysis(&self.results);
861        let recommendations = self.generate_recommendations(&summary, &population_analysis);
862
863        Ok(ValidationReport {
864            summary,
865            results_by_type,
866            population_analysis,
867            recommendations,
868            generated_at: Instant::now(),
869        })
870    }
871
872    /// Generate summary statistics
873    fn generate_summary(
874        &self,
875        results_by_type: &HashMap<ValidationTestType, Vec<ValidationTestResult>>,
876    ) -> ValidationSummary {
877        let total_subjects = self.subjects.len() as u32;
878        let total_trials: u32 = self.results.iter().map(|r| r.statistics.total_trials).sum();
879
880        let mean_accuracy = if self.results.is_empty() {
881            0.0
882        } else {
883            self.results
884                .iter()
885                .map(|r| r.statistics.mean_accuracy)
886                .sum::<f32>()
887                / self.results.len() as f32
888        };
889
890        let mean_mos = if self.results.is_empty() {
891            0.0
892        } else {
893            self.results
894                .iter()
895                .map(|r| r.subjective_ratings.overall_quality)
896                .sum::<f32>()
897                / self.results.len() as f32
898        };
899
900        // Count passing/failing tests based on criteria
901        let mut passing_tests = Vec::new();
902        let mut failing_tests = Vec::new();
903
904        for (test_type, results) in results_by_type {
905            let test_name = format!("{test_type:?}");
906            let pass_rate = results
907                .iter()
908                .filter(|r| {
909                    let criteria = &r.test_config.success_criteria;
910                    r.statistics.mean_accuracy >= criteria.min_accuracy
911                        && r.subjective_ratings.overall_quality >= criteria.min_mos
912                })
913                .count() as f32
914                / results.len() as f32;
915
916            if pass_rate >= 0.8 {
917                // 80% pass rate threshold
918                passing_tests.push(test_name);
919            } else {
920                failing_tests.push(test_name);
921            }
922        }
923
924        let overall_pass_rate =
925            passing_tests.len() as f32 / (passing_tests.len() + failing_tests.len()) as f32;
926
927        ValidationSummary {
928            total_subjects,
929            total_trials,
930            overall_pass_rate,
931            mean_accuracy,
932            mean_mos,
933            passing_tests,
934            failing_tests,
935        }
936    }
937
938    /// Generate population analysis
939    fn generate_population_analysis(&self, results: &[ValidationTestResult]) -> PopulationAnalysis {
940        let mut by_age_group = HashMap::new();
941        let mut by_gender = HashMap::new();
942        let mut by_hearing_ability = HashMap::new();
943        let mut by_experience_level = HashMap::new();
944
945        // Group results by demographics
946        for result in results {
947            let subject = &result.subject;
948
949            // Age groups
950            let age_group = if subject.age < 25 {
951                "18-24"
952            } else if subject.age < 35 {
953                "25-34"
954            } else if subject.age < 45 {
955                "35-44"
956            } else if subject.age < 55 {
957                "45-54"
958            } else {
959                "55+"
960            }
961            .to_string();
962
963            self.update_population_stats(&mut by_age_group, age_group, result);
964            self.update_population_stats(&mut by_gender, subject.gender, result);
965            self.update_population_stats(&mut by_hearing_ability, subject.hearing_ability, result);
966            self.update_population_stats(
967                &mut by_experience_level,
968                subject.experience_level,
969                result,
970            );
971        }
972
973        PopulationAnalysis {
974            by_age_group,
975            by_gender,
976            by_hearing_ability,
977            by_experience_level,
978        }
979    }
980
981    /// Update population statistics
982    fn update_population_stats<K: Clone + std::hash::Hash + Eq>(
983        &self,
984        map: &mut HashMap<K, PopulationStats>,
985        key: K,
986        result: &ValidationTestResult,
987    ) {
988        let stats = map.entry(key).or_insert_with(|| PopulationStats {
989            subject_count: 0,
990            mean_accuracy: 0.0,
991            accuracy_std_dev: 0.0,
992            mean_mos: 0.0,
993            pass_rate: 0.0,
994        });
995
996        stats.subject_count += 1;
997
998        // Update running averages (simplified)
999        let n = stats.subject_count as f32;
1000        stats.mean_accuracy =
1001            (stats.mean_accuracy * (n - 1.0) + result.statistics.mean_accuracy) / n;
1002        stats.mean_mos =
1003            (stats.mean_mos * (n - 1.0) + result.subjective_ratings.overall_quality) / n;
1004
1005        // Calculate pass rate
1006        let passed =
1007            result.statistics.mean_accuracy >= result.test_config.success_criteria.min_accuracy;
1008        stats.pass_rate = (stats.pass_rate * (n - 1.0) + if passed { 1.0 } else { 0.0 }) / n;
1009    }
1010
1011    /// Generate recommendations based on results
1012    fn generate_recommendations(
1013        &self,
1014        summary: &ValidationSummary,
1015        population_analysis: &PopulationAnalysis,
1016    ) -> Vec<String> {
1017        let mut recommendations = Vec::new();
1018
1019        if summary.overall_pass_rate < 0.8 {
1020            recommendations.push(
1021                "Overall pass rate is below 80%. Consider improving core spatial audio algorithms."
1022                    .to_string(),
1023            );
1024        }
1025
1026        if summary.mean_accuracy < 0.85 {
1027            recommendations.push(
1028                "Mean accuracy is below 85%. Focus on improving localization algorithms."
1029                    .to_string(),
1030            );
1031        }
1032
1033        if summary.mean_mos < 4.0 {
1034            recommendations.push(
1035                "Mean Opinion Score is below 4.0. Improve perceptual quality of spatial rendering."
1036                    .to_string(),
1037            );
1038        }
1039
1040        // Check for demographic-specific issues
1041        for (hearing_ability, stats) in &population_analysis.by_hearing_ability {
1042            if matches!(
1043                hearing_ability,
1044                HearingAbility::MildLoss | HearingAbility::ModerateLoss
1045            ) && stats.mean_accuracy < 0.7
1046            {
1047                recommendations.push(format!(
1048                    "Users with {:?} show lower accuracy ({}%). Consider accessibility improvements.",
1049                    hearing_ability, (stats.mean_accuracy * 100.0) as u32
1050                ));
1051            }
1052        }
1053
1054        recommendations
1055    }
1056}
1057
1058/// Create standard validation test configurations
1059pub fn create_standard_test_configs() -> Vec<ValidationTestConfig> {
1060    vec![
1061        // Localization accuracy test
1062        ValidationTestConfig {
1063            name: "Localization Accuracy Test".to_string(),
1064            test_type: ValidationTestType::LocalizationAccuracy,
1065            parameters: TestParameters {
1066                source_positions: vec![], // Will be generated randomly
1067                listener_positions: vec![Position3D::new(0.0, 1.7, 0.0)],
1068                test_frequencies: vec![250.0, 500.0, 1000.0, 2000.0, 4000.0],
1069                sound_levels: vec![60.0, 70.0, 80.0],
1070                environment: EnvironmentParameters {
1071                    room_size: (10.0, 10.0, 3.0),
1072                    reverberation_time: 0.3,
1073                    noise_level: 40.0,
1074                    temperature: 20.0,
1075                    humidity: 50.0,
1076                },
1077                specific_params: HashMap::new(),
1078            },
1079            success_criteria: SuccessCriteria {
1080                min_accuracy: 0.85,
1081                max_error: 15.0, // degrees
1082                min_mos: 4.0,
1083                max_latency_ms: 20.0,
1084            },
1085            trial_count: 50,
1086            duration: Duration::from_secs(2),
1087        },
1088        // Distance perception test
1089        ValidationTestConfig {
1090            name: "Distance Perception Test".to_string(),
1091            test_type: ValidationTestType::DistancePerception,
1092            parameters: TestParameters {
1093                source_positions: (0..20)
1094                    .map(|i| {
1095                        let distance = 0.5 + (i as f32) * 0.975; // 0.5m to 20m
1096                        Position3D::new(distance, 1.7, 0.0)
1097                    })
1098                    .collect(),
1099                listener_positions: vec![Position3D::new(0.0, 1.7, 0.0)],
1100                test_frequencies: vec![1000.0],
1101                sound_levels: vec![70.0],
1102                environment: EnvironmentParameters {
1103                    room_size: (30.0, 30.0, 5.0),
1104                    reverberation_time: 0.5,
1105                    noise_level: 35.0,
1106                    temperature: 20.0,
1107                    humidity: 50.0,
1108                },
1109                specific_params: HashMap::new(),
1110            },
1111            success_criteria: SuccessCriteria {
1112                min_accuracy: 0.80,
1113                max_error: 1.0, // meters
1114                min_mos: 3.8,
1115                max_latency_ms: 20.0,
1116            },
1117            trial_count: 20,
1118            duration: Duration::from_secs(3),
1119        },
1120        // Front/back discrimination test
1121        ValidationTestConfig {
1122            name: "Front/Back Discrimination Test".to_string(),
1123            test_type: ValidationTestType::FrontBackDiscrimination,
1124            parameters: TestParameters {
1125                source_positions: vec![], // Will be generated
1126                listener_positions: vec![Position3D::new(0.0, 1.7, 0.0)],
1127                test_frequencies: vec![1000.0, 2000.0, 4000.0],
1128                sound_levels: vec![70.0],
1129                environment: EnvironmentParameters {
1130                    room_size: (8.0, 8.0, 3.0),
1131                    reverberation_time: 0.2,
1132                    noise_level: 30.0,
1133                    temperature: 20.0,
1134                    humidity: 50.0,
1135                },
1136                specific_params: HashMap::new(),
1137            },
1138            success_criteria: SuccessCriteria {
1139                min_accuracy: 0.95, // High requirement for front/back
1140                max_error: 5.0,     // degrees
1141                min_mos: 4.2,
1142                max_latency_ms: 20.0,
1143            },
1144            trial_count: 40,
1145            duration: Duration::from_secs(2),
1146        },
1147    ]
1148}
1149
/// Create diverse test subject pool
///
/// Returns four hard-coded subjects spanning both genders, ages 25–45,
/// normal hearing plus mild hearing loss, and novice-to-expert experience
/// levels, so that population analysis has at least two groups per
/// demographic dimension.
///
/// NOTE(review): head/pinna measurements appear to be in centimeters
/// (e.g. interaural_distance ~16–18) — confirm the expected unit against
/// the `HeadMeasurements` type's documentation.
pub fn create_test_subjects() -> Vec<TestSubject> {
    vec![
        // Subject 1: young adult male, normal hearing, intermediate user.
        TestSubject {
            id: "subject_001".to_string(),
            age: 25,
            gender: Gender::Male,
            hearing_ability: HearingAbility::Normal,
            head_measurements: HeadMeasurements {
                head_width: 15.5,
                head_depth: 19.0,
                interaural_distance: 17.5,
                shoulder_width: 45.0,
                pinna: PinnaMeasurements {
                    height: 6.2,
                    width: 3.5,
                    concha_depth: 1.2,
                },
            },
            experience_level: ExperienceLevel::Intermediate,
            audio_expertise: AudioExpertise::General,
        },
        // Subject 2: adult female, normal hearing, experienced audiophile.
        TestSubject {
            id: "subject_002".to_string(),
            age: 32,
            gender: Gender::Female,
            hearing_ability: HearingAbility::Normal,
            head_measurements: HeadMeasurements {
                head_width: 14.2,
                head_depth: 17.8,
                interaural_distance: 16.5,
                shoulder_width: 38.0,
                pinna: PinnaMeasurements {
                    height: 5.8,
                    width: 3.2,
                    concha_depth: 1.0,
                },
            },
            experience_level: ExperienceLevel::Advanced,
            audio_expertise: AudioExpertise::Audiophile,
        },
        // Subject 3: middle-aged male with mild hearing loss — exercises the
        // accessibility branch of the recommendation logic.
        TestSubject {
            id: "subject_003".to_string(),
            age: 45,
            gender: Gender::Male,
            hearing_ability: HearingAbility::MildLoss,
            head_measurements: HeadMeasurements {
                head_width: 16.0,
                head_depth: 19.5,
                interaural_distance: 18.0,
                shoulder_width: 48.0,
                pinna: PinnaMeasurements {
                    height: 6.5,
                    width: 3.8,
                    concha_depth: 1.3,
                },
            },
            experience_level: ExperienceLevel::Novice,
            audio_expertise: AudioExpertise::General,
        },
        // Subject 4: adult female, normal hearing, professional listener.
        TestSubject {
            id: "subject_004".to_string(),
            age: 28,
            gender: Gender::Female,
            hearing_ability: HearingAbility::Normal,
            head_measurements: HeadMeasurements {
                head_width: 14.0,
                head_depth: 17.5,
                interaural_distance: 16.2,
                shoulder_width: 36.0,
                pinna: PinnaMeasurements {
                    height: 5.6,
                    width: 3.0,
                    concha_depth: 0.9,
                },
            },
            experience_level: ExperienceLevel::Expert,
            audio_expertise: AudioExpertise::AudioEngineer,
        },
    ]
}
1231
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hrtf::HrtfProcessor;

    /// End-to-end smoke test: configure the suite, register subjects, and
    /// run a single test to keep wall-clock time low.
    #[tokio::test]
    async fn test_perceptual_test_suite() {
        let hrtf_processor = HrtfProcessor::new_default()
            .await
            .expect("Failed to create HRTF processor");
        let mut suite = PerceptualTestSuite::new(hrtf_processor);

        // Add test configurations
        for config in create_standard_test_configs() {
            suite.add_test_config(config);
        }

        // Add test subjects
        for subject in create_test_subjects() {
            suite.add_subject(subject);
        }

        // Run a single test (to avoid long test times)
        if let (Some(config), Some(subject)) = (suite.configs.first(), suite.subjects.first()) {
            let result = suite
                .run_test(config, subject)
                .await
                .expect("Test run should succeed");
            // Prefer `!is_empty()` over `len() > 0` (clippy::len_zero).
            assert!(!result.outcomes.is_empty());
            assert!(result.statistics.mean_accuracy >= 0.0);
        }
    }

    /// Verifies a generated stimulus has sane, positive parameters.
    #[tokio::test]
    async fn test_stimulus_generation() {
        let hrtf_processor = HrtfProcessor::new_default()
            .await
            .expect("Failed to create HRTF processor");
        let suite = PerceptualTestSuite::new(hrtf_processor);

        let config = &create_standard_test_configs()[0];
        let stimulus = suite
            .generate_stimulus(config, 0)
            .expect("Stimulus generation should succeed");

        assert!(stimulus.frequency > 0.0);
        assert!(stimulus.level > 0.0);
        assert!(stimulus.duration > 0.0);
    }

    /// A near-perfect response should yield small errors and high accuracy.
    #[tokio::test]
    async fn test_accuracy_calculation() {
        let hrtf_processor = HrtfProcessor::new_default()
            .await
            .expect("Failed to create HRTF processor");
        let suite = PerceptualTestSuite::new(hrtf_processor);

        let stimulus = StimulusData {
            source_position: Position3D::new(2.0, 0.0, 0.0),
            listener_position: Position3D::new(0.0, 1.7, 0.0),
            frequency: 1000.0,
            level: 70.0,
            duration: 2.0,
            properties: HashMap::new(),
        };

        let response = ResponseData {
            perceived_position: Position3D::new(2.1, 0.1, 0.0),
            confidence: 5,
            additional_data: HashMap::new(),
        };

        let accuracy = suite
            .calculate_accuracy(&stimulus, &response)
            .expect("Accuracy calculation should succeed");
        assert!(accuracy.angular_error < 10.0); // Small error for close positions
        assert!(accuracy.distance_error < 0.5);
        assert!(accuracy.overall_accuracy > 0.8);
    }

    /// Standard config factory returns the three documented test types.
    #[test]
    fn test_standard_configs_creation() {
        let configs = create_standard_test_configs();
        assert_eq!(configs.len(), 3);

        let localization_test = configs
            .iter()
            .find(|c| c.test_type == ValidationTestType::LocalizationAccuracy)
            .expect("Localization test should exist");
        assert!(localization_test.trial_count > 0);
        assert!(localization_test.success_criteria.min_accuracy > 0.0);
    }

    /// Subject pool should be demographically diverse for population analysis.
    #[test]
    fn test_subjects_creation() {
        let subjects = create_test_subjects();
        assert_eq!(subjects.len(), 4);

        // Check diversity
        let genders: std::collections::HashSet<_> = subjects.iter().map(|s| s.gender).collect();
        assert!(genders.len() >= 2); // At least 2 different genders

        let hearing_abilities: std::collections::HashSet<_> =
            subjects.iter().map(|s| s.hearing_ability).collect();
        assert!(hearing_abilities.len() >= 2); // At least 2 different hearing abilities
    }
}