use crate::Curve;
use crate::error::{AutoeqError, Result};
use log::{debug, info, warn};
use math_audio_dsp::analysis::compute_average_response;
use math_audio_iir_fir::Biquad;
use num_complex::Complex64;
use std::collections::HashMap;
use std::f64::consts::PI;
use std::path::Path;
use std::sync::{Arc, Mutex};
use super::config::validate_room_config;
use super::fir;
use super::output;
use super::phase_alignment;
use super::pipeline::{
PipelineControl, PipelineEvent, PipelineObserver, PipelineStepId, PipelineStepStatus,
RoomPipeline, RoomPipelineRequest,
};
use super::types::{
ChannelDspChain, DspChainOutput, MeasurementSource, OptimizationMetadata, OptimizerConfig,
PerceptualMetrics, ProcessingMode, RoomConfig, SpeakerConfig, SystemModel, TargetCurveConfig,
};
use super::speaker_eq::process_single_speaker;
use super::crossover_utils::check_group_consistency;
use super::group_processing::{
process_cardioid, process_dba, process_multisub_group, process_speaker_group,
};
mod gd;
mod phase;
mod reports;
use gd::*;
use phase::*;
use reports::*;
/// Result of processing a single speaker channel.
///
/// `Ok` tuple fields, in the order they are produced and consumed in this module:
/// - channel (role) name
/// - DSP chain built for the channel
/// - score before correction
/// - score after correction
/// - initial (measured) response curve
/// - final (corrected) response curve
/// - optimized biquad sections (note: `crate::iir::Biquad`, while the top of this
///   file imports `math_audio_iir_fir::Biquad` — presumably distinct types; TODO confirm)
/// - mean SPL level of the channel
/// - arrival time in milliseconds, when available
/// - FIR coefficients, when FIR generation ran
type SpeakerProcessResult = std::result::Result<
    (
        String,
        ChannelDspChain,
        f64,
        f64,
        crate::Curve,
        crate::Curve,
        Vec<crate::iir::Biquad>,
        f64,
        Option<f64>,
        Option<Vec<f64>>,
    ),
    AutoeqError,
>;
/// Per-channel payload for mixed-mode processing.
///
/// Appears to mirror the `Ok` tuple of `SpeakerProcessResult` without the leading
/// channel name (chain, pre/post scores, initial/final curves, biquads, mean SPL,
/// optional arrival time in ms, optional FIR taps). Not referenced in this chunk —
/// TODO confirm field semantics at the use sites.
type MixedModeResult = (
    ChannelDspChain,
    f64,
    f64,
    Curve,
    Curve,
    Vec<Biquad>,
    f64,
    Option<f64>,
    Option<Vec<f64>>,
);
/// Log a warning when the optimizer frequency bounds extend beyond the
/// measured data range for a channel.
///
/// A small tolerance of 0.05 decades is allowed on either side before the
/// warning fires, so bounds that merely touch the edges of the measurement
/// stay silent. Does nothing when the curve has no frequency points.
pub(super) fn warn_if_optimizer_bounds_exceed_data(
    channel_name: &str,
    curve: &Curve,
    opt: &super::types::OptimizerConfig,
) {
    // Nothing to compare against when the measurement is empty.
    let (Some(&data_min), Some(&data_max)) = (curve.freq.first(), curve.freq.last()) else {
        return;
    };
    // Tolerance of 0.05 decades (~12%) on either side of the data range.
    const LOG_MARGIN: f64 = 0.05;
    let min_tol = data_min * 10_f64.powf(-LOG_MARGIN);
    let max_tol = data_max * 10_f64.powf(LOG_MARGIN);
    if opt.min_freq < min_tol {
        warn!(
            "Channel '{}': optimizer.min_freq={:.1} Hz is below measurement minimum {:.1} Hz. \
             Filters in [{:.1} .. {:.1}] Hz will have no data to correct and will be ignored.",
            channel_name, opt.min_freq, data_min, opt.min_freq, data_min,
        );
    }
    if opt.max_freq > max_tol {
        warn!(
            "Channel '{}': optimizer.max_freq={:.1} Hz is above measurement maximum {:.1} Hz. \
             Filters in [{:.1} .. {:.1}] Hz will have no data to correct and will be ignored.",
            channel_name, opt.max_freq, data_max, data_max, opt.max_freq,
        );
    }
}
/// Detect the usable passband of a measurement and the mean SPL inside it.
///
/// The curve is smoothed to 1/1 octave, a reference level is computed over the
/// whole range, and the passband is taken as the region within 10 dB of that
/// reference. Band edges falling between samples are linearly interpolated.
///
/// Returns `(Some((f_low, f_high)), mean)` where `mean` averages the
/// *unsmoothed* SPL over the detected band, or `(None, 0.0)` when the curve is
/// too short, the reference level is non-finite or implausibly low, or no
/// multi-sample band exists.
pub(super) fn detect_passband_and_mean(curve: &Curve) -> (Option<(f64, f64)>, f64) {
    // The analysis helpers operate on f32 slices.
    let freqs_f32: Vec<f32> = curve.freq.iter().map(|&f| f as f32).collect();
    let spl_f32: Vec<f32> = curve.spl.iter().map(|&s| s as f32).collect();
    if spl_f32.len() < 2 {
        return (None, 0.0);
    }
    // Smooth to 1/1 octave so narrow dips don't truncate the detected band.
    // NOTE(review): indexing below assumes smoothing preserves the sample
    // count so `freqs_f32` and `smoothed_spl` stay aligned — confirm.
    let smoothed = crate::read::smooth_one_over_n_octave(curve, 1);
    let smoothed_spl: Vec<f32> = smoothed.spl.iter().map(|&s| s as f32).collect();
    // Reference level over the full range (no band restriction).
    let ref_level = compute_average_response(&freqs_f32, &smoothed_spl, None);
    if !ref_level.is_finite() || ref_level < -100.0 {
        return (None, 0.0);
    }
    // Passband = everything within 10 dB of the reference level.
    let threshold = ref_level - 10.0;
    let first_above = smoothed_spl.iter().position(|&v| v >= threshold);
    let last_above = smoothed_spl.iter().rposition(|&v| v >= threshold);
    let (start_idx, end_idx) = match (first_above, last_above) {
        (Some(s), Some(e)) if e > s => (s, e),
        // Nothing above threshold, or a single-sample band: no usable passband.
        _ => return (None, 0.0),
    };
    // Interpolate the exact low-edge crossing when it lies between samples.
    let f_low = if start_idx > 0 {
        interp_threshold_crossing(
            freqs_f32[start_idx - 1],
            freqs_f32[start_idx],
            smoothed_spl[start_idx - 1],
            smoothed_spl[start_idx],
            threshold,
        )
    } else {
        freqs_f32[start_idx]
    };
    // Likewise for the high edge.
    let f_high = if end_idx + 1 < smoothed_spl.len() {
        interp_threshold_crossing(
            freqs_f32[end_idx],
            freqs_f32[end_idx + 1],
            smoothed_spl[end_idx],
            smoothed_spl[end_idx + 1],
            threshold,
        )
    } else {
        freqs_f32[end_idx]
    };
    let norm_range_f32 = Some((f_low, f_high));
    // Mean of the raw (unsmoothed) response, restricted to the passband.
    let mean = compute_average_response(&freqs_f32, &spl_f32, norm_range_f32) as f64;
    (Some((f_low as f64, f_high as f64)), mean)
}
/// Linearly interpolate the frequency at which a magnitude segment crosses
/// `threshold`, given the two bracketing samples `(f0, m0)` and `(f1, m1)`.
///
/// Returns `f0` when the segment is (near-)flat, and clamps the interpolation
/// parameter to `[0, 1]` so the result always lies within `[f0, f1]`.
fn interp_threshold_crossing(f0: f32, f1: f32, m0: f32, m1: f32, threshold: f32) -> f32 {
    let slope = m1 - m0;
    if slope.abs() < 1e-9 {
        // Degenerate (flat) segment: no well-defined crossing, use the left edge.
        f0
    } else {
        let frac = ((threshold - m0) / slope).clamp(0.0, 1.0);
        f0 + frac * (f1 - f0)
    }
}
/// Channel-to-channel level difference (dB) above which a warning should be
/// logged. Not referenced in this chunk — presumably used by level-matching
/// code elsewhere in the module; TODO confirm.
const LEVEL_DIFFERENCE_WARNING_THRESHOLD: f64 = 6.0;
/// Spread in channel arrival times (ms) above which the time-alignment step
/// warns about possible measurement issues or very different speaker distances.
const ARRIVAL_TIME_WARNING_THRESHOLD_MS: f64 = 50.0;
/// Heuristically decide whether `channel_name` refers to a subwoofer channel.
///
/// When a system config with a subwoofer section is present, it drives the
/// decision: "lfe" (case-insensitive) is always a subwoofer, and any channel
/// whose measurement key appears in the subwoofer mapping is one too. In every
/// other case a name heuristic is used: "lfe", or any name starting with "sub".
fn is_subwoofer_channel(config: &RoomConfig, channel_name: &str) -> bool {
    if let Some(sys) = &config.system
        && let Some(subs) = &sys.subwoofers
    {
        if channel_name.eq_ignore_ascii_case("lfe") {
            return true;
        }
        if let Some(measurement_key) = sys.speakers.get(channel_name) {
            return subs.mapping.contains_key(measurement_key);
        }
    }
    // Name-based fallback. `starts_with("sub")` already covers the exact name
    // "sub", so the previous separate equality check was redundant.
    let lower = channel_name.to_lowercase();
    lower == "lfe" || lower.starts_with("sub")
}
/// Build `(subwoofer_role, main_role)` pairings for bass-management.
///
/// With a system config, pairings come from the subwoofer mapping: each mapped
/// subwoofer measurement is resolved back to its output role and paired with
/// the configured main channel (unmapped measurements are logged and skipped).
/// Without a system config, the first curve whose name looks like a subwoofer
/// is paired with every non-subwoofer channel.
fn find_sub_main_pairings(
    config: &RoomConfig,
    curves: &HashMap<String, crate::Curve>,
) -> Vec<(String, String)> {
    let mut pairings = Vec::new();
    if let Some(sys) = &config.system {
        if let Some(subs) = &sys.subwoofers {
            // Invert role -> measurement-key so each measured subwoofer can be
            // resolved back to the output role it belongs to.
            let meas_to_role: HashMap<&String, &String> =
                sys.speakers.iter().map(|(role, meas)| (meas, role)).collect();
            for (sub_meas_key, main_role) in &subs.mapping {
                match meas_to_role.get(sub_meas_key) {
                    Some(sub_role) => {
                        pairings.push((sub_role.to_string(), main_role.clone()));
                    }
                    None => warn!(
                        "Subwoofer measurement '{}' not mapped to any output channel",
                        sub_meas_key
                    ),
                }
            }
        }
        return pairings;
    }
    // No system config: fall back to name heuristics over the measured curves.
    if let Some(sub_name) = curves
        .keys()
        .find(|name| is_subwoofer_channel(config, name))
        .cloned()
    {
        pairings.extend(
            curves
                .keys()
                .filter(|name| **name != sub_name && !is_subwoofer_channel(config, name))
                .map(|main| (sub_name.clone(), main.clone())),
        );
    }
    pairings
}
/// Verdict returned by progress callbacks: keep running or abort the run.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CallbackAction {
    /// Keep processing.
    Continue,
    /// Stop the optimization as soon as possible.
    Stop,
}
/// Snapshot of room-optimization progress delivered to user callbacks.
#[derive(Debug, Clone)]
pub struct RoomOptimizationProgress {
    /// Name of the channel currently being processed (empty for global steps).
    pub current_speaker: String,
    /// Zero-based index of the current channel.
    pub speaker_index: usize,
    /// Total number of channels being optimized.
    pub total_speakers: usize,
    /// Current optimizer iteration within this channel (0 when not iterating).
    pub iteration: usize,
    /// Iteration budget for this channel (0 when unknown).
    pub max_iterations: usize,
    /// Latest loss reported by the optimizer (0.0 when not available).
    pub loss: f64,
    /// Overall pipeline progress in [0.0, 1.0].
    pub overall_progress: f64,
    /// Optional human-readable status message.
    pub message: Option<String>,
    /// Optional EPA preference score from the optimizer — TODO confirm semantics.
    pub epa_preference: Option<f64>,
}
impl From<&PipelineEvent> for RoomOptimizationProgress {
fn from(event: &PipelineEvent) -> Self {
Self {
current_speaker: event.channel.clone().unwrap_or_default(),
speaker_index: event.channel_index.unwrap_or(0),
total_speakers: event.total_channels.unwrap_or(0),
iteration: event.iteration.unwrap_or(0),
max_iterations: event.max_iterations.unwrap_or(0),
loss: event.loss.unwrap_or(0.0),
overall_progress: event.overall_progress,
message: event.message.clone(),
epa_preference: event.epa_preference,
}
}
}
/// Progress callback for whole-room optimization; returning
/// [`CallbackAction::Stop`] aborts the run.
pub type RoomOptimizationCallback =
    Box<dyn FnMut(&RoomOptimizationProgress) -> CallbackAction + Send>;
/// Identical signature to [`RoomOptimizationCallback`]; kept as a separate
/// alias for single-speaker optimization APIs — TODO confirm use sites.
pub type SpeakerOptimizationCallback =
    Box<dyn FnMut(&RoomOptimizationProgress) -> CallbackAction + Send>;
/// Per-channel outcome of room optimization.
#[derive(Debug, Clone)]
pub struct ChannelOptimizationResult {
    /// Channel (role) name.
    pub name: String,
    /// Score before correction.
    pub pre_score: f64,
    /// Score after correction.
    pub post_score: f64,
    /// Measured response before EQ.
    pub initial_curve: Curve,
    /// Response after applying the optimized correction.
    pub final_curve: Curve,
    /// Optimized IIR filter sections.
    pub biquads: Vec<Biquad>,
    /// FIR filter taps, when FIR generation ran for this channel.
    pub fir_coeffs: Option<Vec<f64>>,
}
/// Aggregate result of optimizing every channel in a room configuration.
#[derive(Debug, Clone)]
pub struct RoomOptimizationResult {
    /// Final DSP chain per output channel.
    pub channels: HashMap<String, ChannelDspChain>,
    /// Detailed per-channel optimization outcome.
    pub channel_results: HashMap<String, ChannelOptimizationResult>,
    /// Combined score across channels before correction.
    pub combined_pre_score: f64,
    /// Combined score across channels after correction.
    pub combined_post_score: f64,
    /// Metadata describing how this result was produced.
    pub metadata: OptimizationMetadata,
}
impl RoomOptimizationResult {
    /// Convert this result into the serializable DSP-chain output format.
    ///
    /// Clones the channel map and metadata, so it is safe to call repeatedly
    /// but not free for large chains.
    pub fn to_dsp_chain_output(&self) -> DspChainOutput {
        output::create_dsp_chain_output(self.channels.clone(), Some(self.metadata.clone()))
    }
}
/// Outcome of optimizing a single speaker, without the room-level aggregates.
#[derive(Debug, Clone)]
pub struct SpeakerOptimizationResult {
    /// DSP chain built for this speaker.
    pub chain: ChannelDspChain,
    /// Score before correction.
    pub pre_score: f64,
    /// Score after correction.
    pub post_score: f64,
    /// Measured response before EQ.
    pub initial_curve: Curve,
    /// Response after applying the optimized correction.
    pub final_curve: Curve,
    /// Optimized IIR filter sections.
    pub biquads: Vec<Biquad>,
    /// FIR filter taps, when FIR generation ran for this speaker.
    pub fir_coeffs: Option<Vec<f64>>,
}
/// Optimize an entire room described by `config`.
///
/// Thin wrapper over [`RoomPipeline`]: builds a request with no probe-arrival
/// overrides and runs it, adapting the optional progress `callback` into a
/// pipeline observer. `output_dir` receives generated artifacts (FIR files,
/// reports) when provided.
///
/// # Errors
/// Propagates any pipeline error, including `OptimizationFailed` when the
/// callback requests a stop.
pub fn optimize_room(
    config: &RoomConfig,
    sample_rate: f64,
    callback: Option<RoomOptimizationCallback>,
    output_dir: Option<&Path>,
) -> Result<RoomOptimizationResult> {
    RoomPipeline::new(RoomPipelineRequest {
        config,
        sample_rate,
        output_dir,
        probe_arrival_overrides: None,
    })
    .run(callback.map(callback_pipeline_observer))
}
/// Like [`optimize_room`], but with externally measured arrival times.
///
/// `probe_arrival_ms` maps channel names to arrival times in milliseconds that
/// override the pipeline's own arrival estimation.
///
/// # Errors
/// Propagates any pipeline error, including `OptimizationFailed` when the
/// callback requests a stop.
pub fn optimize_room_with_probe_arrivals(
    config: &RoomConfig,
    sample_rate: f64,
    callback: Option<RoomOptimizationCallback>,
    output_dir: Option<&Path>,
    probe_arrival_ms: &HashMap<String, f64>,
) -> Result<RoomOptimizationResult> {
    RoomPipeline::new(RoomPipelineRequest {
        config,
        sample_rate,
        output_dir,
        probe_arrival_overrides: Some(probe_arrival_ms),
    })
    .run(callback.map(callback_pipeline_observer))
}
/// Pipeline-facing entry point: unpacks a [`RoomPipelineRequest`] and forwards
/// its fields to the module-private `optimize_room_impl`.
pub(super) fn optimize_room_pipeline_impl(
    request: RoomPipelineRequest<'_>,
    observer: Option<Box<dyn PipelineObserver>>,
) -> Result<RoomOptimizationResult> {
    optimize_room_impl(
        request.config,
        request.sample_rate,
        request.output_dir,
        request.probe_arrival_overrides,
        observer,
    )
}
/// Wrap a user progress callback in a [`PipelineObserver`] adapter.
fn callback_pipeline_observer(callback: RoomOptimizationCallback) -> Box<dyn PipelineObserver> {
    Box::new(RoomOptimizationCallbackObserver { callback })
}
/// Adapter that forwards pipeline events to a user-supplied progress callback.
struct RoomOptimizationCallbackObserver {
    // The wrapped user callback; invoked once per pipeline event.
    callback: RoomOptimizationCallback,
}
impl PipelineObserver for RoomOptimizationCallbackObserver {
fn on_event(&mut self, event: &PipelineEvent) -> PipelineControl {
let progress = RoomOptimizationProgress::from(event);
match (self.callback)(&progress) {
CallbackAction::Continue => PipelineControl::Continue,
CallbackAction::Stop => PipelineControl::Stop,
}
}
}
/// Observer handle shared across pipeline stages: the `Option` allows running
/// without an observer, and the `Arc<Mutex<…>>` lets optimizer callbacks emit
/// events from wherever they run.
type SharedPipelineObserver = Arc<Mutex<Option<Box<dyn PipelineObserver>>>>;
/// Build the error reported when an observer aborts the run at `step_id`.
fn pipeline_stopped_error(step_id: PipelineStepId) -> AutoeqError {
    let message = format!("Room optimization stopped by observer during {:?}", step_id);
    AutoeqError::OptimizationFailed { message }
}
/// Deliver `event` to the observer (if any), converting an observer-requested
/// stop into an `OptimizationFailed` error naming the interrupted step.
///
/// NOTE(review): `lock().unwrap()` panics if a previous lock holder panicked
/// (mutex poisoning); presumably acceptable since a poisoned observer means
/// the run is already broken — confirm.
fn emit_pipeline_event(observer: &SharedPipelineObserver, event: PipelineEvent) -> Result<()> {
    // Copy the id out first so the error path can name the step.
    let step_id = event.step_id;
    let mut guard = observer.lock().unwrap();
    if let Some(observer) = guard.as_mut()
        && observer.on_event(&event) == PipelineControl::Stop
    {
        return Err(pipeline_stopped_error(step_id));
    }
    Ok(())
}
/// Convert a progress snapshot back into a [`PipelineEvent`].
///
/// Only meaningful fields are attached: empty speaker names, zero channel
/// counts, zero iteration budgets, zero losses, and absent messages are
/// treated as "not set" and omitted from the event.
fn progress_event(
    step_id: PipelineStepId,
    status: PipelineStepStatus,
    progress: &RoomOptimizationProgress,
) -> PipelineEvent {
    let event = PipelineEvent::new(step_id, status)
        .with_overall_progress(progress.overall_progress)
        .with_epa_preference(progress.epa_preference);
    let event = if progress.current_speaker.is_empty() {
        event
    } else {
        event.with_channel(progress.current_speaker.clone())
    };
    let event = if progress.total_speakers > 0 {
        event.with_channels(progress.speaker_index, progress.total_speakers)
    } else {
        event
    };
    let event = if progress.max_iterations > 0 || progress.iteration > 0 {
        event.with_iteration(progress.iteration, progress.max_iterations)
    } else {
        event
    };
    let event = if progress.loss != 0.0 {
        event.with_loss(progress.loss)
    } else {
        event
    };
    match &progress.message {
        Some(message) => event.with_message(message.clone()),
        None => event,
    }
}
fn sanity_check_result(result: &RoomOptimizationResult) -> Result<()> {
for (name, ch) in &result.channel_results {
if ch.initial_curve.freq.len() != ch.initial_curve.spl.len() {
let msg = format!(
"channel '{}': initial_curve freq/spl length mismatch ({} vs {})",
name,
ch.initial_curve.freq.len(),
ch.initial_curve.spl.len()
);
debug_assert!(false, "{}", msg);
return Err(AutoeqError::OptimizationFailed { message: msg });
}
if ch.final_curve.freq.len() != ch.final_curve.spl.len() {
let msg = format!(
"channel '{}': final_curve freq/spl length mismatch ({} vs {})",
name,
ch.final_curve.freq.len(),
ch.final_curve.spl.len()
);
debug_assert!(false, "{}", msg);
return Err(AutoeqError::OptimizationFailed { message: msg });
}
if let Some((i, v)) = ch
.final_curve
.spl
.iter()
.enumerate()
.find(|(_, v)| !v.is_finite())
{
let msg = format!(
"channel '{}': final_curve.spl[{}]={} is non-finite (optimiser diverged)",
name, i, v
);
debug_assert!(false, "{}", msg);
return Err(AutoeqError::OptimizationFailed { message: msg });
}
}
Ok(())
}
fn optimize_room_impl(
config: &RoomConfig,
sample_rate: f64,
output_dir: Option<&Path>,
probe_arrival_overrides: Option<&HashMap<String, f64>>,
observer: Option<Box<dyn PipelineObserver>>,
) -> Result<RoomOptimizationResult> {
let observer_shared: SharedPipelineObserver = Arc::new(Mutex::new(observer));
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(
PipelineStepId::ConfigPreparation,
"Preparing room optimization configuration",
),
)?;
let mut config = config.clone();
if config
.optimizer
.cea2034_correction
.as_ref()
.is_some_and(|c| c.enabled)
{
let cache = super::cea2034_correction::pre_fetch_all_cea2034(&config);
if !cache.is_empty() {
info!(
" CEA2034 cache: loaded data for {} speaker(s)",
cache.len()
);
config.cea2034_cache = Some(cache);
}
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::ConfigPreparation,
"Room optimization configuration prepared",
),
)?;
let config = &config;
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(PipelineStepId::Validation, "Validating room configuration"),
)?;
let validation = validate_room_config(config);
validation.print_results();
if !validation.is_valid {
return Err(AutoeqError::OptimizationFailed {
message: format!(
"Configuration validation failed with {} errors",
validation.errors.len()
),
});
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::Validation, "Room configuration validated"),
)?;
fn send_progress(
observer: &SharedPipelineObserver,
step_id: PipelineStepId,
status: PipelineStepStatus,
progress: &RoomOptimizationProgress,
) -> Result<()> {
emit_pipeline_event(observer, progress_event(step_id, status, progress))
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(
PipelineStepId::TopologyRouteSelection,
"Selecting Room EQ topology route",
),
)?;
if let Some(sys) = &config.system {
let has_group = sys
.speakers
.values()
.any(|key| matches!(config.speakers.get(key), Some(SpeakerConfig::Group(_))));
if !has_group {
let workflow_name = match sys.model {
SystemModel::Stereo => {
if sys.subwoofers.is_some() {
"Stereo 2.1"
} else {
"Stereo 2.0"
}
}
SystemModel::HomeCinema => "Home Cinema",
SystemModel::Custom => "Custom",
};
if sys.model != SystemModel::Custom {
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::TopologyRouteSelection,
format!("Selected {} workflow", workflow_name),
),
)?;
send_progress(
&observer_shared,
PipelineStepId::TopologyWorkflowExecution,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: String::new(),
speaker_index: 0,
total_speakers: sys.speakers.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.0,
message: Some(format!(
"Starting {} workflow ({} channels)",
workflow_name,
sys.speakers.len()
)),
epa_preference: None,
},
)?;
}
let workflow_result = match sys.model {
SystemModel::Stereo => {
if sys.subwoofers.is_some() {
Some(super::workflows::optimize_stereo_2_1(
config,
sys,
sample_rate,
output_dir.unwrap_or(Path::new(".")),
))
} else {
Some(super::workflows::optimize_stereo_2_0(
config,
sys,
sample_rate,
output_dir.unwrap_or(Path::new(".")),
))
}
}
SystemModel::HomeCinema => Some(super::workflows::optimize_home_cinema(
config,
sys,
sample_rate,
output_dir.unwrap_or(Path::new(".")),
)),
SystemModel::Custom => None, };
if let Some(result) = workflow_result {
let mut result = result?;
let mut workflow_refresh_needed = false;
let summary: Vec<String> = result
.channel_results
.iter()
.map(|(name, ch)| {
format!(" {}: {:.4} -> {:.4}", name, ch.pre_score, ch.post_score)
})
.collect();
send_progress(
&observer_shared,
PipelineStepId::TopologyWorkflowExecution,
PipelineStepStatus::Completed,
&RoomOptimizationProgress {
current_speaker: String::new(),
speaker_index: result.channel_results.len(),
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: result.combined_post_score,
overall_progress: 1.0,
message: Some(format!(
"{} workflow complete:\n{}",
workflow_name,
summary.join("\n")
)),
epa_preference: None,
},
)?;
if matches!(
config.optimizer.processing_mode,
ProcessingMode::PhaseLinear | ProcessingMode::Hybrid
) {
send_progress(
&observer_shared,
PipelineStepId::FirGeneration,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "FIR generation".to_string(),
speaker_index: 0,
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.95,
message: Some("Generating FIR coefficients...".to_string()),
epa_preference: None,
},
)?;
let out_dir = output_dir.unwrap_or(Path::new("."));
let names: Vec<String> = result.channel_results.keys().cloned().collect();
for name in names {
let generated = if let Some(ch) = result.channel_results.get_mut(&name) {
if ch.fir_coeffs.is_some() {
None
} else {
ch.fir_coeffs = post_generate_fir(
&name,
&ch.initial_curve,
&ch.final_curve,
&config.optimizer,
config.target_curve.as_ref(),
sample_rate,
Some(out_dir),
);
ch.fir_coeffs.clone()
}
} else {
None
};
let Some(coeffs) = generated else {
continue;
};
if let Some(chain) = result.channels.get_mut(&name) {
let filename = format!("{}_fir.wav", name);
chain
.plugins
.push(super::output::create_convolution_plugin(&filename));
}
sync_reported_fir_adjustment(
&name,
&mut result.channel_results,
&mut result.channels,
&coeffs,
sample_rate,
);
workflow_refresh_needed = true;
}
}
if config.optimizer.processing_mode == ProcessingMode::MixedPhase {
send_progress(
&observer_shared,
PipelineStepId::MixedPhaseFirGeneration,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Mixed-phase FIR".to_string(),
speaker_index: 0,
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.95,
message: Some("Generating mixed-phase FIR...".to_string()),
epa_preference: None,
},
)?;
let out_dir = output_dir.unwrap_or(Path::new("."));
let names: Vec<String> = result.channel_results.keys().cloned().collect();
for name in names {
let generated = if let Some(ch) = result.channel_results.get_mut(&name) {
if ch.fir_coeffs.is_some() {
None
} else {
ch.fir_coeffs = post_generate_mixed_phase_fir(
&name,
&ch.initial_curve,
&config.optimizer,
sample_rate,
Some(out_dir),
);
ch.fir_coeffs.clone()
}
} else {
None
};
let Some(coeffs) = generated else {
continue;
};
if let Some(chain) = result.channels.get_mut(&name) {
let filename = format!("{}_excess_phase_fir.wav", name);
chain
.plugins
.push(super::output::create_convolution_plugin(&filename));
}
sync_reported_fir_adjustment(
&name,
&mut result.channel_results,
&mut result.channels,
&coeffs,
sample_rate,
);
workflow_refresh_needed = true;
}
}
if config.optimizer.phase_correction.is_some() {
send_progress(
&observer_shared,
PipelineStepId::PhaseCorrection,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Phase correction".to_string(),
speaker_index: 0,
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.96,
message: Some("Phase correction...".to_string()),
epa_preference: None,
},
)?;
}
if let Some(ref pc_config) = config.optimizer.phase_correction {
let out_dir = output_dir.unwrap_or(Path::new("."));
let names: Vec<String> = result.channel_results.keys().cloned().collect();
for name in &names {
if let Some(ch) = result.channel_results.get_mut(name)
&& let Some(chain) = result.channels.get_mut(name)
{
let before_plugins = chain.plugins.len();
apply_phase_correction(
name,
ch,
chain,
pc_config,
sample_rate,
Some(out_dir),
);
workflow_refresh_needed |= chain.plugins.len() != before_plugins;
}
}
}
send_progress(
&observer_shared,
PipelineStepId::ImpulseResponseComputation,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "IR computation".to_string(),
speaker_index: 0,
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.97,
message: Some("Computing impulse responses...".to_string()),
epa_preference: None,
},
)?;
for (channel_name, ch_result) in &result.channel_results {
let delay_ms = result
.channels
.get(channel_name)
.map(total_chain_delay_ms)
.unwrap_or(0.0);
if let Some((pre_ir, post_ir)) =
super::ir_waveform::compute_channel_ir_waveforms(
&ch_result.initial_curve,
&ch_result.biquads,
ch_result.fir_coeffs.as_deref(),
delay_ms,
sample_rate,
)
&& let Some(chain) = result.channels.get_mut(channel_name)
{
chain.pre_ir = Some(pre_ir);
chain.post_ir = Some(post_ir);
}
}
if result.channel_results.len() > 1 {
send_progress(
&observer_shared,
PipelineStepId::ChannelMatching,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Channel matching".to_string(),
speaker_index: 0,
total_speakers: result.channel_results.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.98,
message: Some("Channel matching analysis...".to_string()),
epa_preference: None,
},
)?;
let plugin_count_before_icd: usize = result
.channels
.values()
.map(|chain| chain.plugins.len())
.sum();
compute_and_correct_icd(&mut result, config, sample_rate);
let plugin_count_after_icd: usize = result
.channels
.values()
.map(|chain| chain.plugins.len())
.sum();
workflow_refresh_needed |= plugin_count_after_icd != plugin_count_before_icd;
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(PipelineStepId::MetadataRefresh, "Refreshing reports"),
)?;
if workflow_refresh_needed {
refresh_final_reports(&mut result, config, sample_rate);
}
update_perceptual_metrics(
&mut result.metadata,
Some(&result.channels),
Some(config),
);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::MetadataRefresh, "Reports refreshed"),
)?;
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(
PipelineStepId::SanityCheck,
"Checking final optimization result",
),
)?;
sanity_check_result(&result)?;
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::SanityCheck,
"Final optimization result checked",
),
)?;
return Ok(result);
}
}
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::TopologyRouteSelection,
"Selected generic channel optimization",
),
)?;
let channels_to_process: Vec<(String, SpeakerConfig)> = if let Some(sys) = &config.system {
info!("Using SystemConfig for channel mapping");
sys.speakers
.iter()
.filter_map(|(role, key)| match config.speakers.get(key) {
Some(cfg) => Some((role.clone(), cfg.clone())),
None => {
warn!(
"System config references missing speaker key '{}' for role '{}'",
key, role
);
None
}
})
.collect()
} else {
config
.speakers
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect()
};
let total_speakers = channels_to_process.len();
info!("Processing {} channels", total_speakers);
let shared_mean_spl: Option<f64> = if total_speakers > 1 {
let min_freq = config.optimizer.min_freq;
let max_freq = config.optimizer.max_freq;
let mut channel_means: Vec<f64> = Vec::new();
let mut excluded_group_count = 0_usize;
for (_name, speaker_config) in &channels_to_process {
if let SpeakerConfig::Single(source) = speaker_config
&& let Ok(curve) = crate::read::load_source(source)
{
let freqs_f32: Vec<f32> = curve.freq.iter().map(|&f| f as f32).collect();
let spl_f32: Vec<f32> = curve.spl.iter().map(|&s| s as f32).collect();
let mean = compute_average_response(
&freqs_f32,
&spl_f32,
Some((min_freq as f32, max_freq as f32)),
) as f64;
channel_means.push(mean);
} else if !matches!(speaker_config, SpeakerConfig::Single(_)) {
excluded_group_count += 1;
}
}
if excluded_group_count > 0 {
info!(
"Shared mean pre-pass: {} non-Single speaker(s) excluded (Group/MultiSub/DBA/Cardioid)",
excluded_group_count
);
}
if channel_means.len() > 1 {
let avg = channel_means.iter().sum::<f64>() / channel_means.len() as f64;
info!(
"Shared target level: {:.1} dB (average of {} channels)",
avg,
channel_means.len()
);
Some(avg)
} else {
None
}
} else {
None
};
send_progress(
&observer_shared,
PipelineStepId::GenericChannelOptimization,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: String::new(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.0,
message: Some(format!(
"Starting optimization for {} channels",
total_speakers
)),
epa_preference: None,
},
)?;
let params_per_filter = match config.optimizer.peq_model.as_str() {
"free" | "ls-pk-hs" => 4,
_ => 3,
};
let n_params = config.optimizer.num_filters * params_per_filter;
let n_free = n_params.max(1); let desired_pop = config
.optimizer
.population
.max(1)
.min(config.optimizer.max_iter.max(1));
let pop_multiplier = desired_pop.div_ceil(n_free).max(4);
let population_size = pop_multiplier * n_free;
const DE_GENERATIONS_FLOOR: usize = 5000;
let computed_generations =
config.optimizer.max_iter.saturating_sub(population_size) / population_size;
let budget_supports_floor =
config.optimizer.max_iter >= DE_GENERATIONS_FLOOR.saturating_mul(population_size);
let max_iterations = if budget_supports_floor {
computed_generations.max(DE_GENERATIONS_FLOOR)
} else {
let capped = computed_generations.max(1);
if config.optimizer.max_iter > 0 && capped < DE_GENERATIONS_FLOOR {
warn!(
"DE budget: max_iter={} with population_size={} is below the {} generation floor × pop. \
Running {} generations — expect degraded convergence. Raise max_iter to {} to regain the floor.",
config.optimizer.max_iter,
population_size,
DE_GENERATIONS_FLOOR,
capped,
DE_GENERATIONS_FLOOR.saturating_mul(population_size),
);
}
capped
};
info!(
"DE budget: {} params, population_size={}, max_generations={} (from max_iter={}, floor={} when budget allows)",
n_params, population_size, max_iterations, config.optimizer.max_iter, DE_GENERATIONS_FLOOR,
);
let mut results: Vec<SpeakerProcessResult> = Vec::with_capacity(total_speakers);
for (speaker_idx, (channel_name, speaker_config)) in channels_to_process.into_iter().enumerate()
{
info!("Processing channel: {}", channel_name);
send_progress(
&observer_shared,
PipelineStepId::GenericChannelOptimization,
PipelineStepStatus::InProgress,
&RoomOptimizationProgress {
current_speaker: channel_name.clone(),
speaker_index: speaker_idx,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: speaker_idx as f64 / total_speakers as f64,
message: Some(format!("Processing channel: {}", channel_name)),
epa_preference: None,
},
)?;
let eq_callback: Option<crate::optim::OptimProgressCallback> = {
let observer = Arc::clone(&observer_shared);
let name = channel_name.clone();
let si = speaker_idx;
let ts = total_speakers;
let mi = max_iterations;
Some(Box::new(move |iter: usize, loss: f64, epa: Option<f64>| {
let base_progress = si as f64 / ts as f64;
let speaker_progress = if mi > 0 { iter as f64 / mi as f64 } else { 0.0 };
let overall = (base_progress + speaker_progress / ts as f64).min(1.0);
match send_progress(
&observer,
PipelineStepId::GenericChannelOptimization,
PipelineStepStatus::InProgress,
&RoomOptimizationProgress {
current_speaker: name.clone(),
speaker_index: si,
total_speakers: ts,
iteration: iter,
max_iterations: mi,
loss,
overall_progress: overall,
message: None,
epa_preference: epa,
},
) {
Ok(()) => crate::de::CallbackAction::Continue,
Err(_) => crate::de::CallbackAction::Stop,
}
}))
};
let result = process_speaker_internal(
&channel_name,
&speaker_config,
config,
sample_rate,
output_dir,
eq_callback,
shared_mean_spl,
probe_arrival_overrides,
);
match result {
Ok((
chain,
pre_score,
post_score,
initial_curve,
final_curve,
biquads,
mean_spl,
arrival_time_ms,
fir_coeffs,
)) => {
send_progress(
&observer_shared,
PipelineStepId::GenericChannelOptimization,
PipelineStepStatus::InProgress,
&RoomOptimizationProgress {
current_speaker: channel_name.clone(),
speaker_index: speaker_idx,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: post_score,
overall_progress: (speaker_idx + 1) as f64 / total_speakers as f64,
message: Some(format!(
"Channel {}: {:.4} -> {:.4}",
channel_name, pre_score, post_score
)),
epa_preference: None,
},
)?;
results.push(Ok((
channel_name,
chain,
pre_score,
post_score,
initial_curve,
final_curve,
biquads,
mean_spl,
arrival_time_ms,
fir_coeffs,
)));
}
Err(e) => {
results.push(Err(e));
}
}
}
send_progress(
&observer_shared,
PipelineStepId::GenericChannelOptimization,
PipelineStepStatus::Completed,
&RoomOptimizationProgress {
current_speaker: String::new(),
speaker_index: total_speakers,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.90,
message: Some(format!("Optimized {} channels", total_speakers)),
epa_preference: None,
},
)?;
let mut channel_chains: HashMap<String, ChannelDspChain> = HashMap::new();
let mut channel_results: HashMap<String, ChannelOptimizationResult> = HashMap::new();
let mut pre_scores: Vec<f64> = Vec::new();
let mut post_scores: Vec<f64> = Vec::new();
let mut curves: HashMap<String, crate::Curve> = HashMap::new();
let mut channel_means: HashMap<String, f64> = HashMap::new();
let mut channel_arrivals: HashMap<String, f64> = HashMap::new();
for res in results {
let (
channel_name,
chain,
pre_score,
post_score,
initial_curve,
final_curve,
biquads,
mean_spl,
arrival_time_ms,
fir_coeffs,
) = res?;
channel_chains.insert(channel_name.clone(), chain);
curves.insert(channel_name.clone(), final_curve.clone());
pre_scores.push(pre_score);
post_scores.push(post_score);
channel_means.insert(channel_name.clone(), mean_spl);
if let Some(arrival_ms) = arrival_time_ms {
channel_arrivals.insert(channel_name.clone(), arrival_ms);
}
let mut post_generated_fir: Option<Vec<f64>> = None;
let fir_coeffs = if fir_coeffs.is_none()
&& !matches!(
config.optimizer.processing_mode,
ProcessingMode::LowLatency | ProcessingMode::MixedPhase
) {
send_progress(
&observer_shared,
PipelineStepId::FirGeneration,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: format!("FIR: {}", channel_name),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.95,
message: Some(format!(
"Generating FIR coefficients for {}...",
channel_name
)),
epa_preference: None,
},
)?;
let generated = post_generate_fir(
&channel_name,
&initial_curve,
&final_curve,
&config.optimizer,
config.target_curve.as_ref(),
sample_rate,
output_dir,
);
post_generated_fir = generated.clone();
generated
} else {
fir_coeffs
};
channel_results.insert(
channel_name.clone(),
ChannelOptimizationResult {
name: channel_name.clone(),
pre_score,
post_score,
initial_curve,
final_curve,
biquads,
fir_coeffs,
},
);
if let Some(coeffs) = post_generated_fir {
if let Some(chain) = channel_chains.get_mut(&channel_name) {
let filename = format!("{}_fir.wav", channel_name);
chain
.plugins
.push(super::output::create_convolution_plugin(&filename));
}
sync_reported_fir_adjustment(
&channel_name,
&mut channel_results,
&mut channel_chains,
&coeffs,
sample_rate,
);
}
}
curves = collect_current_final_curves(&channel_results);
let phase_ir_sync = channel_arrivals.is_empty() && channel_results.len() > 1;
if phase_ir_sync {
for (channel_name, result) in &channel_results {
if let Some(arrival_ms) =
super::time_align::estimate_arrival_from_phase(&result.initial_curve, 200.0, 2000.0)
{
channel_arrivals.insert(channel_name.clone(), arrival_ms);
}
}
if channel_arrivals.len() > 1 {
info!(
"Auto IR sync: phase-estimated arrival times for {} channels",
channel_arrivals.len()
);
for (name, arrival) in &channel_arrivals {
info!(
" Channel '{}': phase-estimated arrival = {:.2} ms",
name, arrival
);
}
} else {
channel_arrivals.clear();
}
}
if (config.optimizer.allow_delay() || phase_ir_sync) && channel_arrivals.len() > 1 {
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(PipelineStepId::TimeAlignment, "Aligning channel timing"),
)?;
let arrivals: Vec<f64> = channel_arrivals.values().copied().collect();
let min_arrival = arrivals.iter().cloned().fold(f64::INFINITY, f64::min);
let max_arrival = arrivals.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
let arrival_spread = max_arrival - min_arrival;
if arrival_spread > ARRIVAL_TIME_WARNING_THRESHOLD_MS {
warn!(
"Channel arrival times differ by {:.1} ms (threshold: {:.1} ms). \
This may indicate measurement issues or very different speaker distances.",
arrival_spread, ARRIVAL_TIME_WARNING_THRESHOLD_MS
);
for (name, arrival) in &channel_arrivals {
info!(" Channel '{}': arrival time = {:.2} ms", name, arrival);
}
}
let alignment_delays = super::time_align::calculate_alignment_delays(&channel_arrivals);
for (channel_name, delay_ms) in &alignment_delays {
let applied = if *delay_ms > 0.01
&& let Some(chain) = channel_chains.get_mut(channel_name)
{
chain
.plugins
.insert(0, output::create_delay_plugin(*delay_ms));
true
} else {
false
};
if applied {
sync_reported_phase_adjustment(
channel_name,
&mut channel_results,
&mut channel_chains,
*delay_ms,
false,
);
info!(
" Channel '{}': added {:.3} ms delay for time alignment",
channel_name, delay_ms
);
}
}
curves = collect_current_final_curves(&channel_results);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::TimeAlignment, "Channel timing aligned"),
)?;
} else if channel_arrivals.is_empty() && config.speakers.len() > 1 {
info!("No arrival time data (WAV or phase) available for time alignment. Skipping.");
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::TimeAlignment,
"No arrival time data available for time alignment",
),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(PipelineStepId::TimeAlignment, "Time alignment not needed"),
)?;
}
let spectral_curves: HashMap<String, Curve> = curves
.iter()
.filter(|(name, _)| !is_subwoofer_channel(config, name))
.map(|(name, curve)| (name.clone(), curve.clone()))
.collect();
if spectral_curves.len() > 1 {
send_progress(
&observer_shared,
PipelineStepId::SpectralAlignment,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Spectral alignment".to_string(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.92,
message: Some("Spectral channel alignment...".to_string()),
epa_preference: None,
},
)?;
let min_freq = config.optimizer.min_freq;
let max_freq = config.optimizer.max_freq;
let sample_rate = config
.recording_config
.as_ref()
.and_then(|rc| rc.playback_sample_rate)
.unwrap_or(48000) as f64;
let mut post_eq_means: HashMap<String, f64> = HashMap::new();
for (channel_name, final_curve) in &spectral_curves {
let freqs_f32: Vec<f32> = final_curve.freq.iter().map(|&f| f as f32).collect();
let spl_f32: Vec<f32> = final_curve.spl.iter().map(|&s| s as f32).collect();
let post_mean = compute_average_response(
&freqs_f32,
&spl_f32,
Some((min_freq as f32, max_freq as f32)),
) as f64;
post_eq_means.insert(channel_name.clone(), post_mean);
}
let means: Vec<f64> = post_eq_means.values().copied().collect();
let min_mean = means.iter().cloned().fold(f64::INFINITY, f64::min);
let max_mean = means.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
let level_spread = max_mean - min_mean;
info!(
"Post-EQ spectral alignment: level spread = {:.2} dB across {} channels",
level_spread,
post_eq_means.len()
);
for (name, mean) in &post_eq_means {
info!(" Channel '{}': post-EQ mean SPL = {:.1} dB", name, mean);
}
if level_spread > LEVEL_DIFFERENCE_WARNING_THRESHOLD {
warn!(
"Channel levels differ by {:.1} dB (threshold: {:.1} dB). \
This may indicate measurement issues (mic placement, cable problems, etc.).",
level_spread, LEVEL_DIFFERENCE_WARNING_THRESHOLD
);
}
let alignment_results = super::spectral_align::compute_spectral_alignment(
&spectral_curves,
sample_rate,
min_freq,
max_freq,
);
super::spectral_align::log_spectral_alignment(&alignment_results);
for (channel_name, result) in &alignment_results {
let shelf_filters =
super::spectral_align::create_alignment_filters(result, sample_rate);
let (apply_shelves, apply_gain) = if channel_results.contains_key(channel_name) {
let (score_min, score_max) = final_score_band_for_channel(config, channel_name);
let shelves_ok = should_apply_spectral_shelves(
&curves,
channel_name,
&shelf_filters,
sample_rate,
score_min,
score_max,
);
let gain_ok = result.flat_gain_db.abs() >= super::spectral_align::MIN_CORRECTION_DB;
(shelves_ok, gain_ok)
} else {
(false, false)
};
if !apply_shelves && !apply_gain {
continue;
}
if let Some(chain) = channel_chains.get_mut(channel_name) {
let (eq_plugin, gain_plugin) =
super::spectral_align::create_alignment_plugins(result, sample_rate);
if apply_shelves && let Some(eq) = eq_plugin {
chain.plugins.push(eq);
}
if apply_gain && let Some(gain) = gain_plugin {
chain.plugins.push(gain);
}
}
if apply_shelves {
sync_reported_biquad_adjustment(
channel_name,
&mut channel_results,
&mut channel_chains,
&shelf_filters,
sample_rate,
);
}
if apply_gain {
sync_reported_gain_adjustment(
channel_name,
&mut channel_results,
&mut channel_chains,
result.flat_gain_db,
false,
);
}
}
curves = collect_current_final_curves(&channel_results);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::SpectralAlignment,
"Spectral channel alignment complete",
),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::SpectralAlignment,
"Spectral channel alignment not needed",
),
)?;
}
if let Some(vog_config) = &config.optimizer.vog
&& vog_config.enabled
{
send_progress(
&observer_shared,
PipelineStepId::VoiceOfGodAlignment,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Voice of God".to_string(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.93,
message: Some(format!(
"Voice of God alignment (ref: '{}')...",
vog_config.reference_channel
)),
epa_preference: None,
},
)?;
info!(
"Running Voice of God alignment (reference: '{}')...",
vog_config.reference_channel
);
let corrected_curves: HashMap<String, Curve> = channel_results
.iter()
.map(|(name, result)| (name.clone(), result.final_curve.clone()))
.collect();
match super::voice_of_god::compute_voice_of_god(
&corrected_curves,
&vog_config.reference_channel,
sample_rate,
config.optimizer.min_freq,
config.optimizer.max_freq,
) {
Ok(vog_results) => {
for (channel_name, vog_result) in &vog_results {
let plugins = super::voice_of_god::create_vog_plugins(vog_result, sample_rate);
if !plugins.is_empty()
&& let Some(chain) = channel_chains.get_mut(channel_name)
{
for plugin in plugins {
chain.plugins.push(plugin);
}
}
if let Some(alignment) = &vog_result.alignment {
let shelf_filters =
super::spectral_align::create_alignment_filters(alignment, sample_rate);
sync_reported_biquad_adjustment(
channel_name,
&mut channel_results,
&mut channel_chains,
&shelf_filters,
sample_rate,
);
if alignment.flat_gain_db.abs() >= super::spectral_align::MIN_CORRECTION_DB
{
sync_reported_gain_adjustment(
channel_name,
&mut channel_results,
&mut channel_chains,
alignment.flat_gain_db,
false,
);
}
}
}
curves = collect_current_final_curves(&channel_results);
}
Err(e) => {
warn!("Voice of God optimization failed: {}", e);
}
}
}
if config.optimizer.vog.as_ref().is_some_and(|v| v.enabled) {
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::VoiceOfGodAlignment,
"Voice of God alignment complete",
),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::VoiceOfGodAlignment,
"Voice of God alignment not enabled",
),
)?;
}
let mut phase_alignment_results: HashMap<String, (f64, bool, String)> = HashMap::new();
if config.optimizer.allow_delay()
&& let Some(phase_config) = &config.optimizer.phase_alignment
&& phase_config.enabled
{
let pairings = find_sub_main_pairings(config, &curves);
if pairings.is_empty() {
warn!("Phase alignment enabled but no valid sub-main pairings found.");
} else {
info!("Running phase alignment optimization...");
send_progress(
&observer_shared,
PipelineStepId::PhaseAlignment,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: String::new(),
speaker_index: 0,
total_speakers: pairings.len(),
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.0,
message: Some("Running phase alignment...".to_string()),
epa_preference: None,
},
)?;
for (sub_name, main_name) in &pairings {
let sub_curve = match curves.get(sub_name) {
Some(c) => c,
None => {
warn!(
"Subwoofer channel '{}' not found for phase alignment",
sub_name
);
continue;
}
};
if let Some(speaker_curve) = curves.get(main_name) {
if sub_curve.phase.is_some() && speaker_curve.phase.is_some() {
match phase_alignment::optimize_phase_alignment(
sub_curve,
speaker_curve,
phase_config,
) {
Ok(result) => {
info!(
" Phase alignment '{}' with '{}': delay={:.2}ms, invert={}, improvement={:.2}dB",
main_name,
sub_name,
result.delay_ms,
result.invert_polarity,
result.improvement_db
);
phase_alignment_results.insert(
main_name.clone(),
(result.delay_ms, result.invert_polarity, sub_name.clone()),
);
}
Err(e) => {
warn!(" Phase alignment failed for '{}': {}", main_name, e);
}
}
} else {
debug!(
" Skipping phase alignment for '{}': no phase data available",
main_name
);
}
}
}
}
}
for (speaker_name, (delay_ms, invert, sub_name)) in &phase_alignment_results {
if *invert {
let applied = if let Some(chain) = channel_chains.get_mut(speaker_name) {
let invert_plugin = output::create_gain_plugin_with_invert(0.0, true);
chain.plugins.insert(0, invert_plugin);
true
} else {
false
};
if applied {
sync_reported_phase_adjustment(
speaker_name,
&mut channel_results,
&mut channel_chains,
0.0,
true,
);
info!(" Applied polarity inversion to '{}'", speaker_name);
}
}
debug!(
" Phase alignment constraint: delay('{}') - delay('{}') = {:.3} ms",
speaker_name, sub_name, delay_ms
);
}
apply_phase_alignment_delay_schedule(
&phase_alignment_results,
&mut channel_results,
&mut channel_chains,
);
if !phase_alignment_results.is_empty() {
curves = collect_current_final_curves(&channel_results);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::PhaseAlignment, "Phase alignment complete"),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::PhaseAlignment,
"Phase alignment not applied",
),
)?;
}
if config.optimizer.phase_correction.is_some() {
send_progress(
&observer_shared,
PipelineStepId::PhaseCorrection,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Phase correction".to_string(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.96,
message: Some("Phase correction...".to_string()),
epa_preference: None,
},
)?;
}
if let Some(ref pc_config) = config.optimizer.phase_correction {
let names: Vec<String> = channel_results.keys().cloned().collect();
for name in &names {
if let Some(ch) = channel_results.get_mut(name.as_str())
&& let Some(chain) = channel_chains.get_mut(name.as_str())
{
apply_phase_correction(name, ch, chain, pc_config, sample_rate, output_dir);
}
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::PhaseCorrection, "Phase correction complete"),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::PhaseCorrection,
"Phase correction not enabled",
),
)?;
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(
PipelineStepId::GroupDelayOptimization,
"Running GD optimization",
),
)?;
let group_delay_summary = try_run_gd_opt(
config,
&mut channel_results,
&mut channel_chains,
sample_rate,
);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::GroupDelayOptimization,
"GD optimization complete",
),
)?;
send_progress(
&observer_shared,
PipelineStepId::ImpulseResponseComputation,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "IR computation".to_string(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.97,
message: Some("Computing impulse responses...".to_string()),
epa_preference: None,
},
)?;
for (channel_name, result) in &channel_results {
let delay_ms = channel_chains
.get(channel_name)
.map(total_chain_delay_ms)
.unwrap_or(0.0);
if let Some((pre_ir, post_ir)) = super::ir_waveform::compute_channel_ir_waveforms(
&result.initial_curve,
&result.biquads,
result.fir_coeffs.as_deref(),
delay_ms,
sample_rate,
) && let Some(chain) = channel_chains.get_mut(channel_name)
{
chain.pre_ir = Some(pre_ir);
chain.post_ir = Some(post_ir);
}
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::ImpulseResponseComputation,
"Impulse responses computed",
),
)?;
let avg_pre_score = if !pre_scores.is_empty() {
pre_scores.iter().sum::<f64>() / pre_scores.len() as f64
} else {
0.0
};
let avg_post_score = if !post_scores.is_empty() {
post_scores.iter().sum::<f64>() / post_scores.len() as f64
} else {
0.0
};
info!(
"Average pre-score: {:.4}, post-score: {:.4}",
avg_pre_score, avg_post_score
);
let acoustic_groups = identify_acoustic_groups(config);
for (group_name, group_channels) in &acoustic_groups {
if group_channels.len() > 1 {
debug!("Acoustic Group '{}': {:?}", group_name, group_channels);
check_group_consistency(group_name, group_channels, &channel_means, &curves);
}
}
let epa_cfg = config.optimizer.epa_config.clone().unwrap_or_default();
let epa_per_channel = crate::roomeq::output::compute_epa_per_channel(&channel_chains, &epa_cfg);
let metadata = OptimizationMetadata {
pre_score: avg_pre_score,
post_score: avg_post_score,
algorithm: config.optimizer.algorithm.clone(),
loss_type: Some(config.optimizer.loss_type.clone()),
iterations: config.optimizer.max_iter,
timestamp: chrono::Utc::now().to_rfc3339(),
inter_channel_deviation: None,
epa_per_channel,
group_delay: group_delay_summary,
perceptual_metrics: None,
home_cinema_layout: None,
multi_seat_coverage: None,
multi_seat_correction: None,
bass_management: None,
timing_diagnostics: build_timing_diagnostics(config, &channel_arrivals, &channel_chains),
};
let mut result = RoomOptimizationResult {
channels: channel_chains,
channel_results,
combined_pre_score: avg_pre_score,
combined_post_score: avg_post_score,
metadata,
};
if curves.len() > 1 {
send_progress(
&observer_shared,
PipelineStepId::ChannelMatching,
PipelineStepStatus::Started,
&RoomOptimizationProgress {
current_speaker: "Channel matching".to_string(),
speaker_index: 0,
total_speakers,
iteration: 0,
max_iterations: 0,
loss: 0.0,
overall_progress: 0.98,
message: Some("Channel matching analysis...".to_string()),
epa_preference: None,
},
)?;
compute_and_correct_icd(&mut result, config, sample_rate);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::ChannelMatching, "Channel matching complete"),
)?;
} else {
emit_pipeline_event(
&observer_shared,
PipelineEvent::skipped(
PipelineStepId::ChannelMatching,
"Channel matching not needed",
),
)?;
}
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(PipelineStepId::MetadataRefresh, "Refreshing reports"),
)?;
refresh_final_reports(&mut result, config, sample_rate);
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(PipelineStepId::MetadataRefresh, "Reports refreshed"),
)?;
emit_pipeline_event(
&observer_shared,
PipelineEvent::started(
PipelineStepId::SanityCheck,
"Checking final optimization result",
),
)?;
sanity_check_result(&result)?;
emit_pipeline_event(
&observer_shared,
PipelineEvent::completed(
PipelineStepId::SanityCheck,
"Final optimization result checked",
),
)?;
Ok(result)
}
/// Groups channels that belong to the same acoustic unit so they can later be
/// checked for consistency as a set.
///
/// Channels whose `SpeakerConfig` carries an explicit speaker name are grouped
/// under that name (several channels may reference the same physical speaker).
/// Channels without a speaker name are treated as positional and paired by the
/// conventional left/right layout names (e.g. "L"/"R", "SL"/"SR"); each matched
/// pair becomes a `"P1-P2"` group. Unpaired positional channels end up in no
/// group.
fn identify_acoustic_groups(config: &RoomConfig) -> HashMap<String, Vec<String>> {
    use std::collections::HashSet;

    let mut groups: HashMap<String, Vec<String>> = HashMap::new();
    // Channels with no explicit speaker name. The previous implementation used
    // a HashMap<String, String> mapping every channel name to itself; a set
    // expresses the same information without the redundant value clones.
    let mut positioned: HashSet<String> = HashSet::new();
    for (channel_name, speaker_cfg) in &config.speakers {
        match speaker_cfg.speaker_name() {
            Some(speaker_name) => groups
                .entry(speaker_name.to_string())
                .or_default()
                .push(channel_name.clone()),
            None => {
                positioned.insert(channel_name.clone());
            }
        }
    }
    // Conventional stereo pairs: front, surround, surround-back, top-front,
    // top-rear, front-wide.
    let pairs = [
        ("L", "R"),
        ("SL", "SR"),
        ("SBL", "SBR"),
        ("TFL", "TFR"),
        ("TRL", "TRR"),
        ("FWL", "FWR"),
    ];
    for (p1, p2) in pairs {
        // Only form a group when both sides of the pair are present.
        if positioned.contains(p1) && positioned.contains(p2) {
            positioned.remove(p1);
            positioned.remove(p2);
            groups.insert(
                format!("{}-{}", p1, p2),
                vec![p1.to_string(), p2.to_string()],
            );
        }
    }
    groups
}
/// Optimizes a single speaker channel outside of a full room pipeline.
///
/// Builds a minimal `RoomConfig` around the supplied optimizer/target settings
/// and delegates to `process_speaker_internal`, returning the resulting DSP
/// chain, pre/post scores, curves, and filters. The `_callback` parameter is
/// currently unused.
pub fn optimize_speaker(
    channel_name: &str,
    speaker_config: &SpeakerConfig,
    optimizer_config: &OptimizerConfig,
    target_curve: Option<&TargetCurveConfig>,
    sample_rate: f64,
    _callback: Option<SpeakerOptimizationCallback>,
) -> Result<SpeakerOptimizationResult> {
    // Wrap the standalone optimizer settings in a minimal room configuration;
    // everything not relevant to single-speaker processing stays empty/None.
    let room_config = RoomConfig {
        version: super::types::default_config_version(),
        system: None,
        speakers: HashMap::new(),
        crossovers: None,
        target_curve: target_curve.cloned(),
        optimizer: optimizer_config.clone(),
        recording_config: None,
        cea2034_cache: None,
    };
    let (
        chain,
        pre_score,
        post_score,
        initial_curve,
        final_curve,
        biquads,
        _mean_spl,
        _arrival_time_ms,
        fir_coeffs,
    ) = process_speaker_internal(
        channel_name,
        speaker_config,
        &room_config,
        sample_rate,
        None, // output_dir: default to current directory
        None, // callback
        None, // shared_mean_spl
        None, // probe_arrival_overrides
    )?;
    Ok(SpeakerOptimizationResult {
        chain,
        pre_score,
        post_score,
        initial_curve,
        final_curve,
        biquads,
        fir_coeffs,
    })
}
/// Dispatches a speaker configuration to its matching processing routine.
///
/// `output_dir` falls back to the current directory when not supplied. The
/// probe-arrival override and shared mean SPL are only forwarded for single
/// (non-grouped) sources; grouped, multi-sub, DBA, and cardioid configurations
/// are handled by their dedicated group processors.
fn process_speaker_internal(
    channel_name: &str,
    speaker_config: &SpeakerConfig,
    room_config: &RoomConfig,
    sample_rate: f64,
    output_dir: Option<&Path>,
    callback: Option<crate::optim::OptimProgressCallback>,
    shared_mean_spl: Option<f64>,
    probe_arrival_overrides: Option<&HashMap<String, f64>>,
) -> Result<MixedModeResult> {
    // Default to the current working directory when no output path was given.
    let dir = match output_dir {
        Some(path) => path,
        None => Path::new("."),
    };
    match speaker_config {
        SpeakerConfig::Single(source) => process_single_speaker(
            channel_name,
            source,
            room_config,
            sample_rate,
            dir,
            callback,
            // Arrival-time override keyed by channel name, if one was provided.
            probe_arrival_overrides.and_then(|m| m.get(channel_name).copied()),
            shared_mean_spl,
        ),
        SpeakerConfig::Group(group) => {
            process_speaker_group(channel_name, group, room_config, sample_rate, dir)
        }
        SpeakerConfig::MultiSub(group) => {
            process_multisub_group(channel_name, group, room_config, sample_rate, dir)
        }
        SpeakerConfig::Dba(cfg) => {
            process_dba(channel_name, cfg, room_config, sample_rate, dir)
        }
        SpeakerConfig::Cardioid(cfg) => {
            process_cardioid(channel_name, cfg, room_config, sample_rate, dir)
        }
    }
}
/// Returns the WAV file path associated with a measurement source, if any.
///
/// For multi-measurement sources only the first measurement is consulted;
/// in-memory sources never carry a WAV path.
pub(super) fn extract_wav_path(source: &MeasurementSource) -> Option<String> {
    // Pick the single measurement reference to inspect (the first one for
    // multi-measurement sources); in-memory variants have none.
    let measurement = match source {
        MeasurementSource::Single(s) => Some(&s.measurement),
        MeasurementSource::Multiple(m) => m.measurements.first(),
        MeasurementSource::InMemory(_) | MeasurementSource::InMemoryMultiple(_) => None,
    }?;
    // Only inline measurement references can carry a WAV path.
    match measurement {
        crate::MeasurementRef::Inline(inline) => inline.wav_path.clone(),
        _ => None,
    }
}
#[cfg(test)]
mod tests;