use std::any::Any;
use std::collections::HashMap;
use std::f32::consts::PI;
use std::sync::{Mutex, OnceLock};

use float_eq::float_eq;
use hrtf::{HrirSphere, HrtfContext, HrtfProcessor, Vec3};

use crate::context::{AudioContextRegistration, AudioParamId, BaseAudioContext};
use crate::param::{AudioParam, AudioParamDescriptor};
use crate::render::{
    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
};
use crate::RENDER_QUANTUM_SIZE;

use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelCountMode, ChannelInterpretation};

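/// Assert that the given cone outer gain lies in the range [0, 1], panicking with an
/// `InvalidStateError` message otherwise.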
#[track_caller]
#[inline(always)]
#[allow(clippy::manual_range_contains)]
pub(crate) fn assert_valid_cone_outer_gain(value: f64) {
    assert!(
        value >= 0. && value <= 1.,
        "InvalidStateError - coneOuterGain must be in the range [0, 1]"
    );
}

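/// Load (and cache) an HRTF processor for the given sample rate.
///
/// Returns the processor together with the length of the HRIR responses, which the
/// renderer uses as its tail time in samples. Instances are cached per sample rate
/// behind a global `OnceLock`, so switching several nodes to HRTF panning reuses the
/// same data.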
pub(crate) fn load_hrtf_processor(sample_rate: u32) -> (HrtfProcessor, usize) {
    static INSTANCE: OnceLock<Mutex<HashMap<u32, (HrtfProcessor, usize)>>> = OnceLock::new();
    let cache = INSTANCE.get_or_init(|| Mutex::new(HashMap::new()));

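    // Clamp the sample rate: the bundled HRIR sphere cannot be resampled to arbitrarily
    // low rates (assumed rationale for the 27 kHz floor used here).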
    let sample_rate = sample_rate.max(27_000);

    {
        if let Some(value) = cache.lock().unwrap().get(&sample_rate) {
            return value.clone();
        }
    }

    let resource = include_bytes!("../../resources/IRC_1003_C.bin");
    let hrir_sphere = HrirSphere::new(&resource[..], sample_rate).unwrap();
    let len = hrir_sphere.len();

    let interpolation_steps = 1;
    let samples_per_step = RENDER_QUANTUM_SIZE / interpolation_steps;
    let processor = HrtfProcessor::new(hrir_sphere, interpolation_steps, samples_per_step);

    let value = (processor, len);
    cache.lock().unwrap().insert(sample_rate, value.clone());

    value
}

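/// Spatialization algorithm used to position the audio in 3D space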
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum PanningModelType {
    #[default]
    EqualPower,
    HRTF,
}

impl From<u8> for PanningModelType {
    fn from(i: u8) -> Self {
        match i {
            0 => PanningModelType::EqualPower,
            1 => PanningModelType::HRTF,
            _ => unreachable!(),
        }
    }
}

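/// Algorithm used to reduce the volume of an audio source as it moves away from the listener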
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum DistanceModelType {
    Linear,
    #[default]
    Inverse,
    Exponential,
}

impl From<u8> for DistanceModelType {
    fn from(i: u8) -> Self {
        match i {
            0 => DistanceModelType::Linear,
            1 => DistanceModelType::Inverse,
            2 => DistanceModelType::Exponential,
            _ => unreachable!(),
        }
    }
}

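/// Options for constructing a [`PannerNode`]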
#[derive(Clone, Debug)]
pub struct PannerOptions {
    pub panning_model: PanningModelType,
    pub distance_model: DistanceModelType,
    pub position_x: f32,
    pub position_y: f32,
    pub position_z: f32,
    pub orientation_x: f32,
    pub orientation_y: f32,
    pub orientation_z: f32,
    pub ref_distance: f64,
    pub max_distance: f64,
    pub rolloff_factor: f64,
    pub cone_inner_angle: f64,
    pub cone_outer_angle: f64,
    pub cone_outer_gain: f64,
    pub audio_node_options: AudioNodeOptions,
}

impl Default for PannerOptions {
    fn default() -> Self {
        PannerOptions {
            panning_model: PanningModelType::default(),
            distance_model: DistanceModelType::default(),
            position_x: 0.,
            position_y: 0.,
            position_z: 0.,
            orientation_x: 1.,
            orientation_y: 0.,
            orientation_z: 0.,
            ref_distance: 1.,
            max_distance: 10000.,
            rolloff_factor: 1.,
            cone_inner_angle: 360.,
            cone_outer_angle: 360.,
            cone_outer_gain: 0.,
            audio_node_options: AudioNodeOptions {
                channel_count: 2,
                channel_count_mode: ChannelCountMode::ClampedMax,
                channel_interpretation: ChannelInterpretation::Speakers,
            },
        }
    }
}

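/// Control messages sent from the control thread to the `PannerRenderer` whenever one of
/// the node's non-`AudioParam` settings changes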
enum ControlMessage {
    DistanceModel(DistanceModelType),
    PanningModel(Box<Option<HrtfState>>),
    RefDistance(f64),
    MaxDistance(f64),
    RollOffFactor(f64),
    ConeInnerAngle(f64),
    ConeOuterAngle(f64),
    ConeOuterGain(f64),
}

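/// Assert that the channel count is valid for a `PannerNode`, i.e. at most two,
/// panicking with a `NotSupportedError` message otherwise.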
#[track_caller]
#[inline(always)]
fn assert_valid_channel_count(count: usize) {
    assert!(
        count <= 2,
        "NotSupportedError - PannerNode channel count cannot be greater than two"
    );
}

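/// Assert that the channel count mode is valid for a `PannerNode`, i.e. not `Max`,
/// panicking with a `NotSupportedError` message otherwise.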
#[track_caller]
#[inline(always)]
fn assert_valid_channel_count_mode(mode: ChannelCountMode) {
    assert_ne!(
        mode,
        ChannelCountMode::Max,
        "NotSupportedError - PannerNode channel count mode cannot be set to max"
    );
}

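/// Render-thread state for HRTF panning: the shared processor plus the buffers that must
/// be carried over from one render quantum to the next (previous direction, previous
/// distance gain and the previous sample buffers required by the `hrtf` crate).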
struct HrtfState {
    len: usize,
    processor: HrtfProcessor,
    output_interleaved: Vec<(f32, f32)>,
    prev_sample_vector: Vec3,
    prev_left_samples: Vec<f32>,
    prev_right_samples: Vec<f32>,
    prev_distance_gain: f32,
}

impl HrtfState {
    fn new(processor: HrtfProcessor, len: usize) -> Self {
        Self {
            len,
            processor,
            output_interleaved: vec![(0., 0.); RENDER_QUANTUM_SIZE],
            prev_sample_vector: Vec3::new(0., 0., 1.),
            prev_left_samples: vec![],
            prev_right_samples: vec![],
            prev_distance_gain: 0.,
        }
    }

    fn process(
        &mut self,
        source: &[f32],
        new_distance_gain: f32,
        projected_source: [f32; 3],
    ) -> &[(f32, f32)] {
        self.output_interleaved.fill((0., 0.));

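        // Map the projected source vector onto the coordinate system used by the `hrtf`
        // crate (note the swapped y/z components; assumed to match that crate's axis
        // convention).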
        let new_sample_vector = Vec3 {
            x: projected_source[0],
            z: projected_source[1],
            y: projected_source[2],
        };

        let context = HrtfContext {
            source,
            output: &mut self.output_interleaved,
            new_sample_vector,
            prev_sample_vector: self.prev_sample_vector,
            prev_left_samples: &mut self.prev_left_samples,
            prev_right_samples: &mut self.prev_right_samples,
            new_distance_gain,
            prev_distance_gain: self.prev_distance_gain,
        };

        self.processor.process_samples(context);

        self.prev_sample_vector = new_sample_vector;
        self.prev_distance_gain = new_distance_gain;

        &self.output_interleaved
    }

    fn tail_time_samples(&self) -> usize {
        self.len
    }
}

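/// `PannerNode` positions and spatializes an incoming audio stream in three-dimensional
/// space, relative to the context's `AudioListener`.
///
/// A minimal usage sketch (import paths assumed from this crate's public module layout):
///
/// ```no_run
/// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
/// use web_audio_api::node::{AudioNode, PannerNode, PannerOptions, PanningModelType};
///
/// let context = OfflineAudioContext::new(2, 128, 44_100.);
/// let mut panner = PannerNode::new(&context, PannerOptions::default());
/// panner.set_panning_model(PanningModelType::HRTF);
/// panner.position_x().set_value(1.);
/// panner.connect(&context.destination());
/// ```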
#[derive(Debug)]
pub struct PannerNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    position_x: AudioParam,
    position_y: AudioParam,
    position_z: AudioParam,
    orientation_x: AudioParam,
    orientation_y: AudioParam,
    orientation_z: AudioParam,
    cone_inner_angle: f64,
    cone_outer_angle: f64,
    cone_outer_gain: f64,
    distance_model: DistanceModelType,
    ref_distance: f64,
    max_distance: f64,
    rolloff_factor: f64,
    panning_model: PanningModelType,
}

impl AudioNode for PannerNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        1
    }

    fn set_channel_count(&self, count: usize) {
        assert_valid_channel_count(count);
        self.channel_config.set_count(count, self.registration());
    }

    fn set_channel_count_mode(&self, mode: ChannelCountMode) {
        assert_valid_channel_count_mode(mode);
        self.channel_config
            .set_count_mode(mode, self.registration());
    }
}

impl PannerNode {
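    /// Create a new `PannerNode` from the supplied options.
    ///
    /// # Panics
    ///
    /// Panics if `ref_distance` or `rolloff_factor` is negative, if `max_distance` is not
    /// strictly positive, if `cone_outer_gain` lies outside [0, 1], or if the requested
    /// channel configuration is unsupported (count greater than two, or mode `Max`).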
    #[allow(clippy::missing_panics_doc)]
    pub fn new<C: BaseAudioContext>(context: &C, options: PannerOptions) -> Self {
        let mut node = context.base().register(|registration| {
            use crate::spatial::PARAM_OPTS;

            let PannerOptions {
                position_x,
                position_y,
                position_z,
                orientation_x,
                orientation_y,
                orientation_z,
                distance_model,
                ref_distance,
                max_distance,
                rolloff_factor,
                cone_inner_angle,
                cone_outer_angle,
                cone_outer_gain,
                audio_node_options: channel_config,
                panning_model,
            } = options;

            assert!(
                ref_distance >= 0.,
                "RangeError - refDistance cannot be negative"
            );
            assert!(
                max_distance > 0.,
                "RangeError - maxDistance must be strictly positive"
            );
            assert!(
                rolloff_factor >= 0.,
                "RangeError - rolloffFactor cannot be negative"
            );
            assert_valid_cone_outer_gain(cone_outer_gain);
            assert_valid_channel_count(channel_config.channel_count);
            assert_valid_channel_count_mode(channel_config.channel_count_mode);

            let (param_px, render_px) = context.create_audio_param(PARAM_OPTS, &registration);
            let (param_py, render_py) = context.create_audio_param(PARAM_OPTS, &registration);
            let (param_pz, render_pz) = context.create_audio_param(PARAM_OPTS, &registration);
            param_px.set_value(position_x);
            param_py.set_value(position_y);
            param_pz.set_value(position_z);

            let orientation_x_opts = AudioParamDescriptor {
                default_value: 1.0,
                ..PARAM_OPTS
            };
            let (param_ox, render_ox) =
                context.create_audio_param(orientation_x_opts, &registration);
            let (param_oy, render_oy) = context.create_audio_param(PARAM_OPTS, &registration);
            let (param_oz, render_oz) = context.create_audio_param(PARAM_OPTS, &registration);
            param_ox.set_value(orientation_x);
            param_oy.set_value(orientation_y);
            param_oz.set_value(orientation_z);

            let render = PannerRenderer {
                position_x: render_px,
                position_y: render_py,
                position_z: render_pz,
                orientation_x: render_ox,
                orientation_y: render_oy,
                orientation_z: render_oz,
                distance_model,
                ref_distance,
                max_distance,
                rolloff_factor,
                cone_inner_angle,
                cone_outer_angle,
                cone_outer_gain,
                hrtf_state: None,
                tail_time_counter: 0,
            };

            let node = PannerNode {
                registration,
                channel_config: channel_config.into(),
                position_x: param_px,
                position_y: param_py,
                position_z: param_pz,
                orientation_x: param_ox,
                orientation_y: param_oy,
                orientation_z: param_oz,
                distance_model,
                ref_distance,
                max_distance,
                rolloff_factor,
                cone_inner_angle,
                cone_outer_angle,
                cone_outer_gain,
                panning_model,
            };

            context.base().ensure_audio_listener_present();

            (node, Box::new(render))
        });

        context
            .base()
            .connect_listener_to_panner(node.registration().id());

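        // Apply the panning model only once the node is registered: for HRTF this loads
        // (or fetches from cache) the processor for the context sample rate and ships it
        // to the renderer as a control message.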
        node.set_panning_model(options.panning_model);

        node
    }

    pub fn position_x(&self) -> &AudioParam {
        &self.position_x
    }

    pub fn position_y(&self) -> &AudioParam {
        &self.position_y
    }

    pub fn position_z(&self) -> &AudioParam {
        &self.position_z
    }

    pub fn set_position(&self, x: f32, y: f32, z: f32) {
        self.position_x.set_value(x);
        self.position_y.set_value(y);
        self.position_z.set_value(z);
    }

    pub fn orientation_x(&self) -> &AudioParam {
        &self.orientation_x
    }

    pub fn orientation_y(&self) -> &AudioParam {
        &self.orientation_y
    }

    pub fn orientation_z(&self) -> &AudioParam {
        &self.orientation_z
    }

    pub fn set_orientation(&self, x: f32, y: f32, z: f32) {
        self.orientation_x.set_value(x);
        self.orientation_y.set_value(y);
        self.orientation_z.set_value(z);
    }

    pub fn distance_model(&self) -> DistanceModelType {
        self.distance_model
    }

    pub fn set_distance_model(&mut self, value: DistanceModelType) {
        self.distance_model = value;
        self.registration
            .post_message(ControlMessage::DistanceModel(value));
    }

    pub fn ref_distance(&self) -> f64 {
        self.ref_distance
    }

    pub fn set_ref_distance(&mut self, value: f64) {
        assert!(value >= 0., "RangeError - refDistance cannot be negative");
        self.ref_distance = value;
        self.registration
            .post_message(ControlMessage::RefDistance(value));
    }

    pub fn max_distance(&self) -> f64 {
        self.max_distance
    }

    pub fn set_max_distance(&mut self, value: f64) {
        assert!(
            value > 0.,
            "RangeError - maxDistance must be strictly positive"
        );
        self.max_distance = value;
        self.registration
            .post_message(ControlMessage::MaxDistance(value));
    }

    pub fn rolloff_factor(&self) -> f64 {
        self.rolloff_factor
    }

    pub fn set_rolloff_factor(&mut self, value: f64) {
        assert!(value >= 0., "RangeError - rolloffFactor cannot be negative");
        self.rolloff_factor = value;
        self.registration
            .post_message(ControlMessage::RollOffFactor(value));
    }

    pub fn cone_inner_angle(&self) -> f64 {
        self.cone_inner_angle
    }

    pub fn set_cone_inner_angle(&mut self, value: f64) {
        self.cone_inner_angle = value;
        self.registration
            .post_message(ControlMessage::ConeInnerAngle(value));
    }

    pub fn cone_outer_angle(&self) -> f64 {
        self.cone_outer_angle
    }

    pub fn set_cone_outer_angle(&mut self, value: f64) {
        self.cone_outer_angle = value;
        self.registration
            .post_message(ControlMessage::ConeOuterAngle(value));
    }

    pub fn cone_outer_gain(&self) -> f64 {
        self.cone_outer_gain
    }

    pub fn set_cone_outer_gain(&mut self, value: f64) {
        assert_valid_cone_outer_gain(value);
        self.cone_outer_gain = value;
        self.registration
            .post_message(ControlMessage::ConeOuterGain(value));
    }

    pub fn panning_model(&self) -> PanningModelType {
        self.panning_model
    }

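    /// Set the panning model. Switching to `PanningModelType::HRTF` loads (and caches) an
    /// HRTF processor for the context's sample rate and sends it to the render thread.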
    #[allow(clippy::missing_panics_doc)]
    pub fn set_panning_model(&mut self, value: PanningModelType) {
        let hrtf_option = match value {
            PanningModelType::EqualPower => None,
            PanningModelType::HRTF => {
                let sample_rate = self.context().sample_rate() as u32;
                let (processor, len) = load_hrtf_processor(sample_rate);
                Some(HrtfState::new(processor, len))
            }
        };

        self.panning_model = value;
        self.registration
            .post_message(ControlMessage::PanningModel(Box::new(hrtf_option)));
    }
}

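/// Spatialization values derived from the source and listener positions/orientations for a
/// single sample frame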
#[derive(Copy, Clone)]
struct SpatialParams {
    dist_gain: f32,
    cone_gain: f32,
    azimuth: f32,
    elevation: f32,
}

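/// Render-thread counterpart of [`PannerNode`]: it mirrors the node's settings and performs
/// the actual equal-power or HRTF panning for each render quantum.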
struct PannerRenderer {
    position_x: AudioParamId,
    position_y: AudioParamId,
    position_z: AudioParamId,
    orientation_x: AudioParamId,
    orientation_y: AudioParamId,
    orientation_z: AudioParamId,
    distance_model: DistanceModelType,
    ref_distance: f64,
    max_distance: f64,
    rolloff_factor: f64,
    cone_inner_angle: f64,
    cone_outer_angle: f64,
    cone_outer_gain: f64,
    hrtf_state: Option<HrtfState>,
    tail_time_counter: usize,
}

impl AudioProcessor for PannerRenderer {
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues<'_>,
        _scope: &AudioWorkletGlobalScope,
    ) -> bool {
        let input = &inputs[0];
        let output = &mut outputs[0];

        if input.is_silent() {
            let tail_time = match &self.hrtf_state {
                None => false,
                Some(hrtf_state) => hrtf_state.tail_time_samples() > self.tail_time_counter,
            };
            if !tail_time {
                output.make_silent();
                return false;
            }

            self.tail_time_counter += RENDER_QUANTUM_SIZE;
        }

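        // Temporarily move the HRTF state out of `self` so it can be mutated while the
        // parameter iterator below (which borrows `self` for the gain helpers) is alive;
        // it is put back at the end of this function.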
        let mut hrtf_state = self.hrtf_state.take();

        let source_position_x = params.get(&self.position_x);
        let source_position_y = params.get(&self.position_y);
        let source_position_z = params.get(&self.position_z);
        let source_orientation_x = params.get(&self.orientation_x);
        let source_orientation_y = params.get(&self.orientation_y);
        let source_orientation_z = params.get(&self.orientation_z);

        let [listener_position_x, listener_position_y, listener_position_z, listener_forward_x, listener_forward_y, listener_forward_z, listener_up_x, listener_up_y, listener_up_z] =
            params.listener_params();

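        // Zip all source and listener params into a per-sample iterator of SpatialParams.
        // The `.cycle()` calls let single-valued (k-rate) params pair up with a-rate ones
        // of length RENDER_QUANTUM_SIZE.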
        let mut a_rate_params = source_position_x
            .iter()
            .cycle()
            .zip(source_position_y.iter().cycle())
            .zip(source_position_z.iter().cycle())
            .zip(source_orientation_x.iter().cycle())
            .zip(source_orientation_y.iter().cycle())
            .zip(source_orientation_z.iter().cycle())
            .zip(listener_position_x.iter().cycle())
            .zip(listener_position_y.iter().cycle())
            .zip(listener_position_z.iter().cycle())
            .zip(listener_forward_x.iter().cycle())
            .zip(listener_forward_y.iter().cycle())
            .zip(listener_forward_z.iter().cycle())
            .zip(listener_up_x.iter().cycle())
            .zip(listener_up_y.iter().cycle())
            .zip(listener_up_z.iter().cycle())
            .map(|tuple| {
                let ((((((sp_so_lp, lfx), lfy), lfz), lux), luy), luz) = tuple;
                let (((sp_so, lpx), lpy), lpz) = sp_so_lp;
                let (((sp, sox), soy), soz) = sp_so;
                let ((spx, spy), spz) = sp;

                let source_position = [*spx, *spy, *spz];
                let source_orientation = [*sox, *soy, *soz];
                let listener_position = [*lpx, *lpy, *lpz];
                let listener_forward = [*lfx, *lfy, *lfz];
                let listener_up = [*lux, *luy, *luz];

                let dist_gain = self.dist_gain(source_position, listener_position);
                let cone_gain =
                    self.cone_gain(source_position, source_orientation, listener_position);

                let (azimuth, elevation) = crate::spatial::azimuth_and_elevation(
                    source_position,
                    listener_position,
                    listener_forward,
                    listener_up,
                );

                SpatialParams {
                    dist_gain,
                    cone_gain,
                    azimuth,
                    elevation,
                }
            });

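        // HRTF panning: the spatial parameters of the first frame are applied to the whole
        // render quantum, so only a single value is pulled from the iterator.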
        if let Some(hrtf_state) = &mut hrtf_state {
            let SpatialParams {
                dist_gain,
                cone_gain,
                azimuth,
                elevation,
            } = a_rate_params.next().unwrap();

            let new_distance_gain = cone_gain * dist_gain;

            let az_rad = azimuth * PI / 180.;
            let el_rad = elevation * PI / 180.;
            let x = az_rad.sin() * el_rad.cos();
            let z = az_rad.cos() * el_rad.cos();
            let y = el_rad.sin();
            let mut projected_source = [x, y, z];

            if float_eq!(&projected_source[..], &[0.; 3][..], abs_all <= 1E-6) {
                projected_source = [0., 0., 1.];
            }

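            // The HRTF processor takes a mono input, so down-mix a stereo input first. The
            // speaker down-mix averages the two channels, hence the factor-of-two
            // correction applied to the processed output below.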
            *output = input.clone();
            let mut overall_gain_correction = 1.;
            if output.number_of_channels() == 2 {
                overall_gain_correction *= 2.;
                output.mix(1, ChannelInterpretation::Speakers);
            }

            let output_interleaved =
                hrtf_state.process(output.channel_data(0), new_distance_gain, projected_source);

            output.set_number_of_channels(2);
            let [left, right] = output.stereo_mut();

            output_interleaved
                .iter()
                .zip(&mut left[..])
                .zip(&mut right[..])
                .for_each(|((p, l), r)| {
                    *l = overall_gain_correction * p.0;
                    *r = overall_gain_correction * p.1;
                });
        } else {
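            // Equal-power panning. If the listener params are all single-valued (k-rate),
            // the spatial params are constant over the quantum and can be computed once.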
            let single_valued = listener_position_x.len() == 1
                && listener_position_y.len() == 1
                && listener_position_z.len() == 1
                && listener_forward_x.len() == 1
                && listener_forward_y.len() == 1
                && listener_forward_z.len() == 1
                && listener_up_x.len() == 1
                && listener_up_y.len() == 1
                && listener_up_z.len() == 1;

            if single_valued {
                let param_value = a_rate_params.next().unwrap();
                match input.number_of_channels() {
                    1 => {
                        *output = input.clone();
                        output.mix(2, ChannelInterpretation::Speakers);
                        let [left, right] = output.stereo_mut();
                        left.iter_mut()
                            .zip(&mut right[..])
                            .for_each(|(l, r)| apply_mono_to_stereo_gain(param_value, l, r));
                    }
                    2 => {
                        output.set_number_of_channels(2);
                        let [left, right] = output.stereo_mut();
                        input
                            .channel_data(0)
                            .iter()
                            .copied()
                            .zip(input.channel_data(1).iter().copied())
                            .zip(&mut left[..])
                            .zip(&mut right[..])
                            .for_each(|(((il, ir), ol), or)| {
                                apply_stereo_to_stereo_gain(param_value, il, ir, ol, or)
                            });
                    }
                    _ => unreachable!(),
                }
            } else {
                match input.number_of_channels() {
                    1 => {
                        *output = input.clone();
                        output.mix(2, ChannelInterpretation::Speakers);
                        let [left, right] = output.stereo_mut();
                        a_rate_params
                            .zip(&mut left[..])
                            .zip(&mut right[..])
                            .for_each(|((p, l), r)| apply_mono_to_stereo_gain(p, l, r));
                    }
                    2 => {
                        output.set_number_of_channels(2);
                        let [left, right] = output.stereo_mut();
                        a_rate_params
                            .zip(input.channel_data(0).iter().copied())
                            .zip(input.channel_data(1).iter().copied())
                            .zip(&mut left[..])
                            .zip(&mut right[..])
                            .for_each(|((((p, il), ir), ol), or)| {
                                apply_stereo_to_stereo_gain(p, il, ir, ol, or)
                            });
                    }
                    _ => unreachable!(),
                }
            }
        }

        self.hrtf_state = hrtf_state;

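        // Only the HRTF panner has a tail: the convolution keeps ringing after the input
        // goes silent. Returning true keeps this renderer alive for that tail time.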
        self.hrtf_state.is_some()
    }

    fn onmessage(&mut self, msg: &mut dyn Any) {
        if let Some(control) = msg.downcast_mut::<ControlMessage>() {
            match control {
                ControlMessage::DistanceModel(value) => self.distance_model = *value,
                ControlMessage::RefDistance(value) => self.ref_distance = *value,
                ControlMessage::MaxDistance(value) => self.max_distance = *value,
                ControlMessage::RollOffFactor(value) => self.rolloff_factor = *value,
                ControlMessage::ConeInnerAngle(value) => self.cone_inner_angle = *value,
                ControlMessage::ConeOuterAngle(value) => self.cone_outer_angle = *value,
                ControlMessage::ConeOuterGain(value) => self.cone_outer_gain = *value,
                ControlMessage::PanningModel(value) => self.hrtf_state = value.take(),
            }

            return;
        }

        log::warn!("PannerRenderer: Dropping incoming message {msg:?}");
    }
}

impl PannerRenderer {
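    /// Gain contributed by the source's sound cone: full gain inside the inner cone,
    /// `cone_outer_gain` outside the outer cone, and a linear blend in between.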
    fn cone_gain(
        &self,
        source_position: [f32; 3],
        source_orientation: [f32; 3],
        listener_position: [f32; 3],
    ) -> f32 {
        let abs_inner_angle = self.cone_inner_angle.abs() as f32 / 2.;
        let abs_outer_angle = self.cone_outer_angle.abs() as f32 / 2.;
        if abs_inner_angle >= 180. && abs_outer_angle >= 180. {
            1.
        } else {
            let cone_outer_gain = self.cone_outer_gain as f32;

            let abs_angle =
                crate::spatial::angle(source_position, source_orientation, listener_position);

            if abs_angle < abs_inner_angle {
                1.
            } else if abs_angle >= abs_outer_angle {
                cone_outer_gain
            } else {
                let x = (abs_angle - abs_inner_angle) / (abs_outer_angle - abs_inner_angle);
                (1. - x) + cone_outer_gain * x
            }
        }
    }

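    /// Gain contributed by the distance between source and listener, for the selected
    /// distance model:
    ///
    /// - linear:      `1 - rolloff * (clamp(d, dref, dmax) - dref) / (dmax - dref)`
    /// - inverse:     `dref / (dref + rolloff * (max(d, dref) - dref))`
    /// - exponential: `(max(d, dref) / dref) ^ -rolloff`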
    fn dist_gain(&self, source_position: [f32; 3], listener_position: [f32; 3]) -> f32 {
        let distance_model = self.distance_model;
        let ref_distance = self.ref_distance;
        let distance = crate::spatial::distance(source_position, listener_position) as f64;

        let dist_gain = match distance_model {
            DistanceModelType::Linear => {
                let rolloff_factor = self.rolloff_factor.clamp(0., 1.);
                let max_distance = self.max_distance;
                let d2ref = ref_distance.min(max_distance);
                let d2max = ref_distance.max(max_distance);
                let d_clamped = distance.clamp(d2ref, d2max);
                1. - rolloff_factor * (d_clamped - d2ref) / (d2max - d2ref)
            }
            DistanceModelType::Inverse => {
                let rolloff_factor = self.rolloff_factor.max(0.);
                if distance > 0. {
                    ref_distance
                        / (ref_distance
                            + rolloff_factor * (ref_distance.max(distance) - ref_distance))
                } else {
                    1.
                }
            }
            DistanceModelType::Exponential => {
                let rolloff_factor = self.rolloff_factor.max(0.);
                (distance.max(ref_distance) / ref_distance).powf(-rolloff_factor)
            }
        };
        dist_gain as f32
    }
}

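/// Apply equal-power (cosine/sine) panning gains to a mono input that has been up-mixed to
/// stereo, scaling both channels by the distance and cone gains.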
fn apply_mono_to_stereo_gain(spatial_params: SpatialParams, l: &mut f32, r: &mut f32) {
    let SpatialParams {
        dist_gain,
        cone_gain,
        azimuth,
        ..
    } = spatial_params;

    let mut azimuth = azimuth.clamp(-180., 180.);

    if azimuth < -90. {
        azimuth = -180. - azimuth;
    } else if azimuth > 90. {
        azimuth = 180. - azimuth;
    }

    let x = (azimuth + 90.) / 180.;
    let gain_l = (x * PI / 2.).cos();
    let gain_r = (x * PI / 2.).sin();

    *l *= gain_l * dist_gain * cone_gain;
    *r *= gain_r * dist_gain * cone_gain;
}

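/// Apply equal-power panning to a stereo input: the signal is blended towards the channel
/// it is panned to, and both channels are scaled by the distance and cone gains.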
fn apply_stereo_to_stereo_gain(
    spatial_params: SpatialParams,
    il: f32,
    ir: f32,
    ol: &mut f32,
    or: &mut f32,
) {
    let SpatialParams {
        dist_gain,
        cone_gain,
        azimuth,
        ..
    } = spatial_params;

    let mut azimuth = azimuth.clamp(-180., 180.);

    if azimuth < -90. {
        azimuth = -180. - azimuth;
    } else if azimuth > 90. {
        azimuth = 180. - azimuth;
    }

    let x = if azimuth <= 0. {
        (azimuth + 90.) / 90.
    } else {
        azimuth / 90.
    };
    let gain_l = (x * PI / 2.).cos();
    let gain_r = (x * PI / 2.).sin();

    if azimuth <= 0. {
        *ol = (il + ir * gain_l) * dist_gain * cone_gain;
        *or = ir * gain_r * dist_gain * cone_gain;
    } else {
        *ol = il * gain_l * dist_gain * cone_gain;
        *or = (ir + il * gain_r) * dist_gain * cone_gain;
    }
}

#[cfg(test)]
mod tests {
    use float_eq::{assert_float_eq, assert_float_ne};

    use crate::context::{BaseAudioContext, OfflineAudioContext};
    use crate::node::{AudioBufferSourceNode, AudioBufferSourceOptions, AudioScheduledSourceNode};
    use crate::AudioBuffer;

    use super::*;

    #[test]
    fn test_audioparam_value_applies_immediately() {
        let context = OfflineAudioContext::new(1, 128, 48000.);
        let options = PannerOptions {
            position_x: 12.,
            ..Default::default()
        };
        let src = PannerNode::new(&context, options);
        assert_float_eq!(src.position_x.value(), 12., abs_all <= 0.);
    }

    #[test]
    fn test_equal_power_mono_to_stereo() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE * 4;
        let mut context = OfflineAudioContext::new(2, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1.; RENDER_QUANTUM_SIZE]], sample_rate);
        let mut src = AudioBufferSourceNode::new(&context, AudioBufferSourceOptions::default());
        src.set_buffer(input);
        src.start();

        let options = PannerOptions {
            panning_model: PanningModelType::EqualPower,
            ..PannerOptions::default()
        };
        let panner = PannerNode::new(&context, options);
        assert_eq!(panner.panning_model(), PanningModelType::EqualPower);
        panner.set_channel_count(1);
        panner.position_x().set_value(1.);
        src.connect(&panner);
        panner.connect(&context.destination());

        let output = context.start_rendering_sync();
        let original = vec![1.; RENDER_QUANTUM_SIZE];
        let zero = vec![0.; RENDER_QUANTUM_SIZE];

        assert_float_eq!(
            output.get_channel_data(0)[..128],
            &zero[..],
            abs_all <= 1E-6
        );
        assert_float_eq!(
            output.get_channel_data(1)[..128],
            &original[..],
            abs_all <= 1E-6
        );

        assert_float_eq!(
            output.get_channel_data(0)[128..256],
            &zero[..],
            abs_all <= 1E-6
        );
        assert_float_eq!(
            output.get_channel_data(1)[128..256],
            &zero[..],
            abs_all <= 1E-6
        );
    }

    #[test]
    fn test_equal_power_azimuth_mono_to_stereo() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE;
        let mut context = OfflineAudioContext::new(2, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1.; RENDER_QUANTUM_SIZE]], sample_rate);
        let mut src = AudioBufferSourceNode::new(&context, AudioBufferSourceOptions::default());
        src.set_buffer(input);
        src.start();

        let options = PannerOptions {
            panning_model: PanningModelType::EqualPower,
            ..PannerOptions::default()
        };
        let panner = PannerNode::new(&context, options);
        assert_eq!(panner.panning_model(), PanningModelType::EqualPower);
        panner.position_y().set_value(1.);
        src.connect(&panner);
        panner.connect(&context.destination());

        let output = context.start_rendering_sync();
        let sqrt2 = vec![(1.0f32 / 2.).sqrt(); RENDER_QUANTUM_SIZE];

        assert_float_eq!(
            output.get_channel_data(0)[..128],
            &sqrt2[..],
            abs_all <= 1E-6
        );
        assert_float_eq!(
            output.get_channel_data(1)[..128],
            &sqrt2[..],
            abs_all <= 1E-6
        );
    }

    #[test]
    fn test_equal_power_stereo_to_stereo() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE;
        let mut context = OfflineAudioContext::new(2, length, sample_rate);

        let listener = context.listener();
        listener.position_x().set_value(10.);
        listener.position_y().set_value(0.);
        listener.position_z().set_value(0.);
        listener.forward_x().set_value(1.);
        listener.forward_y().set_value(0.);
        listener.forward_z().set_value(0.);
        listener.up_x().set_value(0.);
        listener.up_y().set_value(0.);
        listener.up_z().set_value(1.);

        let input = AudioBuffer::from(
            vec![vec![1.; RENDER_QUANTUM_SIZE], vec![1.; RENDER_QUANTUM_SIZE]],
            sample_rate,
        );
        let mut src = AudioBufferSourceNode::new(&context, AudioBufferSourceOptions::default());
        src.set_buffer(input);
        src.start();

        let panner = context.create_panner();
        panner.position_x().set_value(10.);
        panner.position_y().set_value(10.);
        panner.position_z().set_value(0.);

        src.connect(&panner);
        panner.connect(&context.destination());

        let output = context.start_rendering_sync();

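        // The source sits at distance 10, 90° to the listener's left: with the default
        // inverse distance model (ref_distance = 1, rolloff = 1) the gain is 1 / (1 + 9) = 0.1,
        // and both unit input channels collapse into the left output: (1 + 1) * 0.1 = 0.2.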
        assert_float_eq!(
            output.get_channel_data(0)[..RENDER_QUANTUM_SIZE],
            &[0.2; RENDER_QUANTUM_SIZE][..],
            abs_all <= 0.001
        );
        assert_float_eq!(
            output.get_channel_data(1)[..RENDER_QUANTUM_SIZE],
            &[0.; RENDER_QUANTUM_SIZE][..],
            abs_all <= 0.001
        );
    }

    #[test]
    fn test_hrtf() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE * 4;
        let mut context = OfflineAudioContext::new(2, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1.; RENDER_QUANTUM_SIZE]], sample_rate);
        let mut src = AudioBufferSourceNode::new(&context, AudioBufferSourceOptions::default());
        src.set_buffer(input);
        src.start();

        let options = PannerOptions {
            panning_model: PanningModelType::HRTF,
            ..PannerOptions::default()
        };
        let panner = PannerNode::new(&context, options);
        assert_eq!(panner.panning_model(), PanningModelType::HRTF);
        panner.position_x().set_value(1.);
        src.connect(&panner);
        panner.connect(&context.destination());

        let output = context.start_rendering_sync();
        let original = vec![1.; RENDER_QUANTUM_SIZE];

        assert_float_ne!(
            output.get_channel_data(0)[..128],
            &original[..],
            abs_all <= 1E-6
        );
        assert_float_ne!(
            output.get_channel_data(1)[..128],
            &original[..],
            abs_all <= 1E-6
        );

        let left = output.channel_data(0).as_slice();
        assert!(left[128..256].iter().any(|v| *v >= 1E-6));

        let right = output.channel_data(1).as_slice();
        assert!(right[128..256].iter().any(|v| *v >= 1E-6));
    }
}