// web_audio_api/node/convolver.rs

1use std::any::Any;
2
3use fft_convolver::FFTConvolver;
4
5use crate::buffer::AudioBuffer;
6use crate::context::{AudioContextRegistration, BaseAudioContext};
7use crate::render::{
8    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
9};
10use crate::RENDER_QUANTUM_SIZE;
11
12use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelCountMode, ChannelInterpretation};
13
14/// Scale buffer by an equal-power normalization
15// see - <https://webaudio.github.io/web-audio-api/#dom-convolvernode-normalize>
16fn normalize_buffer(buffer: &AudioBuffer) -> f32 {
17    let gain_calibration = 0.00125;
18    let gain_calibration_sample_rate = 44100.;
19    let min_power = 0.000125;
20
21    // Normalize by RMS power.
22    let number_of_channels = buffer.number_of_channels();
23    let length = buffer.length();
24    let sample_rate = buffer.sample_rate();
25
26    let mut power: f32 = buffer
27        .channels()
28        .iter()
29        .map(|c| c.as_slice().iter().map(|&s| s * s).sum::<f32>())
30        .sum();
31
32    power = (power / (number_of_channels * length) as f32).sqrt();
33
34    // Protect against accidental overload.
35    if !power.is_finite() || power.is_nan() || power < min_power {
36        power = min_power;
37    }
38
39    let mut scale = 1. / power;
40
41    // Calibrate to make perceived volume same as unprocessed.
42    scale *= gain_calibration;
43
44    // Scale depends on sample-rate.
45    scale *= gain_calibration_sample_rate / sample_rate;
46
47    // True-stereo compensation.
48    if number_of_channels == 4 {
49        scale *= 0.5;
50    }
51
52    scale
53}
54
/// `ConvolverNode` options
//dictionary ConvolverOptions : AudioNodeOptions {
//  AudioBuffer? buffer;
//  boolean disableNormalization = false;
//};
#[derive(Clone, Debug)]
pub struct ConvolverOptions {
    /// The desired buffer for the ConvolverNode
    pub buffer: Option<AudioBuffer>,
    /// The opposite of the desired initial value for the normalize attribute
    /// (i.e. `true` disables the equal-power normalization of the buffer)
    pub disable_normalization: bool,
    /// AudioNode options
    pub audio_node_options: AudioNodeOptions,
}
69
70impl Default for ConvolverOptions {
71    fn default() -> Self {
72        Self {
73            buffer: None,
74            disable_normalization: false,
75            audio_node_options: AudioNodeOptions {
76                channel_count: 2,
77                channel_count_mode: ChannelCountMode::ClampedMax,
78                channel_interpretation: ChannelInterpretation::Speakers,
79            },
80        }
81    }
82}
83
/// Assert that the channel count is valid for the ConvolverNode
/// see <https://webaudio.github.io/web-audio-api/#audionode-channelcount-constraints>
///
/// # Panics
///
/// This function panics if the given count is greater than 2
///
#[track_caller]
#[inline(always)]
fn assert_valid_channel_count(count: usize) {
    if count > 2 {
        panic!("NotSupportedError - ConvolverNode channel count cannot be greater than two");
    }
}
99
/// Assert that the channel count mode is valid for the ConvolverNode
/// see <https://webaudio.github.io/web-audio-api/#audionode-channelcountmode-constraints>
///
/// # Panics
///
/// This function panics if the given count mode is [`ChannelCountMode::Max`];
/// only `ClampedMax` and `Explicit` are allowed for this node
///
#[track_caller]
#[inline(always)]
fn assert_valid_channel_count_mode(mode: ChannelCountMode) {
    assert_ne!(
        mode,
        ChannelCountMode::Max,
        "NotSupportedError - ConvolverNode channel count mode cannot be set to max"
    );
}
116
117/// Processing node which applies a linear convolution effect given an impulse response.
118///
119/// - MDN documentation: <https://developer.mozilla.org/en-US/docs/Web/API/ConvolverNode>
120/// - specification: <https://webaudio.github.io/web-audio-api/#ConvolverNode>
121/// - see also: [`BaseAudioContext::create_convolver`]
122///
123/// The current implementation only handles mono-to-mono convolutions. The provided impulse
124/// response buffer and the input signal will be downmixed appropriately.
125///
126/// # Usage
127///
128/// ```no_run
129/// use std::fs::File;
130///
131/// use web_audio_api::context::{AudioContext, BaseAudioContext};
132/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode, ConvolverNode, ConvolverOptions};
133///
134/// let context = AudioContext::default();
135/// let file = File::open("samples/vocals-dry.wav").unwrap();
136/// let audio_buffer = context.decode_audio_data_sync(file).unwrap();
137///
138/// let impulse_file = File::open("samples/small-room-response.wav").unwrap();
139/// let impulse_buffer = context.decode_audio_data_sync(impulse_file).unwrap();
140///
141/// let mut src = context.create_buffer_source();
142/// src.set_buffer(audio_buffer);
143///
144/// let mut convolve = ConvolverNode::new(&context, ConvolverOptions::default());
145/// convolve.set_buffer(impulse_buffer);
146///
147/// src.connect(&convolve);
148/// convolve.connect(&context.destination());
149/// src.start();
150/// std::thread::sleep(std::time::Duration::from_millis(4_000));
151/// ```
152///
153/// # Examples
154///
155/// - `cargo run --release --example convolution`
156///
#[derive(Debug)]
pub struct ConvolverNode {
    /// Represents the node instance and its associated audio context
    registration: AudioContextRegistration,
    /// Info about audio node channel configuration
    channel_config: ChannelConfig,
    /// Perform equal power normalization on response buffer
    /// (only applied at `set_buffer` time, cf. `normalize_buffer`)
    normalize: bool,
    /// The response buffer, nullable (`None` means passthrough)
    buffer: Option<AudioBuffer>,
}
168
impl AudioNode for ConvolverNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    // single input node
    fn number_of_inputs(&self) -> usize {
        1
    }

    // single output node
    fn number_of_outputs(&self) -> usize {
        1
    }

    // see <https://webaudio.github.io/web-audio-api/#audionode-channelcount-constraints>
    // panics for counts > 2 (NotSupportedError)
    fn set_channel_count(&self, count: usize) {
        assert_valid_channel_count(count);
        self.channel_config.set_count(count, self.registration());
    }

    // see <https://webaudio.github.io/web-audio-api/#audionode-channelcountmode-constraints>
    // panics for `ChannelCountMode::Max` (NotSupportedError)
    fn set_channel_count_mode(&self, mode: ChannelCountMode) {
        assert_valid_channel_count_mode(mode);
        self.channel_config
            .set_count_mode(mode, self.registration());
    }
}
199
200impl ConvolverNode {
201    /// returns a `ConvolverNode` instance
202    ///
203    /// # Arguments
204    ///
205    /// * `context` - audio context in which the audio node will live.
206    /// * `options` - convolver options
207    ///
208    /// # Panics
209    ///
210    /// Panics when an AudioBuffer is provided via the `ConvolverOptions` with a sample rate
211    /// different from the audio context sample rate.
212    pub fn new<C: BaseAudioContext>(context: &C, options: ConvolverOptions) -> Self {
213        let ConvolverOptions {
214            buffer,
215            disable_normalization,
216            audio_node_options,
217        } = options;
218
219        assert_valid_channel_count(audio_node_options.channel_count);
220        assert_valid_channel_count_mode(audio_node_options.channel_count_mode);
221
222        let mut node = context.base().register(move |registration| {
223            let renderer = ConvolverRenderer {
224                convolvers: None,
225                impulse_length: 0,
226                impulse_number_of_channels: 0,
227                tail_count: 0,
228            };
229
230            let node = Self {
231                registration,
232                channel_config: audio_node_options.into(),
233                normalize: !disable_normalization,
234                buffer: None,
235            };
236
237            (node, Box::new(renderer))
238        });
239
240        // renderer has been sent to render thread, we can send it messages
241        if let Some(buffer) = buffer {
242            node.set_buffer(buffer);
243        }
244
245        node
246    }
247
248    /// Get the current impulse response buffer
249    pub fn buffer(&self) -> Option<&AudioBuffer> {
250        self.buffer.as_ref()
251    }
252
253    /// Set or update the impulse response buffer
254    ///
255    /// # Panics
256    ///
257    /// Panics when the sample rate of the provided AudioBuffer differs from the audio context
258    /// sample rate.
259    pub fn set_buffer(&mut self, buffer: AudioBuffer) {
260        // If the buffer number of channels is not 1, 2, 4, or if the sample-rate of the buffer is
261        // not the same as the sample-rate of its associated BaseAudioContext, a NotSupportedError
262        // MUST be thrown.
263
264        let sample_rate = buffer.sample_rate();
265        assert_eq!(
266            sample_rate,
267            self.context().sample_rate(),
268            "NotSupportedError - sample rate of the convolution buffer must match the audio context"
269        );
270
271        let number_of_channels = buffer.number_of_channels();
272        assert!(
273            [1, 2, 4].contains(&number_of_channels),
274            "NotSupportedError - the convolution buffer must consist of 1, 2 or 4 channels"
275        );
276
277        // normalize before padding because the length of the buffer affects the scale
278        let scale = if self.normalize {
279            normalize_buffer(&buffer)
280        } else {
281            1.
282        };
283
284        let mut convolvers = Vec::<FFTConvolver<f32>>::new();
285        // @note - value defined by "rule of thumb", to be explored further
286        let partition_size = RENDER_QUANTUM_SIZE * 8;
287
288        // Handle multichannel IR
289        // cf. https://webaudio.github.io/web-audio-api/#Convolution-channel-configurations
290        // Note that in case of mono IR we create 2 convolvers to properly handle stereo input
291        for index in 0..number_of_channels.max(2) {
292            // make sure we don't try to access an inexisting channel, cf. note above
293            let channel = index.min(number_of_channels - 1);
294
295            let mut scaled_channel = vec![0.; buffer.length()];
296            scaled_channel
297                .iter_mut()
298                .zip(buffer.get_channel_data(channel))
299                .for_each(|(o, i)| *o = *i * scale);
300
301            let mut convolver = FFTConvolver::<f32>::default();
302            convolver
303                .init(partition_size, &scaled_channel)
304                .expect("Unable to initialize convolution engine");
305
306            convolvers.push(convolver);
307        }
308
309        let msg = ConvolverInfosMessage {
310            convolvers: Some(convolvers),
311            impulse_length: buffer.length(),
312            impulse_number_of_channels: number_of_channels,
313        };
314
315        self.registration.post_message(msg);
316        self.buffer = Some(buffer);
317    }
318
319    /// Denotes if the response buffer will be scaled with an equal-power normalization
320    pub fn normalize(&self) -> bool {
321        self.normalize
322    }
323
324    /// Update the `normalize` setting. This will only have an effect when `set_buffer` is called.
325    pub fn set_normalize(&mut self, value: bool) {
326        self.normalize = value;
327    }
328}
329
/// Message sent from the control thread to the renderer when a new impulse
/// response is set, cf. `ConvolverNode::set_buffer`
struct ConvolverInfosMessage {
    /// Pre-initialized convolution engines, one per IR channel (2 minimum);
    /// wrapped in `Option` so the renderer can swap them in without deallocating
    convolvers: Option<Vec<FFTConvolver<f32>>>,
    /// Length in frames of the impulse response (drives the tail time)
    impulse_length: usize,
    /// Number of channels of the impulse response buffer (1, 2 or 4)
    impulse_number_of_channels: usize,
}
335
/// Render-thread counterpart of `ConvolverNode`
struct ConvolverRenderer {
    /// Convolution engines received via `ConvolverInfosMessage`;
    /// `None` until a response buffer is set (passthrough behavior)
    convolvers: Option<Vec<FFTConvolver<f32>>>,
    /// Length in frames of the current impulse response
    impulse_length: usize,
    /// Number of channels of the current impulse response (1, 2 or 4)
    impulse_number_of_channels: usize,
    /// Number of frames rendered since the input went silent, used to
    /// decide when the tail has been fully flushed
    tail_count: usize,
}
342
343impl AudioProcessor for ConvolverRenderer {
344    fn process(
345        &mut self,
346        inputs: &[AudioRenderQuantum],
347        outputs: &mut [AudioRenderQuantum],
348        _params: AudioParamValues<'_>,
349        _scope: &AudioWorkletGlobalScope,
350    ) -> bool {
351        // single input/output node
352        let input = &inputs[0];
353        let output = &mut outputs[0];
354        output.force_mono();
355
356        let convolvers = match &mut self.convolvers {
357            None => {
358                // no convolution buffer set, passthrough
359                *output = input.clone();
360                return !input.is_silent();
361            }
362            Some(convolvers) => convolvers,
363        };
364
365        // https://webaudio.github.io/web-audio-api/#Convolution-channel-configurations
366        // @todo - handle tailtime per channel if input number of channel changes
367        match (input.number_of_channels(), self.impulse_number_of_channels) {
368            (1, 1) => {
369                output.set_number_of_channels(1);
370
371                let i = &input.channel_data(0)[..];
372                let o = &mut output.channel_data_mut(0)[..];
373                let _ = convolvers[0].process(i, o);
374            }
375            (1, 2) => {
376                output.set_number_of_channels(2);
377
378                let i = &input.channel_data(0)[..];
379
380                let o_left = &mut output.channel_data_mut(0)[..];
381                let _ = convolvers[0].process(i, o_left);
382
383                let o_right = &mut output.channel_data_mut(1)[..];
384                let _ = convolvers[1].process(i, o_right);
385            }
386            (2, 1) => {
387                output.set_number_of_channels(2);
388
389                let i_left = &input.channel_data(0)[..];
390                let o_left = &mut output.channel_data_mut(0)[..];
391                let _ = convolvers[0].process(i_left, o_left);
392
393                let i_right = &input.channel_data(1)[..];
394                let o_right = &mut output.channel_data_mut(1)[..];
395                let _ = convolvers[1].process(i_right, o_right);
396            }
397            (2, 2) => {
398                output.set_number_of_channels(2);
399
400                let i_left = &input.channel_data(0)[..];
401                let o_left = &mut output.channel_data_mut(0)[..];
402                let _ = convolvers[0].process(i_left, o_left);
403
404                let i_right = &input.channel_data(1)[..];
405                let o_right = &mut output.channel_data_mut(1)[..];
406                let _ = convolvers[1].process(i_right, o_right);
407            }
408            (2, 4) => {
409                output.set_number_of_channels(4);
410
411                let i_left = &input.channel_data(0)[..];
412
413                let o_0 = &mut output.channel_data_mut(0)[..];
414                let _ = convolvers[0].process(i_left, o_0);
415                let o_1 = &mut output.channel_data_mut(1)[..];
416                let _ = convolvers[1].process(i_left, o_1);
417
418                let i_right = &input.channel_data(1)[..];
419
420                let o_2 = &mut output.channel_data_mut(2)[..];
421                let _ = convolvers[2].process(i_right, o_2);
422                let o_3 = &mut output.channel_data_mut(3)[..];
423                let _ = convolvers[3].process(i_right, o_3);
424
425                // mix output back to stereo
426                let o_2 = output.channel_data(2).clone();
427                let o_3 = output.channel_data(3).clone();
428
429                output
430                    .channel_data_mut(0)
431                    .iter_mut()
432                    .zip(o_2.iter())
433                    .for_each(|(l, sl)| *l += *sl);
434
435                output
436                    .channel_data_mut(1)
437                    .iter_mut()
438                    .zip(o_3.iter())
439                    .for_each(|(r, sr)| *r += *sr);
440
441                output.set_number_of_channels(2);
442            }
443            (1, 4) => {
444                output.set_number_of_channels(4);
445
446                let i = &input.channel_data(0)[..];
447
448                let o_0 = &mut output.channel_data_mut(0)[..];
449                let _ = convolvers[0].process(i, o_0);
450                let o_1 = &mut output.channel_data_mut(1)[..];
451                let _ = convolvers[1].process(i, o_1);
452                let o_2 = &mut output.channel_data_mut(2)[..];
453                let _ = convolvers[2].process(i, o_2);
454                let o_3 = &mut output.channel_data_mut(3)[..];
455                let _ = convolvers[3].process(i, o_3);
456
457                // mix output back to stereo
458                let o_2 = output.channel_data(2).clone();
459                let o_3 = output.channel_data(3).clone();
460
461                output
462                    .channel_data_mut(0)
463                    .iter_mut()
464                    .zip(o_2.iter())
465                    .for_each(|(l, sl)| *l += *sl);
466
467                output
468                    .channel_data_mut(1)
469                    .iter_mut()
470                    .zip(o_3.iter())
471                    .for_each(|(r, sr)| *r += *sr);
472
473                output.set_number_of_channels(2);
474            }
475            _ => unreachable!(),
476        }
477
478        // handle tail time
479        if input.is_silent() {
480            self.tail_count += RENDER_QUANTUM_SIZE;
481            return self.tail_count < self.impulse_length;
482        }
483
484        self.tail_count = 0;
485
486        true
487    }
488
489    fn onmessage(&mut self, msg: &mut dyn Any) {
490        if let Some(msg) = msg.downcast_mut::<ConvolverInfosMessage>() {
491            let ConvolverInfosMessage {
492                convolvers,
493                impulse_length,
494                impulse_number_of_channels,
495            } = msg;
496            // Avoid deallocation in the render thread by swapping the convolver.
497            std::mem::swap(&mut self.convolvers, convolvers);
498            self.impulse_length = *impulse_length;
499            self.impulse_number_of_channels = *impulse_number_of_channels;
500
501            return;
502        }
503
504        log::warn!("ConvolverRenderer: Dropping incoming message {msg:?}");
505    }
506}
507
#[cfg(test)]
mod tests {
    use float_eq::assert_float_eq;

    use crate::context::{BaseAudioContext, OfflineAudioContext};
    use crate::node::{AudioBufferSourceNode, AudioBufferSourceOptions, AudioScheduledSourceNode};

    use super::*;

    // constructing the node with an IR whose sample rate differs from the
    // context sample rate must panic (NotSupportedError)
    #[test]
    #[should_panic]
    fn test_buffer_sample_rate_matches() {
        let context = OfflineAudioContext::new(1, 128, 44100.);

        let ir = vec![1.];
        let ir = AudioBuffer::from(vec![ir; 1], 48000.); // sample_rate differs
        let options = ConvolverOptions {
            buffer: Some(ir),
            ..ConvolverOptions::default()
        };

        let _ = ConvolverNode::new(&context, options);
    }

    // constructing the node with an IR that does not have 1, 2 or 4 channels
    // must panic (NotSupportedError)
    #[test]
    #[should_panic]
    fn test_buffer_must_have_1_2_4_channels() {
        let context = OfflineAudioContext::new(1, 128, 48000.);

        let ir = vec![1.];
        let ir = AudioBuffer::from(vec![ir; 3], 48000.); // three channels
        let options = ConvolverOptions {
            buffer: Some(ir),
            ..ConvolverOptions::default()
        };

        let _ = ConvolverNode::new(&context, options);
    }

    // an IR provided via the constructor options behaves like one set through
    // `set_buffer`; normalization is on by default, hence the calibration
    // factor in the expected output
    #[test]
    fn test_constructor_options_buffer() {
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(1, 10, sample_rate);

        let ir = vec![1.];
        let calibration = 0.00125;
        let channel_data = vec![0., 1., 0., -1., 0.];
        let expected = [0., calibration, 0., -calibration, 0., 0., 0., 0., 0., 0.];

        // identity ir
        let ir = AudioBuffer::from(vec![ir; 1], sample_rate);
        let options = ConvolverOptions {
            buffer: Some(ir),
            ..ConvolverOptions::default()
        };
        let conv = ConvolverNode::new(&context, options);
        conv.connect(&context.destination());

        let buffer = AudioBuffer::from(vec![channel_data; 1], sample_rate);
        let mut src = context.create_buffer_source();
        src.connect(&conv);
        src.set_buffer(buffer);
        src.start();

        let output = context.start_rendering_sync();

        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // helper: render `signal` through a ConvolverNode (optionally loaded with
    // `impulse_resp`, default options) in a mono offline context of `length` frames
    fn test_convolve(signal: &[f32], impulse_resp: Option<Vec<f32>>, length: usize) -> AudioBuffer {
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(1, length, sample_rate);

        let input = AudioBuffer::from(vec![signal.to_vec()], sample_rate);
        let mut src = AudioBufferSourceNode::new(&context, AudioBufferSourceOptions::default());
        src.set_buffer(input);
        src.start();

        let mut conv = ConvolverNode::new(&context, ConvolverOptions::default());
        if let Some(ir) = impulse_resp {
            conv.set_buffer(AudioBuffer::from(vec![ir.to_vec()], sample_rate));
        }

        src.connect(&conv);
        conv.connect(&context.destination());

        context.start_rendering_sync()
    }

    // with no IR set the node passes the signal through unmodified
    #[test]
    fn test_passthrough() {
        let output = test_convolve(&[0., 1., 0., -1., 0.], None, 10);
        let expected = [0., 1., 0., -1., 0., 0., 0., 0., 0., 0.];
        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // an empty IR yields silence
    #[test]
    fn test_empty() {
        let ir = vec![];
        let output = test_convolve(&[0., 1., 0., -1., 0.], Some(ir), 10);
        let expected = [0.; 10];
        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // an all-zero IR yields silence
    #[test]
    fn test_zeroed() {
        let ir = vec![0., 0., 0., 0., 0., 0.];
        let output = test_convolve(&[0., 1., 0., -1., 0.], Some(ir), 10);
        let expected = [0.; 10];
        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // a single-sample unity IR reproduces the input scaled by the
    // normalization calibration factor
    #[test]
    fn test_identity() {
        let ir = vec![1.];
        let calibration = 0.00125;
        let output = test_convolve(&[0., 1., 0., -1., 0.], Some(ir), 10);
        let expected = [0., calibration, 0., -calibration, 0., 0., 0., 0., 0., 0.];
        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // a two-sample unity IR sums the input with a one-sample delayed copy
    #[test]
    fn test_two_id() {
        let ir = vec![1., 1.];
        let calibration = 0.00125;
        let output = test_convolve(&[0., 1., 0., -1., 0.], Some(ir), 10);
        let expected = [
            0.,
            calibration,
            calibration,
            -calibration,
            -calibration,
            0.,
            0.,
            0.,
            0.,
            0.,
        ];
        assert_float_eq!(output.get_channel_data(0), &expected[..], abs_all <= 1E-6);
    }

    // the node must keep producing output after its input has gone silent,
    // for as long as the impulse response lasts
    #[test]
    fn test_should_have_tail_time() {
        // impulse response of length 256
        const IR_LEN: usize = 256;
        let ir = vec![1.; IR_LEN];

        // unity input signal
        let input = &[1.];

        // render into a buffer of size 512
        let output = test_convolve(input, Some(ir), 512);

        // we expect non-zero output in the range 0 to IR_LEN
        let output = output.channel_data(0).as_slice();
        assert!(!output[..IR_LEN].iter().any(|v| *v <= 1E-6));
        assert_float_eq!(&output[IR_LEN..], &[0.; 512 - IR_LEN][..], abs_all <= 1E-6);
    }

    // (1, 1) configuration: mono input through a mono IR (one-sample delay)
    #[test]
    fn test_channel_config_1_chan_in_1_chan_ir() {
        let number_of_channels = 1;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1.]], sample_rate);
        let ir = AudioBuffer::from(vec![vec![0., 1.]], sample_rate);

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected = [0.; 128];
        expected[1] = 1.;

        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected[..],
            abs_all <= 1e-7
        );
    }

    // (1, 2) configuration: mono input through a stereo IR with different
    // delays per channel -> stereo output
    #[test]
    fn test_channel_config_1_chan_in_2_chan_ir() {
        let number_of_channels = 2;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1.]], sample_rate);
        let ir = AudioBuffer::from(vec![vec![0., 1., 0.], vec![0., 0., 1.]], sample_rate);

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected_left = [0.; 128];
        expected_left[1] = 1.;

        let mut expected_right = [0.; 128];
        expected_right[2] = 1.;

        assert_eq!(result.number_of_channels(), 2);
        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected_left[..],
            abs_all <= 1e-7
        );
        assert_float_eq!(
            result.get_channel_data(1)[..],
            expected_right[..],
            abs_all <= 1e-7
        );
    }

    // (2, 1) configuration: stereo input through a mono IR, each input channel
    // convolved independently with the same (duplicated) IR
    #[test]
    fn test_channel_config_2_chan_in_1_chan_ir() {
        let number_of_channels = 2;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1., 0.], vec![0., 1.]], sample_rate);
        let ir = AudioBuffer::from(vec![vec![0., 1.]], sample_rate);

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected_left = [0.; 128];
        expected_left[1] = 1.;

        let mut expected_right = [0.; 128];
        expected_right[2] = 1.;

        assert_eq!(result.number_of_channels(), 2);
        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected_left[..],
            abs_all <= 1e-7
        );
        assert_float_eq!(
            result.get_channel_data(1)[..],
            expected_right[..],
            abs_all <= 1e-7
        );
    }

    // (2, 2) configuration: stereo input through a stereo IR, left/right
    // processed pairwise
    #[test]
    fn test_channel_config_2_chan_in_2_chan_ir() {
        let number_of_channels = 2;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1., 0.], vec![0., 1.]], sample_rate);
        let ir = AudioBuffer::from(vec![vec![0., 1., 0.], vec![0., 0., 1.]], sample_rate);

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected_left = [0.; 128];
        expected_left[1] = 1.;

        let mut expected_right = [0.; 128];
        expected_right[3] = 1.;

        assert_eq!(result.number_of_channels(), 2);
        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected_left[..],
            abs_all <= 1e-7
        );
        assert_float_eq!(
            result.get_channel_data(1)[..],
            expected_right[..],
            abs_all <= 1e-7
        );
    }

    // (2, 4) "true stereo" configuration: each IR channel routes one input
    // channel to one output channel, then the result is downmixed to stereo
    #[test]
    fn test_channel_config_2_chan_in_4_chan_ir() {
        let number_of_channels = 2;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1., 0.], vec![0., 1.]], sample_rate);
        let ir = AudioBuffer::from(
            vec![
                vec![0., 1., 0., 0., 0.], // in 0 -> out 0
                vec![0., 0., 1., 0., 0.], // in 0 -> out 1
                vec![0., 0., 0., 1., 0.], // in 1 -> out 0
                vec![0., 0., 0., 0., 1.], // in 1 -> out 1
            ],
            sample_rate,
        );

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected_left = [0.; 128];
        expected_left[1] = 1.;
        expected_left[4] = 1.;

        let mut expected_right = [0.; 128];
        expected_right[2] = 1.;
        expected_right[5] = 1.;

        assert_eq!(result.number_of_channels(), 2);
        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected_left[..],
            abs_all <= 1e-7
        );
        assert_float_eq!(
            result.get_channel_data(1)[..],
            expected_right[..],
            abs_all <= 1e-7
        );
    }

    // (1, 4) configuration: the mono input feeds all four IR channels, then
    // the 4-channel result is downmixed to stereo
    #[test]
    fn test_channel_config_1_chan_in_4_chan_ir() {
        let number_of_channels = 2;
        let length = 128;
        let sample_rate = 44100.;
        let mut context = OfflineAudioContext::new(number_of_channels, length, sample_rate);

        let input = AudioBuffer::from(vec![vec![1., 0.]], sample_rate);
        let ir = AudioBuffer::from(
            vec![
                vec![0., 1., 0., 0., 0.], // in 0 -> out 0
                vec![0., 0., 1., 0., 0.], // in 0 -> out 1
                vec![0., 0., 0., 1., 0.], // in 0 -> out 0
                vec![0., 0., 0., 0., 1.], // in 0 -> out 1
            ],
            sample_rate,
        );

        let mut src = AudioBufferSourceNode::new(
            &context,
            AudioBufferSourceOptions {
                buffer: Some(input),
                ..AudioBufferSourceOptions::default()
            },
        );

        let conv = ConvolverNode::new(
            &context,
            ConvolverOptions {
                buffer: Some(ir),
                disable_normalization: true,
                ..ConvolverOptions::default()
            },
        );

        src.connect(&conv);
        conv.connect(&context.destination());
        src.start();

        let result = context.start_rendering_sync();

        let mut expected_left = [0.; 128];
        expected_left[1] = 1.;
        expected_left[3] = 1.;

        let mut expected_right = [0.; 128];
        expected_right[2] = 1.;
        expected_right[4] = 1.;

        assert_eq!(result.number_of_channels(), 2);
        assert_float_eq!(
            result.get_channel_data(0)[..],
            expected_left[..],
            abs_all <= 1e-7
        );
        assert_float_eq!(
            result.get_channel_data(1)[..],
            expected_right[..],
            abs_all <= 1e-7
        );
    }
}