web_audio_api/node/audio_buffer_source.rs

use std::any::Any;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::buffer::AudioBuffer;
use crate::context::{AudioContextRegistration, AudioParamId, BaseAudioContext};
use crate::param::{AudioParam, AudioParamDescriptor, AutomationRate};
use crate::render::{
    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
};
use crate::{assert_valid_time_value, AtomicF64, RENDER_QUANTUM_SIZE};

use super::{AudioNode, AudioScheduledSourceNode, ChannelConfig};

/// Options for constructing an [`AudioBufferSourceNode`]
// dictionary AudioBufferSourceOptions {
//   AudioBuffer? buffer;
//   float detune = 0;
//   boolean loop = false;
//   double loopEnd = 0;
//   double loopStart = 0;
//   float playbackRate = 1;
// };
//
// @note - This dictionary extends AudioNodeOptions in the spec, but those options
// are useless for source nodes as they only instruct how to up-mix inputs.
// This is a common source of confusion, see e.g. https://github.com/mdn/content/pull/18472, and
// an issue in the spec, see discussion in https://github.com/WebAudio/web-audio-api/issues/2496
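///
/// A node can, for instance, be constructed at half playback rate using struct
/// update syntax (minimal sketch):
///
/// ```no_run
/// use web_audio_api::context::AudioContext;
/// use web_audio_api::node::{AudioBufferSourceNode, AudioBufferSourceOptions};
///
/// let context = AudioContext::default();
/// let options = AudioBufferSourceOptions {
///     playback_rate: 0.5,
///     ..Default::default()
/// };
/// let src = AudioBufferSourceNode::new(&context, options);
/// ```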
#[derive(Clone, Debug)]
pub struct AudioBufferSourceOptions {
    pub buffer: Option<AudioBuffer>,
    pub detune: f32,
    pub loop_: bool,
    pub loop_start: f64,
    pub loop_end: f64,
    pub playback_rate: f32,
}

impl Default for AudioBufferSourceOptions {
    fn default() -> Self {
        Self {
            buffer: None,
            detune: 0.,
            loop_: false,
            loop_start: 0.,
            loop_end: 0.,
            playback_rate: 1.,
        }
    }
}

#[derive(Debug, Copy, Clone)]
struct PlaybackInfo {
    prev_frame_index: usize,
    k: f64,
}

#[derive(Debug, Clone, Copy)]
struct LoopState {
    pub is_looping: bool,
    pub start: f64,
    pub end: f64,
}

/// Instructions to modify the rendering: start/stop scheduling and loop settings
#[derive(Debug, Clone)]
enum ControlMessage {
    StartWithOffsetAndDuration(f64, f64, f64),
    Stop(f64),
    Loop(bool),
    LoopStart(f64),
    LoopEnd(f64),
}

/// `AudioBufferSourceNode` represents an audio source backed by an in-memory
/// audio asset (i.e. an audio file completely loaded in memory), stored in an
/// [`AudioBuffer`].
///
/// - MDN documentation: <https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode>
/// - specification: <https://webaudio.github.io/web-audio-api/#AudioBufferSourceNode>
/// - see also: [`BaseAudioContext::create_buffer_source`]
///
/// # Usage
///
/// ```no_run
/// use std::fs::File;
/// use web_audio_api::context::{BaseAudioContext, AudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// // create an `AudioContext`
/// let context = AudioContext::default();
/// // load and decode a soundfile
/// let file = File::open("samples/sample.wav").unwrap();
/// let audio_buffer = context.decode_audio_data_sync(file).unwrap();
/// // play the sound file
/// let mut src = context.create_buffer_source();
/// src.set_buffer(audio_buffer);
/// src.connect(&context.destination());
/// src.start();
/// ```
///
/// # Examples
///
/// - `cargo run --release --example trigger_soundfile`
/// - `cargo run --release --example granular`
///
#[derive(Debug)]
pub struct AudioBufferSourceNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    detune: AudioParam,        // has constraints, no a-rate
    playback_rate: AudioParam, // has constraints, no a-rate
    buffer_time: Arc<AtomicF64>,
    buffer: Option<AudioBuffer>,
    loop_state: LoopState,
    start_stop_count: u8,
}

impl AudioNode for AudioBufferSourceNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> usize {
        0
    }

    fn number_of_outputs(&self) -> usize {
        1
    }
}

impl AudioScheduledSourceNode for AudioBufferSourceNode {
    fn start(&mut self) {
        let start = self.registration.context().current_time();
        self.start_at_with_offset_and_duration(start, 0., f64::MAX);
    }

    fn start_at(&mut self, when: f64) {
        self.start_at_with_offset_and_duration(when, 0., f64::MAX);
    }

    fn stop(&mut self) {
        let stop = self.registration.context().current_time();
        self.stop_at(stop);
    }

    fn stop_at(&mut self, when: f64) {
        assert_valid_time_value(when);
        assert_eq!(
            self.start_stop_count, 1,
            "InvalidStateError - Cannot call `stop` before `start`"
        );

        self.start_stop_count += 1;
        self.registration.post_message(ControlMessage::Stop(when));
    }
}

impl AudioBufferSourceNode {
    /// Create a new [`AudioBufferSourceNode`] instance
    pub fn new<C: BaseAudioContext>(context: &C, options: AudioBufferSourceOptions) -> Self {
        let AudioBufferSourceOptions {
            buffer,
            detune,
            loop_,
            loop_start,
            loop_end,
            playback_rate,
        } = options;

        let mut node = context.base().register(move |registration| {
            // these parameters can't be changed to a-rate
            // @see - <https://webaudio.github.io/web-audio-api/#audioparam-automation-rate-constraints>
            let detune_param_options = AudioParamDescriptor {
                name: String::new(),
                min_value: f32::MIN,
                max_value: f32::MAX,
                default_value: 0.,
                automation_rate: AutomationRate::K,
            };
            let (mut d_param, d_proc) =
                context.create_audio_param(detune_param_options, &registration);
            d_param.set_automation_rate_constrained(true);
            d_param.set_value(detune);

            let playback_rate_param_options = AudioParamDescriptor {
                name: String::new(),
                min_value: f32::MIN,
                max_value: f32::MAX,
                default_value: 1.,
                automation_rate: AutomationRate::K,
            };
            let (mut pr_param, pr_proc) =
                context.create_audio_param(playback_rate_param_options, &registration);
            pr_param.set_automation_rate_constrained(true);
            pr_param.set_value(playback_rate);

            let loop_state = LoopState {
                is_looping: loop_,
                start: loop_start,
                end: loop_end,
            };

            let renderer = AudioBufferSourceRenderer {
                start_time: f64::MAX,
                stop_time: f64::MAX,
                duration: f64::MAX,
                offset: 0.,
                buffer: None,
                detune: d_proc,
                playback_rate: pr_proc,
                loop_state,
                render_state: AudioBufferRendererState::default(),
            };

            let node = Self {
                registration,
                channel_config: ChannelConfig::default(),
                detune: d_param,
                playback_rate: pr_param,
                buffer_time: Arc::clone(&renderer.render_state.buffer_time),
                buffer: None,
                loop_state,
                start_stop_count: 0,
            };

            (node, Box::new(renderer))
        });

        // renderer has been sent to render thread, we can send it messages
        if let Some(buf) = buffer {
            node.set_buffer(buf);
        }

        node
    }

    /// Start the playback at the given time and with a given offset
    ///
    /// # Panics
    ///
    /// Panics if the source was already started
    pub fn start_at_with_offset(&mut self, start: f64, offset: f64) {
        self.start_at_with_offset_and_duration(start, offset, f64::MAX);
    }

    /// Start the playback at the given time, with a given offset, for a given duration
    ///
    /// # Panics
    ///
    /// Panics if the source was already started
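    ///
    /// A minimal sketch, playing a 100ms grain located 1 second into the buffer:
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// // (a buffer longer than 1.1 second is assumed to have been set)
    /// let now = context.current_time();
    /// src.start_at_with_offset_and_duration(now, 1., 0.1);
    /// ```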
    pub fn start_at_with_offset_and_duration(&mut self, start: f64, offset: f64, duration: f64) {
        assert_valid_time_value(start);
        assert_valid_time_value(offset);
        assert_valid_time_value(duration);
        assert_eq!(
            self.start_stop_count, 0,
            "InvalidStateError - Cannot call `start` twice"
        );

        self.start_stop_count += 1;
        let control = ControlMessage::StartWithOffsetAndDuration(start, offset, duration);
        self.registration.post_message(control);
    }

    /// Current buffer value (nullable)
    pub fn buffer(&self) -> Option<&AudioBuffer> {
        self.buffer.as_ref()
    }

    /// Provide an [`AudioBuffer`] as the source of data to be played back
    ///
    /// # Panics
    ///
    /// Panics if a buffer has already been given to the source (through `new` or through
    /// `set_buffer`)
    pub fn set_buffer(&mut self, audio_buffer: AudioBuffer) {
        let clone = audio_buffer.clone();

        assert!(
            self.buffer.is_none(),
            "InvalidStateError - cannot assign buffer twice",
        );
        self.buffer = Some(audio_buffer);

        self.registration.post_message(clone);
    }

    /// K-rate [`AudioParam`] that defines the speed at which the [`AudioBuffer`]
    /// will be played, e.g.:
    /// - `0.5` will play the file at half speed
    /// - `-1` will play the file in reverse
    ///
    /// Note that playback rate will also alter the pitch of the [`AudioBuffer`]
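    ///
    /// A minimal sketch, playing a source at half speed (one octave lower):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::node::AudioScheduledSourceNode;
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// src.playback_rate().set_value(0.5);
    /// src.start();
    /// ```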
    pub fn playback_rate(&self) -> &AudioParam {
        &self.playback_rate
    }

    /// Current playhead position in seconds within the [`AudioBuffer`].
    ///
    /// This value is updated at the end of each render quantum.
    ///
    /// Unofficial v2 API extension, not part of the spec yet.
    /// See also: <https://github.com/WebAudio/web-audio-api/issues/2397#issuecomment-709478405>
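    ///
    /// A minimal sketch, polling the playhead from the control thread:
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::node::AudioScheduledSourceNode;
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// src.start();
    /// // ...sometime later
    /// println!("playhead position: {} s", src.position());
    /// ```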
    pub fn position(&self) -> f64 {
        self.buffer_time.load(Ordering::Relaxed)
    }

    /// K-rate [`AudioParam`] that defines a pitch transposition of the file,
    /// expressed in cents
    ///
    /// see <https://en.wikipedia.org/wiki/Cent_(music)>
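    ///
    /// A minimal sketch, transposing a source one octave down (-1200 cents):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::node::AudioScheduledSourceNode;
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// src.detune().set_value(-1200.);
    /// src.start();
    /// ```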
    pub fn detune(&self) -> &AudioParam {
        &self.detune
    }

    /// Defines if the playback of the [`AudioBuffer`] should be looped
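    ///
    /// A minimal sketch, looping a one-second segment (a buffer longer than
    /// two seconds is assumed to have been set):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::node::AudioScheduledSourceNode;
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// // loop between 1s and 2s, in the time reference of the buffer
    /// src.set_loop(true);
    /// src.set_loop_start(1.);
    /// src.set_loop_end(2.);
    /// src.start();
    /// ```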
    #[allow(clippy::missing_panics_doc)]
    pub fn loop_(&self) -> bool {
        self.loop_state.is_looping
    }

    /// Toggle looping of the playback
    pub fn set_loop(&mut self, value: bool) {
        self.loop_state.is_looping = value;
        self.registration.post_message(ControlMessage::Loop(value));
    }

    /// Defines the loop start point, in the time reference of the [`AudioBuffer`]
    pub fn loop_start(&self) -> f64 {
        self.loop_state.start
    }

    /// Set the loop start point, in the time reference of the [`AudioBuffer`]
    pub fn set_loop_start(&mut self, value: f64) {
        self.loop_state.start = value;
        self.registration
            .post_message(ControlMessage::LoopStart(value));
    }

    /// Defines the loop end point, in the time reference of the [`AudioBuffer`]
    pub fn loop_end(&self) -> f64 {
        self.loop_state.end
    }

    /// Set the loop end point, in the time reference of the [`AudioBuffer`]
    pub fn set_loop_end(&mut self, value: f64) {
        self.loop_state.end = value;
        self.registration
            .post_message(ControlMessage::LoopEnd(value));
    }
}

struct AudioBufferRendererState {
    buffer_time: Arc<AtomicF64>,
    started: bool,
    entered_loop: bool,
    buffer_time_elapsed: f64,
    is_aligned: bool,
    ended: bool,
}

impl Default for AudioBufferRendererState {
    fn default() -> Self {
        Self {
            buffer_time: Arc::new(AtomicF64::new(0.)),
            started: false,
            entered_loop: false,
            buffer_time_elapsed: 0.,
            is_aligned: false,
            ended: false,
        }
    }
}

struct AudioBufferSourceRenderer {
    start_time: f64,
    stop_time: f64,
    offset: f64,
    duration: f64,
    buffer: Option<AudioBuffer>,
    detune: AudioParamId,
    playback_rate: AudioParamId,
    loop_state: LoopState,
    render_state: AudioBufferRendererState,
}

impl AudioBufferSourceRenderer {
    fn handle_control_message(&mut self, control: &ControlMessage) {
        match control {
            ControlMessage::StartWithOffsetAndDuration(when, offset, duration) => {
                self.start_time = *when;
                self.offset = *offset;
                self.duration = *duration;
            }
            ControlMessage::Stop(when) => self.stop_time = *when,
            ControlMessage::Loop(is_looping) => self.loop_state.is_looping = *is_looping,
            ControlMessage::LoopStart(loop_start) => self.loop_state.start = *loop_start,
            ControlMessage::LoopEnd(loop_end) => self.loop_state.end = *loop_end,
        }

        self.clamp_loop_boundaries();
    }

    fn clamp_loop_boundaries(&mut self) {
        if let Some(buffer) = &self.buffer {
            let duration = buffer.duration();

            // https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopstart
            if self.loop_state.start < 0. {
                self.loop_state.start = 0.;
            } else if self.loop_state.start > duration {
                self.loop_state.start = duration;
            }

            // https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopend
            if self.loop_state.end <= 0. || self.loop_state.end > duration {
                self.loop_state.end = duration;
            }
        }
    }
}

impl AudioProcessor for AudioBufferSourceRenderer {
    fn process(
        &mut self,
        _inputs: &[AudioRenderQuantum], // This node has no input
        outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues<'_>,
        scope: &AudioWorkletGlobalScope,
    ) -> bool {
        // Single output node
        let output = &mut outputs[0];

        if self.render_state.ended {
            output.make_silent();
            return false;
        }

        let sample_rate = scope.sample_rate as f64;
        let dt = 1. / sample_rate;
        let block_duration = dt * RENDER_QUANTUM_SIZE as f64;
        let next_block_time = scope.current_time + block_duration;

        // Return early if start_time is beyond this block
        if self.start_time >= next_block_time {
            output.make_silent();
            // #462 AudioScheduledSourceNodes that have not been scheduled to start can safely
            // return tail_time false in order to be collected if their control handle drops.
            return self.start_time != f64::MAX;
        }

        // If the buffer has not been set, wait for it.
        let buffer = match &self.buffer {
            None => {
                output.make_silent();
                // #462 like the above arm, we can safely return tail_time false
                // if this node has no buffer set.
                return false;
            }
            Some(b) => b,
        };

        let LoopState {
            is_looping,
            start: loop_start,
            end: loop_end,
        } = self.loop_state;

        // these will only be used if `loop_` is true, so no need for `Option`
        let mut actual_loop_start = 0.;
        let mut actual_loop_end = 0.;

        // compute the compound parameter at k-rate; these parameters have automation
        // rate constraints, cf.
        // https://webaudio.github.io/web-audio-api/#audioparam-automation-rate-constraints
        let detune = params.get(&self.detune)[0];
        let playback_rate = params.get(&self.playback_rate)[0];
        let computed_playback_rate = (playback_rate * (detune / 1200.).exp2()) as f64;

        let buffer_duration = buffer.duration();
        let buffer_length = buffer.length();
        // multiplier to be applied on `position` to tackle a possible difference
        // between the context and buffer sample rates. As this is an edge case,
        // we just linearly interpolate, thus favoring performance over quality
        let sampling_ratio = buffer.sample_rate() as f64 / sample_rate;

        // Load the buffer time from the render state.
        // The render state has to be updated before leaving this method!
        let mut buffer_time = self.render_state.buffer_time.load(Ordering::Relaxed);

        output.set_number_of_channels(buffer.number_of_channels());

        // go through the algorithm described in the spec
        // @see <https://webaudio.github.io/web-audio-api/#playback-AudioBufferSourceNode>
        let block_time = scope.current_time;

        // prevent scheduling in the past:
        // "If 0 is passed in for this value or if the value is less than
        // currentTime, then the sound will start playing immediately"
        // cf. https://webaudio.github.io/web-audio-api/#dom-audioscheduledsourcenode-start-when-when
        if !self.render_state.started && self.start_time < block_time {
            self.start_time = block_time;
        }

        // Define if we can avoid the resampling interpolation in some common cases,
        // basically when:
        // - `src.start()` is called with `audio_context.current_time`,
        //   i.e. start time is aligned with a render quantum block
        // - the AudioBuffer was decoded w/ the right sample rate
        // - no detune or playback_rate changes are made
        // - loop boundaries have not been changed
        if self.start_time == block_time && self.offset == 0. {
            self.render_state.is_aligned = true;
        }

        // these two cases imply resampling
        if sampling_ratio != 1. || computed_playback_rate != 1. {
            self.render_state.is_aligned = false;
        }

        // If loop points are not aligned on a sample, they can imply resampling.
        // For now we just consider that we can go fast track if loop points are
        // bound to the buffer boundaries.
        //
        // By default, cf. clamp_loop_boundaries, loop_start == 0 && loop_end == buffer_duration
        if loop_start != 0. || loop_end != buffer_duration {
            self.render_state.is_aligned = false;
        }

        // If a user-defined end of rendering, i.e. an explicit stop_time or duration,
        // falls within this render quantum, force the slow track as well. It might
        // imply resampling, e.g. if stop_time falls between two samples.
        if buffer_time + block_duration > self.duration
            || block_time + block_duration > self.stop_time
        {
            self.render_state.is_aligned = false;
        }

        if self.render_state.is_aligned {
            // ---------------------------------------------------------------
            // Fast track
            // ---------------------------------------------------------------
            if self.start_time == block_time {
                self.render_state.started = true;
            }

            // buffer ends within this block
            if buffer_time + block_duration > buffer_duration {
                let end_index = buffer.length();
                // In case of a loop point in the middle of the block, this value will
                // be used to recompute `buffer_time` according to the actual loop point.
                let mut loop_point_index: Option<usize> = None;

                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        // we need to recompute that for each channel
                        let buffer_channel = buffer_channel.as_slice();
                        let mut start_index = (buffer_time * sample_rate).round() as usize;
                        let mut offset = 0;

                        for (index, o) in output_channel.iter_mut().enumerate() {
                            let mut buffer_index = start_index + index - offset;

                            *o = if buffer_index < end_index {
                                buffer_channel[buffer_index]
                            } else {
                                if is_looping && buffer_index >= end_index {
                                    loop_point_index = Some(index);
                                    // reset values for the rest of the block
                                    start_index = 0;
                                    offset = index;
                                    buffer_index = 0;
                                }

                                if is_looping {
                                    buffer_channel[buffer_index]
                                } else {
                                    0.
                                }
                            };
                        }
                    });

                if let Some(loop_point_index) = loop_point_index {
                    buffer_time = ((RENDER_QUANTUM_SIZE - loop_point_index) as f64 / sample_rate)
                        % buffer_duration;
                } else {
                    buffer_time += block_duration;
                }
            } else {
                let start_index = (buffer_time * sample_rate).round() as usize;
                let end_index = start_index + RENDER_QUANTUM_SIZE;
                // we can simply copy the slice
                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        let buffer_channel = buffer_channel.as_slice();
                        output_channel.copy_from_slice(&buffer_channel[start_index..end_index]);
                    });

                buffer_time += block_duration;
            }

            self.render_state.buffer_time_elapsed += block_duration;
        } else {
            // ---------------------------------------------------------------
            // Slow track
            // ---------------------------------------------------------------
            if is_looping {
                if loop_start >= 0. && loop_end > 0. && loop_start < loop_end {
                    actual_loop_start = loop_start;
                    actual_loop_end = loop_end;
                } else {
                    actual_loop_start = 0.;
                    actual_loop_end = buffer_duration;
                }
            } else {
                self.render_state.entered_loop = false;
            }

            // internal buffer used to store the playback infos, i.e. (prev_frame_index, k),
            // needed to compute the samples according to the source buffer
            let mut playback_infos = [None; RENDER_QUANTUM_SIZE];

            // compute the position for each sample and store it into `playback_infos`
            for (i, playback_info) in playback_infos.iter_mut().enumerate() {
                let current_time = block_time + i as f64 * dt;

                // Sticky behavior to handle floating point errors due to start time computation
                // cf. test_subsample_buffer_stitching
                if !self.render_state.started && almost::equal(current_time, self.start_time) {
                    self.start_time = current_time;
                }

                // Handle the following cases:
                // - we are before start time
                // - we are after stop time
                // - an explicit duration (in buffer time reference) has been given and we have reached it
                // Note that checking against buffer duration is done below to handle looping
                if current_time < self.start_time
                    || current_time >= self.stop_time
                    || self.render_state.buffer_time_elapsed >= self.duration
                {
                    continue; // nothing more to do for this sample
                }

                // we have now reached start time
                if !self.render_state.started {
                    let delta = current_time - self.start_time;
                    // handle the case where start time falls between the previous sample and this one
                    self.offset += delta * computed_playback_rate;

                    if is_looping && computed_playback_rate >= 0. && self.offset >= actual_loop_end
                    {
                        self.offset = actual_loop_end;
                    }

                    if is_looping && computed_playback_rate < 0. && self.offset < actual_loop_start
                    {
                        self.offset = actual_loop_start;
                    }

                    buffer_time = self.offset;
                    self.render_state.buffer_time_elapsed = delta * computed_playback_rate;
                    self.render_state.started = true;
                }

                if is_looping {
                    if !self.render_state.entered_loop {
                        // playback began before or within loop, and playhead is now past loop start
                        if self.offset < actual_loop_end && buffer_time >= actual_loop_start {
                            self.render_state.entered_loop = true;
                        }

                        // playback began after loop, and playhead is now prior to the loop end
                        if self.offset >= actual_loop_end && buffer_time < actual_loop_end {
                            self.render_state.entered_loop = true;
                        }
                    }

                    // check loop boundaries
                    if self.render_state.entered_loop {
                        while buffer_time >= actual_loop_end {
                            buffer_time -= actual_loop_end - actual_loop_start;
                        }

                        while buffer_time < actual_loop_start {
                            buffer_time += actual_loop_end - actual_loop_start;
                        }
                    }
                }

                if buffer_time >= 0. && buffer_time < buffer_duration {
                    let position = buffer_time * sampling_ratio;
                    let playhead = position * sample_rate;
                    let playhead_floored = playhead.floor();
                    let prev_frame_index = playhead_floored as usize; // can't be < 0.
                    let k = playhead - playhead_floored;

                    // Due to how buffer_time is computed, we can still run into
                    // floating point errors and try to access a non-existing index,
                    // cf. test_end_of_file_slow_track_2
                    if prev_frame_index < buffer_length {
                        *playback_info = Some(PlaybackInfo {
                            prev_frame_index,
                            k,
                        });
                    }
                }

                let time_incr = dt * computed_playback_rate;
                buffer_time += time_incr;
                self.render_state.buffer_time_elapsed += time_incr;
            }

            // fill the output according to the computed positions
            buffer
                .channels()
                .iter()
                .zip(output.channels_mut().iter_mut())
                .for_each(|(buffer_channel, output_channel)| {
                    let buffer_channel = buffer_channel.as_slice();

                    playback_infos
                        .iter()
                        .zip(output_channel.iter_mut())
                        .for_each(|(playhead, o)| {
                            *o = match playhead {
                                Some(PlaybackInfo {
                                    prev_frame_index,
                                    k,
                                }) => {
                                    // `prev_frame_index` cannot be out of bounds
                                    let prev_sample = buffer_channel[*prev_frame_index] as f64;
                                    let next_sample = match buffer_channel.get(prev_frame_index + 1)
                                    {
                                        Some(val) => *val as f64,
                                        // End of buffer
                                        None => {
                                            if is_looping {
                                                if playback_rate >= 0. {
                                                    let start_playhead =
                                                        actual_loop_start * sample_rate;
                                                    let start_index = if start_playhead.floor()
                                                        == start_playhead
                                                    {
                                                        start_playhead as usize
                                                    } else {
                                                        start_playhead as usize + 1
                                                    };

                                                    buffer_channel[start_index] as f64
                                                } else {
                                                    let end_playhead =
                                                        actual_loop_end * sample_rate;
                                                    let end_index = end_playhead as usize;
                                                    buffer_channel[end_index] as f64
                                                }
                                            } else {
                                                // Handle 2 edge cases:
                                                // 1. buffer_time is below buffer duration due to
                                                // floating point errors, but prev_frame_index is the
                                                // last index and k is near 1. We can't filter this
                                                // case earlier because it might break the looping logic.
                                                // 2. the buffer contains only one sample
                                                if almost::equal(*k, 1.) || *prev_frame_index == 0 {
                                                    0.
                                                } else {
                                                    // Extrapolate the next sample from the last two known samples,
                                                    // cf. https://github.com/WebAudio/web-audio-api/issues/2032
                                                    let prev_prev_sample =
                                                        buffer_channel[*prev_frame_index - 1];
                                                    2. * prev_sample - prev_prev_sample as f64
                                                }
                                            }
                                        }
                                    };

                                    (1. - k).mul_add(prev_sample, k * next_sample) as f32
                                }
                                None => 0.,
                            };
                        });
                });
        }

        // Update the render state
        self.render_state
            .buffer_time
            .store(buffer_time, Ordering::Relaxed);

        // The buffer has ended within this block if one of the following conditions holds:
        // 1. the stop time has been reached.
        // 2. the duration has been reached.
        // 3. the end of the buffer has been reached.
        if next_block_time >= self.stop_time
            || self.render_state.buffer_time_elapsed >= self.duration
            || !is_looping
                && (computed_playback_rate > 0. && buffer_time >= buffer_duration
                    || computed_playback_rate < 0. && buffer_time < 0.)
        {
            self.render_state.ended = true;
            scope.send_ended_event();
        }

        true
    }

    fn onmessage(&mut self, msg: &mut dyn Any) {
        if let Some(control) = msg.downcast_ref::<ControlMessage>() {
            self.handle_control_message(control);
            return;
        };

        if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
            if let Some(current_buffer) = &mut self.buffer {
                // Avoid deallocation in the render thread by swapping the buffers.
                std::mem::swap(current_buffer, buffer);
            } else {
                // Creating the tombstone buffer does not cause allocations.
                let tombstone_buffer = AudioBuffer {
                    channels: Default::default(),
                    sample_rate: Default::default(),
                };
                self.buffer = Some(std::mem::replace(buffer, tombstone_buffer));
                self.clamp_loop_boundaries();
            }
            return;
        };

        log::warn!("AudioBufferSourceRenderer: Dropping incoming message {msg:?}");
    }

    fn before_drop(&mut self, scope: &AudioWorkletGlobalScope) {
        if !self.render_state.ended && scope.current_time >= self.start_time {
            scope.send_ended_event();
            self.render_state.ended = true;
        }
    }
}

#[cfg(test)]
mod tests {
    use float_eq::assert_float_eq;
    use std::f32::consts::PI;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};

    use crate::context::{BaseAudioContext, OfflineAudioContext};
    use crate::AudioBufferOptions;
    use crate::RENDER_QUANTUM_SIZE;

    use super::*;

    #[test]
    fn test_construct_with_options_and_run() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE;
        let mut context = OfflineAudioContext::new(1, length, sample_rate);

        let buffer = AudioBuffer::from(vec![vec![1.; RENDER_QUANTUM_SIZE]], sample_rate);
        let options = AudioBufferSourceOptions {
            buffer: Some(buffer),
            ..Default::default()
        };
        let mut src = AudioBufferSourceNode::new(&context, options);
        src.connect(&context.destination());
        src.start();
        let res = context.start_rendering_sync();

        assert_float_eq!(
            res.channel_data(0).as_slice()[..],
            &[1.; RENDER_QUANTUM_SIZE][..],
            abs_all <= 0.
        );
    }

    #[test]
    fn test_playing_some_file() {
        let context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);

        let file = std::fs::File::open("samples/sample.wav").unwrap();
        let expected = context.decode_audio_data_sync(file).unwrap();

        // 44100 will go through fast track
        // 48000 will go through slow track
        [44100, 48000].iter().for_each(|sr| {
            let decoding_context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, *sr as f32);

            let mut filename = "samples/sample-".to_owned();
            filename.push_str(&sr.to_string());
            filename.push_str(".wav");

            let file = std::fs::File::open("samples/sample.wav").unwrap();
            let audio_buffer = decoding_context.decode_audio_data_sync(file).unwrap();

            assert_eq!(audio_buffer.sample_rate(), *sr as f32);

            let mut context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);

            let mut src = context.create_buffer_source();
            src.set_buffer(audio_buffer);
            src.connect(&context.destination());
            src.start_at(context.current_time());
            src.stop_at(context.current_time() + 128.);

            let res = context.start_rendering_sync();
            let diff_abs = if *sr == 44100 {
                0. // fast track
            } else {
                5e-3 // slow track w/ linear interpolation
            };

            // asserting length() is meaningless as this is controlled by the context
            assert_eq!(res.number_of_channels(), expected.number_of_channels());

            // check first 128 samples in left and right channels
            assert_float_eq!(
                res.channel_data(0).as_slice()[..],
                expected.get_channel_data(0)[0..128],
                abs_all <= diff_abs
            );

            assert_float_eq!(
                res.channel_data(1).as_slice()[..],
                expected.get_channel_data(1)[0..128],
                abs_all <= diff_abs
            );
        });
    }

    // slow track
    #[test]
    fn test_sub_quantum_start_1() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[1] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    // adapted from the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html
    #[test]
    fn test_sub_quantum_start_2() {
        let sample_rate = 44_100.;
        let length_in_seconds = 4.;
        let mut context =
            OfflineAudioContext::new(2, (length_in_seconds * sample_rate) as usize, sample_rate);

        let mut dirac = context.create_buffer(2, 512, sample_rate);
        dirac.copy_to_channel(&[1.], 0);
        dirac.copy_to_channel(&[1.], 1);

        let sample_offsets = [0, 3, 512, 517, 1000, 1005, 20000, 21234, 37590];

        sample_offsets.iter().for_each(|index| {
            let time_in_seconds = *index as f64 / sample_rate as f64;

            let mut src = context.create_buffer_source();
            src.set_buffer(dirac.clone());
            src.connect(&context.destination());
            src.start_at(time_in_seconds);
        });

        let res = context.start_rendering_sync();

        let channel_left = res.get_channel_data(0);
        let channel_right = res.get_channel_data(1);
        // assert the left and right channels are equal
        assert_float_eq!(channel_left[..], channel_right[..], abs_all <= 0.);

        // assert we got our dirac at each defined offset
        sample_offsets.iter().for_each(|index| {
            assert_ne!(
                channel_left[*index], 0.,
                "non zero sample at index {:?}",
                index
            );
        });
    }

    #[test]
    fn test_sub_sample_start() {
        // sub sample
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[2] = 0.5;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_sub_quantum_stop_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(0. / sample_rate as f64);
        // stop at the time of the dirac, it should not be played
        src.stop_at(4. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let expected = vec![0.; RENDER_QUANTUM_SIZE];

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_sub_quantum_stop_slow_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);

        src.start_at(1. / sample_rate as f64);
        src.stop_at(4. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let expected = vec![0.; RENDER_QUANTUM_SIZE];

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_sub_sample_stop_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(0. / sample_rate as f64);
        // stop between the two diracs, only the first one should be played
        src.stop_at(4.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[4] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_sub_sample_stop_slow_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1. / sample_rate as f64);
        // stop between the two diracs, only the first one should be played
        src.stop_at(5.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[5] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_start_in_the_past() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, 2 * RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        context.suspend_sync((128. / sample_rate).into(), |context| {
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(0.);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 2 * RENDER_QUANTUM_SIZE];
        expected[128] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_audio_buffer_resampling() {
        [22_500, 38_000, 43_800, 48_000, 96_000]
            .iter()
            .for_each(|sr| {
                let freq = 1.;
                let base_sr = 44_100;
                let mut context = OfflineAudioContext::new(1, base_sr, base_sr as f32);

                // 1Hz sine at different sample rates
                let buf_sr = *sr;
                // safe cast for sample rate, see discussion at #113
                let sample_rate = buf_sr as f32;
                let mut buffer = context.create_buffer(1, buf_sr, sample_rate);
                let mut sine = vec![];

                for i in 0..buf_sr {
                    let phase = freq * i as f32 / buf_sr as f32 * 2. * PI;
                    let sample = phase.sin();
                    sine.push(sample);
                }

                buffer.copy_to_channel(&sine[..], 0);

                let mut src = context.create_buffer_source();
                src.connect(&context.destination());
                src.set_buffer(buffer);
                src.start_at(0. / sample_rate as f64);

                let result = context.start_rendering_sync();
                let channel = result.get_channel_data(0);

                // 1Hz sine at audio context sample rate
                let mut expected = vec![];

                for i in 0..base_sr {
                    let phase = freq * i as f32 / base_sr as f32 * 2. * PI;
                    let sample = phase.sin();
                    expected.push(sample);
                }

                assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
            });
    }

    #[test]
    fn test_playback_rate() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        // 1 Hz sine
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.playback_rate.set_value(0.5);
        src.start();

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // 0.5 Hz sine
        let mut expected = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }

    #[test]
    fn test_negative_playback_rate() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        // 1 Hz sine
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer.clone());
        src.playback_rate.set_value(-1.);
        src.start_at_with_offset(context.current_time(), buffer.duration());

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // -1 Hz sine
        let mut expected: Vec<f32> = sine.into_iter().rev().collect();
        // the offset is at duration (after the last sample), so the result
        // will start with a zero value
        expected.pop();
        expected.insert(0, 0.);

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }

    #[test]
    fn test_detune() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        // 1 Hz sine
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.detune.set_value(-1200.);
        src.start();

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // 0.5 Hz sine
        let mut expected = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }

    #[test]
    fn test_end_of_file_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);

        let mut buffer = context.create_buffer(1, 129, sample_rate);
        let mut data = vec![0.; 129];
        data[0] = 1.;
        data[128] = 1.;
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at(0. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 256];
        expected[0] = 1.;
        expected[128] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_end_of_file_slow_track_1() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);

        let mut buffer = context.create_buffer(1, 129, sample_rate);
        let mut data = vec![0.; 129];
        data[0] = 1.;
        data[128] = 1.;
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at(1. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 256];
        expected[1] = 1.;
        expected[129] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-10);
    }

    #[test]
1352    fn test_with_duration_0() {
1353        let sample_rate = 48_000.;
1354        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1355
1356        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1357        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
1358
1359        let mut src = context.create_buffer_source();
1360        src.connect(&context.destination());
1361        src.set_buffer(dirac);
1362        // duration is between two diracs, only first one should be played
1363        src.start_at_with_offset_and_duration(0., 0., 4.5 / sample_rate as f64);
1364
1365        let result = context.start_rendering_sync();
1366        let channel = result.get_channel_data(0);
1367
1368        let mut expected = vec![0.; 128];
1369        expected[4] = 1.;
1370
1371        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1372    }
1373
1374    #[test]
1375    fn test_with_duration_1() {
1376        let sample_rate = 48_000.;
1377        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1378
1379        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1380        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
1381
1382        let mut src = context.create_buffer_source();
1383        src.connect(&context.destination());
1384        src.set_buffer(dirac);
1385        // duration is between two diracs, only first one should be played
1386        // as we force slow track with start == 1. / sample_rate as f64
1387        // the expected dirac will be at index 5 instead of 4
1388        src.start_at_with_offset_and_duration(
1389            1. / sample_rate as f64,
1390            0. / sample_rate as f64,
1391            4.5 / sample_rate as f64,
1392        );
1393
1394        let result = context.start_rendering_sync();
1395        let channel = result.get_channel_data(0);
1396
1397        let mut expected = vec![0.; 128];
1398        expected[5] = 1.;
1399
1400        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1401    }
1402
    #[test]
    // port from wpt - sub-sample-scheduling.html / sub-sample-grain
    fn test_with_duration_2() {
        let sample_rate = 32_768.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut buffer = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        buffer.copy_to_channel(&[1.; RENDER_QUANTUM_SIZE], 0);

        let start_grain_index = 3.1;
        let end_grain_index = 37.2;

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);

        src.start_at_with_offset_and_duration(
            start_grain_index / sample_rate as f64,
            0.,
            (end_grain_index - start_grain_index) / sample_rate as f64,
        );

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = [1.; RENDER_QUANTUM_SIZE];
        for s in expected
            .iter_mut()
            .take(start_grain_index.floor() as usize + 1)
        {
            *s = 0.;
        }
        for s in expected
            .iter_mut()
            .take(RENDER_QUANTUM_SIZE)
            .skip(end_grain_index.ceil() as usize)
        {
            *s = 0.;
        }

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_with_offset() {
        // an offset always bypasses the slow track
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        // start == 0 (fast track) with an offset of one frame into the buffer:
        // the dirac stored at buffer index 4 is rendered at output index 3,
        // and the 3.5 frame duration stops playback before the second dirac
        src.start_at_with_offset_and_duration(
            0. / sample_rate as f64,
            1. / sample_rate as f64,
            3.5 / sample_rate as f64,
        );

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[3] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_offset_larger_than_buffer_duration() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
        let mut buffer = context.create_buffer(1, 13, sample_rate);
        buffer.copy_to_channel(&[1.; 13], 0);

        let mut src = context.create_buffer_source();
        // connect so the silence assertion actually exercises the node
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at_with_offset(0., 64. / sample_rate as f64); // offset larger than buffer size

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let expected = [0.; RENDER_QUANTUM_SIZE];
        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

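    // Looping a buffer holding a single dirac at index 0 should produce a
    // dirac train with a period of `buffer_len` frames, whatever the buffer
    // length relative to the render quantum size.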
    #[test]
    fn test_fast_track_loop_mono() {
        let sample_rate = 48_000.;
        let len = RENDER_QUANTUM_SIZE * 4;

        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let mut context = OfflineAudioContext::new(1, len, sample_rate);

            let mut dirac = context.create_buffer(1, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start();

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; len];
            for i in (0..len).step_by(buffer_len) {
                expected[i] = 1.;
            }

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-10);
        }
    }

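    // Same dirac train as above, shifted by the one-frame start time that
    // forces the slow track; the error tolerance is loosened accordingly.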
    #[test]
    fn test_slow_track_loop_mono() {
        let sample_rate = 48_000.;
        let len = RENDER_QUANTUM_SIZE * 4;

        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let mut context = OfflineAudioContext::new(1, len, sample_rate);

            let mut dirac = context.create_buffer(1, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start_at(1. / sample_rate as f64);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; len];
            for i in (1..len).step_by(buffer_len) {
                expected[i] = 1.;
            }

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-9);
        }
    }

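    // Stereo variant: the right channel holds its dirac one frame after the
    // left one, so both channels must stay in lockstep across loop points.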
    #[test]
    fn test_fast_track_loop_stereo() {
        let sample_rate = 48_000.;
        let len = RENDER_QUANTUM_SIZE * 4;

        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let mut context = OfflineAudioContext::new(2, len, sample_rate);
            let mut dirac = context.create_buffer(2, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);
            dirac.copy_to_channel(&[0., 1.], 1);

            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start();

            let result = context.start_rendering_sync();

            let mut expected_left: Vec<f32> = vec![0.; len];
            let mut expected_right = vec![0.; len];
            for i in (0..len).step_by(buffer_len) {
                expected_left[i] = 1.;

                if i < expected_right.len() - 1 {
                    expected_right[i + 1] = 1.;
                }
            }

            assert_float_eq!(
                result.get_channel_data(0)[..],
                expected_left[..],
                abs_all <= 1e-10
            );
            assert_float_eq!(
                result.get_channel_data(1)[..],
                expected_right[..],
                abs_all <= 1e-10
            );
        }
    }

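    // Stereo slow-track variant of the previous test, with the one-frame
    // start offset shifting both channels.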
    #[test]
    fn test_slow_track_loop_stereo() {
        let sample_rate = 48_000.;
        let len = RENDER_QUANTUM_SIZE * 4;

        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let mut context = OfflineAudioContext::new(2, len, sample_rate);
            let mut dirac = context.create_buffer(2, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);
            dirac.copy_to_channel(&[0., 1.], 1);

            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start_at(1. / sample_rate as f64);

            let result = context.start_rendering_sync();

            let mut expected_left: Vec<f32> = vec![0.; len];
            let mut expected_right = vec![0.; len];
            for i in (1..len).step_by(buffer_len) {
                expected_left[i] = 1.;

                if i < expected_right.len() - 1 {
                    expected_right[i + 1] = 1.;
                }
            }

            assert_float_eq!(
                result.get_channel_data(0)[..],
                expected_left[..],
                abs_all <= 1e-9
            );
            assert_float_eq!(
                result.get_channel_data(1)[..],
                expected_right[..],
                abs_all <= 1e-9
            );
        }
    }

    #[test]
    fn test_loop_out_of_bounds() {
        [
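            // (loop_start, loop_end, allowed absolute error)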
            // these will go through the fast track
            (-2., -1., 0.),
            (-1., -2., 0.),
            (0., 0., 0.),
            (-1., 2., 0.),
            // these will go through the slow track
            (2., -1., 1e-10),
            (1., 1., 1e-10),
            (2., 3., 1e-10),
            (3., 2., 1e-10),
        ]
        .iter()
        .for_each(|(loop_start, loop_end, error)| {
            let sample_rate = 48_000.;
            let length = sample_rate as usize / 10;
            let mut context = OfflineAudioContext::new(1, length, sample_rate);

            let buffer_size = 500;
            let mut buffer = context.create_buffer(1, buffer_size, sample_rate);
            let data = vec![1.; 1];
            buffer.copy_to_channel(&data, 0);

            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(buffer);

            src.set_loop(true);
            src.set_loop_start(*loop_start); // outside of buffer duration
            src.set_loop_end(*loop_end); // outside of buffer duration
            src.start();

            let result = context.start_rendering_sync(); // should terminate
            let channel = result.get_channel_data(0);

            // Both loop points are clamped to the buffer duration per the rules defined at
            // https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopstart
            // https://webaudio.github.io/web-audio-api/#dom-audiobuffersourcenode-loopend
            // The clamped values then violate the playback rule defined in
            // https://webaudio.github.io/web-audio-api/#playback-AudioBufferSourceNode
            // `loopStart >= 0 && loopEnd > 0 && loopStart < loopEnd`
            // hence the whole buffer is looped

            let mut expected = vec![0.; length];
            for i in (0..length).step_by(buffer_size) {
                expected[i] = 1.;
            }

            assert_float_eq!(channel[..], expected[..], abs_all <= error);
        });
    }

    #[test]
    // regression test for #452
    // - duration not set so `self.duration` is `f64::MAX`
    // - stop time is > buffer length
    fn test_end_of_file_fast_track_2() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut buffer = context.create_buffer(1, 5, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        // play in fast track
        src.start_at(0.);
        // stop after the end of the buffer but before the end of the render quantum
        src.stop_at(125. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[0] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    // regression test for #452
    // - duration not set so `self.duration` is `f64::MAX`
    // - stop time is > buffer length
    fn test_end_of_file_slow_track_2() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut buffer = context.create_buffer(1, 5, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        // play in slow track
        src.start_at(1. / sample_rate as f64);
        // stop after the end of the buffer but before the end of the render quantum
        src.stop_at(125. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[1] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

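    // Setting `loop = true` after the source has already stopped must not
    // restart playback; here the flag is flipped from a `suspend_sync`
    // callback one render quantum into the rendering.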
    #[test]
    fn test_loop_no_restart_suspend() {
        let sample_rate = 48_000.;
        let result_size = RENDER_QUANTUM_SIZE * 2;
        let mut context = OfflineAudioContext::new(1, result_size, sample_rate);

        let mut buffer = context.create_buffer(1, 1, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at(0.);

        context.suspend_sync(RENDER_QUANTUM_SIZE as f64 / sample_rate as f64, move |_| {
            src.set_loop(true);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; result_size];
        expected[0] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

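    // Same no-restart guarantee as above, but with the loop flag flipped
    // from the `onended` callback instead of a suspend callback.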
    #[test]
    fn test_loop_no_restart_onended_fast_track() {
        let sample_rate = 48_000.;
        // the ended event is sent during the second render quantum; render a few
        // more quanta to be safe
        let result_size = RENDER_QUANTUM_SIZE * 4;
        let mut context = OfflineAudioContext::new(1, result_size, sample_rate);

        let mut buffer = context.create_buffer(1, 1, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        // play in fast track
        src.start_at(0.);

        let src = Arc::new(Mutex::new(src));
        let clone = Arc::clone(&src);
        src.lock().unwrap().set_onended(move |_| {
            clone.lock().unwrap().set_loop(true);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; result_size];
        expected[0] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    fn test_loop_no_restart_onended_slow_track() {
        let sample_rate = 48_000.;
        // the ended event is sent during the second render quantum; render a few
        // more quanta to be safe
        let result_size = RENDER_QUANTUM_SIZE * 4;
        let mut context = OfflineAudioContext::new(1, result_size, sample_rate);

        let mut buffer = context.create_buffer(1, 1, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        // play in slow track
        src.start_at(1. / sample_rate as f64);

        let src = Arc::new(Mutex::new(src));
        let clone = Arc::clone(&src);
        src.lock().unwrap().set_onended(move |_| {
            clone.lock().unwrap().set_loop(true);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; result_size];
        expected[1] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    #[test]
    // Ported from wpt: the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html
    // Note that in wpt, results are tested against an oscillator node, which fails
    // in the (44_100., 43_800., 3.8986e-3) condition for a reason that is not yet understood
    fn test_subsample_buffer_stitching() {
        [(44_100., 44_100., 9.0957e-5), (44_100., 43_800., 3.8986e-3)]
            .iter()
            .for_each(|(sample_rate, buffer_rate, error_threshold)| {
                let sample_rate = *sample_rate;
                let buffer_rate = *buffer_rate;
                let buffer_length = 30;
                let frequency = 440.;

                // let length = sample_rate as usize;
                let length = buffer_length * 15;
                let mut context = OfflineAudioContext::new(2, length, sample_rate);

                let mut wave_signal = vec![0.; context.length()];
                let omega = 2. * PI / buffer_rate * frequency;
                wave_signal.iter_mut().enumerate().for_each(|(i, s)| {
                    *s = (omega * i as f32).sin();
                });

                // Slice the sine wave into many little buffers to be assigned to ABSNs
                // that are started at the appropriate times to produce a final sine
                // wave.
                for k in (0..context.length()).step_by(buffer_length) {
                    let mut buffer = AudioBuffer::new(AudioBufferOptions {
                        number_of_channels: 1,
                        length: buffer_length,
                        sample_rate: buffer_rate,
                    });
                    buffer.copy_to_channel(&wave_signal[k..k + buffer_length], 0);

                    let mut src = AudioBufferSourceNode::new(
                        &context,
                        AudioBufferSourceOptions {
                            buffer: Some(buffer),
                            ..Default::default()
                        },
                    );
                    src.connect(&context.destination());
                    src.start_at(k as f64 / buffer_rate as f64);
                }

                let mut expected = vec![0.; context.length()];
                let omega = 2. * PI / sample_rate * frequency;
                expected.iter_mut().enumerate().for_each(|(i, s)| {
                    *s = (omega * i as f32).sin();
                });

                let result = context.start_rendering_sync();
                let actual = result.get_channel_data(0);

                assert_float_eq!(actual[..], expected[..], abs_all <= error_threshold);
            });
    }

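    // The buffer is longer than the rendering, so the source never reaches
    // its natural end during the render; the onended callback must
    // nevertheless have fired by the time rendering completes.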
    #[test]
    fn test_onended_before_drop() {
        let sample_rate = 48_000.;
        let result_size = RENDER_QUANTUM_SIZE;
        let mut context = OfflineAudioContext::new(1, result_size, sample_rate);
        // the buffer is longer than the context output, so the renderer never
        // reaches the ended check condition during rendering
        let mut buffer = context.create_buffer(1, result_size * 2, sample_rate);
        let data = vec![1.; 1];
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start();

        let onended_called = Arc::new(AtomicBool::new(false));
        let onended_called_clone = Arc::clone(&onended_called);

        src.set_onended(move |_| {
            onended_called_clone.store(true, Ordering::SeqCst);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; result_size];
        expected[0] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        assert!(onended_called.load(Ordering::SeqCst));
    }
}