1use std::any::Any;
2use std::sync::atomic::Ordering;
3use std::sync::Arc;
4
5use crate::buffer::AudioBuffer;
6use crate::context::{AudioContextRegistration, AudioParamId, BaseAudioContext};
7use crate::param::{AudioParam, AudioParamDescriptor, AutomationRate};
8use crate::render::{
9 AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
10};
11use crate::{assert_valid_time_value, AtomicF64, RENDER_QUANTUM_SIZE};
12
13use super::{AudioNode, AudioScheduledSourceNode, ChannelConfig};
14
/// Options for constructing an `AudioBufferSourceNode`
#[derive(Clone, Debug)]
pub struct AudioBufferSourceOptions {
    /// Audio asset to play back; may also be assigned later via `set_buffer`
    pub buffer: Option<AudioBuffer>,
    /// Initial value for the `detune` param, in cents
    pub detune: f32,
    /// Whether playback should loop
    pub loop_: bool,
    /// Loop start point, in seconds
    pub loop_start: f64,
    /// Loop end point, in seconds
    pub loop_end: f64,
    /// Initial value for the `playback_rate` param (1. = natural speed)
    pub playback_rate: f32,
}
38
39impl Default for AudioBufferSourceOptions {
40 fn default() -> Self {
41 Self {
42 buffer: None,
43 detune: 0.,
44 loop_: false,
45 loop_start: 0.,
46 loop_end: 0.,
47 playback_rate: 1.,
48 }
49 }
50}
51
/// Read position for one output sample on the interpolated (slow) path.
#[derive(Debug, Copy, Clone)]
struct PlaybackInfo {
    // index of the buffer frame just before the playhead
    prev_frame_index: usize,
    // fractional position between `prev_frame_index` and the next frame
    k: f64,
}
57
/// Looping configuration, kept on both the control and render sides and
/// synchronized through `ControlMessage`s.
#[derive(Debug, Clone, Copy)]
struct LoopState {
    pub is_looping: bool,
    // loop start point, in seconds
    pub start: f64,
    // loop end point, in seconds
    pub end: f64,
}
64
/// Messages posted from the `AudioBufferSourceNode` (control side) to its
/// renderer on the audio thread.
#[derive(Debug, Clone)]
enum ControlMessage {
    /// Schedule playback: (when, offset, duration), all in seconds
    StartWithOffsetAndDuration(f64, f64, f64),
    /// Schedule the end of playback, in seconds
    Stop(f64),
    /// Enable/disable looping
    Loop(bool),
    /// Update the loop start point, in seconds
    LoopStart(f64),
    /// Update the loop end point, in seconds
    LoopEnd(f64),
}
74
75#[derive(Debug)]
108pub struct AudioBufferSourceNode {
109 registration: AudioContextRegistration,
110 channel_config: ChannelConfig,
111 detune: AudioParam, playback_rate: AudioParam, buffer_time: Arc<AtomicF64>,
114 buffer: Option<AudioBuffer>,
115 loop_state: LoopState,
116 has_start: bool,
117}
118
119impl AudioNode for AudioBufferSourceNode {
120 fn registration(&self) -> &AudioContextRegistration {
121 &self.registration
122 }
123
124 fn channel_config(&self) -> &ChannelConfig {
125 &self.channel_config
126 }
127
128 fn number_of_inputs(&self) -> usize {
129 0
130 }
131
132 fn number_of_outputs(&self) -> usize {
133 1
134 }
135}
136
137impl AudioScheduledSourceNode for AudioBufferSourceNode {
138 fn start(&mut self) {
139 let start = self.registration.context().current_time();
140 self.start_at_with_offset_and_duration(start, 0., f64::MAX);
141 }
142
143 fn start_at(&mut self, when: f64) {
144 self.start_at_with_offset_and_duration(when, 0., f64::MAX);
145 }
146
147 fn stop(&mut self) {
148 let stop = self.registration.context().current_time();
149 self.stop_at(stop);
150 }
151
152 fn stop_at(&mut self, when: f64) {
153 assert_valid_time_value(when);
154 assert!(self.has_start, "InvalidStateError cannot stop before start");
155
156 self.registration.post_message(ControlMessage::Stop(when));
157 }
158}
159
160impl AudioBufferSourceNode {
161 pub fn new<C: BaseAudioContext>(context: &C, options: AudioBufferSourceOptions) -> Self {
163 let AudioBufferSourceOptions {
164 buffer,
165 detune,
166 loop_,
167 loop_start,
168 loop_end,
169 playback_rate,
170 } = options;
171
172 let mut node = context.base().register(move |registration| {
173 let detune_param_options = AudioParamDescriptor {
176 name: String::new(),
177 min_value: f32::MIN,
178 max_value: f32::MAX,
179 default_value: 0.,
180 automation_rate: AutomationRate::K,
181 };
182 let (mut d_param, d_proc) =
183 context.create_audio_param(detune_param_options, ®istration);
184 d_param.set_automation_rate_constrained(true);
185 d_param.set_value(detune);
186
187 let playback_rate_param_options = AudioParamDescriptor {
188 name: String::new(),
189 min_value: f32::MIN,
190 max_value: f32::MAX,
191 default_value: 1.,
192 automation_rate: AutomationRate::K,
193 };
194 let (mut pr_param, pr_proc) =
195 context.create_audio_param(playback_rate_param_options, ®istration);
196 pr_param.set_automation_rate_constrained(true);
197 pr_param.set_value(playback_rate);
198
199 let loop_state = LoopState {
200 is_looping: loop_,
201 start: loop_start,
202 end: loop_end,
203 };
204
205 let renderer = AudioBufferSourceRenderer {
206 start_time: f64::MAX,
207 stop_time: f64::MAX,
208 duration: f64::MAX,
209 offset: 0.,
210 buffer: None,
211 detune: d_proc,
212 playback_rate: pr_proc,
213 loop_state,
214 render_state: AudioBufferRendererState::default(),
215 };
216
217 let node = Self {
218 registration,
219 channel_config: ChannelConfig::default(),
220 detune: d_param,
221 playback_rate: pr_param,
222 buffer_time: Arc::clone(&renderer.render_state.buffer_time),
223 buffer: None,
224 loop_state,
225 has_start: false,
226 };
227
228 (node, Box::new(renderer))
229 });
230
231 if let Some(buf) = buffer {
233 node.set_buffer(buf);
234 }
235
236 node
237 }
238
239 pub fn start_at_with_offset(&mut self, start: f64, offset: f64) {
245 self.start_at_with_offset_and_duration(start, offset, f64::MAX);
246 }
247
248 pub fn start_at_with_offset_and_duration(&mut self, start: f64, offset: f64, duration: f64) {
254 assert_valid_time_value(start);
255 assert_valid_time_value(offset);
256 assert_valid_time_value(duration);
257 assert!(
258 !self.has_start,
259 "InvalidStateError - Cannot call `start` twice"
260 );
261
262 self.has_start = true;
263 let control = ControlMessage::StartWithOffsetAndDuration(start, offset, duration);
264 self.registration.post_message(control);
265 }
266
267 pub fn buffer(&self) -> Option<&AudioBuffer> {
269 self.buffer.as_ref()
270 }
271
272 pub fn set_buffer(&mut self, audio_buffer: AudioBuffer) {
279 let clone = audio_buffer.clone();
280
281 assert!(
282 self.buffer.is_none(),
283 "InvalidStateError - cannot assign buffer twice",
284 );
285 self.buffer = Some(audio_buffer);
286
287 self.registration.post_message(clone);
288 }
289
290 pub fn playback_rate(&self) -> &AudioParam {
297 &self.playback_rate
298 }
299
300 pub fn position(&self) -> f64 {
307 self.buffer_time.load(Ordering::Relaxed)
308 }
309
310 pub fn detune(&self) -> &AudioParam {
315 &self.detune
316 }
317
318 #[allow(clippy::missing_panics_doc)]
320 pub fn loop_(&self) -> bool {
321 self.loop_state.is_looping
322 }
323
324 pub fn set_loop(&mut self, value: bool) {
325 self.loop_state.is_looping = value;
326 self.registration.post_message(ControlMessage::Loop(value));
327 }
328
329 pub fn loop_start(&self) -> f64 {
331 self.loop_state.start
332 }
333
334 pub fn set_loop_start(&mut self, value: f64) {
335 self.loop_state.start = value;
336 self.registration
337 .post_message(ControlMessage::LoopStart(value));
338 }
339
340 pub fn loop_end(&self) -> f64 {
342 self.loop_state.end
343 }
344
345 pub fn set_loop_end(&mut self, value: f64) {
346 self.loop_state.end = value;
347 self.registration
348 .post_message(ControlMessage::LoopEnd(value));
349 }
350}
351
/// Mutable playback state of the renderer, carried across render quanta.
struct AudioBufferRendererState {
    // playhead position in seconds; shared with the control thread so
    // `AudioBufferSourceNode::position` can read it
    buffer_time: Arc<AtomicF64>,
    // true once playback has effectively begun
    started: bool,
    // true once the playhead has entered the loop body
    entered_loop: bool,
    // total time played so far in seconds, checked against `duration`
    buffer_time_elapsed: f64,
    // true when buffer frames map 1:1 onto output frames (fast track)
    is_aligned: bool,
    // true once playback has finished; the node can then be dropped
    ended: bool,
}
360
361impl Default for AudioBufferRendererState {
362 fn default() -> Self {
363 Self {
364 buffer_time: Arc::new(AtomicF64::new(0.)),
365 started: false,
366 entered_loop: false,
367 buffer_time_elapsed: 0.,
368 is_aligned: false,
369 ended: false,
370 }
371 }
372}
373
/// Render-thread counterpart of `AudioBufferSourceNode`.
struct AudioBufferSourceRenderer {
    // scheduled start time in seconds (f64::MAX = not scheduled)
    start_time: f64,
    // scheduled stop time in seconds (f64::MAX = not scheduled)
    stop_time: f64,
    // start offset within the buffer, in seconds
    offset: f64,
    // maximum playback duration, in seconds
    duration: f64,
    // audio asset received from the control thread, if any
    buffer: Option<AudioBuffer>,
    // handles to the k-rate automation values
    detune: AudioParamId,
    playback_rate: AudioParamId,
    // render-side copy of the loop configuration
    loop_state: LoopState,
    render_state: AudioBufferRendererState,
}
385
386impl AudioBufferSourceRenderer {
387 fn handle_control_message(&mut self, control: &ControlMessage) {
388 match control {
389 ControlMessage::StartWithOffsetAndDuration(when, offset, duration) => {
390 self.start_time = *when;
391 self.offset = *offset;
392 self.duration = *duration;
393 }
394 ControlMessage::Stop(when) => self.stop_time = *when,
395 ControlMessage::Loop(is_looping) => self.loop_state.is_looping = *is_looping,
396 ControlMessage::LoopStart(loop_start) => self.loop_state.start = *loop_start,
397 ControlMessage::LoopEnd(loop_end) => self.loop_state.end = *loop_end,
398 }
399
400 self.clamp_loop_boundaries();
401 }
402
403 fn clamp_loop_boundaries(&mut self) {
404 if let Some(buffer) = &self.buffer {
405 let duration = buffer.duration();
406
407 if self.loop_state.start < 0. {
409 self.loop_state.start = 0.;
410 } else if self.loop_state.start > duration {
411 self.loop_state.start = duration;
412 }
413
414 if self.loop_state.end <= 0. || self.loop_state.end > duration {
416 self.loop_state.end = duration;
417 }
418 }
419 }
420}
421
impl AudioProcessor for AudioBufferSourceRenderer {
    // Render one quantum of audio. Returns `true` while the node must be
    // kept alive, `false` once it can be dropped from the graph.
    fn process(
        &mut self,
        _inputs: &[AudioRenderQuantum], outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues<'_>,
        scope: &AudioWorkletGlobalScope,
    ) -> bool {
        // single output, no inputs
        let output = &mut outputs[0];

        // once ended, keep emitting silence and allow the node to be dropped
        if self.render_state.ended {
            output.make_silent();
            return false;
        }

        let sample_rate = scope.sample_rate as f64;
        let dt = 1. / sample_rate;
        let block_duration = dt * RENDER_QUANTUM_SIZE as f64;
        let next_block_time = scope.current_time + block_duration;

        // playback has not begun yet within this render quantum
        if self.start_time >= next_block_time {
            output.make_silent();

            // a stop scheduled before (or without) a start ends the node
            if self.stop_time <= next_block_time {
                self.render_state.ended = true;
                scope.send_ended_event();
                return false;
            }

            // keep the node alive only once a start has been scheduled
            return self.start_time != f64::MAX;
        }

        // nothing to play without a buffer
        let buffer = match &self.buffer {
            None => {
                output.make_silent();
                return false;
            }
            Some(b) => b,
        };

        let LoopState {
            is_looping,
            start: loop_start,
            end: loop_end,
        } = self.loop_state;

        // effective loop boundaries, resolved below only when looping
        let mut actual_loop_start = 0.;
        let mut actual_loop_end = 0.;

        // k-rate params: one value per render quantum
        let detune = params.get(&self.detune)[0];
        let playback_rate = params.get(&self.playback_rate)[0];
        // computedPlaybackRate = playbackRate * 2^(detune / 1200)
        let computed_playback_rate = (playback_rate * (detune / 1200.).exp2()) as f64;

        let buffer_duration = buffer.duration();
        let buffer_length = buffer.length();
        // ratio between buffer and context sample rates (1. when they match)
        let sampling_ratio = buffer.sample_rate() as f64 / sample_rate;

        // local copy of the shared playhead, written back at the end
        let mut buffer_time = self.render_state.buffer_time.load(Ordering::Relaxed);

        output.set_number_of_channels(buffer.number_of_channels());

        let block_time = scope.current_time;

        // a start time in the past is clamped onto the current block
        if !self.render_state.started && self.start_time < block_time {
            self.start_time = block_time;
        }

        // The "fast track" (verbatim frame copy) applies only when output
        // frames map 1:1 onto buffer frames: block-aligned start with no
        // offset...
        if self.start_time == block_time && self.offset == 0. {
            self.render_state.is_aligned = true;
        }

        // ...no resampling and natural playback rate...
        if sampling_ratio != 1. || computed_playback_rate != 1. {
            self.render_state.is_aligned = false;
        }

        // ...default loop points...
        if loop_start != 0. || loop_end != buffer_duration {
            self.render_state.is_aligned = false;
        }

        // ...and no duration/stop boundary falling inside this block
        if buffer_time + block_duration > self.duration
            || block_time + block_duration > self.stop_time
        {
            self.render_state.is_aligned = false;
        }

        if self.render_state.is_aligned {
            // ------------- fast track: copy buffer frames verbatim -------------
            if self.start_time == block_time {
                self.render_state.started = true;
            }

            if buffer_time + block_duration > buffer_duration {
                // the buffer ends (or wraps) inside this block: copy sample
                // by sample, restarting from frame 0 at the loop point
                let end_index = buffer.length();
                let mut loop_point_index: Option<usize> = None;

                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        let buffer_channel = buffer_channel.as_slice();
                        let mut start_index = (buffer_time * sample_rate).round() as usize;
                        let mut offset = 0;

                        for (index, o) in output_channel.iter_mut().enumerate() {
                            let mut buffer_index = start_index + index - offset;

                            *o = if buffer_index < end_index {
                                buffer_channel[buffer_index]
                            } else {
                                // past the buffer end: wrap when looping,
                                // silence otherwise
                                if is_looping && buffer_index >= end_index {
                                    loop_point_index = Some(index);
                                    start_index = 0;
                                    offset = index;
                                    buffer_index = 0;
                                }

                                if is_looping {
                                    buffer_channel[buffer_index]
                                } else {
                                    0.
                                }
                            };
                        }
                    });

                if let Some(loop_point_index) = loop_point_index {
                    // recompute the playhead from the wrap point
                    buffer_time = ((RENDER_QUANTUM_SIZE - loop_point_index) as f64 / sample_rate)
                        % buffer_duration;
                } else {
                    buffer_time += block_duration;
                }
            } else {
                // whole block lies inside the buffer: plain slice copy
                let start_index = (buffer_time * sample_rate).round() as usize;
                let end_index = start_index + RENDER_QUANTUM_SIZE;
                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        let buffer_channel = buffer_channel.as_slice();
                        output_channel.copy_from_slice(&buffer_channel[start_index..end_index]);
                    });

                buffer_time += block_duration;
            }

            self.render_state.buffer_time_elapsed += block_duration;
        } else {
            // ------------ slow track: per-sample linear interpolation ------------
            if is_looping {
                // resolve effective loop points, falling back to the whole
                // buffer when they are inconsistent
                if loop_start >= 0. && loop_end > 0. && loop_start < loop_end {
                    actual_loop_start = loop_start;
                    actual_loop_end = loop_end;
                } else {
                    actual_loop_start = 0.;
                    actual_loop_end = buffer_duration;
                }
            } else {
                self.render_state.entered_loop = false;
            }

            // first pass: compute the read position of each output sample
            let mut playback_infos = [None; RENDER_QUANTUM_SIZE];

            for (i, playback_info) in playback_infos.iter_mut().enumerate() {
                let current_time = block_time + i as f64 * dt;

                // snap a start time landing (almost) on this frame onto it
                if !self.render_state.started && almost::equal(current_time, self.start_time) {
                    self.start_time = current_time;
                }

                // outside the scheduled interval: leave the frame silent
                if current_time < self.start_time
                    || current_time >= self.stop_time
                    || self.render_state.buffer_time_elapsed >= self.duration
                {
                    continue; }

                // first rendered frame: apply the sub-sample start offset
                if !self.render_state.started {
                    let delta = current_time - self.start_time;
                    self.offset += delta * computed_playback_rate;

                    if is_looping && computed_playback_rate >= 0. && self.offset >= actual_loop_end
                    {
                        self.offset = actual_loop_end;
                    }

                    if is_looping && computed_playback_rate < 0. && self.offset < actual_loop_start
                    {
                        self.offset = actual_loop_start;
                    }

                    buffer_time = self.offset;
                    self.render_state.buffer_time_elapsed = delta * computed_playback_rate;
                    self.render_state.started = true;
                }

                if is_looping {
                    if !self.render_state.entered_loop {
                        // playback began before/within the loop and the
                        // playhead reached the loop start (forward playback)
                        if self.offset < actual_loop_end && buffer_time >= actual_loop_start {
                            self.render_state.entered_loop = true;
                        }

                        // playback began past the loop end and the playhead
                        // moved back before it (reverse playback)
                        if self.offset >= actual_loop_end && buffer_time < actual_loop_end {
                            self.render_state.entered_loop = true;
                        }
                    }

                    // wrap the playhead back into the loop body
                    if self.render_state.entered_loop {
                        while buffer_time >= actual_loop_end {
                            buffer_time -= actual_loop_end - actual_loop_start;
                        }

                        while buffer_time < actual_loop_start {
                            buffer_time += actual_loop_end - actual_loop_start;
                        }
                    }
                }

                if buffer_time >= 0. && buffer_time < buffer_duration {
                    // convert the playhead (seconds) to a fractional frame index
                    let position = buffer_time * sampling_ratio;
                    let playhead = position * sample_rate;
                    let playhead_floored = playhead.floor();
                    let prev_frame_index = playhead_floored as usize; let k = playhead - playhead_floored;

                    if prev_frame_index < buffer_length {
                        *playback_info = Some(PlaybackInfo {
                            prev_frame_index,
                            k,
                        });
                    }
                }

                let time_incr = dt * computed_playback_rate;
                buffer_time += time_incr;
                self.render_state.buffer_time_elapsed += time_incr;
            }

            // second pass: interpolate between adjacent buffer frames
            buffer
                .channels()
                .iter()
                .zip(output.channels_mut().iter_mut())
                .for_each(|(buffer_channel, output_channel)| {
                    let buffer_channel = buffer_channel.as_slice();

                    playback_infos
                        .iter()
                        .zip(output_channel.iter_mut())
                        .for_each(|(playhead, o)| {
                            *o = match playhead {
                                Some(PlaybackInfo {
                                    prev_frame_index,
                                    k,
                                }) => {
                                    let prev_sample = buffer_channel[*prev_frame_index] as f64;
                                    // the playhead may sit on the last frame:
                                    // pick the "next" sample accordingly
                                    let next_sample = match buffer_channel.get(prev_frame_index + 1)
                                    {
                                        Some(val) => *val as f64,
                                        None => {
                                            if is_looping {
                                                // interpolate towards the loop
                                                // boundary frame
                                                if playback_rate >= 0. {
                                                    let start_playhead =
                                                        actual_loop_start * sample_rate;
                                                    // ceil of start_playhead
                                                    let start_index = if start_playhead.floor()
                                                        == start_playhead
                                                    {
                                                        start_playhead as usize
                                                    } else {
                                                        start_playhead as usize + 1
                                                    };

                                                    buffer_channel[start_index] as f64
                                                } else {
                                                    let end_playhead =
                                                        actual_loop_end * sample_rate;
                                                    let end_index = end_playhead as usize;
                                                    buffer_channel[end_index] as f64
                                                }
                                            } else {
                                                if almost::equal(*k, 1.) || *prev_frame_index == 0 {
                                                    0.
                                                } else {
                                                    // linear extrapolation from
                                                    // the two last frames
                                                    let prev_prev_sample =
                                                        buffer_channel[*prev_frame_index - 1];
                                                    2. * prev_sample - prev_prev_sample as f64
                                                }
                                            }
                                        }
                                    };

                                    // linear interpolation between prev and next
                                    (1. - k).mul_add(prev_sample, k * next_sample) as f32
                                }
                                None => 0.,
                            };
                        });
                });
        }

        // publish the playhead so `AudioBufferSourceNode::position` sees it
        self.render_state
            .buffer_time
            .store(buffer_time, Ordering::Relaxed);

        // end of playback: explicit stop, duration exhausted, or the playhead
        // ran off the (non-looping) buffer in either direction
        if next_block_time >= self.stop_time
            || self.render_state.buffer_time_elapsed >= self.duration
            || !is_looping
                && (computed_playback_rate > 0. && buffer_time >= buffer_duration
                    || computed_playback_rate < 0. && buffer_time < 0.)
        {
            self.render_state.ended = true;
            scope.send_ended_event();
        }

        true
    }

    // Dispatch messages posted from the control thread.
    fn onmessage(&mut self, msg: &mut dyn Any) {
        if let Some(control) = msg.downcast_ref::<ControlMessage>() {
            self.handle_control_message(control);
            return;
        };

        if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
            if let Some(current_buffer) = &mut self.buffer {
                // NOTE(review): swapping (rather than overwriting) presumably
                // hands the old buffer back to the sender so it is not
                // deallocated on the audio thread — confirm with the
                // message-channel contract
                std::mem::swap(current_buffer, buffer);
            } else {
                // leave an empty placeholder in the message instead of a
                // default-constructed clone
                let tombstone_buffer = AudioBuffer {
                    channels: Default::default(),
                    sample_rate: Default::default(),
                };
                self.buffer = Some(std::mem::replace(buffer, tombstone_buffer));
                // loop points may have been set before the buffer arrived
                self.clamp_loop_boundaries();
            }
            return;
        };

        log::warn!("AudioBufferSourceRenderer: Dropping incoming message {msg:?}");
    }

    // Fire the ended event if the node is dropped while playback was still
    // scheduled or in progress.
    fn before_drop(&mut self, scope: &AudioWorkletGlobalScope) {
        if !self.render_state.ended
            && (scope.current_time >= self.start_time || scope.current_time >= self.stop_time)
        {
            scope.send_ended_event();
            self.render_state.ended = true;
        }
    }
}
854
855#[cfg(test)]
856mod tests {
857 use float_eq::assert_float_eq;
858 use std::f32::consts::PI;
859 use std::sync::atomic::{AtomicBool, Ordering};
860 use std::sync::{Arc, Mutex};
861
862 use crate::context::{BaseAudioContext, OfflineAudioContext};
863 use crate::AudioBufferOptions;
864 use crate::RENDER_QUANTUM_SIZE;
865
866 use super::*;
867
    // A constant-1.0 mono buffer started at t=0 must pass through unchanged
    // (exercises the sample-aligned fast track).
    #[test]
    fn test_construct_with_options_and_run() {
        let sample_rate = 44100.;
        let length = RENDER_QUANTUM_SIZE;
        let mut context = OfflineAudioContext::new(1, length, sample_rate);

        let buffer = AudioBuffer::from(vec![vec![1.; RENDER_QUANTUM_SIZE]], sample_rate);
        let options = AudioBufferSourceOptions {
            buffer: Some(buffer),
            ..Default::default()
        };
        let mut src = AudioBufferSourceNode::new(&context, options);
        src.connect(&context.destination());
        src.start();
        let res = context.start_rendering_sync();

        assert_float_eq!(
            res.channel_data(0).as_slice()[..],
            &[1.; RENDER_QUANTUM_SIZE][..],
            abs_all <= 0.
        );
    }
890
891 #[test]
892 fn test_playing_some_file() {
893 let context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);
894
895 let file = std::fs::File::open("samples/sample.wav").unwrap();
896 let expected = context.decode_audio_data_sync(file).unwrap();
897
898 [44100, 48000].iter().for_each(|sr| {
901 let decoding_context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, *sr as f32);
902
903 let mut filename = "samples/sample-".to_owned();
904 filename.push_str(&sr.to_string());
905 filename.push_str(".wav");
906
907 let file = std::fs::File::open("samples/sample.wav").unwrap();
908 let audio_buffer = decoding_context.decode_audio_data_sync(file).unwrap();
909
910 assert_eq!(audio_buffer.sample_rate(), *sr as f32);
911
912 let mut context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);
913
914 let mut src = context.create_buffer_source();
915 src.set_buffer(audio_buffer);
916 src.connect(&context.destination());
917 src.start_at(context.current_time());
918 src.stop_at(context.current_time() + 128.);
919
920 let res = context.start_rendering_sync();
921 let diff_abs = if *sr == 44100 {
922 0. } else {
924 5e-3 };
926
927 assert_eq!(res.number_of_channels(), expected.number_of_channels());
929
930 assert_float_eq!(
932 res.channel_data(0).as_slice()[..],
933 expected.get_channel_data(0)[0..128],
934 abs_all <= diff_abs
935 );
936
937 assert_float_eq!(
938 res.channel_data(1).as_slice()[..],
939 expected.get_channel_data(1)[0..128],
940 abs_all <= diff_abs
941 );
942 });
943 }
944
    // A start time falling exactly on frame 1 must render the dirac at
    // output index 1.
    #[test]
    fn test_sub_quantum_start_1() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[1] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
967
    // Several sources started at arbitrary frame offsets (block-aligned and
    // not) must all produce a non-zero sample at their start frame, and both
    // channels must stay identical.
    #[test]
    fn test_sub_quantum_start_2() {
        let sample_rate = 44_100.;
        let length_in_seconds = 4.;
        let mut context =
            OfflineAudioContext::new(2, (length_in_seconds * sample_rate) as usize, sample_rate);

        let mut dirac = context.create_buffer(2, 512, sample_rate);
        dirac.copy_to_channel(&[1.], 0);
        dirac.copy_to_channel(&[1.], 1);

        let sample_offsets = [0, 3, 512, 517, 1000, 1005, 20000, 21234, 37590];

        sample_offsets.iter().for_each(|index| {
            let time_in_seconds = *index as f64 / sample_rate as f64;

            let mut src = context.create_buffer_source();
            src.set_buffer(dirac.clone());
            src.connect(&context.destination());
            src.start_at(time_in_seconds);
        });

        let res = context.start_rendering_sync();

        let channel_left = res.get_channel_data(0);
        let channel_right = res.get_channel_data(1);
        assert_float_eq!(channel_left[..], channel_right[..], abs_all <= 0.);
        sample_offsets.iter().for_each(|index| {
            assert_ne!(
                channel_left[*index], 0.,
                "non zero sample at index {:?}",
                index
            );
        });
    }
1007
    // A start time falling between frames 1 and 2 must be linearly
    // interpolated: half the dirac amplitude lands on frame 2.
    #[test]
    fn test_sub_sample_start() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[2] = 0.5;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1030
    // Stopping exactly on frame 4 must silence that frame: the dirac located
    // at index 4 must not be rendered (fast track, aligned start).
    #[test]
    fn test_sub_quantum_stop_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(0. / sample_rate as f64);
        src.stop_at(4. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let expected = vec![0.; RENDER_QUANTUM_SIZE];

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1052
    // Same stop-on-frame behavior on the interpolated (slow) track: start is
    // not block-aligned, and the stop at frame 4 silences the shifted dirac.
    #[test]
    fn test_sub_quantum_stop_slow_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);

        src.start_at(1. / sample_rate as f64);
        src.stop_at(4. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let expected = vec![0.; RENDER_QUANTUM_SIZE];

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1074
    // A stop time between frames 4 and 5 keeps frame 4 and drops frame 5
    // (fast track).
    #[test]
    fn test_sub_sample_stop_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(0. / sample_rate as f64);
        src.stop_at(4.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[4] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1098
    // Same sub-sample stop on the slow track: with a one-frame start shift,
    // only the sample at index 5 survives.
    #[test]
    fn test_sub_sample_stop_slow_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1. / sample_rate as f64);
        src.stop_at(5.5 / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 128];
        expected[5] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1122
    // A start time already in the past when the node is created (scheduled at
    // t=0 from a suspend callback at frame 128) must be clamped onto the
    // current render quantum.
    #[test]
    fn test_start_in_the_past() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, 2 * RENDER_QUANTUM_SIZE, sample_rate);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        context.suspend_sync((128. / sample_rate).into(), |context| {
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(0.);
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 2 * RENDER_QUANTUM_SIZE];
        expected[128] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1146
    // A 1 Hz sine stored at various sample rates must be resampled to the
    // context rate: one second of output always contains exactly one cycle.
    #[test]
    fn test_audio_buffer_resampling() {
        [22_500, 38_000, 43_800, 48_000, 96_000]
            .iter()
            .for_each(|sr| {
                let freq = 1.;
                let base_sr = 44_100;
                let mut context = OfflineAudioContext::new(1, base_sr, base_sr as f32);

                let buf_sr = *sr;
                let sample_rate = buf_sr as f32;
                let mut buffer = context.create_buffer(1, buf_sr, sample_rate);
                let mut sine = vec![];

                for i in 0..buf_sr {
                    let phase = freq * i as f32 / buf_sr as f32 * 2. * PI;
                    let sample = phase.sin();
                    sine.push(sample);
                }

                buffer.copy_to_channel(&sine[..], 0);

                let mut src = context.create_buffer_source();
                src.connect(&context.destination());
                src.set_buffer(buffer);
                src.start_at(0. / sample_rate as f64);

                let result = context.start_rendering_sync();
                let channel = result.get_channel_data(0);

                // reference sine generated directly at the context rate
                let mut expected = vec![];

                for i in 0..base_sr {
                    let phase = freq * i as f32 / base_sr as f32 * 2. * PI;
                    let sample = phase.sin();
                    expected.push(sample);
                }

                assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
            });
    }
1191
    // Playing a full-cycle sine at half rate must yield half a cycle over the
    // same render length.
    #[test]
    fn test_playback_rate() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.playback_rate.set_value(0.5);
        src.start();

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // half-speed playback == half the phase advance per output sample
        let mut expected = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }
1229
    // A playback rate of -1 started at the buffer end must render the sine
    // reversed.
    #[test]
    fn test_negative_playback_rate() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer.clone());
        src.playback_rate.set_value(-1.);
        src.start_at_with_offset(context.current_time(), buffer.duration());

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // reversed sine, shifted by one frame: the playhead starts on the
        // last frame, so the first output sample is zero
        let mut expected: Vec<f32> = sine.into_iter().rev().collect();
        expected.pop();
        expected.insert(0, 0.);

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }
1265
    // A detune of -1200 cents is exactly one octave down, i.e. half-speed
    // playback: same expectation as `test_playback_rate`.
    #[test]
    fn test_detune() {
        let sample_rate = 44_100;
        let mut context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);

        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }

        buffer.copy_to_channel(&sine[..], 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.detune.set_value(-1200.);
        src.start();

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![];

        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }
1303
    // A 129-frame buffer straddles the quantum boundary: the last frame must
    // still be rendered at index 128, then silence (fast track).
    #[test]
    fn test_end_of_file_fast_track() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);

        let mut buffer = context.create_buffer(1, 129, sample_rate);
        let mut data = vec![0.; 129];
        data[0] = 1.;
        data[128] = 1.;
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at(0. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 256];
        expected[0] = 1.;
        expected[128] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
1329
    // Same end-of-buffer handling on the slow track: shifted by one frame,
    // the markers land at indices 1 and 129.
    #[test]
    fn test_end_of_file_slow_track_1() {
        let sample_rate = 48_000.;
        let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);

        let mut buffer = context.create_buffer(1, 129, sample_rate);
        let mut data = vec![0.; 129];
        data[0] = 1.;
        data[128] = 1.;
        buffer.copy_to_channel(&data, 0);

        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.start_at(1. / sample_rate as f64);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 256];
        expected[1] = 1.;
        expected[129] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-10);
    }
1355
1356 #[test]
1357 fn test_with_duration_0() {
1358 let sample_rate = 48_000.;
1359 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1360
1361 let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1362 dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
1363
1364 let mut src = context.create_buffer_source();
1365 src.connect(&context.destination());
1366 src.set_buffer(dirac);
1367 src.start_at_with_offset_and_duration(0., 0., 4.5 / sample_rate as f64);
1369
1370 let result = context.start_rendering_sync();
1371 let channel = result.get_channel_data(0);
1372
1373 let mut expected = vec![0.; 128];
1374 expected[4] = 1.;
1375
1376 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1377 }
1378
1379 #[test]
1380 fn test_with_duration_1() {
1381 let sample_rate = 48_000.;
1382 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1383
1384 let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1385 dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
1386
1387 let mut src = context.create_buffer_source();
1388 src.connect(&context.destination());
1389 src.set_buffer(dirac);
1390 src.start_at_with_offset_and_duration(
1394 1. / sample_rate as f64,
1395 0. / sample_rate as f64,
1396 4.5 / sample_rate as f64,
1397 );
1398
1399 let result = context.start_rendering_sync();
1400 let channel = result.get_channel_data(0);
1401
1402 let mut expected = vec![0.; 128];
1403 expected[5] = 1.;
1404
1405 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1406 }
1407
1408 #[test]
1409 fn test_with_duration_2() {
1411 let sample_rate = 32_768.;
1412 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1413
1414 let mut buffer = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1415 buffer.copy_to_channel(&[1.; RENDER_QUANTUM_SIZE], 0);
1416
1417 let start_grain_index = 3.1;
1418 let end_grain_index = 37.2;
1419
1420 let mut src = context.create_buffer_source();
1421 src.connect(&context.destination());
1422 src.set_buffer(buffer);
1423
1424 src.start_at_with_offset_and_duration(
1425 start_grain_index / sample_rate as f64,
1426 0.,
1427 (end_grain_index - start_grain_index) / sample_rate as f64,
1428 );
1429
1430 let result = context.start_rendering_sync();
1431 let channel = result.get_channel_data(0);
1432
1433 let mut expected = [1.; RENDER_QUANTUM_SIZE];
1434 for s in expected
1435 .iter_mut()
1436 .take(start_grain_index.floor() as usize + 1)
1437 {
1438 *s = 0.;
1439 }
1440 for s in expected
1441 .iter_mut()
1442 .take(RENDER_QUANTUM_SIZE)
1443 .skip(end_grain_index.ceil() as usize)
1444 {
1445 *s = 0.;
1446 }
1447
1448 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1449 }
1450
1451 #[test]
1452 fn test_with_offset() {
1453 let sample_rate = 48_000.;
1455 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1456
1457 let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
1458 dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
1459
1460 let mut src = context.create_buffer_source();
1461 src.connect(&context.destination());
1462 src.set_buffer(dirac);
1463 src.start_at_with_offset_and_duration(
1467 0. / sample_rate as f64,
1468 1. / sample_rate as f64,
1469 3.5 / sample_rate as f64,
1470 );
1471
1472 let result = context.start_rendering_sync();
1473 let channel = result.get_channel_data(0);
1474
1475 let mut expected = vec![0.; 128];
1476 expected[3] = 1.;
1477
1478 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1479 }
1480
1481 #[test]
1482 fn test_offset_larger_than_buffer_duration() {
1483 let sample_rate = 48_000.;
1484 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1485 let mut buffer = context.create_buffer(1, 13, sample_rate);
1486 buffer.copy_to_channel(&[1.; 13], 0);
1487
1488 let mut src = context.create_buffer_source();
1489 src.set_buffer(buffer);
1490 src.start_at_with_offset(0., 64. / sample_rate as f64); let result = context.start_rendering_sync();
1493 let channel = result.get_channel_data(0);
1494
1495 let expected = [0.; RENDER_QUANTUM_SIZE];
1496 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1497 }
1498
1499 #[test]
1500 fn test_fast_track_loop_mono() {
1501 let sample_rate = 48_000.;
1502 let len = RENDER_QUANTUM_SIZE * 4;
1503
1504 for buffer_len in [
1505 RENDER_QUANTUM_SIZE / 2 - 1,
1506 RENDER_QUANTUM_SIZE / 2,
1507 RENDER_QUANTUM_SIZE / 2 + 1,
1508 RENDER_QUANTUM_SIZE - 1,
1509 RENDER_QUANTUM_SIZE,
1510 RENDER_QUANTUM_SIZE + 1,
1511 RENDER_QUANTUM_SIZE * 2 - 1,
1512 RENDER_QUANTUM_SIZE * 2,
1513 RENDER_QUANTUM_SIZE * 2 + 1,
1514 ] {
1515 let mut context = OfflineAudioContext::new(1, len, sample_rate);
1516
1517 let mut dirac = context.create_buffer(1, buffer_len, sample_rate);
1518 dirac.copy_to_channel(&[1.], 0);
1519
1520 let mut src = context.create_buffer_source();
1521 src.connect(&context.destination());
1522 src.set_loop(true);
1523 src.set_buffer(dirac);
1524 src.start();
1525
1526 let result = context.start_rendering_sync();
1527 let channel = result.get_channel_data(0);
1528
1529 let mut expected = vec![0.; len];
1530 for i in (0..len).step_by(buffer_len) {
1531 expected[i] = 1.;
1532 }
1533
1534 assert_float_eq!(channel[..], expected[..], abs_all <= 1e-10);
1535 }
1536 }
1537
1538 #[test]
1539 fn test_slow_track_loop_mono() {
1540 let sample_rate = 48_000.;
1541 let len = RENDER_QUANTUM_SIZE * 4;
1542
1543 for buffer_len in [
1544 RENDER_QUANTUM_SIZE / 2 - 1,
1545 RENDER_QUANTUM_SIZE / 2,
1546 RENDER_QUANTUM_SIZE / 2 + 1,
1547 RENDER_QUANTUM_SIZE - 1,
1548 RENDER_QUANTUM_SIZE,
1549 RENDER_QUANTUM_SIZE + 1,
1550 RENDER_QUANTUM_SIZE * 2 - 1,
1551 RENDER_QUANTUM_SIZE * 2,
1552 RENDER_QUANTUM_SIZE * 2 + 1,
1553 ] {
1554 let mut context = OfflineAudioContext::new(1, len, sample_rate);
1555
1556 let mut dirac = context.create_buffer(1, buffer_len, sample_rate);
1557 dirac.copy_to_channel(&[1.], 0);
1558
1559 let mut src = context.create_buffer_source();
1560 src.connect(&context.destination());
1561 src.set_loop(true);
1562 src.set_buffer(dirac);
1563 src.start_at(1. / sample_rate as f64);
1564
1565 let result = context.start_rendering_sync();
1566 let channel = result.get_channel_data(0);
1567
1568 let mut expected = vec![0.; len];
1569 for i in (1..len).step_by(buffer_len) {
1570 expected[i] = 1.;
1571 }
1572
1573 assert_float_eq!(channel[..], expected[..], abs_all <= 1e-9);
1574 }
1575 }
1576
1577 #[test]
1578 fn test_fast_track_loop_stereo() {
1579 let sample_rate = 48_000.;
1580 let len = RENDER_QUANTUM_SIZE * 4;
1581
1582 for buffer_len in [
1583 RENDER_QUANTUM_SIZE / 2 - 1,
1584 RENDER_QUANTUM_SIZE / 2,
1585 RENDER_QUANTUM_SIZE / 2 + 1,
1586 RENDER_QUANTUM_SIZE - 1,
1587 RENDER_QUANTUM_SIZE,
1588 RENDER_QUANTUM_SIZE + 1,
1589 RENDER_QUANTUM_SIZE * 2 - 1,
1590 RENDER_QUANTUM_SIZE * 2,
1591 RENDER_QUANTUM_SIZE * 2 + 1,
1592 ] {
1593 let mut context = OfflineAudioContext::new(2, len, sample_rate);
1594 let mut dirac = context.create_buffer(2, buffer_len, sample_rate);
1595 dirac.copy_to_channel(&[1.], 0);
1596 dirac.copy_to_channel(&[0., 1.], 1);
1597
1598 let mut src = context.create_buffer_source();
1599 src.connect(&context.destination());
1600 src.set_loop(true);
1601 src.set_buffer(dirac);
1602 src.start();
1603
1604 let result = context.start_rendering_sync();
1605
1606 let mut expected_left: Vec<f32> = vec![0.; len];
1607 let mut expected_right = vec![0.; len];
1608 for i in (0..len).step_by(buffer_len) {
1609 expected_left[i] = 1.;
1610
1611 if i < expected_right.len() - 1 {
1612 expected_right[i + 1] = 1.;
1613 }
1614 }
1615
1616 assert_float_eq!(
1617 result.get_channel_data(0)[..],
1618 expected_left[..],
1619 abs_all <= 1e-10
1620 );
1621 assert_float_eq!(
1622 result.get_channel_data(1)[..],
1623 expected_right[..],
1624 abs_all <= 1e-10
1625 );
1626 }
1627 }
1628
1629 #[test]
1630 fn test_slow_track_loop_stereo() {
1631 let sample_rate = 48_000.;
1632 let len = RENDER_QUANTUM_SIZE * 4;
1633
1634 for buffer_len in [
1635 RENDER_QUANTUM_SIZE / 2 - 1,
1636 RENDER_QUANTUM_SIZE / 2,
1637 RENDER_QUANTUM_SIZE / 2 + 1,
1638 RENDER_QUANTUM_SIZE - 1,
1639 RENDER_QUANTUM_SIZE,
1640 RENDER_QUANTUM_SIZE + 1,
1641 RENDER_QUANTUM_SIZE * 2 - 1,
1642 RENDER_QUANTUM_SIZE * 2,
1643 RENDER_QUANTUM_SIZE * 2 + 1,
1644 ] {
1645 let mut context = OfflineAudioContext::new(2, len, sample_rate);
1646 let mut dirac = context.create_buffer(2, buffer_len, sample_rate);
1647 dirac.copy_to_channel(&[1.], 0);
1648 dirac.copy_to_channel(&[0., 1.], 1);
1649
1650 let mut src = context.create_buffer_source();
1651 src.connect(&context.destination());
1652 src.set_loop(true);
1653 src.set_buffer(dirac);
1654 src.start_at(1. / sample_rate as f64);
1655
1656 let result = context.start_rendering_sync();
1657
1658 let mut expected_left: Vec<f32> = vec![0.; len];
1659 let mut expected_right = vec![0.; len];
1660 for i in (1..len).step_by(buffer_len) {
1661 expected_left[i] = 1.;
1662
1663 if i < expected_right.len() - 1 {
1664 expected_right[i + 1] = 1.;
1665 }
1666 }
1667
1668 assert_float_eq!(
1669 result.get_channel_data(0)[..],
1670 expected_left[..],
1671 abs_all <= 1e-9
1672 );
1673 assert_float_eq!(
1674 result.get_channel_data(1)[..],
1675 expected_right[..],
1676 abs_all <= 1e-9
1677 );
1678 }
1679 }
1680
1681 #[test]
1682 fn test_loop_out_of_bounds() {
1683 [
1684 (-2., -1., 0.),
1686 (-1., -2., 0.),
1687 (0., 0., 0.),
1688 (-1., 2., 0.),
1689 (2., -1., 1e-10),
1691 (1., 1., 1e-10),
1692 (2., 3., 1e-10),
1693 (3., 2., 1e-10),
1694 ]
1695 .iter()
1696 .for_each(|(loop_start, loop_end, error)| {
1697 let sample_rate = 48_000.;
1698 let length = sample_rate as usize / 10;
1699 let mut context = OfflineAudioContext::new(1, length, sample_rate);
1700
1701 let buffer_size = 500;
1702 let mut buffer = context.create_buffer(1, buffer_size, sample_rate);
1703 let data = vec![1.; 1];
1704 buffer.copy_to_channel(&data, 0);
1705
1706 let mut src = context.create_buffer_source();
1707 src.connect(&context.destination());
1708 src.set_buffer(buffer);
1709
1710 src.set_loop(true);
1711 src.set_loop_start(*loop_start); src.set_loop_end(*loop_end); src.start();
1714
1715 let result = context.start_rendering_sync(); let channel = result.get_channel_data(0);
1717
1718 let mut expected = vec![0.; length];
1727 for i in (0..length).step_by(buffer_size) {
1728 expected[i] = 1.;
1729 }
1730
1731 assert_float_eq!(channel[..], expected[..], abs_all <= error);
1732 });
1733 }
1734
1735 #[test]
1736 fn test_end_of_file_fast_track_2() {
1740 let sample_rate = 48_000.;
1741 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1742
1743 let mut buffer = context.create_buffer(1, 5, sample_rate);
1744 let data = vec![1.; 1];
1745 buffer.copy_to_channel(&data, 0);
1746
1747 let mut src = context.create_buffer_source();
1748 src.connect(&context.destination());
1749 src.set_buffer(buffer);
1750 src.start_at(0.);
1752 src.stop_at(125. / sample_rate as f64);
1754
1755 let result = context.start_rendering_sync();
1756 let channel = result.get_channel_data(0);
1757
1758 let mut expected = vec![0.; 128];
1759 expected[0] = 1.;
1760
1761 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1762 }
1763
1764 #[test]
1765 fn test_end_of_file_slow_track_2() {
1769 let sample_rate = 48_000.;
1770 let mut context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
1771
1772 let mut buffer = context.create_buffer(1, 5, sample_rate);
1773 let data = vec![1.; 1];
1774 buffer.copy_to_channel(&data, 0);
1775
1776 let mut src = context.create_buffer_source();
1777 src.connect(&context.destination());
1778 src.set_buffer(buffer);
1779 src.start_at(1. / sample_rate as f64);
1781 src.stop_at(125. / sample_rate as f64);
1783
1784 let result = context.start_rendering_sync();
1785 let channel = result.get_channel_data(0);
1786
1787 let mut expected = vec![0.; 128];
1788 expected[1] = 1.;
1789
1790 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1791 }
1792
1793 #[test]
1794 fn test_loop_no_restart_suspend() {
1795 let sample_rate = 48_000.;
1796 let result_size = RENDER_QUANTUM_SIZE * 2;
1797 let mut context = OfflineAudioContext::new(1, result_size, sample_rate);
1798
1799 let mut buffer = context.create_buffer(1, 1, sample_rate);
1800 let data = vec![1.; 1];
1801 buffer.copy_to_channel(&data, 0);
1802
1803 let mut src = context.create_buffer_source();
1804 src.connect(&context.destination());
1805 src.set_buffer(buffer);
1806 src.start_at(0.);
1807
1808 context.suspend_sync(RENDER_QUANTUM_SIZE as f64 / sample_rate as f64, move |_| {
1809 src.set_loop(true);
1810 });
1811
1812 let result = context.start_rendering_sync();
1813 let channel = result.get_channel_data(0);
1814
1815 let mut expected = vec![0.; result_size];
1816 expected[0] = 1.;
1817
1818 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1819 }
1820
1821 #[test]
1822 fn test_loop_no_restart_onended_fast_track() {
1823 let sample_rate = 48_000.;
1824 let result_size = RENDER_QUANTUM_SIZE * 4;
1826 let mut context = OfflineAudioContext::new(1, result_size, sample_rate);
1827
1828 let mut buffer = context.create_buffer(1, 1, sample_rate);
1829 let data = vec![1.; 1];
1830 buffer.copy_to_channel(&data, 0);
1831
1832 let mut src = context.create_buffer_source();
1833 src.connect(&context.destination());
1834 src.set_buffer(buffer);
1835 src.start_at(0.);
1837
1838 let src = Arc::new(Mutex::new(src));
1839 let clone = Arc::clone(&src);
1840 src.lock().unwrap().set_onended(move |_| {
1841 clone.lock().unwrap().set_loop(true);
1842 });
1843
1844 let result = context.start_rendering_sync();
1845 let channel = result.get_channel_data(0);
1846
1847 let mut expected = vec![0.; result_size];
1848 expected[0] = 1.;
1849
1850 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1851 }
1852
1853 #[test]
1854 fn test_loop_no_restart_onended_slow_track() {
1855 let sample_rate = 48_000.;
1856 let result_size = RENDER_QUANTUM_SIZE * 4;
1858 let mut context = OfflineAudioContext::new(1, result_size, sample_rate);
1859
1860 let mut buffer = context.create_buffer(1, 1, sample_rate);
1861 let data = vec![1.; 1];
1862 buffer.copy_to_channel(&data, 0);
1863
1864 let mut src = context.create_buffer_source();
1865 src.connect(&context.destination());
1866 src.set_buffer(buffer);
1867 src.start_at(1. / sample_rate as f64);
1869
1870 let src = Arc::new(Mutex::new(src));
1871 let clone = Arc::clone(&src);
1872 src.lock().unwrap().set_onended(move |_| {
1873 clone.lock().unwrap().set_loop(true);
1874 });
1875
1876 let result = context.start_rendering_sync();
1877 let channel = result.get_channel_data(0);
1878
1879 let mut expected = vec![0.; result_size];
1880 expected[1] = 1.;
1881
1882 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1883 }
1884
1885 #[test]
1886 fn test_subsample_buffer_stitching() {
1890 [(44_100., 44_100., 9.0957e-5), (44_100., 43_800., 3.8986e-3)]
1891 .iter()
1892 .for_each(|(sample_rate, buffer_rate, error_threshold)| {
1893 let sample_rate = *sample_rate;
1894 let buffer_rate = *buffer_rate;
1895 let buffer_length = 30;
1896 let frequency = 440.;
1897
1898 let length = buffer_length * 15;
1900 let mut context = OfflineAudioContext::new(2, length, sample_rate);
1901
1902 let mut wave_signal = vec![0.; context.length()];
1903 let omega = 2. * PI / buffer_rate * frequency;
1904 wave_signal.iter_mut().enumerate().for_each(|(i, s)| {
1905 *s = (omega * i as f32).sin();
1906 });
1907
1908 for k in (0..context.length()).step_by(buffer_length) {
1912 let mut buffer = AudioBuffer::new(AudioBufferOptions {
1913 number_of_channels: 1,
1914 length: buffer_length,
1915 sample_rate: buffer_rate,
1916 });
1917 buffer.copy_to_channel(&wave_signal[k..k + buffer_length], 0);
1918
1919 let mut src = AudioBufferSourceNode::new(
1920 &context,
1921 AudioBufferSourceOptions {
1922 buffer: Some(buffer),
1923 ..Default::default()
1924 },
1925 );
1926 src.connect(&context.destination());
1927 src.start_at(k as f64 / buffer_rate as f64);
1928 }
1929
1930 let mut expected = vec![0.; context.length()];
1931 let omega = 2. * PI / sample_rate * frequency;
1932 expected.iter_mut().enumerate().for_each(|(i, s)| {
1933 *s = (omega * i as f32).sin();
1934 });
1935
1936 let result = context.start_rendering_sync();
1937 let actual = result.get_channel_data(0);
1938
1939 assert_float_eq!(actual[..], expected[..], abs_all <= error_threshold);
1940 });
1941 }
1942
1943 #[test]
1944 fn test_onended_before_drop() {
1945 let sample_rate = 48_000.;
1946 let result_size = RENDER_QUANTUM_SIZE;
1947 let mut context = OfflineAudioContext::new(1, result_size, sample_rate);
1948 let mut buffer = context.create_buffer(1, result_size * 2, sample_rate);
1950 let data = vec![1.; 1];
1951 buffer.copy_to_channel(&data, 0);
1952
1953 let mut src = context.create_buffer_source();
1954 src.connect(&context.destination());
1955 src.set_buffer(buffer);
1956 src.start();
1957
1958 let onended_called = Arc::new(AtomicBool::new(false));
1959 let onended_called_clone = Arc::clone(&onended_called);
1960
1961 src.set_onended(move |_| {
1962 onended_called_clone.store(true, Ordering::SeqCst);
1963 });
1964
1965 let result = context.start_rendering_sync();
1966 let channel = result.get_channel_data(0);
1967
1968 let mut expected = vec![0.; result_size];
1969 expected[0] = 1.;
1970
1971 assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
1972 assert!(onended_called.load(Ordering::SeqCst));
1973 }
1974}