use crate::context::{AudioContextRegistration, AudioParamId, BaseAudioContext};
use crate::param::{AudioParam, AudioParamDescriptor};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use crate::RENDER_QUANTUM_SIZE;
use super::{AudioNode, ChannelConfig, ChannelConfigOptions, ChannelInterpretation};
use std::cell::{Cell, RefCell, RefMut};
use std::rc::Rc;
use std::sync::atomic::{AtomicU64, Ordering};
/// Options for constructing a `DelayNode`.
#[derive(Clone, Debug)]
pub struct DelayOptions {
    // maximum delay (seconds) the node must be able to apply; sizes the ring buffer
    pub max_delay_time: f64,
    // initial value (seconds) of the `delay_time` AudioParam
    pub delay_time: f64,
    // channel configuration (count / mode / interpretation) options
    pub channel_config: ChannelConfigOptions,
}
impl Default for DelayOptions {
fn default() -> Self {
Self {
max_delay_time: 1.,
delay_time: 0.,
channel_config: ChannelConfigOptions::default(),
}
}
}
/// Precomputed read position for one output frame: the ring-buffer
/// coordinates of the sample preceding the (possibly fractional) delayed
/// position, plus the linear-interpolation coefficient towards the next sample.
#[derive(Copy, Clone, Debug, Default)]
struct PlaybackInfo {
    // index of the render quantum inside the ring buffer
    prev_block_index: usize,
    // frame index inside that quantum (0..RENDER_QUANTUM_SIZE)
    prev_frame_index: usize,
    // interpolation weight in [0, 1): 0 = exactly on `prev`, →1 = closer to `next`
    k: f32,
}
/// Node that delays the incoming audio signal.
///
/// Internally split into two render processors — a writer and a reader —
/// sharing a ring buffer. The writer is marked as a cycle breaker (see
/// `DelayNode::new`), which allows this node to appear inside feedback
/// loops; the public node registers as the writer while outgoing
/// connections originate from the reader.
pub struct DelayNode {
    /// registration of the reader half (source of the delayed signal)
    reader_registration: AudioContextRegistration,
    /// registration of the writer half (sink for the incoming signal)
    writer_registration: AudioContextRegistration,
    /// amount of delay, in seconds, applied to the signal
    delay_time: AudioParam,
    /// channel configuration (mixing strategy)
    channel_config: ChannelConfig,
}
impl AudioNode for DelayNode {
    /// The writer half serves as the node's own registration: incoming
    /// edges must terminate at the writer so the delay line gets fed.
    fn registration(&self) -> &AudioContextRegistration {
        &self.writer_registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        1
    }

    /// Connect this node to `dest`.
    ///
    /// The connection originates from the *reader* half, which produces
    /// the delayed signal (the default `registration()` would wire up the
    /// writer, which always outputs silence).
    ///
    /// # Panics
    ///
    /// Panics when the nodes belong to different contexts or when
    /// `output` / `input` are out of bounds.
    fn connect_at<'a>(
        &self,
        dest: &'a dyn AudioNode,
        output: usize,
        input: usize,
    ) -> &'a dyn AudioNode {
        if self.context() != dest.context() {
            panic!("InvalidAccessError: Attempting to connect nodes from different contexts");
        }
        if self.number_of_outputs() <= output {
            panic!("IndexSizeError: output port {} is out of bounds", output);
        }
        if dest.number_of_inputs() <= input {
            panic!("IndexSizeError: input port {} is out of bounds", input);
        }
        self.context().connect(
            self.reader_registration.id(),
            dest.registration().id(),
            output,
            input,
        );
        dest
    }

    /// Disconnect this node from `dest` (again via the reader half,
    /// matching `connect_at`).
    ///
    /// # Panics
    ///
    /// Panics when the nodes belong to different contexts.
    fn disconnect_from<'a>(&self, dest: &'a dyn AudioNode) -> &'a dyn AudioNode {
        if self.context() != dest.context() {
            // Message made consistent with `connect_at`, which prefixes the
            // spec error name.
            panic!("InvalidAccessError: Attempting to disconnect nodes from different contexts");
        }
        self.context()
            .disconnect_from(self.reader_registration.id(), dest.registration().id());
        dest
    }

    /// Disconnect all outgoing connections (reader half).
    fn disconnect(&self) {
        self.context().disconnect(self.reader_registration.id());
    }
}
impl DelayNode {
    /// Create a new `DelayNode` from a context and `DelayOptions`.
    ///
    /// Two render processors are registered: a `DelayWriter` (which also
    /// serves as the node's registration and as a render-graph cycle
    /// breaker) and a `DelayReader` that downstream nodes are connected
    /// from. Both share the ring buffer, the `last_written_index` cell and
    /// the `latest_frame_written` counter.
    ///
    /// # Panics
    ///
    /// Panics when `options.max_delay_time` is not a finite value strictly
    /// between zero and 180 seconds (three minutes).
    pub fn new<C: BaseAudioContext>(context: &C, options: DelayOptions) -> Self {
        let sample_rate = context.sample_rate() as f64;

        // Negated conjunction so that a NaN `max_delay_time` is rejected
        // as well: with the previous `<= 0. || >= 180.` form both
        // comparisons are false for NaN, which would slip through and
        // produce a zero-capacity ring buffer (and a modulo-by-zero panic
        // later during rendering).
        if !(options.max_delay_time > 0. && options.max_delay_time < 180.) {
            panic!("NotSupportedError: MUST be greater than zero and less than three minutes");
        }

        // The ring buffer must span at least one render quantum, even when
        // the requested maximum delay is shorter than that.
        let quantum_duration = 1. / sample_rate * RENDER_QUANTUM_SIZE as f64;
        let max_delay_time = options.max_delay_time.max(quantum_duration);

        // +1 sample so that a delay of exactly `max_delay_time` still has
        // both samples needed for linear interpolation available.
        let num_samples = max_delay_time * sample_rate + 1.;
        let num_quanta =
            (num_samples.ceil() as usize + RENDER_QUANTUM_SIZE - 1) / RENDER_QUANTUM_SIZE;
        // Allocated empty; filled lazily on the first `process` call when a
        // quantum with the right channel count is available to clone from.
        let ring_buffer = Vec::with_capacity(num_quanta);

        let shared_ring_buffer = Rc::new(RefCell::new(ring_buffer));
        let shared_ring_buffer_clone = shared_ring_buffer.clone();

        // Set by the writer when it is dropped; lets the reader play out
        // the remaining tail before declaring itself finished.
        let last_written_index = Rc::new(Cell::<Option<usize>>::new(None));
        let last_written_index_clone = last_written_index.clone();

        // Frame number of the writer's most recent write (u64::MAX until
        // the first write); used by the reader to detect cycles.
        let latest_frame_written = Rc::new(AtomicU64::new(u64::MAX));
        let latest_frame_written_clone = latest_frame_written.clone();

        let node = context.register(move |writer_registration| {
            let node = context.register(move |reader_registration| {
                let param_opts = AudioParamDescriptor {
                    min_value: 0.,
                    max_value: max_delay_time as f32,
                    default_value: 0.,
                    automation_rate: crate::param::AutomationRate::A,
                };
                let (param, proc) = context.create_audio_param(param_opts, &reader_registration);
                param.set_value_at_time(options.delay_time as f32, 0.);

                let reader_render = DelayReader {
                    delay_time: proc,
                    ring_buffer: shared_ring_buffer_clone,
                    index: 0,
                    last_written_index: last_written_index_clone,
                    in_cycle: false,
                    last_written_index_checked: None,
                    latest_frame_written: latest_frame_written_clone,
                };

                let node = DelayNode {
                    reader_registration,
                    writer_registration,
                    channel_config: options.channel_config.into(),
                    delay_time: param,
                };

                (node, Box::new(reader_render))
            });

            let writer_render = DelayWriter {
                ring_buffer: shared_ring_buffer,
                index: 0,
                last_written_index,
                latest_frame_written,
            };

            (node, Box::new(writer_render))
        });

        let writer_id = node.writer_registration.id();
        let reader_id = node.reader_registration.id();

        // The writer is the designated cycle breaker; an internal
        // writer -> reader connection is also established (presumably to
        // tie their lifetimes/ordering in the render graph — confirm
        // against the graph scheduler).
        context.base().mark_cycle_breaker(&node.writer_registration);
        context.base().connect(writer_id, reader_id, 0, 0);

        node
    }

    /// The `AudioParam` controlling the current delay time, in seconds.
    pub fn delay_time(&self) -> &AudioParam {
        &self.delay_time
    }
}
/// Render processor that writes incoming quanta into the shared ring buffer.
struct DelayWriter {
    /// ring buffer shared with the paired `DelayReader`
    ring_buffer: Rc<RefCell<Vec<AudioRenderQuantum>>>,
    /// slot that will receive the next write
    index: usize,
    /// frame number of the most recent write (u64::MAX until first write)
    latest_frame_written: Rc<AtomicU64>,
    /// set on drop so the reader knows where the input signal ended
    last_written_index: Rc<Cell<Option<usize>>>,
}
// SAFETY: the `Rc`/`RefCell`/`Cell` fields are not `Send`, but the writer
// and its paired reader are both owned by the render thread, so the shared
// state is never accessed from two threads at once.
// NOTE(review): this relies on the render graph keeping both halves on the
// same thread — confirm that guarantee holds.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for DelayWriter {}
/// Shared helper for the writer and reader processors: lazily brings the
/// ring buffer to its full size on first use.
trait RingBufferChecker {
    /// Mutable access to the shared ring buffer.
    fn ring_buffer_mut(&self) -> RefMut<Vec<AudioRenderQuantum>>;

    /// Fill the ring buffer up to its full capacity with silent quanta
    /// cloned from `render_quantum` (so they carry its channel count).
    /// This runs lazily from `process` because only there is a quantum
    /// available to clone from; after the first call `len == capacity`
    /// and the branch is a no-op.
    ///
    /// NOTE(review): the buffer's *capacity* is used as the logical ring
    /// size throughout this file, but `Vec::with_capacity` only guarantees
    /// *at least* the requested capacity — the effective maximum delay may
    /// therefore be slightly larger than requested. Confirm this is
    /// acceptable (or store the requested size explicitly).
    #[inline(always)]
    fn check_ring_buffer_size(&self, render_quantum: &AudioRenderQuantum) {
        let mut ring_buffer = self.ring_buffer_mut();
        if ring_buffer.len() < ring_buffer.capacity() {
            let len = ring_buffer.capacity();
            let mut silence = render_quantum.clone();
            silence.make_silent();
            ring_buffer.resize(len, silence);
        }
    }
}
impl Drop for DelayWriter {
    /// Record which ring-buffer slot received the final input quantum, so
    /// the paired reader can keep rendering until the tail has drained.
    fn drop(&mut self) {
        let capacity = self.ring_buffer.borrow().capacity();
        // `self.index` points at the *next* write slot; step one back,
        // wrapping around the ring. Equivalent to the usual
        // `if index == 0 { capacity - 1 } else { index - 1 }`.
        let last_written_index = (self.index + capacity - 1) % capacity;
        self.last_written_index.set(Some(last_written_index));
    }
}
impl RingBufferChecker for DelayWriter {
    // Expose the shared ring buffer to the trait's default method.
    #[inline(always)]
    fn ring_buffer_mut(&self) -> RefMut<Vec<AudioRenderQuantum>> {
        self.ring_buffer.borrow_mut()
    }
}
impl AudioProcessor for DelayWriter {
    /// Store the input quantum into the ring buffer and advance the write
    /// head. The writer always emits silence — the audible (delayed)
    /// signal is produced by the paired `DelayReader`.
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        _params: AudioParamValues,
        scope: &RenderScope,
    ) -> bool {
        // single input / single output node
        let input = inputs[0].clone();
        let output = &mut outputs[0];

        // Lazily size the ring buffer, then adapt its channel count to the
        // current input, before storing the quantum.
        self.check_ring_buffer_size(&input);
        self.check_ring_buffer_up_down_mix(&input);

        let mut buffer = self.ring_buffer.borrow_mut();
        buffer[self.index] = input;
        self.index = (self.index + 1) % buffer.capacity();

        // Publish the frame number of this write; the reader compares it
        // with its own current frame to detect feedback cycles.
        self.latest_frame_written
            .store(scope.current_frame, Ordering::SeqCst);

        output.make_silent();

        // The writer has no tail of its own: tail handling is driven by
        // the reader via `last_written_index` (set in `Drop`).
        false
    }
}
impl DelayWriter {
    /// Keep the stored quanta's channel count in sync with the incoming
    /// signal: when they differ, remix every quantum in the ring buffer to
    /// the input's channel count using the speakers interpretation.
    #[inline(always)]
    fn check_ring_buffer_up_down_mix(&self, input: &AudioRenderQuantum) {
        let mut ring_buffer = self.ring_buffer_mut();
        let input_channels = input.number_of_channels();
        if ring_buffer[0].number_of_channels() != input_channels {
            ring_buffer
                .iter_mut()
                .for_each(|quantum| quantum.mix(input_channels, ChannelInterpretation::Speakers));
        }
    }
}
/// Render processor that reads delayed samples back out of the ring buffer.
struct DelayReader {
    /// handle to the `delay_time` AudioParam values
    delay_time: AudioParamId,
    /// ring buffer shared with the paired `DelayWriter`
    ring_buffer: Rc<RefCell<Vec<AudioRenderQuantum>>>,
    /// block the reader treats as "current" (kept in lock-step with the writer)
    index: usize,
    /// frame number of the writer's most recent write
    latest_frame_written: Rc<AtomicU64>,
    /// sticky flag: set once the node is detected inside a feedback cycle,
    /// which forces the effective delay to at least one render quantum
    in_cycle: bool,
    /// written by the writer's `Drop`: slot holding the final input quantum
    last_written_index: Rc<Cell<Option<usize>>>,
    /// local snapshot of `last_written_index`, used to decide when the tail ends
    last_written_index_checked: Option<usize>,
}
// SAFETY: the `Rc`/`RefCell`/`Cell` fields are not `Send`, but the reader
// and its paired writer are both owned by the render thread, so the shared
// state is never accessed from two threads at once.
// NOTE(review): same caveat as `DelayWriter` — relies on both halves
// staying on one thread.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for DelayReader {}
impl RingBufferChecker for DelayReader {
    // Expose the shared ring buffer to the trait's default method.
    #[inline(always)]
    fn ring_buffer_mut(&self) -> RefMut<Vec<AudioRenderQuantum>> {
        self.ring_buffer.borrow_mut()
    }
}
impl AudioProcessor for DelayReader {
    /// Produce one quantum of delayed output by reading the shared ring
    /// buffer back with linear interpolation between adjacent samples.
    fn process(
        &mut self,
        _inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues,
        scope: &RenderScope,
    ) -> bool {
        // single output node; the input side is consumed by the writer
        let output = &mut outputs[0];
        // Make sure the ring buffer is fully allocated before reading.
        self.check_ring_buffer_size(output);
        // The writer keeps the buffer's channel count in sync with the
        // input, so the buffer is authoritative for the output layout.
        let ring_buffer = self.ring_buffer.borrow();
        let number_of_channels = ring_buffer[0].number_of_channels();
        output.set_number_of_channels(number_of_channels);

        // Cycle detection: if the writer has not written during the
        // current frame yet, this reader was scheduled before its writer,
        // which indicates a feedback cycle. The flag is sticky: once in a
        // cycle, the delay stays clamped to at least one render quantum
        // (see `get_playback_infos`).
        if !self.in_cycle {
            let latest_frame_written = self.latest_frame_written.load(Ordering::SeqCst);
            self.in_cycle = latest_frame_written != scope.current_frame;
        }

        let delay = params.get(&self.delay_time);
        let sample_rate = scope.sample_rate as f64;
        let dt = 1. / sample_rate;
        let quantum_duration = RENDER_QUANTUM_SIZE as f64 * dt;
        let ring_size = ring_buffer.len() as i32;
        let ring_index = self.index as i32;

        // Read coordinates for each of the RENDER_QUANTUM_SIZE output frames.
        let mut playback_infos = [PlaybackInfo::default(); RENDER_QUANTUM_SIZE];

        if delay.len() == 1 {
            // Constant delay for the whole quantum: compute the position of
            // the first frame once, then derive the remaining frames by
            // stepping one sample forward (the interpolation weight `k`
            // stays the same for every frame).
            playback_infos[0] = Self::get_playback_infos(
                f64::from(delay[0]),
                self.in_cycle,
                0.,
                quantum_duration,
                sample_rate,
                ring_size,
                ring_index,
            );

            for i in 1..RENDER_QUANTUM_SIZE {
                let PlaybackInfo {
                    prev_block_index,
                    prev_frame_index,
                    k,
                } = playback_infos[i - 1];

                // Advance one frame, rolling over into the next block (and
                // wrapping around the ring) at quantum boundaries.
                let mut prev_block_index = prev_block_index;
                let mut prev_frame_index = prev_frame_index + 1;

                if prev_frame_index >= RENDER_QUANTUM_SIZE {
                    prev_block_index = (prev_block_index + 1) % ring_buffer.len();
                    prev_frame_index = 0;
                }

                playback_infos[i] = PlaybackInfo {
                    prev_block_index,
                    prev_frame_index,
                    k,
                };
            }
        } else {
            // a-rate automation: one independent read position per frame.
            delay
                .iter()
                .zip(playback_infos.iter_mut())
                .enumerate()
                .for_each(|(index, (&d, infos))| {
                    *infos = Self::get_playback_infos(
                        f64::from(d),
                        self.in_cycle,
                        index as f64,
                        quantum_duration,
                        sample_rate,
                        ring_size,
                        ring_index,
                    );
                });
        }

        let mut is_actively_processing = false;

        for (channel_number, output_channel) in output.channels_mut().iter_mut().enumerate() {
            // Cache the channel slice of the current block and only
            // re-borrow when the block index actually changes.
            let mut block_index = playback_infos[0].prev_block_index;
            let mut channel_data = ring_buffer[block_index].channel_data(channel_number);

            output_channel
                .iter_mut()
                .zip(playback_infos.iter_mut())
                .for_each(|(o, infos)| {
                    let PlaybackInfo {
                        prev_block_index,
                        prev_frame_index,
                        k,
                    } = *infos;

                    // Coordinates of the sample following `prev`, for the
                    // interpolation pair.
                    let mut next_block_index = prev_block_index;
                    let mut next_frame_index = prev_frame_index + 1;

                    if next_frame_index >= RENDER_QUANTUM_SIZE {
                        next_block_index = (next_block_index + 1) % ring_buffer.len();
                        next_frame_index = 0;
                    }

                    if block_index != prev_block_index {
                        block_index = prev_block_index;
                        channel_data = ring_buffer[block_index].channel_data(channel_number);
                    }
                    let prev_sample = channel_data[prev_frame_index];

                    if block_index != next_block_index {
                        block_index = next_block_index;
                        channel_data = ring_buffer[block_index].channel_data(channel_number);
                    }
                    let next_sample = channel_data[next_frame_index];

                    // Linear interpolation: (1 - k) * prev + k * next.
                    let value = (1. - k).mul_add(prev_sample, k * next_sample);

                    // Note: `is_normal()` is false for zero, subnormals,
                    // NaN and infinities — a quantum of zeros/subnormals
                    // therefore counts as silent below.
                    if value.is_normal() {
                        is_actively_processing = true;
                    }

                    *o = value;
                });
        }

        if !is_actively_processing {
            output.make_silent();
        }

        // Tail handling: the writer records the slot of the final input
        // quantum when it is dropped. Once the reader has cycled back to
        // that slot the tail has fully drained and this processor can be
        // reclaimed.
        if matches!(self.last_written_index_checked, Some(index) if index == self.index) {
            return false;
        }
        // Snapshot the writer's final index the first time it appears.
        let last_written_index = self.last_written_index.get();
        if last_written_index.is_some() && self.last_written_index_checked.is_none() {
            self.last_written_index_checked = last_written_index;
        }
        // Advance in lock-step with the (possibly already dropped) writer.
        self.index = (self.index + 1) % ring_buffer.capacity();

        true
    }
}
impl DelayReader {
    /// Compute, for a single output frame, the ring-buffer coordinates of
    /// the sample *preceding* the delayed read position, plus the linear
    /// interpolation weight `k` towards the following sample.
    ///
    /// `sample_index` is the frame offset inside the current quantum;
    /// `ring_index` is the block currently being written, so the read
    /// position is expressed relative to that block.
    #[inline(always)]
    fn get_playback_infos(
        delay: f64,
        in_cycle: bool,
        sample_index: f64,
        quantum_duration: f64,
        sample_rate: f64,
        ring_size: i32,
        ring_index: i32,
    ) -> PlaybackInfo {
        // Inside a feedback cycle the minimum achievable delay is one full
        // render quantum (writer and reader alternate once per quantum).
        let clamped_delay = if in_cycle {
            delay.max(quantum_duration)
        } else {
            delay
        };
        let num_samples = clamped_delay * sample_rate;
        // Read position in samples, relative to the start of the current
        // block; typically negative (in the past) and possibly fractional.
        let position = sample_index - num_samples;
        let position_floored = position.floor();

        // Whole blocks to step back from `ring_index`. `floor` makes the
        // division round towards negative infinity for positions in
        // earlier blocks.
        let num_frames = RENDER_QUANTUM_SIZE as i32;
        let block_offset = (position_floored / num_frames as f64).floor();
        let mut prev_block_index = ring_index + block_offset as i32;

        // Wrap around the ring when we stepped past its beginning.
        if prev_block_index < 0 {
            prev_block_index += ring_size;
        }

        // Frame index within that block. Rust's `%` keeps the dividend's
        // sign, so `frame_offset` lies in (-num_frames, num_frames).
        let mut frame_offset = position_floored as i32 % num_frames;
        // When the floored position is an exact multiple of the quantum
        // size the remainder is 0, which the branch below would map to
        // `num_frames` (out of bounds); substitute `-num_frames` so it
        // maps to frame 0 of the block selected by `block_offset` instead.
        if frame_offset == 0 {
            frame_offset = -num_frames;
        }
        let prev_frame_index = if frame_offset <= 0 {
            num_frames + frame_offset
        } else {
            frame_offset
        };

        // Fractional part of the position = interpolation weight towards
        // the next sample.
        let k = (position - position_floored) as f32;

        PlaybackInfo {
            prev_block_index: prev_block_index as usize,
            prev_frame_index: prev_frame_index as usize,
            k,
        }
    }
}
#[cfg(test)]
mod tests {
    use float_eq::assert_float_eq;

    use crate::context::OfflineAudioContext;
    use crate::node::AudioScheduledSourceNode;

    use super::*;

    // A dirac delayed by a whole number of samples must come out exactly
    // that many frames later, including delays that are not a multiple of
    // the render quantum size.
    #[test]
    fn test_sample_accurate() {
        for delay_in_samples in [128., 131., 197.].iter() {
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 256, sample_rate);

            let delay = context.create_delay(2.);
            delay.delay_time.set_value(delay_in_samples / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 256];
            expected[*delay_in_samples as usize] = 1.;

            assert_float_eq!(channel[..], expected[..], abs_all <= 0.00001);
        }
    }

    // Fractional-sample delays are linearly interpolated: a dirac spreads
    // across the two neighbouring frames with weights (1 - k) and k.
    #[test]
    fn test_sub_sample_accurate() {
        {
            let delay_in_samples = 128.5;
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 256, sample_rate);

            let delay = context.create_delay(2.);
            delay.delay_time.set_value(delay_in_samples / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 256];
            expected[128] = 0.5;
            expected[129] = 0.5;

            assert_float_eq!(channel[..], expected[..], abs_all <= 0.00001);
        }

        {
            let delay_in_samples = 128.8;
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 256, sample_rate);

            let delay = context.create_delay(2.);
            delay.delay_time.set_value(delay_in_samples / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 256];
            expected[128] = 0.2;
            expected[129] = 0.8;

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-5);
        }
    }

    // Each channel of a stereo signal is delayed independently.
    #[test]
    fn test_multichannel() {
        let delay_in_samples = 128.;
        let sample_rate = 48000.;
        let context = OfflineAudioContext::new(2, 2 * 128, sample_rate);

        let delay = context.create_delay(2.);
        delay.delay_time.set_value(delay_in_samples / sample_rate);
        delay.connect(&context.destination());

        let mut two_chan_dirac = context.create_buffer(2, 256, sample_rate);
        // dirac at sample 0 in the left channel, at sample 1 in the right
        two_chan_dirac.copy_to_channel(&[1.], 0);
        two_chan_dirac.copy_to_channel(&[0., 1.], 1);

        let src = context.create_buffer_source();
        src.connect(&delay);
        src.set_buffer(two_chan_dirac);
        src.start_at(0.);

        let result = context.start_rendering_sync();

        let channel_left = result.get_channel_data(0);
        let mut expected_left = vec![0.; 256];
        expected_left[128] = 1.;
        assert_float_eq!(channel_left[..], expected_left[..], abs_all <= 1e-5);

        let channel_right = result.get_channel_data(1);
        let mut expected_right = vec![0.; 256];
        expected_right[128 + 1] = 1.;
        assert_float_eq!(channel_right[..], expected_right[..], abs_all <= 1e-5);
    }

    // The ring buffer must follow a change of the input's channel count
    // mid-stream without corrupting already-buffered data.
    #[test]
    fn test_input_number_of_channels_change() {
        let delay_in_samples = 128.;
        let sample_rate = 48000.;
        let context = OfflineAudioContext::new(2, 3 * 128, sample_rate);

        let delay = context.create_delay(2.);
        delay.delay_time.set_value(delay_in_samples / sample_rate);
        delay.connect(&context.destination());

        let mut one_chan_dirac = context.create_buffer(1, 128, sample_rate);
        one_chan_dirac.copy_to_channel(&[1.], 0);

        let src1 = context.create_buffer_source();
        src1.connect(&delay);
        src1.set_buffer(one_chan_dirac);
        src1.start_at(0.);

        let mut two_chan_dirac = context.create_buffer(2, 256, sample_rate);
        two_chan_dirac.copy_to_channel(&[1.], 0);
        two_chan_dirac.copy_to_channel(&[0., 1.], 1);

        let src2 = context.create_buffer_source();
        src2.connect(&delay);
        src2.set_buffer(two_chan_dirac);
        // second (stereo) source starts one delay-length later
        src2.start_at(delay_in_samples as f64 / sample_rate as f64);

        let result = context.start_rendering_sync();

        let channel_left = result.get_channel_data(0);
        let mut expected_left = vec![0.; 3 * 128];
        expected_left[128] = 1.;
        expected_left[256] = 1.;
        assert_float_eq!(channel_left[..], expected_left[..], abs_all <= 1e-5);

        let channel_right = result.get_channel_data(1);
        let mut expected_right = vec![0.; 3 * 128];
        expected_right[128] = 1.;
        expected_right[256 + 1] = 1.;
        assert_float_eq!(channel_right[..], expected_right[..], abs_all <= 1e-5);
    }

    // Even when the node handle is dropped before rendering, the delayed
    // tail must still be played out (writer/reader lifetime handling).
    #[test]
    fn test_node_stays_alive_long_enough() {
        // Test that the node doesn't get dropped by the render thread too
        // early, while it still has buffered data to play.
        for _ in 0..10 {
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 5 * 128, sample_rate);

            {
                // Drop the delay node handle at the end of this scope.
                let delay = context.create_delay(1.);
                delay.delay_time.set_value(128. / sample_rate);
                delay.connect(&context.destination());

                let mut dirac = context.create_buffer(1, 1, sample_rate);
                dirac.copy_to_channel(&[1.], 0);

                let src = context.create_buffer_source();
                src.connect(&delay);
                src.set_buffer(dirac);
                src.start_at(128. * 3. / sample_rate as f64);
            }

            let result = context.start_rendering_sync();

            let mut expected = vec![0.; 5 * 128];
            expected[4 * 128] = 1.;
            assert_float_eq!(result.get_channel_data(0), &expected[..], abs_all <= 1e-5);
        }
    }

    // Delays shorter than one render quantum work when the node is not
    // part of a cycle.
    #[test]
    fn test_subquantum_delay() {
        for i in 0..128 {
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 128, sample_rate);

            let delay = context.create_delay(1.);
            delay.delay_time.set_value(i as f32 / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 128];
            expected[i] = 1.;

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-5);
        }
    }

    // Inside a feedback loop the effective delay is clamped up to one
    // render quantum, even when a smaller delay_time is requested.
    #[test]
    fn test_min_delay_when_in_loop() {
        let sample_rate = 480000.;
        let context = OfflineAudioContext::new(1, 256, sample_rate);

        let delay = context.create_delay(1.);
        delay.delay_time.set_value(1. / sample_rate);
        delay.connect(&context.destination());

        // create a feedback loop (gain at 0 so nothing actually recirculates)
        let gain = context.create_gain();
        gain.gain().set_value(0.);

        delay.connect(&gain);
        gain.connect(&delay);

        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);

        let src = context.create_buffer_source();
        src.connect(&delay);
        src.set_buffer(dirac);
        src.start_at(0.);

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 256];
        expected[128] = 1.;

        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }

    // Same clamping applies when max_delay_time itself is smaller than a
    // render quantum: the buffer still spans (at least) one quantum.
    #[test]
    fn test_max_delay_smaller_than_quantum_size() {
        for _ in 0..10 {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, 256, sample_rate);

            let delay = context.create_delay((64. / sample_rate).into());
            delay.delay_time.set_value(64. / sample_rate);
            delay.connect(&context.destination());

            // create a feedback loop (gain at 0)
            let gain = context.create_gain();
            gain.gain().set_value(0.);

            delay.connect(&gain);
            gain.connect(&delay);

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 256];
            expected[128] = 1.;

            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }

    // Edge case: delay equal to an exact multiple of the quantum size
    // (exercises the frame_offset == 0 branch in get_playback_infos).
    #[test]
    fn test_max_delay_multiple_of_quantum_size() {
        {
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 256, sample_rate);

            let delay = context.create_delay(1.);
            delay.delay_time.set_value(128. / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 256];
            expected[128] = 1.;

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-5);
        }

        {
            let sample_rate = 48000.;
            let context = OfflineAudioContext::new(1, 3 * 128, sample_rate);

            let delay = context.create_delay(2.);
            delay.delay_time.set_value(128. * 2. / sample_rate);
            delay.connect(&context.destination());

            let mut dirac = context.create_buffer(1, 1, sample_rate);
            dirac.copy_to_channel(&[1.], 0);

            let src = context.create_buffer_source();
            src.connect(&delay);
            src.set_buffer(dirac);
            src.start_at(0.);

            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);

            let mut expected = vec![0.; 3 * 128];
            expected[256] = 1.;

            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-5);
        }
    }

    // Sub-quantum delay combined with a source that stops mid-quantum:
    // the tail after the source's end must still be rendered.
    #[test]
    fn test_subquantum_delay_dynamic_lifetime() {
        let sample_rate = 48000.;
        let context = OfflineAudioContext::new(1, 3 * 128, sample_rate);

        {
            // node handle dropped at end of scope
            let delay = context.create_delay(1.);
            delay.delay_time.set_value(64_f32 / sample_rate);
            delay.connect(&context.destination());

            let src = context.create_constant_source();
            src.connect(&delay);
            src.start_at(0.);
            src.stop_at(120. / sample_rate as f64);
        }

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        let mut expected = vec![0.; 3 * 128];
        expected[64..64 + 120].fill(1.);

        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-5);
    }
}