use std::any::Any;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use crate::buffer::AudioBuffer;
use crate::context::{AudioContextRegistration, AudioParamId, BaseAudioContext};
use crate::param::{AudioParam, AudioParamDescriptor, AutomationRate};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use crate::{AtomicF64, RENDER_QUANTUM_SIZE};
use super::{AudioNode, AudioScheduledSourceNode, ChannelConfig};
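/// Options for constructing an [`AudioBufferSourceNode`].
///
/// A minimal construction sketch; the `use` path assumes this module is part
/// of the published `web_audio_api` crate:
///
/// ```no_run
/// use web_audio_api::node::AudioBufferSourceOptions;
///
/// // start from the defaults and override only what differs
/// let options = AudioBufferSourceOptions {
///     loop_: true,
///     playback_rate: 0.5,
///     ..AudioBufferSourceOptions::default()
/// };
/// ```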
#[derive(Clone, Debug)]
pub struct AudioBufferSourceOptions {
    pub buffer: Option<AudioBuffer>,
    pub detune: f32,
    pub loop_: bool,
    pub loop_start: f64,
    pub loop_end: f64,
    pub playback_rate: f32,
}
impl Default for AudioBufferSourceOptions {
    fn default() -> Self {
        Self {
            buffer: None,
            detune: 0.,
            loop_: false,
            loop_start: 0.,
            loop_end: 0.,
            playback_rate: 1.,
        }
    }
}
#[derive(Copy, Clone)]
struct PlaybackInfo {
    prev_frame_index: usize,
    k: f32,
}
#[derive(Debug, Clone)]
struct LoopState {
    pub is_looping: bool,
    pub start: f64,
    pub end: f64,
}
#[derive(Debug, Clone)]
enum ControlMessage {
    StartWithOffsetAndDuration(f64, f64, f64),
    Stop(f64),
    Loop(bool),
    LoopStart(f64),
    LoopEnd(f64),
}
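/// An audio source that plays an in-memory [`AudioBuffer`].
///
/// This is a one-shot source: both `start` and `set_buffer` may be called at
/// most once, so create a new node for each playback.
///
/// A playback sketch; the `use` paths assume this module is part of the
/// published `web_audio_api` crate, and `samples/sample.wav` is the fixture
/// used by the tests below:
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// let context = AudioContext::default();
/// let file = std::fs::File::open("samples/sample.wav").unwrap();
/// let buffer = context.decode_audio_data_sync(file).unwrap();
///
/// let mut src = context.create_buffer_source();
/// src.set_buffer(buffer);
/// src.connect(&context.destination());
/// src.start();
/// ```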
pub struct AudioBufferSourceNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    detune: AudioParam,
    playback_rate: AudioParam,
    buffer_time: Arc<AtomicF64>,
    buffer: Option<AudioBuffer>,
    loop_state: LoopState,
    source_started: bool,
}
impl AudioNode for AudioBufferSourceNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }
    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }
    fn number_of_inputs(&self) -> usize {
        0
    }
    fn number_of_outputs(&self) -> usize {
        1
    }
}
impl AudioScheduledSourceNode for AudioBufferSourceNode {
    fn start(&mut self) {
        let start = self.registration.context().current_time();
        self.start_at_with_offset_and_duration(start, 0., f64::MAX);
    }
    fn start_at(&mut self, when: f64) {
        self.start_at_with_offset_and_duration(when, 0., f64::MAX);
    }
    fn stop(&mut self) {
        let stop = self.registration.context().current_time();
        self.stop_at(stop);
    }
    fn stop_at(&mut self, when: f64) {
        assert!(
            self.source_started,
            "InvalidStateError cannot stop before start"
        );
        self.registration.post_message(ControlMessage::Stop(when));
    }
}
impl AudioBufferSourceNode {
    pub fn new<C: BaseAudioContext>(context: &C, options: AudioBufferSourceOptions) -> Self {
        context.register(move |registration| {
            let AudioBufferSourceOptions {
                buffer,
                detune,
                loop_,
                loop_start,
                loop_end,
                playback_rate,
            } = options;
            let detune_param_options = AudioParamDescriptor {
                min_value: f32::MIN,
                max_value: f32::MAX,
                default_value: 0.,
                automation_rate: AutomationRate::K,
            };
            let (mut d_param, d_proc) =
                context.create_audio_param(detune_param_options, &registration);
            d_param.set_automation_rate_constrained(true);
            d_param.set_value(detune);
            let playback_rate_param_options = AudioParamDescriptor {
                min_value: f32::MIN,
                max_value: f32::MAX,
                default_value: 1.,
                automation_rate: AutomationRate::K,
            };
            let (mut pr_param, pr_proc) =
                context.create_audio_param(playback_rate_param_options, &registration);
            pr_param.set_automation_rate_constrained(true);
            pr_param.set_value(playback_rate);
            let loop_state = LoopState {
                is_looping: loop_,
                start: loop_start,
                end: loop_end,
            };
            let renderer = AudioBufferSourceRenderer {
                start_time: f64::MAX,
                stop_time: f64::MAX,
                duration: f64::MAX,
                offset: 0.,
                buffer: None,
                detune: d_proc,
                playback_rate: pr_proc,
                loop_state: loop_state.clone(),
                render_state: AudioBufferRendererState::default(),
                ended_triggered: false,
            };
            let mut node = Self {
                registration,
                channel_config: ChannelConfig::default(),
                detune: d_param,
                playback_rate: pr_param,
                buffer_time: Arc::clone(&renderer.render_state.buffer_time),
                buffer: None,
                loop_state,
                source_started: false,
            };
            if let Some(buf) = buffer {
                node.set_buffer(buf);
            }
            (node, Box::new(renderer))
        })
    }
    pub fn start_at_with_offset(&mut self, start: f64, offset: f64) {
        self.start_at_with_offset_and_duration(start, offset, f64::MAX);
    }
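    /// Schedule playback at `start` seconds on the context timeline, begin
    /// reading the buffer at `offset` seconds, and play for at most
    /// `duration` seconds.
    ///
    /// A scheduling sketch; the `use` paths assume the published
    /// `web_audio_api` crate, and the times are illustrative:
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    ///
    /// let context = AudioContext::default();
    /// let mut src = context.create_buffer_source();
    /// // one second from now, play 1.5 seconds of audio, starting half a
    /// // second into the buffer
    /// src.start_at_with_offset_and_duration(context.current_time() + 1., 0.5, 1.5);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the source was already started.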
    pub fn start_at_with_offset_and_duration(&mut self, start: f64, offset: f64, duration: f64) {
        assert!(
            !self.source_started,
            "InvalidStateError: Cannot call `start` twice"
        );
        self.source_started = true;
        let control = ControlMessage::StartWithOffsetAndDuration(start, offset, duration);
        self.registration.post_message(control);
    }
    pub fn buffer(&self) -> Option<&AudioBuffer> {
        self.buffer.as_ref()
    }
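    /// Provide an [`AudioBuffer`] as the source of samples to be played.
    ///
    /// # Panics
    ///
    /// Panics with `InvalidStateError` if a buffer has already been assigned:
    /// the buffer can be set at most once.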
    pub fn set_buffer(&mut self, audio_buffer: AudioBuffer) {
        assert!(
            self.buffer.is_none(),
            "InvalidStateError - cannot assign buffer twice"
        );
        let clone = audio_buffer.clone();
        self.buffer = Some(audio_buffer);
        self.registration.post_message(clone);
    }
    pub fn playback_rate(&self) -> &AudioParam {
        &self.playback_rate
    }
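    /// Current playhead position in seconds within the [`AudioBuffer`], as
    /// last published by the render thread (updated once per render quantum).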
    pub fn position(&self) -> f64 {
        self.buffer_time.load(Ordering::Relaxed)
    }
    pub fn detune(&self) -> &AudioParam {
        &self.detune
    }
    pub fn loop_(&self) -> bool {
        self.loop_state.is_looping
    }
    pub fn set_loop(&mut self, value: bool) {
        self.loop_state.is_looping = value;
        self.registration.post_message(ControlMessage::Loop(value));
    }
    pub fn loop_start(&self) -> f64 {
        self.loop_state.start
    }
    pub fn set_loop_start(&mut self, value: f64) {
        self.loop_state.start = value;
        self.registration
            .post_message(ControlMessage::LoopStart(value));
    }
    pub fn loop_end(&self) -> f64 {
        self.loop_state.end
    }
    pub fn set_loop_end(&mut self, value: f64) {
        self.loop_state.end = value;
        self.registration
            .post_message(ControlMessage::LoopEnd(value));
    }
}
struct AudioBufferRendererState {
    buffer_time: Arc<AtomicF64>,
    started: bool,
    entered_loop: bool,
    buffer_time_elapsed: f64,
    is_aligned: bool,
}
impl Default for AudioBufferRendererState {
    fn default() -> Self {
        Self {
            buffer_time: Arc::new(AtomicF64::new(0.)),
            started: false,
            entered_loop: false,
            buffer_time_elapsed: 0.,
            is_aligned: false,
        }
    }
}
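/// Render-thread counterpart of [`AudioBufferSourceNode`]; it receives its
/// [`AudioBuffer`] and all control messages from the node via `onmessage`.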
struct AudioBufferSourceRenderer {
    start_time: f64,
    stop_time: f64,
    offset: f64,
    duration: f64,
    buffer: Option<AudioBuffer>,
    detune: AudioParamId,
    playback_rate: AudioParamId,
    loop_state: LoopState,
    render_state: AudioBufferRendererState,
    ended_triggered: bool,
}
impl AudioBufferSourceRenderer {
    fn handle_control_message(&mut self, control: &ControlMessage) {
        match control {
            ControlMessage::StartWithOffsetAndDuration(when, offset, duration) => {
                self.start_time = *when;
                self.offset = *offset;
                self.duration = *duration;
            }
            ControlMessage::Stop(when) => self.stop_time = *when,
            ControlMessage::Loop(is_looping) => self.loop_state.is_looping = *is_looping,
            ControlMessage::LoopStart(loop_start) => self.loop_state.start = *loop_start,
            ControlMessage::LoopEnd(loop_end) => self.loop_state.end = *loop_end,
        }
    }
}
impl AudioProcessor for AudioBufferSourceRenderer {
    fn process(
        &mut self,
        _inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues<'_>,
        scope: &RenderScope,
    ) -> bool {
        let output = &mut outputs[0];
        let sample_rate = scope.sample_rate as f64;
        let dt = 1. / sample_rate;
        let block_duration = dt * RENDER_QUANTUM_SIZE as f64;
        let next_block_time = scope.current_time + block_duration;
        let LoopState {
            is_looping,
            start: loop_start,
            end: loop_end,
        } = self.loop_state.clone();
        let mut actual_loop_start = 0.;
        let mut actual_loop_end = 0.;
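        // playback starts after the current render quantum: output silence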
        if self.start_time >= next_block_time {
            output.make_silent();
            return true;
        }
        let buffer = match &self.buffer {
            None => {
                output.make_silent();
                return true;
            }
            Some(b) => b,
        };
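        // both params are constrained to k-rate, so only the first value of the
        // block applies; computedPlaybackRate = playbackRate * 2^(detune / 1200)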
        let detune = params.get(&self.detune)[0];
        let playback_rate = params.get(&self.playback_rate)[0];
        let computed_playback_rate = (playback_rate * (detune / 1200.).exp2()) as f64;
        let buffer_duration = buffer.duration();
        let sampling_ratio = buffer.sample_rate() as f64 / sample_rate;
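        // the source reached its scheduled stop time or exhausted its requested
        // duration: output silence and fire the `ended` event exactly once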
        if scope.current_time >= self.stop_time
            || self.render_state.buffer_time_elapsed >= self.duration
        {
            output.make_silent();
            if !self.ended_triggered {
                scope.send_ended_event();
                self.ended_triggered = true;
            }
            return false;
        }
        let mut buffer_time = self.render_state.buffer_time.load(Ordering::Relaxed);
        if !is_looping {
            if computed_playback_rate > 0. && buffer_time >= buffer_duration {
                output.make_silent();
                if !self.ended_triggered {
                    scope.send_ended_event();
                    self.ended_triggered = true;
                }
                return false;
            }
            if computed_playback_rate < 0. && buffer_time < 0. {
                output.make_silent();
                if !self.ended_triggered {
                    scope.send_ended_event();
                    self.ended_triggered = true;
                }
                return false;
            }
        }
        output.set_number_of_channels(buffer.number_of_channels());
        let mut current_time = scope.current_time;
        if !self.render_state.started && self.start_time < current_time {
            self.start_time = current_time;
        }
        if self.start_time == current_time && self.offset == 0. {
            self.render_state.is_aligned = true;
        }
        if sampling_ratio != 1. || computed_playback_rate != 1. {
            self.render_state.is_aligned = false;
        }
        if loop_start != 0. || (loop_end != 0. && loop_end != self.duration) {
            self.render_state.is_aligned = false;
        }
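        // "fast track": the playhead is frame-aligned with the output and no
        // rate conversion applies, so buffer frames are copied straight to the
        // output without interpolation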
        if self.render_state.is_aligned {
            if self.start_time == current_time {
                self.render_state.started = true;
            }
            if buffer_time + block_duration > buffer_duration
                || buffer_time + block_duration > self.duration
                || current_time + block_duration > self.stop_time
            {
                let end_index = if current_time + block_duration > self.stop_time
                    || buffer_time + block_duration > self.duration
                {
                    let dt = (self.stop_time - current_time).min(self.duration - buffer_time);
                    let end_buffer_time = buffer_time + dt;
                    (end_buffer_time * sample_rate).round() as usize
                } else {
                    buffer.length()
                };
                let mut loop_point_index: Option<usize> = None;
                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        let buffer_channel = buffer_channel.as_slice();
                        let mut start_index = (buffer_time * sample_rate).round() as usize;
                        let mut offset = 0;
                        for (index, o) in output_channel.iter_mut().enumerate() {
                            let mut buffer_index = start_index + index - offset;
                            *o = if buffer_index < end_index {
                                buffer_channel[buffer_index]
                            } else {
                                if is_looping && buffer_index == end_index {
                                    loop_point_index = Some(index);
                                    start_index = 0;
                                    offset = index;
                                    buffer_index = 0;
                                }
                                if is_looping {
                                    buffer_channel[buffer_index]
                                } else {
                                    0.
                                }
                            };
                        }
                    });
                if let Some(loop_point_index) = loop_point_index {
                    buffer_time = ((RENDER_QUANTUM_SIZE - loop_point_index) as f64 / sample_rate)
                        % buffer_duration;
                } else {
                    buffer_time += block_duration;
                }
            } else {
                let start_index = (buffer_time * sample_rate).round() as usize;
                let end_index = start_index + RENDER_QUANTUM_SIZE;
                buffer
                    .channels()
                    .iter()
                    .zip(output.channels_mut().iter_mut())
                    .for_each(|(buffer_channel, output_channel)| {
                        let buffer_channel = buffer_channel.as_slice();
                        output_channel.copy_from_slice(&buffer_channel[start_index..end_index]);
                    });
                buffer_time += block_duration;
            }
            self.render_state
                .buffer_time
                .store(buffer_time, Ordering::Relaxed);
            self.render_state.buffer_time_elapsed += block_duration;
            return true;
        }
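        // "slow track": clamp the loop points, then compute a playback position
        // per output frame and linearly interpolate between adjacent buffer frames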
        if is_looping {
            if loop_start >= 0. && loop_end > 0. && loop_start < loop_end {
                actual_loop_start = loop_start;
                actual_loop_end = loop_end.min(buffer_duration);
            } else {
                actual_loop_start = 0.;
                actual_loop_end = buffer_duration;
            }
        } else {
            self.render_state.entered_loop = false;
        }
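        // first pass: advance the playhead frame by frame, recording for each
        // output frame the underlying buffer frame index and the interpolation
        // weight `k`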
        let mut playback_infos = [None; RENDER_QUANTUM_SIZE];
        for playback_info in playback_infos.iter_mut() {
            if current_time < self.start_time
                || current_time >= self.stop_time
                || self.render_state.buffer_time_elapsed >= self.duration
            {
                *playback_info = None;
                current_time += dt;
                continue;
            }
            if !self.render_state.started {
                self.offset += current_time - self.start_time;
                if is_looping && computed_playback_rate >= 0. && self.offset >= actual_loop_end {
                    self.offset = actual_loop_end;
                }
                if is_looping && computed_playback_rate < 0. && self.offset < actual_loop_start {
                    self.offset = actual_loop_start;
                }
                buffer_time = self.offset;
                self.render_state.started = true;
            }
            if is_looping {
                if !self.render_state.entered_loop {
                    if self.offset < actual_loop_end && buffer_time >= actual_loop_start {
                        self.render_state.entered_loop = true;
                    }
                    if self.offset >= actual_loop_end && buffer_time < actual_loop_end {
                        self.render_state.entered_loop = true;
                    }
                }
                if self.render_state.entered_loop {
                    while buffer_time >= actual_loop_end {
                        buffer_time -= actual_loop_end - actual_loop_start;
                    }
                    while buffer_time < actual_loop_start {
                        buffer_time += actual_loop_end - actual_loop_start;
                    }
                }
            }
            if buffer_time >= 0. && buffer_time < buffer_duration {
                let position = buffer_time * sampling_ratio;
                let playhead = position * sample_rate;
                let playhead_floored = playhead.floor();
                let prev_frame_index = playhead_floored as usize;
                let k = (playhead - playhead_floored) as f32;
                *playback_info = Some(PlaybackInfo {
                    prev_frame_index,
                    k,
                });
            } else {
                *playback_info = None;
            }
            let time_incr = dt * computed_playback_rate;
            buffer_time += time_incr;
            self.render_state.buffer_time_elapsed += time_incr;
            current_time += dt;
        }
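        // second pass: render each channel from the recorded playback infos;
        // frames beyond the buffer or before the start read as silence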
        buffer
            .channels()
            .iter()
            .zip(output.channels_mut().iter_mut())
            .for_each(|(buffer_channel, output_channel)| {
                let buffer_channel = buffer_channel.as_slice();
                playback_infos
                    .iter()
                    .zip(output_channel.iter_mut())
                    .for_each(|(playhead, o)| {
                        *o = match playhead {
                            Some(PlaybackInfo {
                                prev_frame_index,
                                k,
                            }) => {
                                let prev_sample = buffer_channel[*prev_frame_index];
                                let next_sample = match buffer_channel.get(prev_frame_index + 1) {
                                    Some(val) => *val,
                                    None => 0.,
                                };
                                (1. - k).mul_add(prev_sample, k * next_sample)
                            }
                            None => 0.,
                        };
                    });
            });
        self.render_state
            .buffer_time
            .store(buffer_time, Ordering::Relaxed);
        true
    }
    fn onmessage(&mut self, msg: &mut dyn Any) {
        if let Some(control) = msg.downcast_ref::<ControlMessage>() {
            self.handle_control_message(control);
            return;
        };
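        // take ownership of the incoming buffer by swapping it out of the
        // message, so the previous contents travel back to the control thread
        // for deallocation instead of being dropped on the render thread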
        if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
            if let Some(current_buffer) = &mut self.buffer {
                std::mem::swap(current_buffer, buffer);
            } else {
                let tombstone_buffer = AudioBuffer {
                    channels: Default::default(),
                    sample_rate: Default::default(),
                };
                self.buffer = Some(std::mem::replace(buffer, tombstone_buffer));
            }
            return;
        };
        log::warn!("AudioBufferSourceRenderer: Dropping incoming message {msg:?}");
    }
}
#[cfg(test)]
mod tests {
    use float_eq::assert_float_eq;
    use std::f32::consts::PI;
    use crate::context::{BaseAudioContext, OfflineAudioContext};
    use crate::RENDER_QUANTUM_SIZE;
    use super::*;
    #[test]
    fn test_playing_some_file() {
        let context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);
        let file = std::fs::File::open("samples/sample.wav").unwrap();
        let expected = context.decode_audio_data_sync(file).unwrap();
        [44100, 48000].iter().for_each(|sr| {
            let decoding_context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, *sr as f32);
            let filename = format!("samples/sample-{sr}.wav");
            let file = std::fs::File::open(&filename).unwrap();
            let audio_buffer = decoding_context.decode_audio_data_sync(file).unwrap();
            assert_eq!(audio_buffer.sample_rate(), *sr as f32);
            let context = OfflineAudioContext::new(2, RENDER_QUANTUM_SIZE, 44_100.);
            let mut src = context.create_buffer_source();
            src.set_buffer(audio_buffer);
            src.connect(&context.destination());
            src.start_at(context.current_time());
            src.stop_at(context.current_time() + 128.);
            let res = context.start_rendering_sync();
            let diff_abs = if *sr == 44100 {
                0.
            } else {
                5e-3
            };
            assert_eq!(res.number_of_channels(), expected.number_of_channels());
            assert_float_eq!(
                res.channel_data(0).as_slice()[..],
                expected.get_channel_data(0)[0..128],
                abs_all <= diff_abs
            );
            assert_float_eq!(
                res.channel_data(1).as_slice()[..],
                expected.get_channel_data(1)[0..128],
                abs_all <= diff_abs
            );
        });
    }
    #[test]
    fn test_sub_quantum_start() {
        let sample_rate = 480000.;
        let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1. / sample_rate as f64);
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[1] = 1.;
        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
    #[test]
    fn test_sub_sample_start() {
        let sample_rate = 480000.;
        let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(1.5 / sample_rate as f64);
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[2] = 0.5;
        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
    #[test]
    fn test_sub_quantum_stop() {
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(0. / sample_rate as f64);
            src.stop_at(4. / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let expected = vec![0.; RENDER_QUANTUM_SIZE];
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(1. / sample_rate as f64);
            src.stop_at(5. / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let expected = vec![0.; RENDER_QUANTUM_SIZE];
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }
    #[test]
    fn test_sub_sample_stop() {
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(0. / sample_rate as f64);
            src.stop_at(4.5 / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
            expected[4] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at(1. / sample_rate as f64);
            src.stop_at(5.5 / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
            expected[5] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }
    #[test]
    fn test_schedule_in_the_past() {
        let sample_rate = 48000.;
        let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
        let mut dirac = context.create_buffer(1, 1, sample_rate);
        dirac.copy_to_channel(&[1.], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at(-1.);
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[0] = 1.;
        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
    #[test]
    fn test_audio_buffer_resampling() {
        [22500, 38000, 48000, 96000].iter().for_each(|sr| {
            let base_sr = 44100;
            let context = OfflineAudioContext::new(1, base_sr, base_sr as f32);
            let buf_sr = *sr;
            let sample_rate = buf_sr as f32;
            let mut buffer = context.create_buffer(1, buf_sr, sample_rate);
            let mut sine = vec![];
            for i in 0..buf_sr {
                let phase = i as f32 / buf_sr as f32 * 2. * PI;
                let sample = phase.sin();
                sine.push(sample);
            }
            buffer.copy_to_channel(&sine[..], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(buffer);
            src.start_at(0. / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![];
            for i in 0..base_sr {
                let phase = i as f32 / base_sr as f32 * 2. * PI;
                let sample = phase.sin();
                expected.push(sample);
            }
            assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
        });
    }
    #[test]
    fn test_playback_rate() {
        let sample_rate = 44100;
        let context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);
        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }
        buffer.copy_to_channel(&sine[..], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.playback_rate().set_value(0.5);
        src.start();
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![];
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }
        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }
    #[test]
    fn test_detune() {
        let sample_rate = 44100;
        let context = OfflineAudioContext::new(1, sample_rate, sample_rate as f32);
        let mut buffer = context.create_buffer(1, sample_rate, sample_rate as f32);
        let mut sine = vec![];
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * 2. * PI;
            let sample = phase.sin();
            sine.push(sample);
        }
        buffer.copy_to_channel(&sine[..], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(buffer);
        src.detune().set_value(-1200.);
        src.start();
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![];
        for i in 0..sample_rate {
            let phase = i as f32 / sample_rate as f32 * PI;
            let sample = phase.sin();
            expected.push(sample);
        }
        assert_float_eq!(channel[..], expected[..], abs_all <= 1e-6);
    }
    #[test]
    fn test_end_of_file() {
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);
            let mut buffer = context.create_buffer(1, 129, sample_rate);
            let mut data = vec![0.; 129];
            data[0] = 1.;
            data[128] = 1.;
            buffer.copy_to_channel(&data, 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(buffer);
            src.start_at(0. / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE * 2];
            expected[0] = 1.;
            expected[128] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE * 2, sample_rate);
            let mut buffer = context.create_buffer(1, 129, sample_rate);
            let mut data = vec![0.; 129];
            data[0] = 1.;
            data[128] = 1.;
            buffer.copy_to_channel(&data, 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(buffer);
            src.start_at(1. / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE * 2];
            expected[1] = 1.;
            expected[129] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }
    #[test]
    fn test_with_duration() {
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at_with_offset_and_duration(0., 0., 4.5 / sample_rate as f64);
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
            expected[4] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
        {
            let sample_rate = 480000.;
            let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
            let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
            dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_buffer(dirac);
            src.start_at_with_offset_and_duration(
                1. / sample_rate as f64,
                0. / sample_rate as f64,
                4.5 / sample_rate as f64,
            );
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
            expected[5] = 1.;
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }
    #[test]
    fn test_with_offset() {
        let sample_rate = 480000.;
        let context = OfflineAudioContext::new(1, RENDER_QUANTUM_SIZE, sample_rate);
        let mut dirac = context.create_buffer(1, RENDER_QUANTUM_SIZE, sample_rate);
        dirac.copy_to_channel(&[0., 0., 0., 0., 1., 1.], 0);
        let mut src = context.create_buffer_source();
        src.connect(&context.destination());
        src.set_buffer(dirac);
        src.start_at_with_offset_and_duration(
            0. / sample_rate as f64,
            1. / sample_rate as f64,
            3.5 / sample_rate as f64,
        );
        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);
        let mut expected = vec![0.; RENDER_QUANTUM_SIZE];
        expected[3] = 1.;
        assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
    }
    #[test]
    fn test_fast_track_loop_mono() {
        let sample_rate = 480000.;
        let len = RENDER_QUANTUM_SIZE * 4;
        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let context = OfflineAudioContext::new(1, len, sample_rate);
            let mut dirac = context.create_buffer(1, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start();
            let result = context.start_rendering_sync();
            let channel = result.get_channel_data(0);
            let mut expected = vec![0.; len];
            for i in (0..len).step_by(buffer_len) {
                expected[i] = 1.;
            }
            assert_float_eq!(channel[..], expected[..], abs_all <= 0.);
        }
    }
    #[test]
    fn test_fast_track_loop_stereo() {
        let sample_rate = 480000.;
        let len = RENDER_QUANTUM_SIZE * 4;
        for buffer_len in [
            RENDER_QUANTUM_SIZE / 2 - 1,
            RENDER_QUANTUM_SIZE / 2,
            RENDER_QUANTUM_SIZE / 2 + 1,
            RENDER_QUANTUM_SIZE - 1,
            RENDER_QUANTUM_SIZE,
            RENDER_QUANTUM_SIZE + 1,
            RENDER_QUANTUM_SIZE * 2 - 1,
            RENDER_QUANTUM_SIZE * 2,
            RENDER_QUANTUM_SIZE * 2 + 1,
        ] {
            let context = OfflineAudioContext::new(2, len, sample_rate);
            let mut dirac = context.create_buffer(2, buffer_len, sample_rate);
            dirac.copy_to_channel(&[1.], 0);
            dirac.copy_to_channel(&[0., 1.], 1);
            let mut src = context.create_buffer_source();
            src.connect(&context.destination());
            src.set_loop(true);
            src.set_buffer(dirac);
            src.start();
            let result = context.start_rendering_sync();
            let mut expected_left: Vec<f32> = vec![0.; len];
            let mut expected_right = vec![0.; len];
            for i in (0..len - 1).step_by(buffer_len) {
                expected_left[i] = 1.;
                expected_right[i + 1] = 1.;
            }
            assert_float_eq!(
                result.get_channel_data(0)[..],
                expected_left[..],
                abs_all <= 0.
            );
            assert_float_eq!(
                result.get_channel_data(1)[..],
                expected_right[..],
                abs_all <= 0.
            );
        }
    }
}