use std::time::Duration;

use rand::rngs::SmallRng;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};

use audio_garbage_collector::{make_shared, Shared};
use audio_processor_traits::parameters::{
    make_handle_ref, AudioProcessorHandleProvider, AudioProcessorHandleRef,
};
use audio_processor_traits::simple_processor::MonoAudioProcessor;
use audio_processor_traits::{AudioBuffer, AudioContext, AudioProcessor};
use augmented_dsp_filters::rbj::{FilterProcessor, FilterType};
use augmented_oscillator::Oscillator;
use generic_handle::GenericHandle;

use crate::MonoDelayProcessor;

use self::mix_matrix::{apply_householder, HadamardMatrix};

mod generic_handle; // `GenericHandle`: exposes the handle through the generic parameter API
mod mix_matrix; // Hadamard and Householder mixing matrices

/// Invert the polarity of every sample in the frame.
fn flip_polarities(frame: &mut [f32]) {
    for sample in frame {
        *sample = -*sample;
    }
}
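
/// Shared parameter handle for [`ModReverbProcessor`]. Currently has no parameters;
/// the reverb settings are hard-coded in `process`.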
pub struct ModReverbHandle {}
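
/// A modulated reverb built from a chain of 8-channel diffusers feeding a bank of
/// modulated feedback delay lines, with the wet signal low-pass filtered and mixed
/// back with the dry input.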
pub struct ModReverbProcessor {
    handle: Shared<ModReverbHandle>,
    /// Chain of 8-channel diffusion stages with geometrically increasing delay times
    diffusers: [Diffuser<8>; 6],
    /// Feedback delay lines that form the reverb tail
    delay: [MonoDelayProcessor<f32>; 8],
    /// Low-pass filters applied to the stereo wet output
    filter: [FilterProcessor<f32>; 2],
    /// LFO modulating the diffuser delay times
    diffuser_modulator: Oscillator<f32>,
    /// LFO modulating the tail delay time
    delay_modulator: Oscillator<f32>,
}
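
// Exposes the (currently empty) handle through the generic parameter-handle API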
impl AudioProcessorHandleProvider for ModReverbProcessor {
fn generic_handle(&self) -> AudioProcessorHandleRef {
make_handle_ref(GenericHandle(self.handle.clone()))
}
}

impl Default for ModReverbProcessor {
fn default() -> Self {
Self {
handle: make_shared(ModReverbHandle {}),
diffusers: [
Diffuser::default(),
Diffuser::default(),
Diffuser::default(),
Diffuser::default(),
Diffuser::default(),
Diffuser::default(),
],
delay: [
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
MonoDelayProcessor::default(),
],
filter: [
FilterProcessor::new(FilterType::LowPass),
FilterProcessor::new(FilterType::LowPass),
],
            // The oscillator sample rates are placeholders; `prepare` overwrites them
            diffuser_modulator: Oscillator::sine(44100.0),
            delay_modulator: Oscillator::sine(44100.0),
}
}
}

impl AudioProcessor for ModReverbProcessor {
type SampleType = f32;
fn prepare(&mut self, context: &mut AudioContext) {
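        // Each diffuser stage gets twice the maximum delay time of the previous one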
let mut max_delay_time = 0.5 / (self.diffusers.len() as f32).powf(2.0);
for diffuser in self.diffusers.iter_mut() {
diffuser.max_delay_time = Duration::from_secs_f32(max_delay_time);
diffuser.prepare(context);
max_delay_time *= 2.0;
}
for delay in &mut self.delay {
delay.m_prepare(context);
delay.handle().set_delay_time_secs(0.2);
}
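
        // Slow LFOs that modulate the diffuser and tail delay times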
self.diffuser_modulator
.set_sample_rate(context.settings.sample_rate());
self.diffuser_modulator.set_frequency(1.0);
self.delay_modulator
.set_sample_rate(context.settings.sample_rate());
self.delay_modulator.set_frequency(0.3);
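
        // Low-pass filters that damp the wet reverb output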
for filter in &mut self.filter {
filter.m_prepare(context);
filter.set_q(1.0);
filter.set_cutoff(800.0);
}
}
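
    // Per-sample signal flow:
    //   stereo input -> 8-channel frame -> diffuser chain -> feedback delay bank
    //   (with Householder feedback mixing) -> stereo downmix -> low-pass -> dry/wet mix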
fn process(&mut self, context: &mut AudioContext, data: &mut AudioBuffer<Self::SampleType>) {
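        // Parameters are currently hard-coded rather than exposed on the handle;
        // diffuser modulation is effectively disabled (its amount is 0.0)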
let delay_feedback = 0.9;
let delay_volume = 0.5;
let delay_time = 0.15;
let reverb_volume = 0.5;
let delay_modulated_amount = 0.0005;
let diffuser_modulated_amount = 0.0;
for sample_num in 0..data.num_samples() {
            // Modulate the diffuser delay times around their base values
            let diffuser_modulation = self.diffuser_modulator.next_sample();
            let diffuser_modulation = 1.0 + diffuser_modulation * diffuser_modulated_amount;
            for diffuser in self.diffusers.iter_mut() {
                diffuser.set_delay_mult(diffuser_modulation);
            }
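
            // Modulate the tail delay time around its base value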
let delay_modulation = self.delay_modulator.next_sample();
let delay_modulation = 1.0 + delay_modulation * delay_modulated_amount;
let delay_duration = delay_time * delay_modulation;
for delay in &mut self.delay {
delay.handle().set_delay_time_secs(delay_duration);
}
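
            // Spread the stereo input across 8 channels and run it through the diffuser chain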
let left = data.channel(0)[sample_num];
let right = data.channel(1)[sample_num];
let mut frame8 = [left, right, left, right, left, right, left, right];
for diffuser in &mut self.diffusers {
diffuser.process(context, &mut frame8);
}
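
            // Read the feedback delay bank and mix its outputs with a Householder matrix
            // so energy spreads across all feedback paths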
let mut delayed = [0.0; 8];
for (delay, delay_output) in self.delay.iter_mut().zip(&mut delayed) {
*delay_output = delay.read();
}
apply_householder(&mut delayed);
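
            // Feed the diffused signal plus scaled feedback back into the delays and
            // add the delay output to the frame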
for ((sample, delay), delay_output) in
frame8.iter_mut().zip(&mut self.delay).zip(delayed)
{
delay.write(*sample + delay_output * delay_feedback);
*sample += delay_output * delay_volume;
}
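
            // Fold the 8 channels down to stereo, low-pass the wet signal and
            // blend it with the dry input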
let scale = 1.0 / (self.diffusers.len() as f32);
let mut reverb_output = [
(frame8[0] + frame8[2] + frame8[4] + frame8[6]) * scale * reverb_volume,
(frame8[1] + frame8[3] + frame8[5] + frame8[7]) * scale * reverb_volume,
];
reverb_output[0] = self.filter[0].m_process(context, reverb_output[0]);
reverb_output[1] = self.filter[1].m_process(context, reverb_output[1]);
data.channel_mut(0)[sample_num] = reverb_output[0] + left * (1.0 - reverb_volume);
data.channel_mut(1)[sample_num] = reverb_output[1] + right * (1.0 - reverb_volume);
}
}
}
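
/// A single diffusion stage: `CHANNELS` parallel delay lines with randomised delay
/// times, followed by a polarity flip and a Hadamard mixing matrix.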
struct Diffuser<const CHANNELS: usize> {
rng: SmallRng,
max_delay_time: Duration,
    /// Shuffled channel indices; computed in `new` but not currently used during processing
    #[allow(dead_code)]
    shuffle_positions: [usize; CHANNELS],
mono_delay_processors: [MonoDelayProcessor<f32>; CHANNELS],
delay_times: [f32; CHANNELS],
hadamard_matrix: HadamardMatrix<CHANNELS>,
}
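
// The `[[f32; CHANNELS]; CHANNELS]: Default` bounds below are presumably required by
// `HadamardMatrix::<CHANNELS>::new`.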
impl<const CHANNELS: usize> Default for Diffuser<CHANNELS>
where
[[f32; CHANNELS]; CHANNELS]: Default,
{
fn default() -> Self {
let rng = SmallRng::from_entropy();
Self::new(rng)
}
}

impl<const CHANNELS: usize> Diffuser<CHANNELS>
where
[[f32; CHANNELS]; CHANNELS]: Default,
{
    fn new(mut rng: SmallRng) -> Self {
        // Build a randomly shuffled permutation of the channel indices
        let mut shuffle_positions: [usize; CHANNELS] = [0; CHANNELS];
        for (i, shuffle_pos) in shuffle_positions.iter_mut().enumerate() {
            *shuffle_pos = i;
        }
        shuffle_positions.shuffle(&mut rng);

        let mono_delay_processors = [(); CHANNELS].map(|_| MonoDelayProcessor::default());
Self {
rng,
shuffle_positions,
max_delay_time: Duration::from_secs_f32(0.0_f32),
mono_delay_processors,
delay_times: [0.0; CHANNELS],
hadamard_matrix: HadamardMatrix::new(),
}
    }

    fn prepare(&mut self, context: &mut AudioContext) {
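        // Split the available delay range into evenly spaced slots and hand one slot
        // to each delay line at random, without replacement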
let max_delay = self.max_delay_time.as_secs_f32();
let mut slots: Vec<f32> = (0..self.mono_delay_processors.len())
.map(|i| 0.003 + i as f32 * (max_delay / (self.mono_delay_processors.len() as f32)))
.collect();
for (d, delay_time) in self
.mono_delay_processors
.iter_mut()
.zip(&mut self.delay_times)
{
d.m_prepare(context);
let index = self.rng.gen_range(0..slots.len());
*delay_time = slots[index];
slots.remove(index);
d.handle().set_delay_time_secs(*delay_time);
d.handle().set_feedback(0.0);
}
}
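
    /// Scale each delay line's base delay time by `mult` (used for LFO modulation).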
fn set_delay_mult(&mut self, mult: f32) {
for (delay, delay_basis) in self.mono_delay_processors.iter_mut().zip(&self.delay_times) {
delay.handle().set_delay_time_secs(*delay_basis * mult);
}
    }

    fn process(&mut self, context: &mut AudioContext, frame: &mut [f32; CHANNELS]) {
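        // Delay each channel independently, then flip polarities and mix the
        // channels with a Hadamard matrix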
for (sample, delay_processor) in frame.iter_mut().zip(&mut self.mono_delay_processors) {
*sample = delay_processor.m_process(context, *sample);
}
flip_polarities(frame);
self.hadamard_matrix.apply(frame);
}
}

#[cfg(test)]
mod test {
    use assert_no_alloc::assert_no_alloc;
    use audio_processor_traits::AudioProcessorSettings;

    use super::*;

#[test]
fn test_no_alloc_diffuser() {
let mut diffuser = Diffuser::<8>::default();
let mut settings = AudioProcessorSettings::default();
settings.input_channels = 8;
settings.output_channels = 8;
let mut context = AudioContext::from(settings);
diffuser.prepare(&mut context);
let mut frame = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
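        // Processing a frame must not allocate on the audio thread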
assert_no_alloc(|| {
diffuser.process(&mut context, &mut frame);
});
}
}