// audio_processor_time/reverb/mod_reverb/mod.rs
use std::time::Duration;

use rand::rngs::SmallRng;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};

use audio_garbage_collector::{make_shared, Shared};
use audio_processor_traits::parameters::{
    make_handle_ref, AudioProcessorHandleProvider, AudioProcessorHandleRef,
};
use audio_processor_traits::simple_processor::MonoAudioProcessor;
use audio_processor_traits::{AudioBuffer, AudioContext, AudioProcessor};
use augmented_dsp_filters::rbj::{FilterProcessor, FilterType};
use augmented_oscillator::Oscillator;
use generic_handle::GenericHandle;

use crate::MonoDelayProcessor;

use self::mix_matrix::{apply_householder, HadamardMatrix};

mod generic_handle;
mod mix_matrix;

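/// Inverts the polarity of every sample in the frame. Used inside each diffusion
/// step, between the per-channel delays and the Hadamard mix.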
fn flip_polarities(frame: &mut [f32]) {
    for sample in frame {
        *sample = -*sample
    }
}

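/// Parameter handle for [`ModReverbProcessor`]. Currently empty; it is exposed to
/// hosts through the `GenericHandle` wrapper from the `generic_handle` module.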
pub struct ModReverbHandle {}

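/// Stereo modulated reverb. Each input frame is spread over 8 channels and run
/// through a chain of six 8-channel diffusers, an 8-line feedback delay mixed with
/// a Householder matrix, and a low-pass filter per output channel; two sine LFOs
/// modulate the diffuser and feedback-delay times.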
pub struct ModReverbProcessor {
    handle: Shared<ModReverbHandle>,
    diffusers: [Diffuser<8>; 6],
    delay: [MonoDelayProcessor<f32>; 8],
    filter: [FilterProcessor<f32>; 2],
    diffuser_modulator: Oscillator<f32>,
    delay_modulator: Oscillator<f32>,
}

impl AudioProcessorHandleProvider for ModReverbProcessor {
    fn generic_handle(&self) -> AudioProcessorHandleRef {
        make_handle_ref(GenericHandle(self.handle.clone()))
    }
}

impl Default for ModReverbProcessor {
    fn default() -> Self {
        Self {
            handle: make_shared(ModReverbHandle {}),
            diffusers: [
                Diffuser::default(),
                Diffuser::default(),
                Diffuser::default(),
                Diffuser::default(),
                Diffuser::default(),
                Diffuser::default(),
            ],
            delay: [
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
                MonoDelayProcessor::default(),
            ],
            filter: [
                FilterProcessor::new(FilterType::LowPass),
                FilterProcessor::new(FilterType::LowPass),
            ],
            diffuser_modulator: Oscillator::sine(44100.0),
            delay_modulator: Oscillator::sine(44100.0),
        }
    }
}

impl AudioProcessor for ModReverbProcessor {
    type SampleType = f32;

    fn prepare(&mut self, context: &mut AudioContext) {
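        // Each diffusion step gets twice the maximum delay time of the previous
        // one, starting from 0.5 / len^2 seconds for the first step.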
        let mut max_delay_time = 0.5 / (self.diffusers.len() as f32).powf(2.0);
        for diffuser in self.diffusers.iter_mut() {
            diffuser.max_delay_time = Duration::from_secs_f32(max_delay_time);
            diffuser.prepare(context);
            max_delay_time *= 2.0;
        }

        for delay in &mut self.delay {
            delay.m_prepare(context);
            delay.handle().set_delay_time_secs(0.2);
        }

        self.diffuser_modulator
            .set_sample_rate(context.settings.sample_rate());
        self.diffuser_modulator.set_frequency(1.0);
        self.delay_modulator
            .set_sample_rate(context.settings.sample_rate());
        self.delay_modulator.set_frequency(0.3);

        for filter in &mut self.filter {
            filter.m_prepare(context);
            filter.set_q(1.0);
            filter.set_cutoff(800.0);
        }
    }

    fn process(&mut self, context: &mut AudioContext, data: &mut AudioBuffer<Self::SampleType>) {
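        // Hard-coded mix and modulation parameters; none of these are exposed on
        // the (currently empty) handle.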
        let delay_feedback = 0.9;
        let delay_volume = 0.5;
        let delay_time = 0.15;
        let reverb_volume = 0.5;
        let delay_modulated_amount = 0.0005;
        let diffuser_modulated_amount = 0.0;

        for sample_num in 0..data.num_samples() {
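            // Scale the diffuser delay times by an LFO-driven factor around 1.0.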
            let diffuser_modulation = self.diffuser_modulator.next_sample();
            let diffuser_modulation = 1.0 + diffuser_modulation * diffuser_modulated_amount;
            for diffuser in self.diffusers.iter_mut() {
                diffuser.set_delay_mult(diffuser_modulation);
            }
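            // Modulate the feedback-delay time around its base value with the
            // second LFO.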
            let delay_modulation = self.delay_modulator.next_sample();
            let delay_modulation = 1.0 + delay_modulation * delay_modulated_amount;
            let delay_duration = delay_time * delay_modulation;
            for delay in &mut self.delay {
                delay.handle().set_delay_time_secs(delay_duration);
            }

            let left = data.channel(0)[sample_num];
            let right = data.channel(1)[sample_num];

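            // Spread the stereo input across the 8 diffusion channels and run it
            // through the diffuser chain.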
            let mut frame8 = [left, right, left, right, left, right, left, right];

            for diffuser in &mut self.diffusers {
                diffuser.process(context, &mut frame8);
            }

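            // Late reverb: read the feedback delay lines, mix them with a
            // Householder matrix, write the diffused signal plus feedback back
            // in, and add the delay output to the frame.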
            let mut delayed = [0.0; 8];
            for (delay, delay_output) in self.delay.iter_mut().zip(&mut delayed) {
                *delay_output = delay.read();
            }

            apply_householder(&mut delayed);

            for ((sample, delay), delay_output) in
                frame8.iter_mut().zip(&mut self.delay).zip(delayed)
            {
                delay.write(*sample + delay_output * delay_feedback);
                *sample += delay_output * delay_volume;
            }

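            // Downmix the 8 channels back to stereo, low-pass the wet signal and
            // blend it with the dry input.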
            let scale = 1.0 / (self.diffusers.len() as f32);
            let mut reverb_output = [
                (frame8[0] + frame8[2] + frame8[4] + frame8[6]) * scale * reverb_volume,
                (frame8[1] + frame8[3] + frame8[5] + frame8[7]) * scale * reverb_volume,
            ];
            reverb_output[0] = self.filter[0].m_process(context, reverb_output[0]);
            reverb_output[1] = self.filter[1].m_process(context, reverb_output[1]);

            data.channel_mut(0)[sample_num] = reverb_output[0] + left * (1.0 - reverb_volume);
            data.channel_mut(1)[sample_num] = reverb_output[1] + right * (1.0 - reverb_volume);
        }
    }
}

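/// A single diffusion step: `CHANNELS` parallel delay lines with randomized delay
/// times, followed by a polarity flip and a Hadamard mix.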
struct Diffuser<const CHANNELS: usize> {
    rng: SmallRng,
    max_delay_time: Duration,
    #[allow(dead_code)]
    shuffle_positions: [usize; CHANNELS],
    mono_delay_processors: [MonoDelayProcessor<f32>; CHANNELS],
    delay_times: [f32; CHANNELS],
    hadamard_matrix: HadamardMatrix<CHANNELS>,
}

impl<const CHANNELS: usize> Default for Diffuser<CHANNELS>
where
    [[f32; CHANNELS]; CHANNELS]: Default,
{
    fn default() -> Self {
        let rng = SmallRng::from_entropy();
        Self::new(rng)
    }
}

impl<const CHANNELS: usize> Diffuser<CHANNELS>
where
    [[f32; CHANNELS]; CHANNELS]: Default,
{
    fn new(mut rng: SmallRng) -> Self {
        let mut shuffle_positions: [usize; CHANNELS] = [0; CHANNELS];
        for (i, shuffle_pos) in shuffle_positions.iter_mut().enumerate().take(CHANNELS) {
            *shuffle_pos = i;
        }
        shuffle_positions.shuffle(&mut rng);

        let mono_delay_processors = [(); CHANNELS].map(|_| MonoDelayProcessor::default());

        Self {
            rng,
            shuffle_positions,
            max_delay_time: Duration::from_secs_f32(0.0_f32),
            mono_delay_processors,
            delay_times: [0.0; CHANNELS],
            hadamard_matrix: HadamardMatrix::new(),
        }
    }

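    /// Spreads delay times evenly between 3ms and roughly the configured maximum,
    /// then assigns one slot to each delay line in random order, with no feedback.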
    fn prepare(&mut self, context: &mut AudioContext) {
        let max_delay = self.max_delay_time.as_secs_f32();
        let mut slots: Vec<f32> = (0..self.mono_delay_processors.len())
            .map(|i| 0.003 + i as f32 * (max_delay / (self.mono_delay_processors.len() as f32)))
            .collect();

        for (d, delay_time) in self
            .mono_delay_processors
            .iter_mut()
            .zip(&mut self.delay_times)
        {
            d.m_prepare(context);
            let index = self.rng.gen_range(0..slots.len());
            *delay_time = slots[index];
            slots.remove(index);
            d.handle().set_delay_time_secs(*delay_time);
            d.handle().set_feedback(0.0);
        }
    }

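    /// Scales every delay line's base delay time by `mult` (used for LFO modulation).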
    fn set_delay_mult(&mut self, mult: f32) {
        for (delay, delay_basis) in self.mono_delay_processors.iter_mut().zip(&self.delay_times) {
            delay.handle().set_delay_time_secs(*delay_basis * mult);
        }
    }

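    /// Processes one frame in place: per-channel delay, polarity flip, Hadamard mix.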
    fn process(&mut self, context: &mut AudioContext, frame: &mut [f32; CHANNELS]) {
        for (sample, delay_processor) in frame.iter_mut().zip(&mut self.mono_delay_processors) {
            *sample = delay_processor.m_process(context, *sample);
        }
        flip_polarities(frame);
        self.hadamard_matrix.apply(frame);
    }
}

#[cfg(test)]
mod test {
    use assert_no_alloc::assert_no_alloc;
    use audio_processor_traits::AudioProcessorSettings;

    use super::*;

    #[test]
    fn test_no_alloc_diffuser() {
        let mut diffuser = Diffuser::<8>::default();
        let mut settings = AudioProcessorSettings::default();
        settings.input_channels = 8;
        settings.output_channels = 8;
        let mut context = AudioContext::from(settings);
        diffuser.prepare(&mut context);

        let mut frame = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
        assert_no_alloc(|| {
            diffuser.process(&mut context, &mut frame);
        });
    }
}