1use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelCountMode, ChannelInterpretation};
2use crate::context::{AudioContextRegistration, BaseAudioContext};
3use crate::events::{AudioProcessingEvent, EventHandler, EventPayload, EventType};
4use crate::render::{
5 AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
6};
7use crate::{AudioBuffer, RENDER_QUANTUM_SIZE};
8
9use std::any::Any;
10
/// Options for constructing a [`ScriptProcessorNode`]
#[derive(Clone, Debug)]
pub struct ScriptProcessorOptions {
    // Size in sample-frames of the processing buffer; validated at
    // construction to be one of 256, 512, 1024, 2048, 4096, 8192, 16384
    pub buffer_size: usize,
    // Number of input channels; may be zero, but not together with
    // `number_of_output_channels` (checked in `ScriptProcessorNode::new`)
    pub number_of_input_channels: usize,
    // Number of output channels; may be zero, but not together with
    // `number_of_input_channels` (checked in `ScriptProcessorNode::new`)
    pub number_of_output_channels: usize,
}
18
/// Audio node that buffers its input into fixed-size chunks and dispatches
/// `AudioProcessing` events so user code can process or synthesize audio
/// via an `onaudioprocess` callback.
#[derive(Debug)]
pub struct ScriptProcessorNode {
    // Handle tying this node to its audio context / render thread
    registration: AudioContextRegistration,
    // Channel count / mode / interpretation settings (fixed to 'explicit' mode)
    channel_config: ChannelConfig,
    // Processing buffer size in sample-frames, set at construction
    buffer_size: usize,
}
26
impl AudioNode for ScriptProcessorNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    // A ScriptProcessorNode always has exactly one input ...
    fn number_of_inputs(&self) -> usize {
        1
    }

    // ... and exactly one output
    fn number_of_outputs(&self) -> usize {
        1
    }

    // The channel count mode is fixed at 'explicit'; attempting to set any
    // other value panics (NotSupportedError).
    fn set_channel_count_mode(&self, mode: ChannelCountMode) {
        assert_eq!(
            mode,
            ChannelCountMode::Explicit,
            "NotSupportedError - ScriptProcessorNode channel count mode must be 'explicit'",
        );
        self.channel_config
            .set_count_mode(mode, self.registration());
    }

    // The channel count is fixed at construction time; setting a different
    // value panics (NotSupportedError). Setting the same value is a no-op
    // that still notifies the registration.
    fn set_channel_count(&self, count: usize) {
        assert_eq!(
            count,
            self.channel_config.count(),
            "NotSupportedError - ScriptProcessorNode channel count must equal numberOfInputChannels"
        );
        self.channel_config.set_count(count, self.registration());
    }
}
63
64impl ScriptProcessorNode {
65 pub fn new<C: BaseAudioContext>(context: &C, options: ScriptProcessorOptions) -> Self {
79 let ScriptProcessorOptions {
80 buffer_size,
81 number_of_input_channels,
82 number_of_output_channels,
83 } = options;
84
85 assert!(
86 (buffer_size / 256).is_power_of_two() && buffer_size <= 16384,
87 "IndexSizeError - bufferSize must be one of: 256, 512, 1024, 2048, 4096, 8192, 16384",
88 );
89
90 match (number_of_input_channels, number_of_output_channels) {
91 (0, 0) => panic!("IndexSizeError - numberOfInputChannels and numberOfOutputChannels cannot both be zero"),
92 (0, c) | (c, 0) => crate::assert_valid_number_of_channels(c),
93 (c, d) => {
94 crate::assert_valid_number_of_channels(c);
95 crate::assert_valid_number_of_channels(d);
96 }
97 };
98
99 context.base().register(move |registration| {
100 let number_of_quanta = buffer_size / RENDER_QUANTUM_SIZE;
101 let render = ScriptProcessorRenderer {
102 input_buffer: Vec::with_capacity(number_of_quanta),
103 output_buffer: Vec::with_capacity(number_of_quanta),
104 next_output_buffer: Vec::with_capacity(number_of_quanta),
105 buffer_size,
106 number_of_output_channels,
107 };
108
109 let upmix_input_channels = if number_of_input_channels == 0 {
110 1 } else {
112 number_of_input_channels
113 };
114 let audio_node_options = AudioNodeOptions {
115 channel_count: upmix_input_channels,
116 channel_count_mode: ChannelCountMode::Explicit,
117 channel_interpretation: ChannelInterpretation::Speakers,
118 };
119
120 let node = ScriptProcessorNode {
121 registration,
122 channel_config: audio_node_options.into(),
123 buffer_size,
124 };
125
126 (node, Box::new(render))
127 })
128 }
129
130 pub fn buffer_size(&self) -> usize {
131 self.buffer_size
132 }
133
134 pub fn set_onaudioprocess<F: FnMut(AudioProcessingEvent) + Send + 'static>(
146 &self,
147 mut callback: F,
148 ) {
149 let base = self.registration().context().clone();
151 let id = self.registration().id();
152
153 let callback = move |v| {
154 let mut payload = match v {
155 EventPayload::AudioProcessing(v) => v,
156 _ => unreachable!(),
157 };
158 payload.registration = Some((base.clone(), id));
159 callback(payload);
160 };
161
162 self.context().set_event_handler(
163 EventType::AudioProcessing(self.registration().id()),
164 EventHandler::Multiple(Box::new(callback)),
165 );
166 }
167
168 pub fn clear_onaudioprocess(&self) {
170 self.context()
171 .clear_event_handler(EventType::AudioProcessing(self.registration().id()));
172 }
173}
174
// Render-thread counterpart of ScriptProcessorNode: accumulates input quanta,
// emits AudioProcessing events, and plays back the processed output buffers.
struct ScriptProcessorRenderer {
    // quanta buffered from the input since the last dispatched event
    input_buffer: Vec<AudioRenderQuantum>,
    // quanta currently being played back, drained one per `process` call
    output_buffer: Vec<AudioRenderQuantum>,
    // quanta for the following buffer period, filled by `onmessage`
    next_output_buffer: Vec<AudioRenderQuantum>,
    // script-processor buffer size in sample-frames
    buffer_size: usize,
    // channel count of the output buffers handed to the user callback
    number_of_output_channels: usize,
}
182
// SAFETY(review): the renderer is moved to the render thread once at
// registration time; its `AudioRenderQuantum` fields are presumably not `Send`
// on their own (hence the clippy allow), and this impl relies on them never
// being shared across threads afterwards — TODO confirm against
// `AudioRenderQuantum`'s internals.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for ScriptProcessorRenderer {}
188
189impl AudioProcessor for ScriptProcessorRenderer {
190 fn process(
191 &mut self,
192 inputs: &[AudioRenderQuantum],
193 outputs: &mut [AudioRenderQuantum],
194 _params: AudioParamValues<'_>,
195 scope: &AudioWorkletGlobalScope,
196 ) -> bool {
197 let input = &inputs[0];
199 let output = &mut outputs[0];
200
201 output.make_silent();
203 let silence = output.clone();
204
205 if !self.output_buffer.is_empty() {
207 *output = self.output_buffer.remove(0);
208 }
209
210 let number_of_quanta = self.input_buffer.capacity();
212 self.input_buffer.push(input.clone());
213
214 if self.input_buffer.len() == number_of_quanta {
216 let number_of_input_channels = self
218 .input_buffer
219 .iter()
220 .map(|i| i.number_of_channels())
221 .max()
222 .unwrap();
223 let mut input_samples = vec![vec![0.; self.buffer_size]; number_of_input_channels];
224 self.input_buffer.iter().enumerate().for_each(|(i, b)| {
225 let offset = RENDER_QUANTUM_SIZE * i;
226 b.channels()
227 .iter()
228 .zip(input_samples.iter_mut())
229 .for_each(|(c, o)| {
230 o[offset..(offset + RENDER_QUANTUM_SIZE)].copy_from_slice(c);
231 });
232 });
233 let input_buffer = AudioBuffer::from(input_samples, scope.sample_rate);
234
235 let output_samples = vec![vec![0.; self.buffer_size]; self.number_of_output_channels];
237 let output_buffer = AudioBuffer::from(output_samples, scope.sample_rate);
238
239 let playback_time =
241 scope.current_time + self.buffer_size as f64 / scope.sample_rate as f64;
242 scope.send_audio_processing_event(input_buffer, output_buffer, playback_time);
243
244 self.input_buffer.clear();
246
247 std::mem::swap(&mut self.output_buffer, &mut self.next_output_buffer);
249
250 let mut silent_quantum = silence;
252 silent_quantum.set_number_of_channels(self.number_of_output_channels);
253 self.next_output_buffer.clear();
254 self.next_output_buffer
255 .resize(number_of_quanta, silent_quantum);
256 }
257
258 false }
260
261 fn onmessage(&mut self, msg: &mut dyn Any) {
262 if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
263 buffer.channels().iter().enumerate().for_each(|(i, c)| {
264 c.as_slice()
265 .chunks(RENDER_QUANTUM_SIZE)
266 .zip(self.next_output_buffer.iter_mut())
267 .for_each(|(s, o)| o.channel_data_mut(i).copy_from_slice(s))
268 });
269 return;
270 };
271
272 log::warn!("ScriptProcessorRenderer: Dropping incoming message {msg:?}");
273 }
274}
275
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::OfflineAudioContext;
    use crate::node::scheduled_source::AudioScheduledSourceNode;
    use float_eq::assert_float_eq;

    #[test]
    fn test_constructor() {
        let mut context = OfflineAudioContext::new(2, 1024, 48000.);
        let node = context.create_script_processor(512, 1, 1);
        // re-applying the current settings is the only accepted mutation
        node.set_channel_count(1);
        node.set_channel_count_mode(ChannelCountMode::Explicit);
        node.connect(&context.destination());
        let _ = context.start_rendering_sync();
    }

    #[test]
    fn test_constructor_zero_inputs() {
        // zero input channels is allowed as long as outputs are non-zero
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 0, 1);
    }

    #[test]
    fn test_constructor_zero_outputs() {
        // zero output channels is allowed as long as inputs are non-zero
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 1, 0);
    }

    #[test]
    fn test_rendering() {
        const BUFFER_SIZE: usize = 256;

        let mut context = OfflineAudioContext::new(1, BUFFER_SIZE * 3, 48000.);

        // pure generator node: no inputs, one output channel
        let node = context.create_script_processor(BUFFER_SIZE, 0, 1);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            for sample in e.output_buffer.get_channel_data_mut(0).iter_mut() {
                *sample = 1.;
            }
        });

        let output = context.start_rendering_sync();
        let samples = output.get_channel_data(0);

        // the node introduces two buffers of latency, so the head is silent
        assert_float_eq!(
            samples[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // the generated signal shows up in the third buffer
        assert_float_eq!(
            samples[2 * BUFFER_SIZE..],
            &[1.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }

    #[test]
    fn test_multiple_channels() {
        const BUFFER_SIZE: usize = 256;

        let mut context = OfflineAudioContext::new(2, BUFFER_SIZE * 3, 48000.);

        // stereo in, stereo out, with a distinct gain per channel
        let node = context.create_script_processor(BUFFER_SIZE, 2, 2);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            for (ch, gain) in [(0usize, 2f32), (1, 3.)] {
                e.output_buffer
                    .get_channel_data_mut(ch)
                    .iter_mut()
                    .zip(e.input_buffer.get_channel_data(ch))
                    .for_each(|(o, i)| *o = *i * gain);
            }
        });

        // feed a constant 1.0 signal into both channels
        let mut src = context.create_constant_source();
        src.start();
        src.connect(&node);

        let output = context.start_rendering_sync();
        let left = output.get_channel_data(0);
        let right = output.get_channel_data(1);

        // two buffers of latency on both channels
        assert_float_eq!(
            left[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );
        assert_float_eq!(
            right[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // per-channel gains applied to the constant source afterwards
        assert_float_eq!(
            left[2 * BUFFER_SIZE..],
            &[2.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
        assert_float_eq!(
            right[2 * BUFFER_SIZE..],
            &[3.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }
}