// web_audio_api/node/script_processor.rs
1use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelCountMode, ChannelInterpretation};
2use crate::context::{AudioContextRegistration, BaseAudioContext};
3use crate::events::{AudioProcessingEvent, EventHandler, EventPayload, EventType};
4use crate::render::{
5    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
6};
7use crate::{AudioBuffer, RENDER_QUANTUM_SIZE};
8
9use std::any::Any;
10
/// Options for constructing an [`ScriptProcessorNode`]
#[derive(Clone, Debug)]
pub struct ScriptProcessorOptions {
    // Requested buffer size in sample-frames; 0 lets the implementation pick.
    // Valid values: 0, 256, 512, 1024, 2048, 4096, 8192, 16384.
    pub buffer_size: usize,
    // Channel count of the input; may be 0 for a synthesis-only node
    // (but not together with a zero output channel count).
    pub number_of_input_channels: usize,
    // Channel count of the output; may be 0 for an analysis-only node
    // (but not together with a zero input channel count).
    pub number_of_output_channels: usize,
}
18
/// An AudioNode which can generate, process, or analyse audio directly using a script (deprecated)
#[derive(Debug)]
pub struct ScriptProcessorNode {
    // Ties this node to its BaseAudioContext and identifies it on the render thread
    registration: AudioContextRegistration,
    // Channel settings; count mode is fixed to 'explicit' for this node type
    channel_config: ChannelConfig,
    // Effective buffer size in sample-frames (the 0 "pick for me" option is
    // resolved to a concrete value at construction time)
    buffer_size: usize,
}
26
impl AudioNode for ScriptProcessorNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    // Single input, single output node
    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        1
    }

    // The channel count mode of a ScriptProcessorNode is locked to 'explicit';
    // attempting to set any other mode is a NotSupportedError (panics here).
    fn set_channel_count_mode(&self, mode: ChannelCountMode) {
        assert_eq!(
            mode,
            ChannelCountMode::Explicit,
            "NotSupportedError - ScriptProcessorNode channel count mode must be 'explicit'",
        );
        self.channel_config
            .set_count_mode(mode, self.registration());
    }

    // The channel count is locked to the numberOfInputChannels chosen at
    // construction; only a no-op "set" to the current value is accepted.
    fn set_channel_count(&self, count: usize) {
        assert_eq!(
            count,
            self.channel_config.count(),
            "NotSupportedError - ScriptProcessorNode channel count must equal numberOfInputChannels"
        );
        self.channel_config.set_count(count, self.registration());
    }
}
63
64impl ScriptProcessorNode {
65    /// Creates a `ScriptProcessorNode`
66    ///
67    /// # Arguments
68    ///
69    /// - `context` - Audio context in which the node will live
70    /// - `options` - node options
71    ///
72    /// # Panics
73    ///
74    /// This function panics if:
75    /// - `buffer_size` is not 0, 256, 512, 1024, 2048, 4096, 8192, or 16384
76    /// - the number of input and output channels are both zero
77    /// - either of the channel counts exceed [`crate::MAX_CHANNELS`]
78    pub fn new<C: BaseAudioContext>(context: &C, options: ScriptProcessorOptions) -> Self {
79        let ScriptProcessorOptions {
80            buffer_size,
81            number_of_input_channels,
82            number_of_output_channels,
83        } = options;
84        let buffer_size = if buffer_size == 0 { 256 } else { buffer_size };
85
86        assert!(
87            (buffer_size / 256).is_power_of_two() && buffer_size <= 16384,
88            "IndexSizeError - bufferSize must be 0 or one of: 256, 512, 1024, 2048, 4096, 8192, 16384",
89        );
90
91        match (number_of_input_channels, number_of_output_channels) {
92            (0, 0) => panic!("IndexSizeError - numberOfInputChannels and numberOfOutputChannels cannot both be zero"),
93            (0, c) | (c, 0) => crate::assert_valid_number_of_channels(c),
94            (c, d) => {
95                crate::assert_valid_number_of_channels(c);
96                crate::assert_valid_number_of_channels(d);
97            }
98        };
99
100        context.base().register(move |registration| {
101            let number_of_quanta = buffer_size / RENDER_QUANTUM_SIZE;
102            let render = ScriptProcessorRenderer {
103                input_buffer: Vec::with_capacity(number_of_quanta),
104                output_buffer: Vec::with_capacity(number_of_quanta),
105                next_output_buffer: Vec::with_capacity(number_of_quanta),
106                buffer_size,
107                number_of_output_channels,
108            };
109
110            let upmix_input_channels = if number_of_input_channels == 0 {
111                1 // any value will do, because upmixing is not performed
112            } else {
113                number_of_input_channels
114            };
115            let audio_node_options = AudioNodeOptions {
116                channel_count: upmix_input_channels,
117                channel_count_mode: ChannelCountMode::Explicit,
118                channel_interpretation: ChannelInterpretation::Speakers,
119            };
120
121            let node = ScriptProcessorNode {
122                registration,
123                channel_config: audio_node_options.into(),
124                buffer_size,
125            };
126
127            (node, Box::new(render))
128        })
129    }
130
131    pub fn buffer_size(&self) -> usize {
132        self.buffer_size
133    }
134
135    /// Register callback to run when the AudioProcessingEvent is dispatched
136    ///
137    /// The event handler processes audio from the input (if any) by accessing the audio data from
138    /// the inputBuffer attribute. The audio data which is the result of the processing (or the
139    /// synthesized data if there are no inputs) is then placed into the outputBuffer.
140    ///
141    /// The output buffer is shipped back to the render thread when the AudioProcessingEvent goes
142    /// out of scope, so be sure not to store it somewhere.
143    ///
144    /// Only a single event handler is active at any time. Calling this method multiple times will
145    /// override the previous event handler.
146    pub fn set_onaudioprocess<F: FnMut(AudioProcessingEvent) + Send + 'static>(
147        &self,
148        mut callback: F,
149    ) {
150        // We need these fields to ship the output buffer to the render thread
151        let base = self.registration().context().clone();
152        let id = self.registration().id();
153
154        let callback = move |v| {
155            let mut payload = match v {
156                EventPayload::AudioProcessing(v) => v,
157                _ => unreachable!(),
158            };
159            payload.registration = Some((base.clone(), id));
160            callback(payload);
161        };
162
163        self.context().set_event_handler(
164            EventType::AudioProcessing(self.registration().id()),
165            EventHandler::Multiple(Box::new(callback)),
166        );
167    }
168
169    /// Unset the callback to run when the AudioProcessingEvent is dispatched
170    pub fn clear_onaudioprocess(&self) {
171        self.context()
172            .clear_event_handler(EventType::AudioProcessing(self.registration().id()));
173    }
174}
175
// Render-thread counterpart of the ScriptProcessorNode: accumulates input
// quanta until a full script buffer is available, then emits an
// AudioProcessingEvent and plays back the buffers the control thread returns.
struct ScriptProcessorRenderer {
    // Input quanta collected so far for the current script buffer
    input_buffer: Vec<AudioRenderQuantum>,
    // Quanta currently being emitted, one per process() call
    output_buffer: Vec<AudioRenderQuantum>,
    // Quanta for the following buffer period, filled via onmessage
    next_output_buffer: Vec<AudioRenderQuantum>,
    // Script buffer size in sample-frames (a multiple of RENDER_QUANTUM_SIZE)
    buffer_size: usize,
    // Channel count of the output AudioBuffer handed to the event handler
    number_of_output_channels: usize,
}

// SAFETY:
// AudioRenderQuantums are not Send but we promise the buffer `Vec`s are empty before we ship
// this struct to the render thread (they are constructed with `Vec::with_capacity` only).
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for ScriptProcessorRenderer {}
189
190impl AudioProcessor for ScriptProcessorRenderer {
191    fn process(
192        &mut self,
193        inputs: &[AudioRenderQuantum],
194        outputs: &mut [AudioRenderQuantum],
195        _params: AudioParamValues<'_>,
196        scope: &AudioWorkletGlobalScope,
197    ) -> bool {
198        // single input/output node
199        let input = &inputs[0];
200        let output = &mut outputs[0];
201
202        // default to silent output
203        output.make_silent();
204        let silence = output.clone();
205
206        // when there are output buffers lined up, emit the first one
207        if !self.output_buffer.is_empty() {
208            *output = self.output_buffer.remove(0);
209        }
210
211        // buffer inputs
212        let number_of_quanta = self.input_buffer.capacity();
213        self.input_buffer.push(input.clone());
214
215        // check if we need to emit an event (input buffer is full)
216        if self.input_buffer.len() == number_of_quanta {
217            // convert self.input_buffer to an AudioBuffer
218            let number_of_input_channels = self
219                .input_buffer
220                .iter()
221                .map(|i| i.number_of_channels())
222                .max()
223                .unwrap();
224            let mut input_samples = vec![vec![0.; self.buffer_size]; number_of_input_channels];
225            self.input_buffer.iter().enumerate().for_each(|(i, b)| {
226                let offset = RENDER_QUANTUM_SIZE * i;
227                b.channels()
228                    .iter()
229                    .zip(input_samples.iter_mut())
230                    .for_each(|(c, o)| {
231                        o[offset..(offset + RENDER_QUANTUM_SIZE)].copy_from_slice(c);
232                    });
233            });
234            let input_buffer = AudioBuffer::from(input_samples, scope.sample_rate);
235
236            // create a suitable output AudioBuffer
237            let output_samples = vec![vec![0.; self.buffer_size]; self.number_of_output_channels];
238            let output_buffer = AudioBuffer::from(output_samples, scope.sample_rate);
239
240            // emit event to control thread
241            let playback_time =
242                scope.current_time + self.buffer_size as f64 / scope.sample_rate as f64;
243            scope.send_audio_processing_event(input_buffer, output_buffer, playback_time);
244
245            // clear existing input buffer
246            self.input_buffer.clear();
247
248            // move next output buffer into current output buffer
249            std::mem::swap(&mut self.output_buffer, &mut self.next_output_buffer);
250
251            // fill next output buffer with silence (with the right channel count)
252            let mut silent_quantum = silence;
253            silent_quantum.set_number_of_channels(self.number_of_output_channels);
254            self.next_output_buffer.clear();
255            self.next_output_buffer
256                .resize(number_of_quanta, silent_quantum);
257        }
258
259        false // node is kept alive as long as the handle in the event loop still exists
260    }
261
262    fn onmessage(&mut self, msg: &mut dyn Any) {
263        if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
264            buffer.channels().iter().enumerate().for_each(|(i, c)| {
265                c.as_slice()
266                    .chunks(RENDER_QUANTUM_SIZE)
267                    .zip(self.next_output_buffer.iter_mut())
268                    .for_each(|(s, o)| o.channel_data_mut(i).copy_from_slice(s))
269            });
270            return;
271        };
272
273        log::warn!("ScriptProcessorRenderer: Dropping incoming message {msg:?}");
274    }
275}
276
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::OfflineAudioContext;
    use crate::node::scheduled_source::AudioScheduledSourceNode;
    use float_eq::assert_float_eq;

    #[test]
    fn test_constructor() {
        let mut context = OfflineAudioContext::new(2, 1024, 48000.);
        let node = context.create_script_processor(512, 1, 1);
        // setting count/mode to their locked values must not panic
        node.set_channel_count(1);
        node.set_channel_count_mode(ChannelCountMode::Explicit);
        node.connect(&context.destination());
        let _ = context.start_rendering_sync();
        // TODO - does not work with OfflineAudioContext due to lack of event loop
    }

    #[test]
    fn test_constructor_zero_inputs() {
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 0, 1); // should not panic
    }

    #[test]
    fn test_constructor_zero_outputs() {
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 1, 0); // should not panic
    }

    #[test]
    fn test_rendering() {
        const BUFFER_SIZE: usize = 256;

        // render 3 buffer periods so at least one processed buffer is emitted
        let mut context = OfflineAudioContext::new(1, BUFFER_SIZE * 3, 48000.);

        // synthesis-only node: zero inputs, one output
        let node = context.create_script_processor(BUFFER_SIZE, 0, 1);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            e.output_buffer.get_channel_data_mut(0).fill(1.); // set all samples to 1.
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // first `2 * BUFFER_SIZE` samples should be silent due to buffering
        assert_float_eq!(
            channel[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // rest of the samples should be 1.
        assert_float_eq!(
            channel[2 * BUFFER_SIZE..],
            &[1.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }

    #[test]
    fn test_multiple_channels() {
        const BUFFER_SIZE: usize = 256;

        let mut context = OfflineAudioContext::new(2, BUFFER_SIZE * 3, 48000.);

        // 2 input channels, 2 output channels
        let node = context.create_script_processor(BUFFER_SIZE, 2, 2);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            // left output buffer is left input * 2
            e.output_buffer
                .get_channel_data_mut(0)
                .iter_mut()
                .zip(e.input_buffer.get_channel_data(0))
                .for_each(|(o, i)| *o = *i * 2.);

            // right output buffer is right input * 3
            e.output_buffer
                .get_channel_data_mut(1)
                .iter_mut()
                .zip(e.input_buffer.get_channel_data(1))
                .for_each(|(o, i)| *o = *i * 3.);
        });

        // let the input be a mono constant source, it will be upmixed to two channels
        let mut src = context.create_constant_source();
        src.start();
        src.connect(&node);

        let result = context.start_rendering_sync();
        let channel1 = result.get_channel_data(0);
        let channel2 = result.get_channel_data(1);

        // first `2 * BUFFER_SIZE` samples should be silent due to buffering
        assert_float_eq!(
            channel1[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );
        assert_float_eq!(
            channel2[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // rest of the samples should be 2. for left buffer
        assert_float_eq!(
            channel1[2 * BUFFER_SIZE..],
            &[2.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
        // rest of the samples should be 3. for right buffer
        assert_float_eq!(
            channel2[2 * BUFFER_SIZE..],
            &[3.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }
}