// web_audio_api/node/script_processor.rs

1use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelCountMode, ChannelInterpretation};
2use crate::context::{AudioContextRegistration, BaseAudioContext};
3use crate::events::{AudioProcessingEvent, EventHandler, EventPayload, EventType};
4use crate::render::{
5    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
6};
7use crate::{AudioBuffer, RENDER_QUANTUM_SIZE};
8
9use std::any::Any;
10
/// Options for constructing an [`ScriptProcessorNode`]
#[derive(Clone, Debug)]
pub struct ScriptProcessorOptions {
    /// Number of frames per audio processing event.
    /// Must be one of: 256, 512, 1024, 2048, 4096, 8192, 16384 (validated in `new`).
    pub buffer_size: usize,
    /// Channel count of the input buffer handed to the event handler (0 = no input)
    pub number_of_input_channels: usize,
    /// Channel count of the output buffer handed to the event handler (0 = no output)
    pub number_of_output_channels: usize,
}
18
/// An AudioNode which can generate, process, or analyse audio directly using a script (deprecated)
#[derive(Debug)]
pub struct ScriptProcessorNode {
    /// Handle registering this node with its audio context
    registration: AudioContextRegistration,
    /// Channel configuration; `new` fixes the mode to 'explicit' and the count to the
    /// number of input channels, and the setters below enforce that they never change
    channel_config: ChannelConfig,
    /// Number of frames per AudioProcessingEvent
    buffer_size: usize,
}
26
impl AudioNode for ScriptProcessorNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    /// Always 1 - a ScriptProcessorNode has a single input
    fn number_of_inputs(&self) -> usize {
        1
    }

    /// Always 1 - a ScriptProcessorNode has a single output
    fn number_of_outputs(&self) -> usize {
        1
    }

    /// Panics with NotSupportedError for any mode other than `Explicit` -
    /// the channel count mode of this node cannot be changed
    fn set_channel_count_mode(&self, mode: ChannelCountMode) {
        assert_eq!(
            mode,
            ChannelCountMode::Explicit,
            "NotSupportedError - ScriptProcessorNode channel count mode must be 'explicit'",
        );
        self.channel_config
            .set_count_mode(mode, self.registration());
    }

    /// Panics with NotSupportedError for any count other than the current one -
    /// the channel count of this node cannot be changed
    fn set_channel_count(&self, count: usize) {
        assert_eq!(
            count,
            self.channel_config.count(),
            "NotSupportedError - ScriptProcessorNode channel count must equal numberOfInputChannels"
        );
        self.channel_config.set_count(count, self.registration());
    }
}
63
64impl ScriptProcessorNode {
65    /// Creates a `ScriptProcessorNode`
66    ///
67    /// # Arguments
68    ///
69    /// - `context` - Audio context in which the node will live
70    /// - `options` - node options
71    ///
72    /// # Panics
73    ///
74    /// This function panics if:
75    /// - `buffer_size` is not 256, 512, 1024, 2048, 4096, 8192, or 16384
76    /// - the number of input and output channels are both zero
77    /// - either of the channel counts exceed [`crate::MAX_CHANNELS`]
78    pub fn new<C: BaseAudioContext>(context: &C, options: ScriptProcessorOptions) -> Self {
79        let ScriptProcessorOptions {
80            buffer_size,
81            number_of_input_channels,
82            number_of_output_channels,
83        } = options;
84
85        assert!(
86            (buffer_size / 256).is_power_of_two() && buffer_size <= 16384,
87            "IndexSizeError - bufferSize must be one of: 256, 512, 1024, 2048, 4096, 8192, 16384",
88        );
89
90        match (number_of_input_channels, number_of_output_channels) {
91            (0, 0) => panic!("IndexSizeError - numberOfInputChannels and numberOfOutputChannels cannot both be zero"),
92            (0, c) | (c, 0) => crate::assert_valid_number_of_channels(c),
93            (c, d) => {
94                crate::assert_valid_number_of_channels(c);
95                crate::assert_valid_number_of_channels(d);
96            }
97        };
98
99        context.base().register(move |registration| {
100            let number_of_quanta = buffer_size / RENDER_QUANTUM_SIZE;
101            let render = ScriptProcessorRenderer {
102                input_buffer: Vec::with_capacity(number_of_quanta),
103                output_buffer: Vec::with_capacity(number_of_quanta),
104                next_output_buffer: Vec::with_capacity(number_of_quanta),
105                buffer_size,
106                number_of_output_channels,
107            };
108
109            let upmix_input_channels = if number_of_input_channels == 0 {
110                1 // any value will do, because upmixing is not performed
111            } else {
112                number_of_input_channels
113            };
114            let audio_node_options = AudioNodeOptions {
115                channel_count: upmix_input_channels,
116                channel_count_mode: ChannelCountMode::Explicit,
117                channel_interpretation: ChannelInterpretation::Speakers,
118            };
119
120            let node = ScriptProcessorNode {
121                registration,
122                channel_config: audio_node_options.into(),
123                buffer_size,
124            };
125
126            (node, Box::new(render))
127        })
128    }
129
130    pub fn buffer_size(&self) -> usize {
131        self.buffer_size
132    }
133
134    /// Register callback to run when the AudioProcessingEvent is dispatched
135    ///
136    /// The event handler processes audio from the input (if any) by accessing the audio data from
137    /// the inputBuffer attribute. The audio data which is the result of the processing (or the
138    /// synthesized data if there are no inputs) is then placed into the outputBuffer.
139    ///
140    /// The output buffer is shipped back to the render thread when the AudioProcessingEvent goes
141    /// out of scope, so be sure not to store it somewhere.
142    ///
143    /// Only a single event handler is active at any time. Calling this method multiple times will
144    /// override the previous event handler.
145    pub fn set_onaudioprocess<F: FnMut(AudioProcessingEvent) + Send + 'static>(
146        &self,
147        mut callback: F,
148    ) {
149        // We need these fields to ship the output buffer to the render thread
150        let base = self.registration().context().clone();
151        let id = self.registration().id();
152
153        let callback = move |v| {
154            let mut payload = match v {
155                EventPayload::AudioProcessing(v) => v,
156                _ => unreachable!(),
157            };
158            payload.registration = Some((base.clone(), id));
159            callback(payload);
160        };
161
162        self.context().set_event_handler(
163            EventType::AudioProcessing(self.registration().id()),
164            EventHandler::Multiple(Box::new(callback)),
165        );
166    }
167
168    /// Unset the callback to run when the AudioProcessingEvent is dispatched
169    pub fn clear_onaudioprocess(&self) {
170        self.context()
171            .clear_event_handler(EventType::AudioProcessing(self.registration().id()));
172    }
173}
174
/// Render-thread counterpart of the [`ScriptProcessorNode`]
struct ScriptProcessorRenderer {
    /// Accumulates incoming render quanta until a full `buffer_size` worth is collected
    input_buffer: Vec<AudioRenderQuantum>,
    /// Quanta currently being played out, one drained per `process` call
    output_buffer: Vec<AudioRenderQuantum>,
    /// Quanta for the following round, filled by the event handler via `onmessage`
    next_output_buffer: Vec<AudioRenderQuantum>,
    /// Number of frames per AudioProcessingEvent (a multiple of RENDER_QUANTUM_SIZE)
    buffer_size: usize,
    /// Channel count of the emitted output buffers
    number_of_output_channels: usize,
}
182
// SAFETY:
// `AudioRenderQuantum` is not `Send`, but we promise the quantum buffers (`input_buffer`,
// `output_buffer`, `next_output_buffer` - all `Vec`s) are empty when the renderer is shipped
// to the render thread; they are only ever filled from that thread afterwards.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for ScriptProcessorRenderer {}
188
189impl AudioProcessor for ScriptProcessorRenderer {
190    fn process(
191        &mut self,
192        inputs: &[AudioRenderQuantum],
193        outputs: &mut [AudioRenderQuantum],
194        _params: AudioParamValues<'_>,
195        scope: &AudioWorkletGlobalScope,
196    ) -> bool {
197        // single input/output node
198        let input = &inputs[0];
199        let output = &mut outputs[0];
200
201        // default to silent output
202        output.make_silent();
203        let silence = output.clone();
204
205        // when there are output buffers lined up, emit the first one
206        if !self.output_buffer.is_empty() {
207            *output = self.output_buffer.remove(0);
208        }
209
210        // buffer inputs
211        let number_of_quanta = self.input_buffer.capacity();
212        self.input_buffer.push(input.clone());
213
214        // check if we need to emit an event (input buffer is full)
215        if self.input_buffer.len() == number_of_quanta {
216            // convert self.input_buffer to an AudioBuffer
217            let number_of_input_channels = self
218                .input_buffer
219                .iter()
220                .map(|i| i.number_of_channels())
221                .max()
222                .unwrap();
223            let mut input_samples = vec![vec![0.; self.buffer_size]; number_of_input_channels];
224            self.input_buffer.iter().enumerate().for_each(|(i, b)| {
225                let offset = RENDER_QUANTUM_SIZE * i;
226                b.channels()
227                    .iter()
228                    .zip(input_samples.iter_mut())
229                    .for_each(|(c, o)| {
230                        o[offset..(offset + RENDER_QUANTUM_SIZE)].copy_from_slice(c);
231                    });
232            });
233            let input_buffer = AudioBuffer::from(input_samples, scope.sample_rate);
234
235            // create a suitable output AudioBuffer
236            let output_samples = vec![vec![0.; self.buffer_size]; self.number_of_output_channels];
237            let output_buffer = AudioBuffer::from(output_samples, scope.sample_rate);
238
239            // emit event to control thread
240            let playback_time =
241                scope.current_time + self.buffer_size as f64 / scope.sample_rate as f64;
242            scope.send_audio_processing_event(input_buffer, output_buffer, playback_time);
243
244            // clear existing input buffer
245            self.input_buffer.clear();
246
247            // move next output buffer into current output buffer
248            std::mem::swap(&mut self.output_buffer, &mut self.next_output_buffer);
249
250            // fill next output buffer with silence (with the right channel count)
251            let mut silent_quantum = silence;
252            silent_quantum.set_number_of_channels(self.number_of_output_channels);
253            self.next_output_buffer.clear();
254            self.next_output_buffer
255                .resize(number_of_quanta, silent_quantum);
256        }
257
258        false // node is kept alive as long as the handle in the event loop still exists
259    }
260
261    fn onmessage(&mut self, msg: &mut dyn Any) {
262        if let Some(buffer) = msg.downcast_mut::<AudioBuffer>() {
263            buffer.channels().iter().enumerate().for_each(|(i, c)| {
264                c.as_slice()
265                    .chunks(RENDER_QUANTUM_SIZE)
266                    .zip(self.next_output_buffer.iter_mut())
267                    .for_each(|(s, o)| o.channel_data_mut(i).copy_from_slice(s))
268            });
269            return;
270        };
271
272        log::warn!("ScriptProcessorRenderer: Dropping incoming message {msg:?}");
273    }
274}
275
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::OfflineAudioContext;
    use crate::node::scheduled_source::AudioScheduledSourceNode;
    use float_eq::assert_float_eq;

    // Constructor should accept valid options and the channel setters should accept
    // the (only) allowed values without panicking.
    #[test]
    fn test_constructor() {
        let mut context = OfflineAudioContext::new(2, 1024, 48000.);
        let node = context.create_script_processor(512, 1, 1);
        node.set_channel_count(1);
        node.set_channel_count_mode(ChannelCountMode::Explicit);
        node.connect(&context.destination());
        let _ = context.start_rendering_sync();
        // TODO - does not work with OfflineAudioContext due to lack of event loop
    }

    #[test]
    fn test_constructor_zero_inputs() {
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 0, 1); // should not panic
    }

    #[test]
    fn test_constructor_zero_outputs() {
        let context = OfflineAudioContext::new(2, 1024, 48000.);
        let _ = context.create_script_processor(512, 1, 0); // should not panic
    }

    // Zero-input node acting as a generator: the handler fills the output with ones.
    // The node introduces a 2 * bufferSize latency (one buffer to collect input, one
    // round-trip through the event loop), hence the silent prefix.
    #[test]
    fn test_rendering() {
        const BUFFER_SIZE: usize = 256;

        let mut context = OfflineAudioContext::new(1, BUFFER_SIZE * 3, 48000.);

        let node = context.create_script_processor(BUFFER_SIZE, 0, 1);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            e.output_buffer.get_channel_data_mut(0).fill(1.); // set all samples to 1.
        });

        let result = context.start_rendering_sync();
        let channel = result.get_channel_data(0);

        // first `2 * BUFFER_SIZE` samples should be silent due to buffering
        assert_float_eq!(
            channel[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // rest of the samples should be 1.
        assert_float_eq!(
            channel[2 * BUFFER_SIZE..],
            &[1.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }

    // Stereo processing: each channel of the input should be scaled independently,
    // verifying that per-channel data survives the round trip through the event loop.
    #[test]
    fn test_multiple_channels() {
        const BUFFER_SIZE: usize = 256;

        let mut context = OfflineAudioContext::new(2, BUFFER_SIZE * 3, 48000.);

        // 2 input channels, 2 output channels
        let node = context.create_script_processor(BUFFER_SIZE, 2, 2);
        node.connect(&context.destination());
        node.set_onaudioprocess(|mut e| {
            // left output buffer is left input * 2
            e.output_buffer
                .get_channel_data_mut(0)
                .iter_mut()
                .zip(e.input_buffer.get_channel_data(0))
                .for_each(|(o, i)| *o = *i * 2.);

            // right output buffer is right input * 3
            e.output_buffer
                .get_channel_data_mut(1)
                .iter_mut()
                .zip(e.input_buffer.get_channel_data(1))
                .for_each(|(o, i)| *o = *i * 3.);
        });

        // let the input be a mono constant source, it will be upmixed to two channels
        let mut src = context.create_constant_source();
        src.start();
        src.connect(&node);

        let result = context.start_rendering_sync();
        let channel1 = result.get_channel_data(0);
        let channel2 = result.get_channel_data(1);

        // first `2 * BUFFER_SIZE` samples should be silent due to buffering
        assert_float_eq!(
            channel1[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );
        assert_float_eq!(
            channel2[..2 * BUFFER_SIZE],
            &[0.; 2 * BUFFER_SIZE][..],
            abs_all <= 0.
        );

        // rest of the samples should be 2. for left buffer
        assert_float_eq!(
            channel1[2 * BUFFER_SIZE..],
            &[2.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
        // rest of the samples should be 3. for right buffer
        assert_float_eq!(
            channel2[2 * BUFFER_SIZE..],
            &[3.; BUFFER_SIZE][..],
            abs_all <= 0.
        );
    }
}