web_audio_api/context/base.rs

//! The `BaseAudioContext` interface

use crate::buffer::{AudioBuffer, AudioBufferOptions};
use crate::context::{
    AudioContextRegistration, AudioContextState, AudioParamId, ConcreteBaseAudioContext,
    DESTINATION_NODE_ID,
};
use crate::decoding::MediaDecoder;
use crate::events::{Event, EventHandler, EventType};
use crate::node::{AudioNode, AudioNodeOptions};
use crate::param::AudioParamDescriptor;
use crate::periodic_wave::{PeriodicWave, PeriodicWaveOptions};
use crate::{node, AudioListener};

use std::future::Future;

/// The interface representing an audio-processing graph built from audio modules linked together,
/// each represented by an `AudioNode`.
///
/// An audio context controls both the creation of the nodes it contains and the execution of the
/// audio processing, or decoding.
#[allow(clippy::module_name_repetitions)]
pub trait BaseAudioContext {
    /// Returns the [`BaseAudioContext`] concrete type associated with this `AudioContext`
    #[doc(hidden)] // we'd rather not expose the ConcreteBaseAudioContext
    fn base(&self) -> &ConcreteBaseAudioContext;

    /// Decode an [`AudioBuffer`] from a given input stream.
    ///
    /// The current implementation can decode FLAC, Opus, PCM, Vorbis, and WAV.
    ///
    /// In addition to the official spec, the input parameter can be any byte stream (not just an
    /// array). This means you can decode audio data from a file, a network stream, an in-memory
    /// buffer, or any other [`std::io::Read`] implementer. The data is buffered internally, so you
    /// should not wrap the source in a `BufReader`.
    ///
    /// This function operates synchronously, which may be undesirable on the control thread. The
    /// example below shows how to offload the decoding to another thread. See also the async
    /// method [`Self::decode_audio_data`].
    ///
    /// # Errors
    ///
    /// This method returns an `Error` in various cases (IO, MIME sniffing, decoding).
    ///
    /// # Usage
    ///
    /// ```no_run
    /// use std::io::Cursor;
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let input = Cursor::new(vec![0; 32]); // or a File, TcpStream, ...
    ///
    /// let context = OfflineAudioContext::new(2, 44_100, 44_100.);
    /// let handle = std::thread::spawn(move || context.decode_audio_data_sync(input));
    ///
    /// // do other things
    ///
    /// // await result from the decoder thread
    /// let decode_buffer_result = handle.join();
    /// ```
    ///
    /// # Examples
    ///
    /// The following example shows how to use a thread pool for audio buffer decoding:
    ///
    /// `cargo run --release --example decode_multithreaded`
    fn decode_audio_data_sync<R: std::io::Read + Send + Sync + 'static>(
        &self,
        input: R,
    ) -> Result<AudioBuffer, Box<dyn std::error::Error + Send + Sync>> {
        // Set up a media decoder, consume the stream in full and construct a single buffer out of it
        let mut buffer = MediaDecoder::try_new(input)?
            .collect::<Result<Vec<_>, _>>()?
            .into_iter()
            .reduce(|mut accum, item| {
                accum.extend(&item);
                accum
            })
            // if there are no samples decoded, return an empty buffer
            .unwrap_or_else(|| AudioBuffer::from(vec![vec![]], self.sample_rate()));

        // resample to desired rate (no-op if already matching)
        buffer.resample(self.sample_rate());

        Ok(buffer)
    }

    /// Decode an [`AudioBuffer`] from a given input stream.
    ///
    /// The current implementation can decode FLAC, Opus, PCM, Vorbis, and WAV.
    ///
    /// In addition to the official spec, the input parameter can be any byte stream (not just an
    /// array). This means you can decode audio data from a file, a network stream, an in-memory
    /// buffer, or any other [`std::io::Read`] implementer. The data is buffered internally, so you
    /// should not wrap the source in a `BufReader`.
    ///
    /// Warning: the current implementation still uses blocking IO, so it is best to use Tokio's
    /// `spawn_blocking` to run the decoding on a thread dedicated to blocking operations. See also
    /// the synchronous method [`Self::decode_audio_data_sync`].
    ///
    /// # Errors
    ///
    /// This method returns an `Error` in various cases (IO, MIME sniffing, decoding).
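    ///
    /// # Usage
    ///
    /// A minimal sketch of driving the returned future to completion; the choice of executor is
    /// left to the caller (the `futures` executor below is the one used in this crate's tests):
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let context = OfflineAudioContext::new(2, 44_100, 44_100.);
    /// let input = std::fs::File::open("samples/sample.wav").unwrap();
    ///
    /// // resolve the decoding future on a blocking executor
    /// let audio_buffer = futures::executor::block_on(context.decode_audio_data(input)).unwrap();
    /// ```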
    // Use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified,
    // hence we use `-> impl Future + ..` instead.
    fn decode_audio_data<R: std::io::Read + Send + Sync + 'static>(
        &self,
        input: R,
    ) -> impl Future<Output = Result<AudioBuffer, Box<dyn std::error::Error + Send + Sync>>>
           + Send
           + 'static {
        let sample_rate = self.sample_rate();
        async move {
            // Set up a media decoder, consume the stream in full and construct a single buffer out of it
            let mut buffer = MediaDecoder::try_new(input)?
                .collect::<Result<Vec<_>, _>>()?
                .into_iter()
                .reduce(|mut accum, item| {
                    accum.extend(&item);
                    accum
                })
                // if there are no samples decoded, return an empty buffer
                .unwrap_or_else(|| AudioBuffer::from(vec![vec![]], sample_rate));

            // resample to desired rate (no-op if already matching)
            buffer.resample(sample_rate);

            Ok(buffer)
        }
    }

    /// Create a new "in-memory" `AudioBuffer` with the given number of channels,
    /// length (i.e. number of samples per channel) and sample rate.
    ///
    /// Note: In most cases you will want the sample rate to match the current
    /// audio context sample rate.
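    ///
    /// # Usage
    ///
    /// For example, one second of silent stereo audio at the context's own sample rate:
    ///
    /// ```
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let context = OfflineAudioContext::new(2, 44_100, 44_100.);
    /// // match the context sample rate, as recommended above
    /// let buffer = context.create_buffer(2, 44_100, context.sample_rate());
    /// assert_eq!(buffer.length(), 44_100);
    /// assert_eq!(buffer.number_of_channels(), 2);
    /// ```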
    #[must_use]
    fn create_buffer(
        &self,
        number_of_channels: usize,
        length: usize,
        sample_rate: f32,
    ) -> AudioBuffer {
        let options = AudioBufferOptions {
            number_of_channels,
            length,
            sample_rate,
        };

        AudioBuffer::new(options)
    }

    /// Creates an `AnalyserNode`
    #[must_use]
    fn create_analyser(&self) -> node::AnalyserNode {
        node::AnalyserNode::new(self.base(), node::AnalyserOptions::default())
    }

    /// Creates a `BiquadFilterNode` which implements a second-order filter
    #[must_use]
    fn create_biquad_filter(&self) -> node::BiquadFilterNode {
        node::BiquadFilterNode::new(self.base(), node::BiquadFilterOptions::default())
    }

    /// Creates an `AudioBufferSourceNode`
    #[must_use]
    fn create_buffer_source(&self) -> node::AudioBufferSourceNode {
        node::AudioBufferSourceNode::new(self.base(), node::AudioBufferSourceOptions::default())
    }

    /// Creates a `ConstantSourceNode`, a source representing a constant value
    #[must_use]
    fn create_constant_source(&self) -> node::ConstantSourceNode {
        node::ConstantSourceNode::new(self.base(), node::ConstantSourceOptions::default())
    }

    /// Creates a `ConvolverNode`, a processing node which applies linear convolution
    #[must_use]
    fn create_convolver(&self) -> node::ConvolverNode {
        node::ConvolverNode::new(self.base(), node::ConvolverOptions::default())
    }

    /// Creates a `ChannelMergerNode`
    #[must_use]
    fn create_channel_merger(&self, number_of_inputs: usize) -> node::ChannelMergerNode {
        let opts = node::ChannelMergerOptions {
            number_of_inputs,
            ..node::ChannelMergerOptions::default()
        };
        node::ChannelMergerNode::new(self.base(), opts)
    }

    /// Creates a `ChannelSplitterNode`
    #[must_use]
    fn create_channel_splitter(&self, number_of_outputs: usize) -> node::ChannelSplitterNode {
        let opts = node::ChannelSplitterOptions {
            number_of_outputs,
            ..node::ChannelSplitterOptions::default()
        };
        node::ChannelSplitterNode::new(self.base(), opts)
    }

    /// Creates a `DelayNode`, delaying the audio signal
    #[must_use]
    fn create_delay(&self, max_delay_time: f64) -> node::DelayNode {
        let opts = node::DelayOptions {
            max_delay_time,
            ..node::DelayOptions::default()
        };
        node::DelayNode::new(self.base(), opts)
    }

    /// Creates a `DynamicsCompressorNode`, compressing the audio signal
    #[must_use]
    fn create_dynamics_compressor(&self) -> node::DynamicsCompressorNode {
        node::DynamicsCompressorNode::new(self.base(), node::DynamicsCompressorOptions::default())
    }

    /// Creates a `GainNode` to control audio volume
    #[must_use]
    fn create_gain(&self) -> node::GainNode {
        node::GainNode::new(self.base(), node::GainOptions::default())
    }

    /// Creates an `IIRFilterNode`
    ///
    /// # Arguments
    ///
    /// * `feedforward` - An array of the feedforward (numerator) coefficients for the transfer function of the IIR filter.
    ///   The maximum length of this array is 20
    /// * `feedback` - An array of the feedback (denominator) coefficients for the transfer function of the IIR filter.
    ///   The maximum length of this array is 20
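    ///
    /// For example, a simple two-tap smoothing filter can be expressed with a trivial
    /// denominator; the coefficient values below are illustrative only:
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let context = OfflineAudioContext::new(1, 128, 44_100.);
    /// // y[n] = 0.5 * x[n] + 0.5 * x[n - 1]
    /// let feedforward = vec![0.5, 0.5];
    /// let feedback = vec![1.0];
    /// let filter = context.create_iir_filter(feedforward, feedback);
    /// ```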
    #[must_use]
    fn create_iir_filter(&self, feedforward: Vec<f64>, feedback: Vec<f64>) -> node::IIRFilterNode {
        let options = node::IIRFilterOptions {
            audio_node_options: AudioNodeOptions::default(),
            feedforward,
            feedback,
        };
        node::IIRFilterNode::new(self.base(), options)
    }

    /// Creates an `OscillatorNode`, a source representing a periodic waveform.
    #[must_use]
    fn create_oscillator(&self) -> node::OscillatorNode {
        node::OscillatorNode::new(self.base(), node::OscillatorOptions::default())
    }

    /// Creates a `PannerNode`
    #[must_use]
    fn create_panner(&self) -> node::PannerNode {
        node::PannerNode::new(self.base(), node::PannerOptions::default())
    }

    /// Creates a periodic wave
    ///
    /// Please note that this constructor deviates slightly from the spec by requiring a single
    /// argument with the periodic wave options.
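    ///
    /// A minimal sketch of building a wave from Fourier coefficients; the field layout of
    /// `PeriodicWaveOptions` shown here (optional `real`/`imag` vectors plus
    /// `disable_normalization`) is an assumption based on the spec's dictionary of the same name:
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    /// use web_audio_api::PeriodicWaveOptions;
    ///
    /// let context = OfflineAudioContext::new(1, 128, 44_100.);
    /// let options = PeriodicWaveOptions {
    ///     real: Some(vec![0., 0.5, 0.5]),
    ///     imag: Some(vec![0., 0., 0.]),
    ///     disable_normalization: false,
    /// };
    /// let wave = context.create_periodic_wave(options);
    /// ```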
    #[must_use]
    fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave {
        PeriodicWave::new(self.base(), options)
    }

    /// Creates a `ScriptProcessorNode` for custom audio processing (deprecated).
    ///
    /// # Panics
    ///
    /// This function panics if:
    /// - `buffer_size` is not 256, 512, 1024, 2048, 4096, 8192, or 16384
    /// - the number of input and output channels are both zero
    /// - either of the channel counts exceeds [`crate::MAX_CHANNELS`]
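    ///
    /// For example (the buffer size and channel counts here are illustrative):
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let context = OfflineAudioContext::new(1, 128, 44_100.);
    /// // a valid power-of-two buffer size, mono in and mono out
    /// let node = context.create_script_processor(1024, 1, 1);
    /// ```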
    #[must_use]
    fn create_script_processor(
        &self,
        buffer_size: usize,
        number_of_input_channels: usize,
        number_of_output_channels: usize,
    ) -> node::ScriptProcessorNode {
        let options = node::ScriptProcessorOptions {
            buffer_size,
            number_of_input_channels,
            number_of_output_channels,
        };

        node::ScriptProcessorNode::new(self.base(), options)
    }

    /// Creates a `StereoPannerNode` to pan a stereo output
    #[must_use]
    fn create_stereo_panner(&self) -> node::StereoPannerNode {
        node::StereoPannerNode::new(self.base(), node::StereoPannerOptions::default())
    }

    /// Creates a `WaveShaperNode`
    #[must_use]
    fn create_wave_shaper(&self) -> node::WaveShaperNode {
        node::WaveShaperNode::new(self.base(), node::WaveShaperOptions::default())
    }

    /// Returns an `AudioDestinationNode` representing the final destination of all audio in the
    /// context. It can be thought of as the audio-rendering device.
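    ///
    /// A short sketch of the canonical routing pattern, connecting a source or effect node to
    /// the destination:
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    /// use web_audio_api::node::AudioNode;
    ///
    /// let context = OfflineAudioContext::new(2, 128, 44_100.);
    /// let gain = context.create_gain();
    /// // route the gain node into the rendering device
    /// gain.connect(&context.destination());
    /// ```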
    #[must_use]
    fn destination(&self) -> node::AudioDestinationNode {
        let registration = AudioContextRegistration {
            id: DESTINATION_NODE_ID,
            context: self.base().clone(),
        };
        let channel_config = self.base().destination_channel_config();
        node::AudioDestinationNode::from_raw_parts(registration, channel_config)
    }

    /// Returns the `AudioListener` which is used for 3D spatialization
    #[must_use]
    fn listener(&self) -> AudioListener {
        self.base().listener()
    }

    /// The sample rate (in sample-frames per second) at which the `AudioContext` handles audio.
    #[must_use]
    fn sample_rate(&self) -> f32 {
        self.base().sample_rate()
    }

    /// Returns the state of the current context
    #[must_use]
    fn state(&self) -> AudioContextState {
        self.base().state()
    }

    /// This is the time in seconds of the sample frame immediately following the last sample-frame
    /// in the block of audio most recently processed by the context’s rendering graph.
    #[must_use]
    fn current_time(&self) -> f64 {
        self.base().current_time()
    }

    /// Create an `AudioParam`.
    ///
    /// Call this inside the `register` closure when setting up your `AudioNode`.
    #[must_use]
    fn create_audio_param(
        &self,
        opts: AudioParamDescriptor,
        dest: &AudioContextRegistration,
    ) -> (crate::param::AudioParam, AudioParamId) {
        let param = self.base().register(move |registration| {
            let (node, proc) = crate::param::audio_param_pair(opts, registration);

            (node, Box::new(proc))
        });

        // Connect the param to the node, once the node is registered inside the audio graph.
        self.base().queue_audio_param_connect(&param, dest.id());

        let proc_id = AudioParamId(param.registration().id().0);
        (param, proc_id)
    }

    /// Register a callback to run when the state of the `AudioContext` has changed
    ///
    /// Only a single event handler is active at any time. Calling this method multiple times will
    /// override the previous event handler.
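    ///
    /// A minimal sketch of installing a handler (the closure body is illustrative):
    ///
    /// ```no_run
    /// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
    ///
    /// let context = OfflineAudioContext::new(1, 128, 44_100.);
    /// context.set_onstatechange(|_event| println!("the context state changed"));
    /// ```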
    fn set_onstatechange<F: FnMut(Event) + Send + 'static>(&self, mut callback: F) {
        let callback = move |_| {
            callback(Event {
                type_: "statechange",
            })
        };

        self.base().set_event_handler(
            EventType::StateChange,
            EventHandler::Multiple(Box::new(callback)),
        );
    }

    /// Unset the callback to run when the state of the `AudioContext` has changed
    fn clear_onstatechange(&self) {
        self.base().clear_event_handler(EventType::StateChange);
    }


    #[cfg(test)]
    fn mock_registration(&self) -> AudioContextRegistration {
        AudioContextRegistration {
            id: crate::context::AudioNodeId(0),
            context: self.base().clone(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::OfflineAudioContext;

    use float_eq::assert_float_eq;

    fn require_send_sync_static<T: Send + Sync + 'static>(_: T) {}

    #[test]
    fn test_decode_audio_data_sync() {
        let context = OfflineAudioContext::new(1, 1, 44100.);
        let file = std::fs::File::open("samples/sample.wav").unwrap();
        let audio_buffer = context.decode_audio_data_sync(file).unwrap();

        assert_eq!(audio_buffer.sample_rate(), 44100.);
        assert_eq!(audio_buffer.length(), 142_187);
        assert_eq!(audio_buffer.number_of_channels(), 2);
        assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001);

        let left_start = &audio_buffer.get_channel_data(0)[0..100];
        let right_start = &audio_buffer.get_channel_data(1)[0..100];
        // assert that the two channels hold distinct data
        assert!(left_start != right_start);
    }

    #[test]
    fn test_decode_audio_data_future_send_static() {
        let context = OfflineAudioContext::new(1, 1, 44100.);
        let file = std::fs::File::open("samples/sample.wav").unwrap();
        let future = context.decode_audio_data(file);
        require_send_sync_static(future);
    }

    #[test]
    fn test_decode_audio_data_async() {
        use futures::executor;
        let context = OfflineAudioContext::new(1, 1, 44100.);
        let file = std::fs::File::open("samples/sample.wav").unwrap();
        let future = context.decode_audio_data(file);
        let audio_buffer = executor::block_on(future).unwrap();

        assert_eq!(audio_buffer.sample_rate(), 44100.);
        assert_eq!(audio_buffer.length(), 142_187);
        assert_eq!(audio_buffer.number_of_channels(), 2);
        assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001);

        let left_start = &audio_buffer.get_channel_data(0)[0..100];
        let right_start = &audio_buffer.get_channel_data(1)[0..100];
        // assert that the two channels hold distinct data
        assert!(left_start != right_start);
    }

    // #[test]
    // disabled: symphonia cannot handle empty WAV files
    #[allow(dead_code)]
    fn test_decode_audio_data_empty() {
        let context = OfflineAudioContext::new(1, 1, 44100.);
        let file = std::fs::File::open("samples/empty_2c.wav").unwrap();
        let audio_buffer = context.decode_audio_data_sync(file).unwrap();
        assert_eq!(audio_buffer.length(), 0);
    }

    #[test]
    fn test_decode_audio_data_decoding_error() {
        let context = OfflineAudioContext::new(1, 1, 44100.);
        let file = std::fs::File::open("samples/corrupt.wav").unwrap();
        assert!(context.decode_audio_data_sync(file).is_err());
    }

    #[test]
    fn test_create_buffer() {
        let number_of_channels = 3;
        let length = 2000;
        let sample_rate = 96_000.;

        let context = OfflineAudioContext::new(1, 1, 44100.);
        let buffer = context.create_buffer(number_of_channels, length, sample_rate);

        assert_eq!(buffer.number_of_channels(), 3);
        assert_eq!(buffer.length(), 2000);
        assert_float_eq!(buffer.sample_rate(), 96000., abs_all <= 0.);
    }
}
472}