pub trait BaseAudioContext {
    fn base(&self) -> &ConcreteBaseAudioContext; fn register<T: AudioNode, F: FnOnce(AudioContextRegistration) -> (T, Box<dyn AudioProcessor>)>(
        &self,
        f: F
    ) -> T { ... } fn decode_audio_data_sync<R: Read + Send + Sync + 'static>(
        &self,
        input: R
    ) -> Result<AudioBuffer, Box<dyn Error + Send + Sync>> { ... } fn create_buffer(
        &self,
        number_of_channels: usize,
        length: usize,
        sample_rate: f32
    ) -> AudioBuffer { ... } fn create_analyser(&self) -> AnalyserNode { ... } fn create_biquad_filter(&self) -> BiquadFilterNode { ... } fn create_buffer_source(&self) -> AudioBufferSourceNode { ... } fn create_constant_source(&self) -> ConstantSourceNode { ... } fn create_convolver(&self) -> ConvolverNode { ... } fn create_channel_merger(&self, number_of_inputs: usize) -> ChannelMergerNode { ... } fn create_channel_splitter(
        &self,
        number_of_outputs: usize
    ) -> ChannelSplitterNode { ... } fn create_delay(&self, max_delay_time: f64) -> DelayNode { ... } fn create_dynamics_compressor(&self) -> DynamicsCompressorNode { ... } fn create_gain(&self) -> GainNode { ... } fn create_iir_filter(
        &self,
        feedforward: Vec<f64>,
        feedback: Vec<f64>
    ) -> IIRFilterNode { ... } fn create_oscillator(&self) -> OscillatorNode { ... } fn create_panner(&self) -> PannerNode { ... } fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave { ... } fn create_stereo_panner(&self) -> StereoPannerNode { ... } fn create_wave_shaper(&self) -> WaveShaperNode { ... } fn destination(&self) -> AudioDestinationNode { ... } fn listener(&self) -> AudioListener { ... } fn sample_rate(&self) -> f32 { ... } fn state(&self) -> AudioContextState { ... } fn current_time(&self) -> f64 { ... } fn create_audio_param(
        &self,
        opts: AudioParamDescriptor,
        dest: &AudioContextRegistration
    ) -> (AudioParam, AudioParamId) { ... }
}
Expand description

The interface representing an audio-processing graph built from audio modules linked together, each represented by an AudioNode.

An audio context controls both the creation of the nodes it contains and the execution of the audio processing, or decoding.

Required Methods§

source

fn base(&self) -> &ConcreteBaseAudioContext

Returns the BaseAudioContext concrete type associated with this AudioContext

Provided Methods§

source

fn register<T: AudioNode, F: FnOnce(AudioContextRegistration) -> (T, Box<dyn AudioProcessor>)>(
    &self,
    f: F
) -> T

Construct a new pair of AudioNode and AudioProcessor

The AudioNode lives in the user-facing control thread. The Processor is sent to the render thread.

source

fn decode_audio_data_sync<R: Read + Send + Sync + 'static>(
    &self,
    input: R
) -> Result<AudioBuffer, Box<dyn Error + Send + Sync>>

Decode an AudioBuffer from a given input stream.

The current implementation can decode FLAC, Opus, PCM, Vorbis, and Wav.

In addition to the official spec, the input parameter can be any byte stream (not just an array). This means you can decode audio data from a file, network stream, or in-memory buffer, or any other std::io::Read implementor. The data is buffered internally so you should not wrap the source in a BufReader.

This function operates synchronously, which may be undesirable on the control thread. The example shows how to avoid this. An async version is currently not implemented.

Errors

This method returns an Error in various cases (IO, mime sniffing, decoding).

Usage
use std::io::Cursor;
use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};

let input = Cursor::new(vec![0; 32]); // or a File, TcpStream, ...

let context = OfflineAudioContext::new(2, 44_100, 44_100.);
let handle = std::thread::spawn(move || context.decode_audio_data_sync(input));

// do other things

// await result from the decoder thread
let decode_buffer_result = handle.join();
Examples

The following example shows how to use a thread pool for audio buffer decoding:

cargo run --release --example decode_multithreaded

source

fn create_buffer(
    &self,
    number_of_channels: usize,
    length: usize,
    sample_rate: f32
) -> AudioBuffer

Create a new “in-memory” AudioBuffer with the given number of channels, length (i.e. number of samples per channel) and sample rate.

Note: In most cases you will want the sample rate to match the current audio context sample rate.

source

fn create_analyser(&self) -> AnalyserNode

Creates an AnalyserNode

source

fn create_biquad_filter(&self) -> BiquadFilterNode

Creates a BiquadFilterNode which implements a second order filter

source

fn create_buffer_source(&self) -> AudioBufferSourceNode

Creates an AudioBufferSourceNode

source

fn create_constant_source(&self) -> ConstantSourceNode

Creates a ConstantSourceNode, a source representing a constant value

source

fn create_convolver(&self) -> ConvolverNode

Creates a ConvolverNode, a processing node which applies linear convolution

source

fn create_channel_merger(&self, number_of_inputs: usize) -> ChannelMergerNode

Creates a ChannelMergerNode

source

fn create_channel_splitter(
    &self,
    number_of_outputs: usize
) -> ChannelSplitterNode

Creates a ChannelSplitterNode

source

fn create_delay(&self, max_delay_time: f64) -> DelayNode

Creates a DelayNode, delaying the audio signal

source

fn create_dynamics_compressor(&self) -> DynamicsCompressorNode

Creates a DynamicsCompressorNode, compressing the audio signal

source

fn create_gain(&self) -> GainNode

Creates a GainNode, to control audio volume

source

fn create_iir_filter(
    &self,
    feedforward: Vec<f64>,
    feedback: Vec<f64>
) -> IIRFilterNode

Creates an IIRFilterNode

Arguments
  • feedforward - An array of the feedforward (numerator) coefficients for the transfer function of the IIR filter. The maximum length of this array is 20
  • feedback - An array of the feedback (denominator) coefficients for the transfer function of the IIR filter. The maximum length of this array is 20
source

fn create_oscillator(&self) -> OscillatorNode

Creates an OscillatorNode, a source representing a periodic waveform.

source

fn create_panner(&self) -> PannerNode

Creates a PannerNode

source

fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave

Creates a periodic wave

Please note that this constructor deviates slightly from the spec by requiring a single argument with the periodic wave options.

source

fn create_stereo_panner(&self) -> StereoPannerNode

Creates a StereoPannerNode to pan a stereo output

source

fn create_wave_shaper(&self) -> WaveShaperNode

Creates a WaveShaperNode

source

fn destination(&self) -> AudioDestinationNode

Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought of as the audio-rendering device.

source

fn listener(&self) -> AudioListener

Returns the AudioListener which is used for 3D spatialization

source

fn sample_rate(&self) -> f32

The sample rate (in sample-frames per second) at which the AudioContext handles audio.

source

fn state(&self) -> AudioContextState

Returns the state of the current context

source

fn current_time(&self) -> f64

This is the time in seconds of the sample frame immediately following the last sample-frame in the block of audio most recently processed by the context’s rendering graph.

source

fn create_audio_param(
    &self,
    opts: AudioParamDescriptor,
    dest: &AudioContextRegistration
) -> (AudioParam, AudioParamId)

Create an AudioParam.

Call this inside the register closure when setting up your AudioNode

Implementors§