Trait web_audio_api::context::BaseAudioContext
source · [−]pub trait BaseAudioContext {
Show 25 methods
fn base(&self) -> &ConcreteBaseAudioContext;
fn decode_audio_data_sync<R: Read + Send + Sync + 'static>(
&self,
input: R
) -> Result<AudioBuffer, Box<dyn Error + Send + Sync>> { ... }
fn create_buffer(
&self,
number_of_channels: usize,
length: usize,
sample_rate: SampleRate
) -> AudioBuffer { ... }
fn create_analyser(&self) -> AnalyserNode { ... }
fn create_biquad_filter(&self) -> BiquadFilterNode { ... }
fn create_buffer_source(&self) -> AudioBufferSourceNode { ... }
fn create_constant_source(&self) -> ConstantSourceNode { ... }
fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode { ... }
fn create_channel_splitter(
&self,
number_of_outputs: u32
) -> ChannelSplitterNode { ... }
fn create_delay(&self, max_delay_time: f64) -> DelayNode { ... }
fn create_gain(&self) -> GainNode { ... }
fn create_iir_filter(
&self,
feedforward: Vec<f64>,
feedback: Vec<f64>
) -> IIRFilterNode { ... }
fn create_media_stream_source<M: MediaStream>(
&self,
media: M
) -> MediaStreamAudioSourceNode { ... }
fn create_media_stream_destination(&self) -> MediaStreamAudioDestinationNode { ... }
fn create_oscillator(&self) -> OscillatorNode { ... }
fn create_panner(&self) -> PannerNode { ... }
fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave { ... }
fn create_stereo_panner(&self) -> StereoPannerNode { ... }
fn create_wave_shaper(&self) -> WaveShaperNode { ... }
fn create_audio_param(
&self,
opts: AudioParamDescriptor,
dest: &AudioNodeId
) -> (AudioParam, AudioParamId) { ... }
fn destination(&self) -> AudioDestinationNode { ... }
fn listener(&self) -> AudioListener { ... }
fn sample_rate(&self) -> f32 { ... }
fn sample_rate_raw(&self) -> SampleRate { ... }
fn current_time(&self) -> f64 { ... }
}
Expand description
The interface representing an audio-processing graph built from audio modules linked together,
each represented by an AudioNode
.
An audio context controls both the creation of the nodes it contains and the execution of the audio processing, or decoding.
Please note that in rust, we need to differentiate between the BaseAudioContext
trait and
the ConcreteBaseAudioContext
concrete implementation.
Required methods
fn base(&self) -> &ConcreteBaseAudioContext
fn base(&self) -> &ConcreteBaseAudioContext
Retrieves the ConcreteBaseAudioContext
associated with this AudioContext
Provided methods
Decode an AudioBuffer
from a given input stream.
The current implementation can decode FLAC, Opus, PCM, Vorbis, and Wav.
In addition to the official spec, the input parameter can be any byte stream (not just an
array). This means you can decode audio data from a file, network stream, or in memory
buffer, and any other std::io::Read
implementor. The data is buffered internally, so you
should not wrap the source in a BufReader
.
This function operates synchronously, which may be undesirable on the control thread. The example shows how to avoid this. An async version is currently not implemented.
Errors
This method returns an Error in various cases (IO, mime sniffing, decoding).
Example
use std::io::Cursor;
use web_audio_api::SampleRate;
use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
let input = Cursor::new(vec![0; 32]); // or a File, TcpStream, ...
let context = OfflineAudioContext::new(2, 44_100, SampleRate(44_100));
let handle = std::thread::spawn(move || context.decode_audio_data_sync(input));
// do other things
// await result from the decoder thread
let decode_buffer_result = handle.join();
fn create_buffer(
&self,
number_of_channels: usize,
length: usize,
sample_rate: SampleRate
) -> AudioBuffer
fn create_buffer(
&self,
number_of_channels: usize,
length: usize,
sample_rate: SampleRate
) -> AudioBuffer
Create a new “in-memory” AudioBuffer
with the given number of channels,
length (i.e. number of samples per channel) and sample rate.
Note: In most cases you will want the sample rate to match the current audio context sample rate.
fn create_analyser(&self) -> AnalyserNode
fn create_analyser(&self) -> AnalyserNode
Creates a AnalyserNode
fn create_biquad_filter(&self) -> BiquadFilterNode
fn create_biquad_filter(&self) -> BiquadFilterNode
Creates a BiquadFilterNode
which implements a second order filter
fn create_buffer_source(&self) -> AudioBufferSourceNode
fn create_buffer_source(&self) -> AudioBufferSourceNode
Creates an AudioBufferSourceNode
fn create_constant_source(&self) -> ConstantSourceNode
fn create_constant_source(&self) -> ConstantSourceNode
Creates a ConstantSourceNode
, a source representing a constant value
fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode
fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode
Creates a ChannelMergerNode
fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode
fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode
Creates a ChannelSplitterNode
fn create_delay(&self, max_delay_time: f64) -> DelayNode
fn create_delay(&self, max_delay_time: f64) -> DelayNode
Creates a DelayNode
, delaying the audio signal
fn create_gain(&self) -> GainNode
fn create_gain(&self) -> GainNode
Creates a GainNode
, to control audio volume
fn create_iir_filter(
&self,
feedforward: Vec<f64>,
feedback: Vec<f64>
) -> IIRFilterNode
fn create_iir_filter(
&self,
feedforward: Vec<f64>,
feedback: Vec<f64>
) -> IIRFilterNode
Creates an IIRFilterNode
Arguments
feedforward - An array of the feedforward (numerator) coefficients for the transfer function of the IIR filter. The maximum length of this array is 20.
feedback - An array of the feedback (denominator) coefficients for the transfer function of the IIR filter. The maximum length of this array is 20.
fn create_media_stream_source<M: MediaStream>(
&self,
media: M
) -> MediaStreamAudioSourceNode
fn create_media_stream_source<M: MediaStream>(
&self,
media: M
) -> MediaStreamAudioSourceNode
Creates a MediaStreamAudioSourceNode
from a MediaStream
Creates a MediaStreamAudioDestinationNode
fn create_oscillator(&self) -> OscillatorNode
fn create_oscillator(&self) -> OscillatorNode
Creates an OscillatorNode
, a source representing a periodic waveform.
fn create_panner(&self) -> PannerNode
fn create_panner(&self) -> PannerNode
Creates a PannerNode
fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave
fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave
Creates a periodic wave
fn create_stereo_panner(&self) -> StereoPannerNode
fn create_stereo_panner(&self) -> StereoPannerNode
Creates a StereoPannerNode
to pan a stereo output
fn create_wave_shaper(&self) -> WaveShaperNode
fn create_wave_shaper(&self) -> WaveShaperNode
Creates a WaveShaperNode
fn create_audio_param(
&self,
opts: AudioParamDescriptor,
dest: &AudioNodeId
) -> (AudioParam, AudioParamId)
fn create_audio_param(
&self,
opts: AudioParamDescriptor,
dest: &AudioNodeId
) -> (AudioParam, AudioParamId)
Create an AudioParam
.
Call this inside the register
closure when setting up your AudioNode
fn destination(&self) -> AudioDestinationNode
fn destination(&self) -> AudioDestinationNode
Returns an AudioDestinationNode
representing the final destination of all audio in the
context. It can be thought of as the audio-rendering device.
fn listener(&self) -> AudioListener
fn listener(&self) -> AudioListener
Returns the AudioListener
which is used for 3D spatialization
fn sample_rate(&self) -> f32
fn sample_rate(&self) -> f32
The sample rate (in sample-frames per second) at which the AudioContext
handles audio.
fn sample_rate_raw(&self) -> SampleRate
fn sample_rate_raw(&self) -> SampleRate
The raw sample rate of the AudioContext
(which has more precision than the float
sample_rate()
value).
fn current_time(&self) -> f64
fn current_time(&self) -> f64
This is the time in seconds of the sample frame immediately following the last sample-frame in the block of audio most recently processed by the context’s rendering graph.