pub struct OfflineAudioContext { /* private fields */ }
Expand description
The OfflineAudioContext doesn’t render the audio to the device hardware; instead, it
generates it, as fast as it can, and outputs the result to an AudioBuffer.
Implementations§
Source§impl OfflineAudioContext
impl OfflineAudioContext
Sourcepub fn new(number_of_channels: usize, length: usize, sample_rate: f32) -> Self
pub fn new(number_of_channels: usize, length: usize, sample_rate: f32) -> Self
Creates an OfflineAudioContext instance.
§Arguments
number_of_channels - number of output channels to render
length - length of the rendering audio buffer
sample_rate - output sample rate
Sourcepub fn start_rendering_sync(&mut self) -> AudioBuffer
pub fn start_rendering_sync(&mut self) -> AudioBuffer
Given the current connections and scheduled changes, starts rendering audio.
This function will block the current thread and return the rendered AudioBuffer
synchronously.
This method will only adhere to scheduled suspensions via Self::suspend_sync and
will ignore those provided via Self::suspend.
§Panics
Panics if this method is called multiple times.
Sourcepub async fn start_rendering(&self) -> AudioBuffer
pub async fn start_rendering(&self) -> AudioBuffer
Given the current connections and scheduled changes, starts rendering audio.
Rendering is purely CPU bound and contains no await points, so calling this method
will block the executor until completion or until the context is suspended.
This method will only adhere to scheduled suspensions via Self::suspend and will
ignore those provided via Self::suspend_sync.
§Panics
Panics if this method is called multiple times.
Sourcepub async fn suspend(&self, suspend_time: f64)
pub async fn suspend(&self, suspend_time: f64)
Schedules a suspension of the time progression in the audio context at the specified time and returns a promise.
The specified time is quantized and rounded up to the render quantum size.
§Panics
Panics if the quantized frame number
- is negative or
- is less than or equal to the current time or
- is greater than or equal to the total render duration or
- is scheduled by another suspend for the same time
§Example usage
use futures::{executor, join};
use futures::FutureExt as _;
use std::sync::Arc;
use web_audio_api::context::BaseAudioContext;
use web_audio_api::context::OfflineAudioContext;
use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
let context = Arc::new(OfflineAudioContext::new(1, 512, 44_100.));
let context_clone = Arc::clone(&context);
let suspend_promise = context.suspend(128. / 44_100.).then(|_| async move {
let mut src = context_clone.create_constant_source();
src.connect(&context_clone.destination());
src.start();
context_clone.resume().await;
});
let render_promise = context.start_rendering();
let buffer = executor::block_on(async move { join!(suspend_promise, render_promise).1 });
assert_eq!(buffer.number_of_channels(), 1);
assert_eq!(buffer.length(), 512);
Sourcepub fn suspend_sync<F: FnOnce(&mut Self) + Send + Sync + 'static>(
&mut self,
suspend_time: f64,
callback: F,
)
pub fn suspend_sync<F: FnOnce(&mut Self) + Send + Sync + 'static>( &mut self, suspend_time: f64, callback: F, )
Schedules a suspension of the time progression in the audio context at the specified time and runs a callback.
This is a synchronous version of Self::suspend that runs the provided callback at
the suspendTime. The rendering resumes automatically after the callback has run,
so there is no resume_sync method.
The specified time is quantized and rounded up to the render quantum size.
§Panics
Panics if the quantized frame number
- is negative or
- is less than or equal to the current time or
- is greater than or equal to the total render duration or
- is scheduled by another suspend for the same time
§Example usage
use web_audio_api::context::BaseAudioContext;
use web_audio_api::context::OfflineAudioContext;
use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
let mut context = OfflineAudioContext::new(1, 512, 44_100.);
context.suspend_sync(128. / 44_100., |context| {
let mut src = context.create_constant_source();
src.connect(&context.destination());
src.start();
});
let buffer = context.start_rendering_sync();
assert_eq!(buffer.number_of_channels(), 1);
assert_eq!(buffer.length(), 512);
Sourcepub async fn resume(&self)
pub async fn resume(&self)
Resumes the progression of the OfflineAudioContext’s currentTime when it has been suspended.
§Panics
Panics when the context is closed or rendering has not started
Sourcepub fn set_oncomplete<F: FnOnce(OfflineAudioCompletionEvent) + Send + 'static>(
&self,
callback: F,
)
pub fn set_oncomplete<F: FnOnce(OfflineAudioCompletionEvent) + Send + 'static>( &self, callback: F, )
Register callback to run when the rendering has completed
Only a single event handler is active at any time. Calling this method multiple times will override the previous event handler.
Sourcepub fn clear_oncomplete(&self)
pub fn clear_oncomplete(&self)
Unset the callback to run when the rendering has completed
Trait Implementations§
Source§impl BaseAudioContext for OfflineAudioContext
impl BaseAudioContext for OfflineAudioContext
Source§fn decode_audio_data_sync<R: Read + Send + Sync + 'static>(
&self,
input: R,
) -> Result<AudioBuffer, Box<dyn Error + Send + Sync>>
fn decode_audio_data_sync<R: Read + Send + Sync + 'static>( &self, input: R, ) -> Result<AudioBuffer, Box<dyn Error + Send + Sync>>
AudioBuffer
from a given input stream. Read more
Source§fn decode_audio_data<R: Read + Send + Sync + 'static>(
&self,
input: R,
) -> impl Future<Output = Result<AudioBuffer, Box<dyn Error + Send + Sync>>> + Send + 'static
fn decode_audio_data<R: Read + Send + Sync + 'static>( &self, input: R, ) -> impl Future<Output = Result<AudioBuffer, Box<dyn Error + Send + Sync>>> + Send + 'static
AudioBuffer
from a given input stream. Read more
Source§fn create_buffer(
&self,
number_of_channels: usize,
length: usize,
sample_rate: f32,
) -> AudioBuffer
fn create_buffer( &self, number_of_channels: usize, length: usize, sample_rate: f32, ) -> AudioBuffer
AudioBuffer
with the given number of channels,
length (i.e. number of samples per channel) and sample rate. Read more
Source§fn create_analyser(&self) -> AnalyserNode
fn create_analyser(&self) -> AnalyserNode
AnalyserNode
Source§fn create_biquad_filter(&self) -> BiquadFilterNode
fn create_biquad_filter(&self) -> BiquadFilterNode
BiquadFilterNode
which implements a second order filter
Source§fn create_buffer_source(&self) -> AudioBufferSourceNode
fn create_buffer_source(&self) -> AudioBufferSourceNode
AudioBufferSourceNode
Source§fn create_constant_source(&self) -> ConstantSourceNode
fn create_constant_source(&self) -> ConstantSourceNode
ConstantSourceNode
, a source representing a constant value
Source§fn create_convolver(&self) -> ConvolverNode
fn create_convolver(&self) -> ConvolverNode
ConvolverNode
, a processing node which applies linear convolution
Source§fn create_channel_merger(&self, number_of_inputs: usize) -> ChannelMergerNode
fn create_channel_merger(&self, number_of_inputs: usize) -> ChannelMergerNode
ChannelMergerNode
Source§fn create_channel_splitter(
&self,
number_of_outputs: usize,
) -> ChannelSplitterNode
fn create_channel_splitter( &self, number_of_outputs: usize, ) -> ChannelSplitterNode
ChannelSplitterNode
Source§fn create_delay(&self, max_delay_time: f64) -> DelayNode
fn create_delay(&self, max_delay_time: f64) -> DelayNode
DelayNode
, delaying the audio signal
Source§fn create_dynamics_compressor(&self) -> DynamicsCompressorNode
fn create_dynamics_compressor(&self) -> DynamicsCompressorNode
DynamicsCompressorNode
, compressing the audio signal
Source§fn create_gain(&self) -> GainNode
fn create_gain(&self) -> GainNode
GainNode
, to control audio volume
Source§fn create_iir_filter(
&self,
feedforward: Vec<f64>,
feedback: Vec<f64>,
) -> IIRFilterNode
fn create_iir_filter( &self, feedforward: Vec<f64>, feedback: Vec<f64>, ) -> IIRFilterNode
IirFilterNode
Read more
Source§fn create_oscillator(&self) -> OscillatorNode
fn create_oscillator(&self) -> OscillatorNode
OscillatorNode
, a source representing a periodic waveform.
Source§fn create_panner(&self) -> PannerNode
fn create_panner(&self) -> PannerNode
PannerNode
Source§fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave
fn create_periodic_wave(&self, options: PeriodicWaveOptions) -> PeriodicWave
Source§fn create_script_processor(
&self,
buffer_size: usize,
number_of_input_channels: usize,
number_of_output_channels: usize,
) -> ScriptProcessorNode
fn create_script_processor( &self, buffer_size: usize, number_of_input_channels: usize, number_of_output_channels: usize, ) -> ScriptProcessorNode
ScriptProcessorNode
for custom audio processing (deprecated); Read more
Source§fn create_stereo_panner(&self) -> StereoPannerNode
fn create_stereo_panner(&self) -> StereoPannerNode
StereoPannerNode
to pan a stereo output
Source§fn create_wave_shaper(&self) -> WaveShaperNode
fn create_wave_shaper(&self) -> WaveShaperNode
WaveShaperNode
Source§fn destination(&self) -> AudioDestinationNode
fn destination(&self) -> AudioDestinationNode
AudioDestinationNode
representing the final destination of all audio in the
context. It can be thought of as the audio-rendering device.
Source§fn listener(&self) -> AudioListener
fn listener(&self) -> AudioListener
AudioListener
which is used for 3D spatialization
Source§fn sample_rate(&self) -> f32
fn sample_rate(&self) -> f32
AudioContext
handles audio.
Source§fn state(&self) -> AudioContextState
fn state(&self) -> AudioContextState
Source§fn current_time(&self) -> f64
fn current_time(&self) -> f64
Source§fn create_audio_param(
&self,
opts: AudioParamDescriptor,
dest: &AudioContextRegistration,
) -> (AudioParam, AudioParamId)
fn create_audio_param( &self, opts: AudioParamDescriptor, dest: &AudioContextRegistration, ) -> (AudioParam, AudioParamId)
AudioParam
. Read more