Struct web_audio_api::context::OfflineAudioContext [−][src]
pub struct OfflineAudioContext { /* fields omitted */ }
Expand description
The OfflineAudioContext doesn’t render the audio to the device hardware; instead, it generates it, as fast as it can, and outputs the result to an AudioBuffer.
Implementations
impl OfflineAudioContext
[src]
impl OfflineAudioContext
[src]pub fn new(channels: u32, length: usize, sample_rate: SampleRate) -> Self
[src]
pub fn start_rendering(&mut self) -> AudioBuffer
[src]
pub fn length(&self) -> usize
[src]
Trait Implementations
impl AsBaseAudioContext for OfflineAudioContext
[src]
impl AsBaseAudioContext for OfflineAudioContext
[src]fn base(&self) -> &BaseAudioContext
[src]
fn create_oscillator(&self) -> OscillatorNode
[src]
fn create_oscillator(&self) -> OscillatorNode
[src]Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone. Read more
fn create_gain(&self) -> GainNode
[src]
fn create_gain(&self) -> GainNode
[src]Creates a GainNode, to control audio volume
fn create_constant_source(&self) -> ConstantSourceNode
[src]
fn create_constant_source(&self) -> ConstantSourceNode
[src]Creates a ConstantSourceNode, a source representing a constant value
fn create_delay(&self, max_delay_time: f32) -> DelayNode
[src]
fn create_delay(&self, max_delay_time: f32) -> DelayNode
[src]Creates a DelayNode, delaying the audio signal
fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode
[src]
fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode
[src]Creates a ChannelSplitterNode
fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode
[src]
fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode
[src]Creates a ChannelMergerNode
fn create_media_stream_source<M: MediaStream>(
&self,
media: M
) -> MediaStreamAudioSourceNode
[src]
fn create_media_stream_source<M: MediaStream>(
&self,
media: M
) -> MediaStreamAudioSourceNode
[src]Creates a MediaStreamAudioSourceNode from a MediaStream
fn create_media_element_source(
&self,
media: MediaElement
) -> MediaElementAudioSourceNode
[src]
fn create_media_element_source(
&self,
media: MediaElement
) -> MediaElementAudioSourceNode
[src]Creates a MediaElementAudioSourceNode from a MediaElement Read more
fn create_buffer_source(&self) -> AudioBufferSourceNode
[src]
fn create_buffer_source(&self) -> AudioBufferSourceNode
[src]Creates an AudioBufferSourceNode Read more
fn create_panner(&self) -> PannerNode
[src]
fn create_panner(&self) -> PannerNode
[src]Creates a PannerNode
fn create_analyser(&self) -> AnalyserNode
[src]
fn create_analyser(&self) -> AnalyserNode
[src]Creates an AnalyserNode
fn create_audio_param(
&self,
opts: AudioParamOptions,
dest: &AudioNodeId
) -> (AudioParam, AudioParamId)
[src]
fn create_audio_param(
&self,
opts: AudioParamOptions,
dest: &AudioNodeId
) -> (AudioParam, AudioParamId)
[src]Create an AudioParam. Read more
fn destination(&self) -> DestinationNode
[src]
fn destination(&self) -> DestinationNode
[src]Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought of as the audio-rendering device. Read more
fn listener(&self) -> AudioListener
[src]
fn listener(&self) -> AudioListener
[src]Returns the AudioListener which is used for 3D spatialization
fn sample_rate(&self) -> SampleRate
[src]
fn sample_rate(&self) -> SampleRate
[src]The sample rate (in sample-frames per second) at which the AudioContext handles audio.
fn current_time(&self) -> f64
[src]
fn current_time(&self) -> f64
[src]This is the time in seconds of the sample frame immediately following the last sample-frame in the block of audio most recently processed by the context’s rendering graph. Read more