Trait web_audio_api::context::AsBaseAudioContext

pub trait AsBaseAudioContext {
    fn base(&self) -> &BaseAudioContext;

    fn create_oscillator(&self) -> OscillatorNode { ... }
    fn create_gain(&self) -> GainNode { ... }
    fn create_constant_source(&self) -> ConstantSourceNode { ... }
    fn create_delay(&self, max_delay_time: f32) -> DelayNode { ... }
    fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode { ... }
    fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode { ... }
    fn create_media_stream_source<M: MediaStream>(&self, media: M) -> MediaStreamAudioSourceNode { ... }
    fn create_media_element_source(&self, media: MediaElement) -> MediaElementAudioSourceNode { ... }
    fn create_buffer_source(&self) -> AudioBufferSourceNode { ... }
    fn create_panner(&self) -> PannerNode { ... }
    fn create_analyser(&self) -> AnalyserNode { ... }
    fn create_audio_param(&self, opts: AudioParamOptions, dest: &AudioNodeId) -> (AudioParam, AudioParamId) { ... }
    fn destination(&self) -> DestinationNode { ... }
    fn listener(&self) -> AudioListener { ... }
    fn sample_rate(&self) -> SampleRate { ... }
    fn current_time(&self) -> f64 { ... }
}

Retrieve the BaseAudioContext from the concrete AudioContext

Required methods

fn base(&self) -> &BaseAudioContext

Provided methods

fn create_oscillator(&self) -> OscillatorNode

Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone.
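Example (a minimal sketch; AudioContext::new() is assumed as the constructor, which may take an options argument depending on the crate version, and frequency(), set_value(), connect() and start() are assumed from the crate's AudioParam, AudioNode and scheduled-source APIs):

    use web_audio_api::context::{AsBaseAudioContext, AudioContext};

    // set up the audio context with the default output device
    let context = AudioContext::new();

    // create the oscillator, tune it, and wire it up to the speakers
    let osc = context.create_oscillator();
    osc.frequency().set_value(440.);       // 440 Hz tone
    osc.connect(&context.destination());
    osc.start();                           // source nodes must be started explicitly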

fn create_gain(&self) -> GainNode

Creates a GainNode, to control audio volume
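Example (a minimal sketch under the same assumptions as the create_oscillator example above; gain() is assumed to expose the node's gain AudioParam):

    let context = AudioContext::new();

    let osc = context.create_oscillator();
    let gain = context.create_gain();
    gain.gain().set_value(0.5);            // attenuate the signal to half amplitude

    // osc -> gain -> destination
    osc.connect(&gain);
    gain.connect(&context.destination());
    osc.start();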

fn create_constant_source(&self) -> ConstantSourceNode

Creates a ConstantSourceNode, a source representing a constant value
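Example (a minimal sketch under the same assumptions as above; offset() is assumed to expose the constant value as an AudioParam, and start() to be required as for other source nodes):

    let context = AudioContext::new();

    let constant = context.create_constant_source();
    constant.offset().set_value(0.25);     // emit a constant 0.25 on every sample frame
    constant.connect(&context.destination());
    constant.start();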

fn create_delay(&self, max_delay_time: f32) -> DelayNode

Creates a DelayNode, delaying the audio signal
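Example (a minimal sketch under the same assumptions as above; delay_time() is assumed to expose the delay in seconds as an AudioParam, bounded by max_delay_time):

    let context = AudioContext::new();

    let osc = context.create_oscillator();
    let delay = context.create_delay(1.);  // allow delays of up to 1 second
    delay.delay_time().set_value(0.5);     // delay the signal by half a second

    // osc -> delay -> destination
    osc.connect(&delay);
    delay.connect(&context.destination());
    osc.start();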

fn create_channel_splitter(&self, number_of_outputs: u32) -> ChannelSplitterNode

Creates a ChannelSplitterNode

fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode

Creates a ChannelMergerNode
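Example (a combined sketch for the splitter and the merger, under the same assumptions as above; routing specific outputs to specific inputs is assumed to go through the crate's indexed connect variant, only the default connection is shown here):

    let context = AudioContext::new();

    let osc = context.create_oscillator();
    let splitter = context.create_channel_splitter(2);
    let merger = context.create_channel_merger(2);

    // osc -> splitter -> merger -> destination
    osc.connect(&splitter);
    splitter.connect(&merger);
    merger.connect(&context.destination());
    osc.start();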

fn create_media_stream_source<M: MediaStream>(&self, media: M) -> MediaStreamAudioSourceNode

Creates a MediaStreamAudioSourceNode from a MediaStream
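Example (a minimal sketch under the same assumptions as above; `media` is assumed to be some value implementing the crate's MediaStream trait, its construction is out of scope here):

    let context = AudioContext::new();

    // `media` is assumed to implement the MediaStream trait
    let stream_source = context.create_media_stream_source(media);
    stream_source.connect(&context.destination());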

fn create_media_element_source(&self, media: MediaElement) -> MediaElementAudioSourceNode

Creates a MediaElementAudioSourceNode from a MediaElement

Note: do not forget to start() the node.
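Example (a minimal sketch under the same assumptions as above; `media_element` is assumed to be an already constructed MediaElement, how it is obtained depends on the crate version):

    let context = AudioContext::new();

    // `media_element` is assumed to be a MediaElement constructed elsewhere
    let element_source = context.create_media_element_source(media_element);
    element_source.connect(&context.destination());
    element_source.start();               // as noted above, the node must be started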

fn create_buffer_source(&self) -> AudioBufferSourceNode

Creates an AudioBufferSourceNode

Note: do not forget to start() the node.
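Example (a minimal sketch under the same assumptions as above; how an AudioBuffer is created and assigned to the node depends on the crate version and is only hinted at in the comments):

    let context = AudioContext::new();

    let buffer_source = context.create_buffer_source();
    // an AudioBuffer would normally be assigned to the node here
    // (the setter name depends on the crate version)
    buffer_source.connect(&context.destination());
    buffer_source.start();                // as noted above, the node must be started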

fn create_panner(&self) -> PannerNode

Creates a PannerNode
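Example (a minimal sketch under the same assumptions as above; positioning the source and the listener in 3D space goes through the panner's and the AudioListener's parameters, whose exact accessors depend on the crate version):

    let context = AudioContext::new();

    let osc = context.create_oscillator();
    let panner = context.create_panner();

    // osc -> panner -> destination; the panner spatializes the tone
    // relative to the context's AudioListener
    osc.connect(&panner);
    panner.connect(&context.destination());
    osc.start();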

fn create_analyser(&self) -> AnalyserNode

Creates an AnalyserNode
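Example (a minimal sketch under the same assumptions as above; the analyser passes audio through unchanged while exposing frequency and time-domain data, the exact getter names depend on the crate version):

    let context = AudioContext::new();

    let osc = context.create_oscillator();
    let analyser = context.create_analyser();

    // osc -> analyser -> destination; frequency/time-domain data can then
    // be read from `analyser` on the control thread
    osc.connect(&analyser);
    analyser.connect(&context.destination());
    osc.start();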

fn create_audio_param(&self, opts: AudioParamOptions, dest: &AudioNodeId) -> (AudioParam, AudioParamId)

Create an AudioParam.

Call this inside the register closure when setting up your AudioNode

fn destination(&self) -> DestinationNode

Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought of as the audio-rendering device.

fn listener(&self) -> AudioListener

Returns the AudioListener which is used for 3D spatialization

fn sample_rate(&self) -> SampleRate

The sample rate (in sample-frames per second) at which the AudioContext handles audio.

fn current_time(&self) -> f64

This is the time in seconds of the sample frame immediately following the last sample-frame in the block of audio most recently processed by the context’s rendering graph.
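Example (a minimal sketch covering this getter and sample_rate() above; SampleRate is assumed to be a small newtype implementing Debug):

    let context = AudioContext::new();

    println!("sample rate: {:?}", context.sample_rate());
    println!("current time: {} s", context.current_time());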


Implementors

impl AsBaseAudioContext for AudioContext
impl AsBaseAudioContext for OfflineAudioContext