Trait web_audio_api::context::AsBaseAudioContext[][src]

pub trait AsBaseAudioContext {
    fn base(&self) -> &BaseAudioContext;

    fn create_oscillator(&self) -> OscillatorNode<'_> { ... }
fn create_gain(&self) -> GainNode<'_> { ... }
fn create_delay(&self) -> DelayNode<'_> { ... }
fn create_channel_splitter(
        &self,
        number_of_outputs: u32
    ) -> ChannelSplitterNode<'_> { ... }
fn create_channel_merger(
        &self,
        number_of_inputs: u32
    ) -> ChannelMergerNode<'_> { ... }
fn destination(&self) -> DestinationNode<'_> { ... }
fn sample_rate(&self) -> SampleRate { ... }
fn current_time(&self) -> f64 { ... } }

Retrieves a reference to the BaseAudioContext from the concrete AudioContext implementation

Required methods

Loading content...

Provided methods

fn create_oscillator(&self) -> OscillatorNode<'_>[src]

Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone.

fn create_gain(&self) -> GainNode<'_>[src]

Creates a GainNode, which is used to control audio volume

fn create_delay(&self) -> DelayNode<'_>[src]

Creates a DelayNode, delaying the audio signal

fn create_channel_splitter(
    &self,
    number_of_outputs: u32
) -> ChannelSplitterNode<'_>
[src]

Creates a ChannelSplitterNode

fn create_channel_merger(&self, number_of_inputs: u32) -> ChannelMergerNode<'_>[src]

Creates a ChannelMergerNode

fn destination(&self) -> DestinationNode<'_>[src]

Returns a DestinationNode representing the final destination of all audio in the context. It can be thought of as the audio-rendering device.

fn sample_rate(&self) -> SampleRate[src]

The sample rate (in sample-frames per second) at which the AudioContext handles audio.

fn current_time(&self) -> f64[src]

This is the time in seconds of the sample frame immediately following the last sample-frame in the block of audio most recently processed by the context’s rendering graph.

Loading content...

Implementors

Loading content...