// web_audio_api/node/media_stream_track_source.rs
use crate::context::{AudioContextRegistration, BaseAudioContext};
use crate::media_streams::MediaStreamTrack;
use crate::resampling::Resampler;
use crate::RENDER_QUANTUM_SIZE;

use super::{AudioNode, ChannelConfig, MediaStreamRenderer};
7
/// Options for constructing a [`MediaStreamTrackAudioSourceNode`]
// dictionary MediaStreamTrackAudioSourceOptions {
//     required MediaStreamTrack mediaStreamTrack;
// };
//
// @note - Does not extend AudioNodeOptions because AudioNodeOptions are
// useless for source nodes as they instruct how to upmix the inputs.
// This is a common source of confusion, see e.g. https://github.com/mdn/content/pull/18472
#[derive(Debug)]
pub struct MediaStreamTrackAudioSourceOptions<'a> {
    /// The [`MediaStreamTrack`] that will act as the audio source for the node.
    /// Borrowed: the node only iterates the track during construction, so no
    /// ownership transfer is required from the caller.
    pub media_stream_track: &'a MediaStreamTrack,
}
20
/// An audio source from a [`MediaStreamTrack`] (e.g. the audio track of the microphone input)
///
/// Below is an example showing how to create and play a stream directly in the audio context.
/// Take care: The media stream will be polled on the render thread which will have catastrophic
/// effects if the iterator blocks or for another reason takes too much time to yield a new sample
/// frame. Use a [`MediaElementAudioSourceNode`](crate::node::MediaElementAudioSourceNode) for
/// real time safe media playback.
///
/// # Example
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::{AudioBuffer, AudioBufferOptions};
/// use web_audio_api::node::AudioNode;
/// use web_audio_api::media_streams::MediaStreamTrack;
///
/// // create a new buffer: 512 samples of silence
/// let options = AudioBufferOptions {
///     number_of_channels: 0,
///     length: 512,
///     sample_rate: 44_100.,
/// };
/// let silence = AudioBuffer::new(options);
///
/// // create a sequence of this buffer
/// let sequence = std::iter::repeat(silence).take(5);
///
/// // the sequence should actually yield `Result<AudioBuffer, _>`s
/// let sequence = sequence.map(|b| Ok(b));
///
/// // convert to a media track
/// let media = MediaStreamTrack::from_iter(sequence);
///
/// // use in the web audio context
/// let context = AudioContext::default();
/// let node = context.create_media_stream_track_source(&media);
/// node.connect(&context.destination());
///
/// loop {}
/// ```
#[derive(Debug)]
pub struct MediaStreamTrackAudioSourceNode {
    // Ties this node to its owning audio context (returned by `AudioNode::registration`)
    registration: AudioContextRegistration,
    // Channel mixing configuration; defaulted at construction (source nodes have
    // no inputs to up/down-mix, see the note on the options struct)
    channel_config: ChannelConfig,
}
66
67impl AudioNode for MediaStreamTrackAudioSourceNode {
68 fn registration(&self) -> &AudioContextRegistration {
69 &self.registration
70 }
71
72 fn channel_config(&self) -> &ChannelConfig {
73 &self.channel_config
74 }
75
76 fn number_of_inputs(&self) -> usize {
77 0
78 }
79
80 fn number_of_outputs(&self) -> usize {
81 1
82 }
83}
84
85impl MediaStreamTrackAudioSourceNode {
86 pub fn new<C: BaseAudioContext>(
87 context: &C,
88 options: MediaStreamTrackAudioSourceOptions<'_>,
89 ) -> Self {
90 context.base().register(move |registration| {
91 let node = MediaStreamTrackAudioSourceNode {
92 registration,
93 channel_config: ChannelConfig::default(),
94 };
95
96 let resampler = Resampler::new(
97 context.sample_rate(),
98 RENDER_QUANTUM_SIZE,
99 options.media_stream_track.iter(),
100 );
101
102 let render = MediaStreamRenderer::new(resampler);
103
104 (node, Box::new(render))
105 })
106 }
107}