web_audio_api/node/media_stream_destination.rs

use std::error::Error;

use crate::buffer::AudioBuffer;
use crate::context::{AudioContextRegistration, BaseAudioContext};
use crate::render::{
    AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
};

use super::{AudioNode, AudioNodeOptions, ChannelConfig};

use crate::media_streams::{MediaStream, MediaStreamTrack};
use crossbeam_channel::{self, Receiver, Sender};

/// An audio stream destination (e.g. WebRTC sink)
///
/// - MDN documentation: <https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioDestinationNode>
/// - specification: <https://www.w3.org/TR/webaudio/#mediastreamaudiodestinationnode>
/// - see also: [`AudioContext::create_media_stream_destination`](crate::context::AudioContext::create_media_stream_destination)
///
/// Since the w3c `MediaStream` interface is not fully implemented in this library, we cannot
/// adhere to the official specification. Instead, the node exposes a [`MediaStream`] whose
/// single [`MediaStreamTrack`] yields the recorded [`AudioBuffer`]s as an iterator.
///
/// IMPORTANT: you must consume the buffers faster than the render thread produces them, or you
/// will miss frames. Consider spinning up a dedicated thread to consume the buffers and cache
/// them; a sketch follows the usage example below.
///
/// # Usage
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// // Create an audio context where all audio nodes live
/// let context = AudioContext::default();
///
/// // Create an oscillator node with sine (default) type
/// let mut osc = context.create_oscillator();
///
/// // Create a media destination node
/// let dest = context.create_media_stream_destination();
/// osc.connect(&dest);
/// osc.start();
///
/// // Handle recorded buffers
/// println!("samples recorded:");
/// let mut samples_recorded = 0;
/// for item in dest.stream().get_tracks()[0].iter() {
///     let buffer = item.unwrap();
///
///     // You could write the samples to a file here.
///     samples_recorded += buffer.length();
///     print!("{}\r", samples_recorded);
/// }
/// ```
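///
/// As suggested above, a dedicated thread can drain the stream so that no buffers are
/// dropped. A minimal sketch of that pattern, assuming the [`MediaStreamTrack`] handle can
/// be cloned and moved to another thread (the buffers are simply cached in a `Vec` here):
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
///
/// let context = AudioContext::default();
/// let dest = context.create_media_stream_destination();
///
/// let track = dest.stream().get_tracks()[0].clone();
/// let handle = std::thread::spawn(move || {
///     let mut cache = Vec::new();
///     for item in track.iter() {
///         match item {
///             // A real application might write the samples to disk instead
///             Ok(buffer) => cache.push(buffer),
///             // The channel was closed, i.e. the stream has ended
///             Err(_) => break,
///         }
///     }
///     cache
/// });
/// ```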
///
/// # Examples
///
/// - `cargo run --release --example recorder`
#[derive(Debug)]
pub struct MediaStreamAudioDestinationNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    stream: MediaStream,
}

impl AudioNode for MediaStreamAudioDestinationNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        0
    }
}

impl MediaStreamAudioDestinationNode {
    /// Create a new MediaStreamAudioDestinationNode
    pub fn new<C: BaseAudioContext>(context: &C, options: AudioNodeOptions) -> Self {
        context.base().register(move |registration| {
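            // Single-slot channel: the render thread must never block, so instead of
            // queueing buffers, `DestinationRenderer::process` discards a stale,
            // unconsumed buffer before sending a fresh one.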
            let (send, recv) = crossbeam_channel::bounded(1);

            let iter = AudioDestinationNodeStream {
                receiver: recv.clone(),
            };
            let track = MediaStreamTrack::from_iter(iter);
            let stream = MediaStream::from_tracks(vec![track]);

            let node = MediaStreamAudioDestinationNode {
                registration,
                channel_config: options.into(),
                stream,
            };

            let render = DestinationRenderer { send, recv };

            (node, Box::new(render))
        })
    }

    /// A [`MediaStream`] producing audio buffers with the same number of channels as the node
    /// itself
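    ///
    /// The stream contains a single audio track:
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    ///
    /// let context = AudioContext::default();
    /// let dest = context.create_media_stream_destination();
    /// assert_eq!(dest.stream().get_tracks().len(), 1);
    /// ```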
    pub fn stream(&self) -> &MediaStream {
        &self.stream
    }
}

struct DestinationRenderer {
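    // `send` ships buffers out of the render thread; `recv` is kept here as well so
    // `process` can clear a stale buffer that the consumer did not pick up in time.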
    send: Sender<AudioBuffer>,
    recv: Receiver<AudioBuffer>,
}

impl AudioProcessor for DestinationRenderer {
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        _outputs: &mut [AudioRenderQuantum],
        _params: AudioParamValues<'_>,
        scope: &AudioWorkletGlobalScope,
    ) -> bool {
        // single input, no output
        let input = &inputs[0];

        // convert AudioRenderQuantum to AudioBuffer
        let samples: Vec<_> = input.channels().iter().map(|c| c.to_vec()).collect();
        let buffer = AudioBuffer::from(samples, scope.sample_rate);

        // clear previous entry if it was not consumed
        if self.recv.try_recv().is_ok() {
            log::warn!("MediaStreamDestination buffer dropped");
        }

        // ship out AudioBuffer
        let _ = self.send.send(buffer);

        false
    }
}

struct AudioDestinationNodeStream {
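    // Blocking receiver: `next` parks the consumer until the render thread delivers
    // the next buffer.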
    receiver: Receiver<AudioBuffer>,
}

impl Iterator for AudioDestinationNodeStream {
    type Item = Result<AudioBuffer, Box<dyn Error + Send + Sync>>;

    fn next(&mut self) -> Option<Self::Item> {
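        // A disconnected channel (the render side was dropped) surfaces as an `Err`
        // item; the iterator never yields `None`.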
        match self.receiver.recv() {
            Ok(buf) => Some(Ok(buf)),
            Err(e) => Some(Err(Box::new(e))),
        }
    }
}