use std::error::Error;

use crossbeam_channel::{self, Receiver, Sender};

use crate::buffer::AudioBuffer;
use crate::context::{AudioContextRegistration, BaseAudioContext};
use crate::media_streams::{MediaStream, MediaStreamTrack};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};

use super::{AudioNode, ChannelConfig, ChannelConfigOptions};
/// An audio stream destination (e.g. WebRTC sink)
///
/// - MDN documentation: <https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioDestinationNode>
/// - specification: <https://www.w3.org/TR/webaudio/#mediastreamaudiodestinationnode>
/// - see also: [`AudioContext::create_media_stream_destination`](crate::context::AudioContext::create_media_stream_destination)
///
/// Note that this implementation deviates from the official w3c specification: instead of a
/// browser `MediaStream`, the node exposes this library's own [`MediaStream`] type, whose
/// single audio track is an iterator over the recorded [`AudioBuffer`]s.
///
/// IMPORTANT: you must consume the buffers faster than the render thread produces them, or
/// you will miss frames. Consider spinning up a dedicated thread to consume the buffers and
/// cache them (see the sketch below).
///
/// # Usage
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// // Create an audio context where all audio nodes live
/// let context = AudioContext::default();
///
/// // Create an oscillator node with sine (default) type
/// let mut osc = context.create_oscillator();
///
/// // Create a media destination node
/// let dest = context.create_media_stream_destination();
/// osc.connect(&dest);
/// osc.start();
///
/// // Handle recorded buffers
/// println!("samples recorded:");
/// let mut samples_recorded = 0;
/// for item in dest.stream().get_tracks()[0].iter() {
///     let buffer = item.unwrap();
///
///     // You could write the samples to a file here.
///     samples_recorded += buffer.length();
///     print!("{}\r", samples_recorded);
/// }
/// ```
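///
/// To keep up with the render thread, drain the track promptly and hand the heavy lifting
/// (disk I/O, encoding, ...) to a dedicated thread. A minimal sketch, assuming a plain
/// `std::sync::mpsc` channel as hand-off and forwarding only the first channel's samples;
/// the worker-channel plumbing is illustrative, not part of this crate:
///
/// ```no_run
/// use std::sync::mpsc;
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// let context = AudioContext::default();
/// let mut osc = context.create_oscillator();
/// let dest = context.create_media_stream_destination();
/// osc.connect(&dest);
/// osc.start();
///
/// // dedicated worker thread that performs the slow processing
/// let (tx, rx) = mpsc::channel::<Vec<f32>>();
/// std::thread::spawn(move || {
///     for samples in rx {
///         // e.g. write the samples to a file here
///         println!("worker received {} samples", samples.len());
///     }
/// });
///
/// // drain the stream without delay, forwarding each buffer to the worker
/// for item in dest.stream().get_tracks()[0].iter() {
///     let buffer = item.unwrap();
///     tx.send(buffer.get_channel_data(0).to_vec()).unwrap();
/// }
/// ```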
///
/// # Examples
///
/// - `cargo run --release --example recorder`
pub struct MediaStreamAudioDestinationNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    stream: MediaStream,
}

impl AudioNode for MediaStreamAudioDestinationNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        0
    }
}

impl MediaStreamAudioDestinationNode {
    /// Create a new MediaStreamAudioDestinationNode
    pub fn new<C: BaseAudioContext>(context: &C, options: ChannelConfigOptions) -> Self {
        context.register(move |registration| {
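            // single-slot channel shared between the render thread (producer) and the
            // MediaStreamTrack iterator (consumer), see `DestinationRenderer::process`
            // for why a capacity of 1 is enough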
            let (send, recv) = crossbeam_channel::bounded(1);

            let iter = AudioDestinationNodeStream {
                receiver: recv.clone(),
            };
            let track = MediaStreamTrack::from_iter(iter);
            let stream = MediaStream::from_tracks(vec![track]);

            let node = MediaStreamAudioDestinationNode {
                registration,
                channel_config: options.into(),
                stream,
            };
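            // the renderer keeps both halves of the channel: `send` to publish finished
            // buffers, `recv` to evict a stale buffer the consumer did not pick up in time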
            let render = DestinationRenderer { send, recv };

            (node, Box::new(render))
        })
    }

    /// A [`MediaStream`] producing audio buffers with the same number of channels as the
    /// node itself
    pub fn stream(&self) -> &MediaStream {
        &self.stream
    }
}

struct DestinationRenderer {
    send: Sender<AudioBuffer>,
    recv: Receiver<AudioBuffer>,
}

impl AudioProcessor for DestinationRenderer {
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        _outputs: &mut [AudioRenderQuantum],
        _params: AudioParamValues<'_>,
        scope: &RenderScope,
    ) -> bool {
        // single input, no output
        let input = &inputs[0];

        // convert AudioRenderQuantum to AudioBuffer
        let samples: Vec<_> = input.channels().iter().map(|c| c.to_vec()).collect();
        let buffer = AudioBuffer::from(samples, scope.sample_rate);
        // clear the previous entry if it was not consumed in time - the render thread
        // must never block, so a stale buffer is evicted in favor of the fresh one
        if self.recv.try_recv().is_ok() {
            log::warn!("MediaStreamDestination buffer dropped");
        }

        // ship out AudioBuffer
        let _ = self.send.send(buffer);
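
        // no audio output is produced, so this processor reports no tail time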
        false
    }
}

struct AudioDestinationNodeStream {
    receiver: Receiver<AudioBuffer>,
}

impl Iterator for AudioDestinationNodeStream {
    type Item = Result<AudioBuffer, Box<dyn Error + Send + Sync>>;

    fn next(&mut self) -> Option<Self::Item> {
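        // block until the render thread ships the next buffer; once the sending half is
        // dropped, `recv` returns an error which is passed on to the caller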
        match self.receiver.recv() {
            Ok(buf) => Some(Ok(buf)),
            Err(e) => Some(Err(Box::new(e))),
        }
    }
}