use crate::context::{AudioContextRegistration, BaseAudioContext};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use super::{
AudioNode, ChannelConfig, ChannelConfigOptions, ChannelCountMode, ChannelInterpretation,
};

/// The AudioDestinationNode interface represents the terminal node of an audio
/// graph in a given context, usually the speakers of your device or the node
/// that will "record" the audio data with an OfflineAudioContext.
///
/// The output of an AudioDestinationNode is produced by summing its inputs,
/// which allows capturing the output of an AudioContext into, for example, a
/// MediaStreamAudioDestinationNode or a MediaRecorder.
///
/// - MDN documentation: <https://developer.mozilla.org/en-US/docs/Web/API/AudioDestinationNode>
/// - specification: <https://webaudio.github.io/web-audio-api/#AudioDestinationNode>
/// - see also: [`BaseAudioContext::destination`]
///
/// # Usage
///
/// ```no_run
/// use web_audio_api::context::{BaseAudioContext, AudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// let context = AudioContext::default();
///
/// let mut osc = context.create_oscillator();
/// osc.connect(&context.destination());
/// osc.start();
/// ```
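///
/// With an `OfflineAudioContext`, the destination collects the rendered audio
/// into a buffer instead of playing it back. A minimal sketch (assuming the
/// crate's `OfflineAudioContext::new(channels, length, sample_rate)`
/// constructor and its blocking `start_rendering_sync` method):
///
/// ```no_run
/// use web_audio_api::context::{BaseAudioContext, OfflineAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// // render 1 second of stereo audio at 44.1 kHz
/// let mut context = OfflineAudioContext::new(2, 44_100, 44_100.);
///
/// let mut osc = context.create_oscillator();
/// osc.connect(&context.destination());
/// osc.start();
///
/// let buffer = context.start_rendering_sync();
/// assert_eq!(buffer.number_of_channels(), 2);
/// ```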
///
pub struct AudioDestinationNode {
registration: AudioContextRegistration,
channel_config: ChannelConfig,
}

impl AudioNode for AudioDestinationNode {
fn registration(&self) -> &AudioContextRegistration {
&self.registration
}
fn channel_config(&self) -> &ChannelConfig {
&self.channel_config
}
fn number_of_inputs(&self) -> usize {
1
}
fn number_of_outputs(&self) -> usize {
1
}
fn set_channel_count(&self, v: usize) {
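        // [spec] For an OfflineAudioContext the destination channel count is
        // fixed at construction time; any attempt to change it must fail.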
assert!(
!self.registration.context().offline() || v == self.max_channel_count(),
"NotSupportedError - not allowed to change OfflineAudioContext destination channel count"
);
assert!(
v <= self.max_channel_count(),
"IndexSizeError - channel count cannot be greater than maxChannelCount ({})",
self.max_channel_count()
);
self.channel_config.set_count(v);
}
fn set_channel_count_mode(&self, v: ChannelCountMode) {
// [spec] If the AudioDestinationNode is the destination node of an
// OfflineAudioContext, then the channel count mode cannot be changed.
// An InvalidStateError exception MUST be thrown for any attempt to change the value.
        // Only an attempt to *change* the value must throw; re-setting the
        // construction value (Explicit, see `new`) is a no-op, mirroring the
        // `v == self.max_channel_count()` escape in `set_channel_count`.
        assert!(
            !self.registration.context().offline() || v == ChannelCountMode::Explicit,
            "InvalidStateError - AudioDestinationNode has channel count mode constraints",
        );
self.channel_config.set_count_mode(v);
}
}

impl AudioDestinationNode {
pub(crate) fn new<C: BaseAudioContext>(context: &C, channel_count: usize) -> Self {
context.register(move |registration| {
let channel_config = ChannelConfigOptions {
count: channel_count,
count_mode: ChannelCountMode::Explicit,
interpretation: ChannelInterpretation::Speakers,
}
.into();
let node = Self {
registration,
channel_config,
};
let proc = DestinationRenderer {};
(node, Box::new(proc))
})
}
pub(crate) fn into_channel_config(self) -> ChannelConfig {
self.channel_config
}
pub(crate) fn from_raw_parts(
registration: AudioContextRegistration,
channel_config: ChannelConfig,
) -> Self {
Self {
registration,
channel_config,
}
}
    /// The maximum number of channels that the channelCount attribute can be
    /// set to, i.e. the maximum number of channels that the hardware is
    /// capable of supporting.
    ///
    /// <https://www.w3.org/TR/webaudio/#dom-audiodestinationnode-maxchannelcount>
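    ///
    /// A minimal usage sketch (on an online `AudioContext`; the destination of
    /// an `OfflineAudioContext` does not allow channel count changes):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::node::AudioNode;
    ///
    /// let context = AudioContext::default();
    /// let dest = context.destination();
    ///
    /// // use as many output channels as the hardware supports
    /// dest.set_channel_count(dest.max_channel_count());
    /// ```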
pub fn max_channel_count(&self) -> usize {
self.registration.context().base().max_channel_count()
}
}

struct DestinationRenderer {}

impl AudioProcessor for DestinationRenderer {
fn process(
&mut self,
inputs: &[AudioRenderQuantum],
outputs: &mut [AudioRenderQuantum],
_params: AudioParamValues<'_>,
_scope: &RenderScope,
) -> bool {
// single input/output node
let input = &inputs[0];
let output = &mut outputs[0];
        // the connected inputs have already been summed upstream; simply pass
        // the result through to the output
*output = input.clone();
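        // report that this processor remains active; the destination node
        // lives for the whole lifetime of the context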
true
}
}