use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
/// Pre-allocated node id of the destination node (always the first node registered).
const DESTINATION_NODE_ID: u64 = 0;
/// Pre-allocated node id of the `AudioListener` node.
const LISTENER_NODE_ID: u64 = 1;
/// Node ids reserved for the `AudioListener`'s `AudioParam`s.
/// NOTE(review): `listener()` only consumes 9 ids (position/forward/up × x/y/z)
/// but 10 are reserved here — confirm whether the extra slot is intentional.
const LISTENER_PARAM_IDS: Range<u64> = 2..12;
use crate::buffer::{AudioBuffer, ChannelConfigOptions, ChannelCountMode, ChannelInterpretation};
use crate::graph::{NodeIndex, RenderThread};
use crate::media::{MediaElement, MediaStream};
use crate::message::ControlMessage;
use crate::node::{self, AudioNode};
use crate::param::{AudioParam, AudioParamOptions, AutomationEvent};
use crate::process::AudioProcessor;
use crate::spatial::{AudioListener, AudioListenerParams};
use crate::{SampleRate, BUFFER_SIZE};
#[cfg(not(test))]
use crate::io;
#[cfg(not(test))]
use cpal::{traits::StreamTrait, Stream};
use crossbeam_channel::Sender;
/// Shared handle to the control-thread state of an audio context.
///
/// Cloning is cheap (an `Arc` bump); all clones refer to the same
/// underlying [`BaseAudioContextInner`].
#[derive(Clone)]
pub struct BaseAudioContext {
    inner: Arc<BaseAudioContextInner>,
}
impl PartialEq for BaseAudioContext {
    /// Two handles are equal iff they point at the same context instance
    /// (pointer identity of the shared `Arc`, not structural equality).
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
/// State shared by every clone of a [`BaseAudioContext`].
struct BaseAudioContextInner {
    /// Sample rate in Hz the context runs at.
    sample_rate: SampleRate,
    /// Number of output channels.
    channels: u32,
    /// Monotonic counter handing out fresh node ids (see `register`).
    node_id_inc: AtomicU64,
    /// Channel carrying control messages to the render thread.
    render_channel: Sender<ControlMessage>,
    /// Frames rendered so far; drives `current_time`.
    frames_played: AtomicU64,
    /// Raw parts of the listener's params, filled in at the end of
    /// `BaseAudioContext::new` (hence the `Option`); read by `listener()`.
    listener_params: Option<AudioListenerParams>,
}
/// Trait shared by all context flavors (`AudioContext`, `OfflineAudioContext`),
/// giving access to the common [`BaseAudioContext`] plus node factory methods.
pub trait AsBaseAudioContext {
    /// The underlying `BaseAudioContext` of this context.
    fn base(&self) -> &BaseAudioContext;

    /// Creates an `OscillatorNode` with default options.
    fn create_oscillator(&self) -> node::OscillatorNode {
        node::OscillatorNode::new(self.base(), Default::default())
    }

    /// Creates a `GainNode` with default options.
    fn create_gain(&self) -> node::GainNode {
        node::GainNode::new(self.base(), Default::default())
    }

    /// Creates a `ConstantSourceNode` with default options.
    fn create_constant_source(&self) -> node::ConstantSourceNode {
        node::ConstantSourceNode::new(self.base(), Default::default())
    }

    /// Creates a `DelayNode` whose delay can be set up to `max_delay_time`
    /// (seconds — TODO confirm unit against `DelayOptions`).
    fn create_delay(&self, max_delay_time: f32) -> node::DelayNode {
        let opts = node::DelayOptions {
            max_delay_time,
            ..Default::default()
        };
        node::DelayNode::new(self.base(), opts)
    }

    /// Creates a `ChannelSplitterNode` with the given number of outputs.
    fn create_channel_splitter(&self, number_of_outputs: u32) -> node::ChannelSplitterNode {
        let opts = node::ChannelSplitterOptions {
            number_of_outputs,
            ..Default::default()
        };
        node::ChannelSplitterNode::new(self.base(), opts)
    }

    /// Creates a `ChannelMergerNode` with the given number of inputs.
    fn create_channel_merger(&self, number_of_inputs: u32) -> node::ChannelMergerNode {
        let opts = node::ChannelMergerOptions {
            number_of_inputs,
            ..Default::default()
        };
        node::ChannelMergerNode::new(self.base(), opts)
    }

    /// Creates a source node that pulls audio from the given media stream.
    fn create_media_stream_source<M: MediaStream>(
        &self,
        media: M,
    ) -> node::MediaStreamAudioSourceNode {
        // Sources are fixed at one explicit channel with speaker interpretation.
        let channel_config = ChannelConfigOptions {
            count: 1,
            mode: ChannelCountMode::Explicit,
            interpretation: ChannelInterpretation::Speakers,
        };
        let opts = node::MediaStreamAudioSourceNodeOptions {
            media,
            channel_config,
        };
        node::MediaStreamAudioSourceNode::new(self.base(), opts)
    }

    /// Creates a source node that pulls audio from the given media element.
    fn create_media_element_source(
        &self,
        media: MediaElement,
    ) -> node::MediaElementAudioSourceNode {
        // Same fixed channel configuration as the media-stream source above.
        let channel_config = ChannelConfigOptions {
            count: 1,
            mode: ChannelCountMode::Explicit,
            interpretation: ChannelInterpretation::Speakers,
        };
        let opts = node::MediaElementAudioSourceNodeOptions {
            media,
            channel_config,
        };
        node::MediaElementAudioSourceNode::new(self.base(), opts)
    }

    /// Creates an `AudioBufferSourceNode` with default options.
    fn create_buffer_source(&self) -> node::AudioBufferSourceNode {
        node::AudioBufferSourceNode::new(self.base(), Default::default())
    }

    /// Creates a `PannerNode` with default options.
    fn create_panner(&self) -> node::PannerNode {
        node::PannerNode::new(self.base(), Default::default())
    }

    /// Creates an `AnalyserNode` with default options.
    fn create_analyser(&self) -> node::AnalyserNode {
        node::AnalyserNode::new(self.base(), Default::default())
    }

    /// Creates an `AudioParam` feeding into the node `dest`, returning the
    /// control-side handle and the id under which the render side processor
    /// was registered (params occupy regular slots in the node graph).
    fn create_audio_param(
        &self,
        opts: AudioParamOptions,
        dest: &AudioNodeId,
    ) -> (crate::param::AudioParam, AudioParamId) {
        let param = self.base().register(move |registration| {
            let (node, proc) = crate::param::audio_param_pair(opts, registration);
            (node, Box::new(proc))
        });
        // input index u32::MAX appears to be a sentinel distinguishing a
        // param connection from an audio input — NOTE(review): confirm
        // against the graph module.
        self.base().connect(param.id(), dest, 0, u32::MAX);
        let proc_id = AudioParamId(param.id().0);
        (param, proc_id)
    }

    /// Returns the destination node. The node itself was registered during
    /// context construction; this only rebuilds a handle around the
    /// pre-allocated `DESTINATION_NODE_ID`.
    fn destination(&self) -> node::DestinationNode {
        let registration = AudioContextRegistration {
            id: AudioNodeId(DESTINATION_NODE_ID),
            context: self.base().clone(),
        };
        node::DestinationNode {
            registration,
            channel_count: self.base().channels() as usize,
        }
    }

    /// Returns the `AudioListener`, rebuilding param handles around the
    /// pre-allocated `LISTENER_PARAM_IDS`.
    ///
    /// The nine `ids.next()` calls below consume ids in declaration order;
    /// this must match the order the params were registered in
    /// `BaseAudioContext::new` — NOTE(review): order coupling, confirm.
    fn listener(&self) -> AudioListener {
        let mut ids = LISTENER_PARAM_IDS.map(|i| AudioContextRegistration {
            id: AudioNodeId(i),
            context: self.base().clone(),
        });
        // Safe to unwrap: listener_params is always Some after construction.
        let params = self.base().inner.listener_params.as_ref().unwrap();
        AudioListener {
            position_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_x.clone()),
            position_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_y.clone()),
            position_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_z.clone()),
            forward_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_x.clone()),
            forward_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_y.clone()),
            forward_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_z.clone()),
            up_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_x.clone()),
            up_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_y.clone()),
            up_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_z.clone()),
        }
    }

    /// The sample rate of this context.
    fn sample_rate(&self) -> SampleRate {
        self.base().sample_rate()
    }

    /// Time in seconds of processed audio (see `BaseAudioContext::current_time`).
    fn current_time(&self) -> f64 {
        self.base().current_time()
    }

    /// Test-only helper: a registration with id 0 for constructing nodes
    /// without going through `register`.
    #[cfg(test)]
    fn mock_registration(&self) -> AudioContextRegistration {
        AudioContextRegistration {
            id: AudioNodeId(0),
            context: self.base().clone(),
        }
    }
}
impl AsBaseAudioContext for BaseAudioContext {
    /// A `BaseAudioContext` is its own base.
    fn base(&self) -> &BaseAudioContext {
        // `self` already has type `&BaseAudioContext`; the original `&self`
        // produced `&&BaseAudioContext` and relied on deref coercion
        // (clippy: needless_borrow).
        self
    }
}
/// An audio context attached to a live audio output device.
pub struct AudioContext {
    base: BaseAudioContext,
    /// The cpal output stream driving rendering; absent in test builds,
    /// where no audio device is opened.
    #[cfg(not(test))]
    stream: Stream,
}
impl AsBaseAudioContext for AudioContext {
    /// Delegates to the owned `BaseAudioContext`.
    fn base(&self) -> &BaseAudioContext {
        &self.base
    }
}
/// An audio context that renders into a buffer on the calling thread
/// instead of streaming to a device.
pub struct OfflineAudioContext {
    base: BaseAudioContext,
    /// Number of frames to render.
    length: usize,
    /// The renderer, driven synchronously by `start_rendering`.
    render: RenderThread,
}
impl AsBaseAudioContext for OfflineAudioContext {
    /// Delegates to the owned `BaseAudioContext`.
    fn base(&self) -> &BaseAudioContext {
        &self.base
    }
}
impl AudioContext {
    /// Opens the default audio output, spawns the render machinery and
    /// starts streaming to the device.
    #[cfg(not(test))]
    pub fn new() -> Self {
        let io_builder = io::OutputBuilder::new();
        let config = io_builder.config();
        log::debug!("Output {:?}", config);
        let sample_rate = SampleRate(config.sample_rate.0);
        let channels = config.channels as u32;
        // Control messages flow from this (control) side to the render side.
        let (sender, receiver) = crossbeam_channel::unbounded();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        let render = RenderThread::new(sample_rate, channels as usize, receiver);
        let stream = io_builder.build(render);
        Self { base, stream }
    }

    /// Test builds: fixed 44.1 kHz stereo context with no audio device.
    /// NOTE(review): `_receiver` is dropped when this returns, so any later
    /// control-message `send(...).unwrap()` will panic — presumably the
    /// tests never exercise that path; confirm.
    #[cfg(test)]
    pub fn new() -> Self {
        let sample_rate = SampleRate(44_100);
        let channels = 2;
        let (sender, _receiver) = crossbeam_channel::unbounded();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        Self { base }
    }

    /// Pauses audio output. Panics if the underlying stream fails to pause.
    /// No-op in test builds (there is no stream).
    pub fn suspend(&self) {
        #[cfg(not(test))]
        self.stream.pause().unwrap()
    }

    /// Resumes audio output. Panics if the underlying stream fails to play.
    /// No-op in test builds (there is no stream).
    pub fn resume(&self) {
        #[cfg(not(test))]
        self.stream.play().unwrap()
    }
}
/// Identifier of a node in the audio graph.
pub struct AudioNodeId(u64);
/// Identifier of an `AudioParam`'s render-side processor. Params are
/// registered as graph nodes, so this shares the node id space.
pub struct AudioParamId(u64);
impl From<&AudioParamId> for NodeIndex {
    /// A param id is directly usable as a graph node index (same id space).
    fn from(i: &AudioParamId) -> Self {
        NodeIndex(i.0)
    }
}
/// Handle tying a node to its context; dropping it signals the render
/// thread that the node may be reclaimed (see the `Drop` impl).
pub struct AudioContextRegistration {
    context: BaseAudioContext,
    id: AudioNodeId,
}
impl AudioContextRegistration {
    /// The graph id of the registered node.
    pub fn id(&self) -> &AudioNodeId {
        &self.id
    }
    /// The context this node is registered with.
    pub fn context(&self) -> &BaseAudioContext {
        &self.context
    }
}
impl Drop for AudioContextRegistration {
    /// Tell the render thread this node can be freed once it finishes,
    /// except for the pre-allocated nodes (destination, listener and its
    /// params) which live for the whole lifetime of the context.
    fn drop(&mut self) {
        let id = self.id.0;
        if id == DESTINATION_NODE_ID || id == LISTENER_NODE_ID || LISTENER_PARAM_IDS.contains(&id) {
            return;
        }
        self.context
            .inner
            .render_channel
            .send(ControlMessage::FreeWhenFinished { id })
            .unwrap();
    }
}
impl BaseAudioContext {
fn new(sample_rate: SampleRate, channels: u32, render_channel: Sender<ControlMessage>) -> Self {
let base_inner = BaseAudioContextInner {
sample_rate,
channels,
render_channel,
node_id_inc: AtomicU64::new(0),
frames_played: AtomicU64::new(0),
listener_params: None,
};
let base = BaseAudioContext {
inner: Arc::new(base_inner),
};
let listener_params = {
let dest = node::DestinationNode::new(&base, channels as usize);
let listener = crate::spatial::AudioListenerNode::new(&base);
base.connect(listener.id(), dest.id(), 0, u32::MAX);
let listener_params = listener.into_fields();
let AudioListener {
position_x,
position_y,
position_z,
forward_x,
forward_y,
forward_z,
up_x,
up_y,
up_z,
} = listener_params;
AudioListenerParams {
position_x: position_x.into_raw_parts(),
position_y: position_y.into_raw_parts(),
position_z: position_z.into_raw_parts(),
forward_x: forward_x.into_raw_parts(),
forward_y: forward_y.into_raw_parts(),
forward_z: forward_z.into_raw_parts(),
up_x: up_x.into_raw_parts(),
up_y: up_y.into_raw_parts(),
up_z: up_z.into_raw_parts(),
}
};
let mut base = base;
let mut inner_mut = Arc::get_mut(&mut base.inner).unwrap();
inner_mut.listener_params = Some(listener_params);
base
}
pub fn sample_rate(&self) -> SampleRate {
self.inner.sample_rate
}
pub fn current_time(&self) -> f64 {
self.inner.frames_played.load(Ordering::SeqCst) as f64 / self.inner.sample_rate.0 as f64
}
pub fn channels(&self) -> u32 {
self.inner.channels
}
pub fn register<
T: node::AudioNode,
F: FnOnce(AudioContextRegistration) -> (T, Box<dyn AudioProcessor>),
>(
&self,
f: F,
) -> T {
let id = self.inner.node_id_inc.fetch_add(1, Ordering::SeqCst);
let node_id = AudioNodeId(id);
let registration = AudioContextRegistration {
id: node_id,
context: self.clone(),
};
let (node, render) = (f)(registration);
let message = ControlMessage::RegisterNode {
id,
node: render,
inputs: node.number_of_inputs() as usize,
outputs: node.number_of_outputs() as usize,
channel_config: node.channel_config_cloned(),
};
self.inner.render_channel.send(message).unwrap();
node
}
pub(crate) fn connect(&self, from: &AudioNodeId, to: &AudioNodeId, output: u32, input: u32) {
let message = ControlMessage::ConnectNode {
from: from.0,
to: to.0,
output,
input,
};
self.inner.render_channel.send(message).unwrap();
}
pub(crate) fn disconnect(&self, from: &AudioNodeId, to: &AudioNodeId) {
let message = ControlMessage::DisconnectNode {
from: from.0,
to: to.0,
};
self.inner.render_channel.send(message).unwrap();
}
pub(crate) fn disconnect_all(&self, from: &AudioNodeId) {
let message = ControlMessage::DisconnectAll { from: from.0 };
self.inner.render_channel.send(message).unwrap();
}
pub(crate) fn pass_audio_param_event(
&self,
to: &Sender<AutomationEvent>,
event: AutomationEvent,
) {
let message = ControlMessage::AudioParamEvent {
to: to.clone(),
event,
};
self.inner.render_channel.send(message).unwrap();
}
pub(crate) fn connect_listener_to_panner(&self, panner: &AudioNodeId) {
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 0, 1);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 1, 2);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 2, 3);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 3, 4);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 4, 5);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 5, 6);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 6, 7);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 7, 8);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 8, 9);
}
}
impl Default for AudioContext {
    /// Equivalent to [`AudioContext::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl OfflineAudioContext {
    /// Creates an offline context that will render `length` frames of
    /// `channels` channels at `sample_rate`. Rendering happens synchronously
    /// on the thread that calls `start_rendering`.
    pub fn new(channels: u32, length: usize, sample_rate: SampleRate) -> Self {
        let (sender, receiver) = crossbeam_channel::unbounded();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        let render = RenderThread::new(sample_rate, channels as usize, receiver);
        Self {
            base,
            length,
            render,
        }
    }

    /// Renders the graph and returns the buffer, truncated to exactly
    /// `length` frames.
    pub fn start_rendering(&mut self) -> AudioBuffer {
        // Round `length` up to a whole number of render quanta.
        // NOTE(review): the `as u32` casts truncate for lengths above
        // u32::MAX frames — confirm that is an acceptable limit.
        let buffer_size = (self.length as u32 + BUFFER_SIZE - 1) / BUFFER_SIZE * BUFFER_SIZE;
        let mut buf = self.render.render_audiobuffer(buffer_size as usize);
        // Discard the padding frames beyond the requested length.
        let _split = buf.split_off(self.length as u32);
        buf
    }

    /// The number of frames this context renders.
    pub fn length(&self) -> usize {
        self.length
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Compile-time check: accepts only values that are Send + Sync + 'static.
    fn require_send_sync_static<T: Send + Sync + 'static>(_: T) {}
    #[test]
    fn test_audio_context_registration_traits() {
        // A registration must be movable across threads so nodes can be
        // shared; this fails to compile if the bounds are ever lost.
        let context = OfflineAudioContext::new(1, 0, SampleRate(0));
        let registration = context.mock_registration();
        require_send_sync_static(registration);
    }
}
}