use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::mpsc::{self, Sender};
/// Reserved node id of the context's destination node (always registered first).
const DESTINATION_NODE_ID: u64 = 0;
/// Reserved node id of the context's `AudioListener` node.
const LISTENER_NODE_ID: u64 = 1;
/// Reserved node ids of the listener's nine `AudioParam`s
/// (position/forward/up × x/y/z).
const LISTENER_PARAM_IDS: Range<u64> = 2..12;
#[cfg(not(test))]
use cpal::{
traits::{DeviceTrait, HostTrait, StreamTrait},
SampleFormat, Stream, StreamConfig,
};
use crate::buffer::{ChannelConfigOptions, ChannelCountMode, ChannelInterpretation};
use crate::graph::{NodeIndex, RenderThread};
use crate::media::{MediaElement, MediaStream};
use crate::message::ControlMessage;
use crate::node;
use crate::node::AudioNode;
use crate::param::{AudioParam, AudioParamOptions};
use crate::process::AudioProcessor;
use crate::spatial::{AudioListener, AudioListenerParams};
use crate::SampleRate;
/// State shared by every context flavour (realtime [`AudioContext`] and
/// [`OfflineAudioContext`]).
pub struct BaseAudioContext {
    /// Sample rate (Hz) the graph is rendered at.
    sample_rate: SampleRate,
    /// Number of output channels.
    channels: u32,
    /// Monotonic counter handing out unique node ids (see `register`).
    node_id_inc: AtomicU64,
    /// Sending end of the control channel to the render thread.
    render_channel: Sender<ControlMessage>,
    /// Frames rendered so far; read by `current_time`.
    /// NOTE(review): never incremented in this file — presumably advanced by
    /// the render side; confirm.
    frames_played: AtomicU64,
    /// Raw param state of the context's `AudioListener`; set once during
    /// construction, `None` only transiently inside `new`.
    listener_params: Option<AudioListenerParams>,
}
/// Trait implemented by every context flavour; provides the shared
/// node-factory methods on top of the underlying [`BaseAudioContext`].
pub trait AsBaseAudioContext {
    /// The [`BaseAudioContext`] backing this context.
    fn base(&self) -> &BaseAudioContext;

    /// Creates an `OscillatorNode` with default options.
    fn create_oscillator(&self) -> node::OscillatorNode {
        node::OscillatorNode::new(self.base(), Default::default())
    }

    /// Creates a `GainNode` with default options.
    fn create_gain(&self) -> node::GainNode {
        node::GainNode::new(self.base(), Default::default())
    }

    /// Creates a `ConstantSourceNode` with default options.
    fn create_constant_source(&self) -> node::ConstantSourceNode {
        node::ConstantSourceNode::new(self.base(), Default::default())
    }

    /// Creates a `DelayNode` with default options.
    fn create_delay(&self) -> node::DelayNode {
        node::DelayNode::new(self.base(), Default::default())
    }

    /// Creates a `ChannelSplitterNode` with the given number of outputs.
    fn create_channel_splitter(&self, number_of_outputs: u32) -> node::ChannelSplitterNode {
        let opts = node::ChannelSplitterOptions {
            number_of_outputs,
            ..Default::default()
        };
        node::ChannelSplitterNode::new(self.base(), opts)
    }

    /// Creates a `ChannelMergerNode` with the given number of inputs.
    fn create_channel_merger(&self, number_of_inputs: u32) -> node::ChannelMergerNode {
        let opts = node::ChannelMergerOptions {
            number_of_inputs,
            ..Default::default()
        };
        node::ChannelMergerNode::new(self.base(), opts)
    }

    /// Creates a source node pulling audio from the given [`MediaStream`]
    /// (configured mono with an explicit channel count).
    fn create_media_stream_source<M: MediaStream>(
        &self,
        media: M,
    ) -> node::MediaStreamAudioSourceNode {
        let channel_config = ChannelConfigOptions {
            count: 1,
            mode: ChannelCountMode::Explicit,
            interpretation: ChannelInterpretation::Speakers,
        };
        let opts = node::MediaStreamAudioSourceNodeOptions {
            media,
            channel_config,
        };
        node::MediaStreamAudioSourceNode::new(self.base(), opts)
    }

    /// Creates a source node pulling audio from the given [`MediaElement`]
    /// (configured mono with an explicit channel count).
    fn create_media_element_source<M: MediaStream>(
        &self,
        media: MediaElement<M>,
    ) -> node::MediaElementAudioSourceNode {
        let channel_config = ChannelConfigOptions {
            count: 1,
            mode: ChannelCountMode::Explicit,
            interpretation: ChannelInterpretation::Speakers,
        };
        let opts = node::MediaElementAudioSourceNodeOptions {
            media,
            channel_config,
        };
        node::MediaElementAudioSourceNode::new(self.base(), opts)
    }

    /// Creates an `AudioBufferSourceNode` with default options.
    fn create_buffer_source(&self) -> node::AudioBufferSourceNode {
        node::AudioBufferSourceNode::new(self.base(), Default::default())
    }

    /// Creates a `PannerNode` with default options.
    fn create_panner(&self) -> node::PannerNode {
        node::PannerNode::new(self.base(), Default::default())
    }

    /// Registers a new [`AudioParam`] in the graph and connects it to `dest`.
    ///
    /// Returns the control-side handle plus the id render-side code uses to
    /// address the param's processor node.
    ///
    /// NOTE(review): the param is connected to input `u32::MAX` of `dest` —
    /// presumably the graph's sentinel index for param inputs; confirm
    /// against `crate::graph`.
    fn create_audio_param(
        &self,
        opts: AudioParamOptions,
        dest: &AudioNodeId,
    ) -> (crate::param::AudioParam<'_>, AudioParamId) {
        let param = self.base().register(move |registration| {
            let (node, proc) = crate::param::audio_param_pair(opts, registration);
            (node, Box::new(proc))
        });
        self.base().connect(param.id(), dest, 0, u32::MAX);
        let proc_id = AudioParamId(param.id().0);
        (param, proc_id)
    }

    /// Handle to the destination node, pre-registered at the reserved id 0.
    fn destination(&self) -> node::DestinationNode {
        let registration = AudioContextRegistration {
            id: AudioNodeId(DESTINATION_NODE_ID),
            // `self.base()` already yields `&BaseAudioContext`; the previous
            // `&self.base()` produced a `&&` that only compiled via deref
            // coercion (clippy: needless_borrow).
            context: self.base(),
        };
        node::DestinationNode {
            registration,
            channel_count: self.base().channels as usize,
        }
    }

    /// Handle to the context's `AudioListener`, rebuilt from the raw param
    /// state stored at construction time.
    fn listener(&self) -> AudioListener {
        // Mint registrations for the nine reserved listener-param ids, in
        // the same order the params were originally registered.
        let mut ids = LISTENER_PARAM_IDS.map(|i| AudioContextRegistration {
            id: AudioNodeId(i),
            context: self.base(),
        });
        // Always `Some` once `BaseAudioContext::new` has returned.
        let params = self.base().listener_params.as_ref().unwrap();

        AudioListener {
            position_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_x.clone()),
            position_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_y.clone()),
            position_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.position_z.clone()),
            forward_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_x.clone()),
            forward_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_y.clone()),
            forward_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.forward_z.clone()),
            up_x: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_x.clone()),
            up_y: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_y.clone()),
            up_z: AudioParam::from_raw_parts(ids.next().unwrap(), params.up_z.clone()),
        }
    }

    /// Sample rate of this context in Hz.
    fn sample_rate(&self) -> SampleRate {
        self.base().sample_rate()
    }

    /// Time in seconds of audio rendered so far.
    fn current_time(&self) -> f64 {
        self.base().current_time()
    }

    /// Registration with a dummy id, for unit tests that need one.
    #[cfg(test)]
    fn mock_registration(&self) -> AudioContextRegistration {
        AudioContextRegistration {
            id: AudioNodeId(0),
            context: self.base(),
        }
    }
}
impl AsBaseAudioContext for BaseAudioContext {
    fn base(&self) -> &BaseAudioContext {
        // `self` is already `&BaseAudioContext`; the previous `&self`
        // created a `&&` that only compiled via deref coercion
        // (clippy: needless_borrow).
        self
    }
}
/// Realtime audio context playing through an actual output device via cpal.
pub struct AudioContext {
    base: BaseAudioContext,
    // The cpal output stream; kept alive here so playback continues for the
    // lifetime of the context. Absent in test builds (no audio device).
    #[cfg(not(test))]
    stream: Stream,
}
impl AsBaseAudioContext for AudioContext {
    // Plain delegation to the embedded base context.
    fn base(&self) -> &BaseAudioContext {
        &self.base
    }
}
/// Context that renders audio as fast as possible into an in-memory buffer
/// instead of playing it on a device.
pub struct OfflineAudioContext {
    base: BaseAudioContext,
    /// Requested render length in samples.
    length: usize,
    /// Output samples, filled by `start_rendering`.
    /// NOTE(review): allocated as `length` samples regardless of channel
    /// count — confirm whether multi-channel output should instead be
    /// `length * channels` (interleaved).
    buffer: Vec<f32>,
    /// Renderer driven synchronously from `start_rendering` (no audio thread).
    render: RenderThread,
}
impl AsBaseAudioContext for OfflineAudioContext {
    // Plain delegation to the embedded base context.
    fn base(&self) -> &BaseAudioContext {
        &self.base
    }
}
impl AudioContext {
    /// Creates a realtime context playing through the system's default
    /// output device.
    ///
    /// # Panics
    /// Panics when no output device/config is available or the stream
    /// cannot be built or started.
    #[cfg(not(test))]
    pub fn new() -> Self {
        let host = cpal::default_host();
        let device = host
            .default_output_device()
            .expect("no output device available");
        // Take the first advertised output config at its maximum sample
        // rate. NOTE(review): the first config is not necessarily the best
        // match for the hardware — confirm this choice is intentional.
        let mut supported_configs_range = device
            .supported_output_configs()
            .expect("error while querying configs");
        let supported_config = supported_configs_range
            .next()
            .expect("no supported config?!")
            .with_max_sample_rate();
        let err_fn = |err| eprintln!("an error occurred on the output audio stream: {}", err);
        let sample_format = supported_config.sample_format();
        let mut config: StreamConfig = supported_config.into();
        // Fix the callback size so each callback handles whole render quanta.
        config.buffer_size = cpal::BufferSize::Fixed(crate::BUFFER_SIZE);
        let sample_rate = SampleRate(config.sample_rate.0);
        let channels = config.channels as u32;
        // Control messages flow from this (control) thread into the render
        // closure owned by the audio callback.
        let (sender, receiver) = mpsc::channel();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        let mut render = RenderThread::new(sample_rate, channels as usize, receiver);
        // Only f32 output is implemented so far; other sample formats abort.
        let stream = match sample_format {
            SampleFormat::F32 => {
                device.build_output_stream(&config, move |data, _c| render.render(data), err_fn)
            }
            _ => unimplemented!(),
        }
        .unwrap();
        stream.play().unwrap();
        Self { base, stream }
    }
    /// Test stand-in: fixed 44.1 kHz stereo context without an audio device.
    #[cfg(test)]
    pub fn new() -> Self {
        let sample_rate = SampleRate(44_100);
        let channels = 2;
        // NOTE(review): `_receiver` is dropped when `new` returns, so any
        // control message sent afterwards fails and the `.unwrap()`s on
        // `send` elsewhere would panic — confirm tests never hit that path.
        let (sender, _receiver) = mpsc::channel();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        Self { base }
    }
    /// Pauses the output stream (no-op in test builds).
    pub fn suspend(&self) {
        #[cfg(not(test))]
        self.stream.pause().unwrap()
    }
    /// Resumes the output stream (no-op in test builds).
    pub fn resume(&self) {
        #[cfg(not(test))]
        self.stream.play().unwrap()
    }
}
pub struct AudioNodeId(u64);
pub struct AudioParamId(u64);
/// Lets an `AudioParamId` address its processor node in the graph without
/// exposing the raw `u64`.
impl From<&AudioParamId> for NodeIndex {
    fn from(i: &AudioParamId) -> Self {
        NodeIndex(i.0)
    }
}
/// Handle tying a node to its context for the node's lifetime; dropping it
/// schedules the node for removal from the render graph (see the `Drop`
/// impl below).
pub struct AudioContextRegistration<'a> {
    /// Context the node was registered in.
    context: &'a BaseAudioContext,
    /// Graph-wide unique id of the node.
    id: AudioNodeId,
}
impl<'a> AudioContextRegistration<'a> {
    /// Id of the registered node.
    pub fn id(&self) -> &AudioNodeId {
        &self.id
    }
    /// Context the node is registered in.
    pub fn context(&self) -> &BaseAudioContext {
        self.context
    }
}
impl<'a> Drop for AudioContextRegistration<'a> {
    /// Notifies the render thread that this node can be freed once it has
    /// finished processing.
    ///
    /// The pre-allocated "magic" nodes (destination, listener, and the
    /// listener params) live as long as the context itself and are never
    /// freed.
    fn drop(&mut self) {
        let magic = self.id.0 == DESTINATION_NODE_ID
            || self.id.0 == LISTENER_NODE_ID
            || LISTENER_PARAM_IDS.contains(&self.id.0);
        if !magic {
            let message = ControlMessage::FreeWhenFinished { id: self.id.0 };
            // The receiving end may already be gone (e.g. during program
            // teardown after the render thread exits). A failed send is
            // harmless then, while panicking inside `drop` can abort the
            // whole process if it happens during unwinding — so the send is
            // best-effort instead of `.unwrap()`.
            let _ = self.context.render_channel.send(message);
        }
    }
}
impl BaseAudioContext {
fn new(sample_rate: SampleRate, channels: u32, render_channel: Sender<ControlMessage>) -> Self {
let base = Self {
sample_rate,
channels,
render_channel,
node_id_inc: AtomicU64::new(0),
frames_played: AtomicU64::new(0),
listener_params: None,
};
let listener_params = {
let dest = node::DestinationNode::new(&base, channels as usize);
assert_eq!(dest.registration().id.0, DESTINATION_NODE_ID);
let listener = crate::spatial::AudioListenerNode::new(&base);
assert_eq!(listener.registration().id.0, LISTENER_NODE_ID);
assert_eq!(
base.node_id_inc.load(Ordering::SeqCst),
LISTENER_PARAM_IDS.end - 1
);
base.connect(listener.id(), dest.id(), 0, u32::MAX);
let listener_params = listener.into_fields();
let AudioListener {
position_x,
position_y,
position_z,
forward_x,
forward_y,
forward_z,
up_x,
up_y,
up_z,
} = listener_params;
AudioListenerParams {
position_x: position_x.into_raw_parts(),
position_y: position_y.into_raw_parts(),
position_z: position_z.into_raw_parts(),
forward_x: forward_x.into_raw_parts(),
forward_y: forward_y.into_raw_parts(),
forward_z: forward_z.into_raw_parts(),
up_x: up_x.into_raw_parts(),
up_y: up_y.into_raw_parts(),
up_z: up_z.into_raw_parts(),
}
};
let mut base = base;
base.listener_params = Some(listener_params);
base
}
pub fn sample_rate(&self) -> SampleRate {
self.sample_rate
}
pub fn current_time(&self) -> f64 {
self.frames_played.load(Ordering::SeqCst) as f64 / self.sample_rate.0 as f64
}
pub fn channels(&self) -> u32 {
self.channels
}
pub fn register<
'a,
T: node::AudioNode,
F: FnOnce(AudioContextRegistration<'a>) -> (T, Box<dyn AudioProcessor>),
>(
&'a self,
f: F,
) -> T {
let id = self.node_id_inc.fetch_add(1, Ordering::SeqCst);
let node_id = AudioNodeId(id);
let registration = AudioContextRegistration {
id: node_id,
context: &self,
};
let (node, render) = (f)(registration);
let message = ControlMessage::RegisterNode {
id,
node: render,
inputs: node.number_of_inputs() as usize,
outputs: node.number_of_outputs() as usize,
channel_config: node.channel_config_cloned(),
};
self.render_channel.send(message).unwrap();
node
}
pub(crate) fn connect(&self, from: &AudioNodeId, to: &AudioNodeId, output: u32, input: u32) {
let message = ControlMessage::ConnectNode {
from: from.0,
to: to.0,
output,
input,
};
self.render_channel.send(message).unwrap();
}
pub(crate) fn disconnect(&self, from: &AudioNodeId, to: &AudioNodeId) {
let message = ControlMessage::DisconnectNode {
from: from.0,
to: to.0,
};
self.render_channel.send(message).unwrap();
}
pub(crate) fn disconnect_all(&self, from: &AudioNodeId) {
let message = ControlMessage::DisconnectAll { from: from.0 };
self.render_channel.send(message).unwrap();
}
pub(crate) fn connect_listener_to_panner(&self, panner: &AudioNodeId) {
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 0, 1);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 1, 2);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 2, 3);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 3, 4);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 4, 5);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 5, 6);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 6, 7);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 7, 8);
self.connect(&AudioNodeId(LISTENER_NODE_ID), panner, 8, 9);
}
}
impl Default for AudioContext {
    // Delegates to `new` (default output device, or the test stand-in).
    fn default() -> Self {
        Self::new()
    }
}
impl OfflineAudioContext {
    /// Creates an offline context that renders `length` samples at the given
    /// sample rate into an in-memory buffer.
    ///
    /// NOTE(review): the buffer holds `length` samples regardless of
    /// `channels` — confirm whether it should be `length * channels` for
    /// interleaved multi-channel output.
    pub fn new(channels: u32, length: usize, sample_rate: SampleRate) -> Self {
        let (sender, receiver) = mpsc::channel();
        let base = BaseAudioContext::new(sample_rate, channels, sender);
        let render = RenderThread::new(sample_rate, channels as usize, receiver);
        let buffer = vec![0.; length];
        Self {
            base,
            length,
            buffer,
            render,
        }
    }
    /// Renders the whole buffer synchronously, one quantum at a time, and
    /// returns the rendered samples.
    ///
    /// NOTE(review): chunks are `BUFFER_SIZE` samples, not
    /// `BUFFER_SIZE * channels` — verify against `RenderThread::render`'s
    /// expected slice layout.
    pub fn start_rendering(&mut self) -> &[f32] {
        for quantum in self.buffer.chunks_mut(crate::BUFFER_SIZE as usize) {
            self.render.render(quantum)
        }
        self.buffer.as_slice()
    }
    /// Requested render length in samples.
    pub fn length(&self) -> usize {
        self.length
    }
}