use std::sync::atomic::{AtomicU64, AtomicU8};
use std::sync::Arc;
use crossbeam_channel::{Receiver, Sender};
use crate::buffer::AudioBuffer;
use crate::context::{AudioContextLatencyCategory, AudioContextOptions, AudioContextState};
use crate::events::EventDispatch;
use crate::media_devices::MediaDeviceInfo;
use crate::media_streams::{MediaStream, MediaStreamTrack};
use crate::message::ControlMessage;
use crate::{AudioRenderCapacityLoad, RENDER_QUANTUM_SIZE};
mod none;
pub(crate) use none::NoneBackend;
#[cfg(feature = "cpal")]
mod cpal;
#[cfg(feature = "cubeb")]
mod cubeb;
#[cfg(any(feature = "cubeb", feature = "cpal"))]
mod microphone;
/// Endpoints and shared state handed to the control (user-facing) thread.
///
/// Built by `thread_init` as the counterpart of a `RenderThreadInit`; the
/// two halves share the same `state` and `frames_played` atomics and hold
/// opposite ends of the channels.
#[derive(Debug)]
pub(crate) struct ControlThreadInit {
    /// Shared context state, stored as the `u8` value of `AudioContextState`
    pub state: Arc<AtomicU8>,
    /// Shared counter of frames rendered so far
    pub frames_played: Arc<AtomicU64>,
    /// Sends control messages to the render thread
    pub ctrl_msg_send: Sender<ControlMessage>,
    /// Receives render-capacity load measurements from the render thread
    pub load_value_recv: Receiver<AudioRenderCapacityLoad>,
    /// Sends events into the shared event channel
    pub event_send: Sender<EventDispatch>,
    /// Receives events for dispatch to user callbacks
    pub event_recv: Receiver<EventDispatch>,
}
/// Endpoints and shared state handed to the audio render thread.
///
/// Built by `thread_init` as the counterpart of a `ControlThreadInit`; the
/// two halves share the same `state` and `frames_played` atomics and hold
/// opposite ends of the channels. `Clone` because a backend may need to
/// hand a copy to its stream callback.
#[derive(Clone, Debug)]
pub(crate) struct RenderThreadInit {
    /// Shared context state, stored as the `u8` value of `AudioContextState`
    pub state: Arc<AtomicU8>,
    /// Shared counter of frames rendered so far
    pub frames_played: Arc<AtomicU64>,
    /// Receives control messages from the control thread
    pub ctrl_msg_recv: Receiver<ControlMessage>,
    /// Sends render-capacity load measurements to the control thread
    pub load_value_send: Sender<AudioRenderCapacityLoad>,
    /// Sends events into the shared event channel
    pub event_send: Sender<EventDispatch>,
}
/// Create the paired initialization payloads for the control and render
/// threads.
///
/// Both halves share the same context-state atomic (initially
/// `Suspended`) and frames-played counter. Three bounded channels connect
/// them: control → render for control messages, render → control for load
/// values (capacity 1, so at most one measurement is in flight), and a
/// shared event channel whose sender is held by both sides.
pub(crate) fn thread_init() -> (ControlThreadInit, RenderThreadInit) {
    // Shared state: the context starts suspended with zero frames rendered.
    let state = Arc::new(AtomicU8::new(AudioContextState::Suspended as u8));
    let frames_played = Arc::new(AtomicU64::new(0));

    let (ctrl_msg_send, ctrl_msg_recv) = crossbeam_channel::bounded(256);
    let (load_value_send, load_value_recv) = crossbeam_channel::bounded(1);
    let (event_send, event_recv) = crossbeam_channel::bounded(256);

    let render = RenderThreadInit {
        state: Arc::clone(&state),
        frames_played: Arc::clone(&frames_played),
        ctrl_msg_recv,
        load_value_send,
        event_send: event_send.clone(),
    };

    let control = ControlThreadInit {
        state,
        frames_played,
        ctrl_msg_send,
        load_value_recv,
        event_send,
        event_recv,
    };

    (control, render)
}
/// Select and construct the audio output backend for a context.
///
/// A `sink_id` of `"none"` always yields the silent `NoneBackend` (the graph
/// still renders, nothing is played). Otherwise the backend is chosen at
/// compile time: `cubeb` when that feature is enabled, else `cpal`.
///
/// # Panics
/// Panics when a real sink is requested but neither the `cubeb` nor the
/// `cpal` feature is enabled.
pub(crate) fn build_output(
    options: AudioContextOptions,
    render_thread_init: RenderThreadInit,
) -> Box<dyn AudioBackendManager> {
    // Early return for the explicit "no output" sink.
    if options.sink_id == "none" {
        let backend = NoneBackend::build_output(options, render_thread_init);
        return Box::new(backend);
    }
    // Exactly one of the following cfg blocks is compiled in; it forms the
    // tail expression of the function. cubeb takes precedence over cpal.
    #[cfg(feature = "cubeb")]
    {
        let backend = cubeb::CubebBackend::build_output(options, render_thread_init);
        Box::new(backend)
    }
    #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
    {
        let backend = cpal::CpalBackend::build_output(options, render_thread_init);
        Box::new(backend)
    }
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    {
        panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
    }
}
/// Construct a `MediaStream` capturing from an audio input (microphone).
///
/// The capture backend is chosen at compile time (`cubeb` preferred over
/// `cpal`); its output buffers are received over a channel and wrapped into
/// a single-track media stream.
///
/// `number_of_channels` is `None` for the backend's default channel count —
/// presumably; confirm against the backend implementations.
///
/// # Panics
/// Panics when neither the `cubeb` nor the `cpal` feature is enabled.
pub(crate) fn build_input(
    options: AudioContextOptions,
    number_of_channels: Option<u32>,
) -> MediaStream {
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    {
        panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
    }
    #[cfg(any(feature = "cubeb", feature = "cpal"))]
    {
        // Exactly one of the inner cfg blocks is compiled in; it yields the
        // backend handle plus the receiving end of its AudioBuffer channel.
        let (backend, receiver) = {
            #[cfg(feature = "cubeb")]
            {
                cubeb::CubebBackend::build_input(options, number_of_channels)
            }
            #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
            {
                cpal::CpalBackend::build_input(options, number_of_channels)
            }
        };
        // Wrap the buffer stream as a media track inside a single-track stream.
        let media_iter = microphone::MicrophoneStream::new(receiver, Box::new(backend));
        let track = MediaStreamTrack::from_iter(media_iter);
        MediaStream::from_tracks(vec![track])
    }
}
/// Common interface implemented by every audio host backend
/// (`cubeb`, `cpal`, and the silent `NoneBackend`).
pub(crate) trait AudioBackendManager: Send + Sync + 'static {
    /// Identifier of this backend; defaults to the concrete type's name.
    fn name(&self) -> &'static str {
        std::any::type_name::<Self>()
    }
    /// Set up an output stream driven by the render thread.
    fn build_output(options: AudioContextOptions, render_thread_init: RenderThreadInit) -> Self
    where
        Self: Sized;
    /// Set up an input (capture) stream; returns the backend handle and the
    /// receiving end of the captured `AudioBuffer` channel.
    fn build_input(
        options: AudioContextOptions,
        number_of_channels: Option<u32>,
    ) -> (Self, Receiver<AudioBuffer>)
    where
        Self: Sized;
    /// Resume the stream; returns whether the operation succeeded.
    fn resume(&self) -> bool;
    /// Suspend the stream; returns whether the operation succeeded.
    fn suspend(&self) -> bool;
    /// Close the stream and release its resources.
    fn close(&self);
    /// Sample rate of the stream, in Hz.
    fn sample_rate(&self) -> f32;
    /// Number of channels of the stream.
    fn number_of_channels(&self) -> usize;
    /// Output latency of the stream, in seconds.
    fn output_latency(&self) -> f64;
    /// Identifier of the sink this stream plays to.
    fn sink_id(&self) -> &str;
    /// List the audio devices this backend can see.
    fn enumerate_devices_sync() -> Vec<MediaDeviceInfo>
    where
        Self: Sized;
}
/// Translate a latency category into a backend buffer size in frames.
///
/// The predefined categories map to fixed multiples of the render quantum;
/// a custom latency (in seconds) is converted to frames and rounded up to
/// the next power of two.
///
/// # Panics
/// Panics when a custom latency is not strictly positive.
fn buffer_size_for_latency_category(
    latency_cat: AudioContextLatencyCategory,
    sample_rate: f32,
) -> usize {
    match latency_cat {
        AudioContextLatencyCategory::Interactive => RENDER_QUANTUM_SIZE,
        AudioContextLatencyCategory::Balanced => 4 * RENDER_QUANTUM_SIZE,
        AudioContextLatencyCategory::Playback => 8 * RENDER_QUANTUM_SIZE,
        AudioContextLatencyCategory::Custom(latency_secs) => {
            assert!(
                latency_secs > 0.,
                "RangeError - Invalid custom latency: {:?}, should be strictly positive",
                latency_secs
            );
            // frames = seconds × frames-per-second, rounded up to a power
            // of two for the audio host.
            let frames = latency_secs * f64::from(sample_rate);
            #[allow(clippy::cast_sign_loss)]
            #[allow(clippy::cast_possible_truncation)]
            let frames = frames as usize;
            frames.next_power_of_two()
        }
    }
}
/// List audio devices via the compile-time-selected backend
/// (`cubeb` preferred over `cpal`).
///
/// # Panics
/// Panics when neither the `cubeb` nor the `cpal` feature is enabled.
pub(crate) fn enumerate_devices_sync() -> Vec<MediaDeviceInfo> {
    // Exactly one of the following cfg arms is compiled in.
    #[cfg(feature = "cubeb")]
    {
        cubeb::CubebBackend::enumerate_devices_sync()
    }
    #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
    {
        cpal::CpalBackend::enumerate_devices_sync()
    }
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
}