use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use crossbeam_channel::{Receiver, Sender};
use crate::buffer::AudioBuffer;
use crate::context::{AudioContextLatencyCategory, AudioContextOptions};
use crate::events::EventDispatch;
use crate::message::ControlMessage;
use crate::{AudioRenderCapacityLoad, RENDER_QUANTUM_SIZE};
mod none;
#[cfg(feature = "cpal")]
mod cpal;
#[cfg(feature = "cubeb")]
mod cubeb;
/// List the media devices reported by the preferred audio backend.
///
/// Backend preference mirrors the rest of this module: cubeb when the
/// `cubeb` feature is enabled, otherwise cpal; with neither feature the
/// function panics. The three `cfg` guards are mutually exclusive, so
/// exactly one body is compiled in.
pub fn enumerate_devices() -> Vec<MediaDeviceInfo> {
    #[cfg(feature = "cubeb")]
    {
        cubeb::CubebBackend::enumerate_devices()
    }
    #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
    {
        cpal::CpalBackend::enumerate_devices()
    }
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
}
/// Channel endpoints and shared state held by the control (user-facing) side.
///
/// Produced by [`thread_init`] together with a matching [`RenderThreadInit`];
/// the two structs are the opposite ends of the same channels.
#[derive(Debug)]
pub(crate) struct ControlThreadInit {
    /// Counter of processed frames, shared with the render thread.
    pub frames_played: Arc<AtomicU64>,
    /// Sending half of the control-message channel (control → render).
    pub ctrl_msg_send: Sender<ControlMessage>,
    /// Receiving half of the render-capacity load channel (render → control).
    pub load_value_recv: Receiver<AudioRenderCapacityLoad>,
    /// Sending half of the event channel (also cloned into the render side).
    pub event_send: Sender<EventDispatch>,
    /// Receiving half of the event channel.
    pub event_recv: Receiver<EventDispatch>,
}
/// Channel endpoints and shared state held by the render side.
///
/// Counterpart of [`ControlThreadInit`]; both are produced by [`thread_init`].
#[derive(Clone, Debug)]
pub(crate) struct RenderThreadInit {
    /// Counter of processed frames, shared with the control thread.
    pub frames_played: Arc<AtomicU64>,
    /// Receiving half of the control-message channel (control → render).
    pub ctrl_msg_recv: Receiver<ControlMessage>,
    /// Sending half of the render-capacity load channel (render → control).
    pub load_value_send: Sender<AudioRenderCapacityLoad>,
    /// Sending half of the event channel.
    pub event_send: Sender<EventDispatch>,
}
/// Create the shared state and channels that connect the control thread to
/// the render thread, returning one endpoint bundle for each side.
pub(crate) fn thread_init() -> (ControlThreadInit, RenderThreadInit) {
    // Control messages and events are unbounded; the load-value channel is
    // bounded to a single slot so at most one measurement is ever pending.
    let (msg_tx, msg_rx) = crossbeam_channel::unbounded();
    let (load_tx, load_rx) = crossbeam_channel::bounded(1);
    let (evt_tx, evt_rx) = crossbeam_channel::unbounded();
    let counter = Arc::new(AtomicU64::new(0));

    (
        ControlThreadInit {
            frames_played: Arc::clone(&counter),
            ctrl_msg_send: msg_tx,
            load_value_recv: load_rx,
            // The event sender is shared: the render side gets its own clone.
            event_send: evt_tx.clone(),
            event_recv: evt_rx,
        },
        RenderThreadInit {
            frames_played: counter,
            ctrl_msg_recv: msg_rx,
            load_value_send: load_tx,
            event_send: evt_tx,
        },
    )
}
/// Build an output stream on the preferred backend and return its manager.
///
/// A `sink_id` of `"none"` selects the dummy backend, which renders without
/// touching any audio device. Otherwise backend preference is cubeb first,
/// then cpal; with neither feature enabled the function panics. The `cfg`
/// guards below are mutually exclusive, so exactly one body is compiled in.
pub(crate) fn build_output(
    options: AudioContextOptions,
    render_thread_init: RenderThreadInit,
) -> Box<dyn AudioBackendManager> {
    if options.sink_id == "none" {
        let backend = none::NoneBackend::build_output(options, render_thread_init);
        return Box::new(backend);
    }
    #[cfg(feature = "cubeb")]
    {
        let backend = cubeb::CubebBackend::build_output(options, render_thread_init);
        Box::new(backend)
    }
    #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
    {
        let backend = cpal::CpalBackend::build_output(options, render_thread_init);
        Box::new(backend)
    }
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    {
        panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
    }
}
/// Build an input (capture) stream on the preferred backend.
///
/// Returns the backend manager together with the receiving end of the
/// channel on which captured [`AudioBuffer`]s arrive. Backend preference is
/// cubeb first, then cpal; the function only exists when at least one of
/// those features is enabled, and the trailing `cfg` arm is unreachable in
/// practice (it covers the no-backend combination excluded by the outer
/// `cfg(any(..))`).
#[cfg(any(feature = "cubeb", feature = "cpal"))]
pub(crate) fn build_input(
    options: AudioContextOptions,
) -> (Box<dyn AudioBackendManager>, Receiver<AudioBuffer>) {
    #[cfg(feature = "cubeb")]
    {
        let (b, r) = cubeb::CubebBackend::build_input(options);
        (Box::new(b), r)
    }
    #[cfg(all(not(feature = "cubeb"), feature = "cpal"))]
    {
        let (b, r) = cpal::CpalBackend::build_input(options);
        (Box::new(b), r)
    }
    #[cfg(all(not(feature = "cubeb"), not(feature = "cpal")))]
    {
        panic!("No audio backend available, enable the 'cpal' or 'cubeb' feature")
    }
}
/// Interface implemented by every audio backend (cubeb, cpal, none).
///
/// `Send + Sync + 'static` so a boxed manager can be shared across the
/// control and render threads.
pub(crate) trait AudioBackendManager: Send + Sync + 'static {
    /// Set up a new output stream with the given options.
    fn build_output(options: AudioContextOptions, render_thread_init: RenderThreadInit) -> Self
    where
        Self: Sized;
    /// Set up a new input stream, returning the backend together with the
    /// receiver for captured audio buffers.
    fn build_input(options: AudioContextOptions) -> (Self, Receiver<AudioBuffer>)
    where
        Self: Sized;
    /// Resume the stream; the `bool` presumably reports success — confirm
    /// against the backend implementations.
    fn resume(&self) -> bool;
    /// Suspend the stream; the `bool` presumably reports success — confirm
    /// against the backend implementations.
    fn suspend(&self) -> bool;
    /// Close the stream and release the underlying device.
    fn close(&self);
    /// Sample rate of the stream, in Hz.
    fn sample_rate(&self) -> f32;
    /// Number of channels of the stream.
    fn number_of_channels(&self) -> usize;
    /// Output latency of the stream, in seconds.
    fn output_latency(&self) -> f64;
    /// Identifier of the sink this stream is attached to.
    fn sink_id(&self) -> &str;
    /// Clone this manager into a new boxed trait object.
    fn boxed_clone(&self) -> Box<dyn AudioBackendManager>;
    /// List the media devices this backend can see.
    fn enumerate_devices() -> Vec<MediaDeviceInfo>
    where
        Self: Sized;
}
/// Compute the output buffer size, in frames, for the requested latency.
///
/// The fixed categories map to multiples of the render quantum (the
/// `sample_rate` argument is only used for `Custom`). A custom latency,
/// expressed in seconds, is converted to frames at the given sample rate and
/// rounded up to the next power of two.
///
/// # Panics
///
/// Panics with a `RangeError` message when a custom latency is not strictly
/// positive. Non-finite values (`NaN`, infinity) are rejected as well: a
/// plain `latency <= 0.` check lets `NaN` through (all comparisons with
/// `NaN` are false) and `NaN as usize` saturates to 0, which would silently
/// yield a bogus 1-frame buffer.
fn buffer_size_for_latency_category(
    latency_cat: AudioContextLatencyCategory,
    sample_rate: f32,
) -> usize {
    match latency_cat {
        AudioContextLatencyCategory::Interactive => RENDER_QUANTUM_SIZE,
        AudioContextLatencyCategory::Balanced => RENDER_QUANTUM_SIZE * 4,
        AudioContextLatencyCategory::Playback => RENDER_QUANTUM_SIZE * 8,
        #[allow(clippy::cast_sign_loss)]
        #[allow(clippy::cast_possible_truncation)]
        AudioContextLatencyCategory::Custom(latency) => {
            // Reject NaN/infinite values in addition to non-positive ones;
            // see the Panics section above for why the sign check alone is
            // not enough.
            if !latency.is_finite() || latency <= 0. {
                panic!(
                    "RangeError - Invalid custom latency: {:?}, should be strictly positive",
                    latency
                );
            }
            // Float-to-int casts saturate in Rust, so an absurdly large (but
            // finite) latency clamps to usize::MAX instead of overflowing.
            let buffer_size = (latency * sample_rate as f64) as usize;
            buffer_size.next_power_of_two()
        }
    }
}
/// Category of a media device, mirroring the `MediaDeviceInfo.kind` values
/// of the Media Capture and Streams API.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MediaDeviceInfoKind {
    /// Video capture device (e.g. a camera).
    VideoInput,
    /// Audio capture device (e.g. a microphone).
    AudioInput,
    /// Audio playback device (e.g. speakers, headphones).
    AudioOutput,
}
/// Description of a single media device, as returned by device enumeration.
///
/// Fields are private; read access goes through the getter methods below.
#[derive(Debug)]
pub struct MediaDeviceInfo {
    // Identifier of the device.
    device_id: String,
    // Group identifier, when the backend reports one.
    group_id: Option<String>,
    // Input/output category of the device.
    kind: MediaDeviceInfoKind,
    // Human-readable device label.
    label: String,
    // Opaque backend-specific device handle, recovered via `device()`.
    device: Box<dyn std::any::Any>,
}
impl MediaDeviceInfo {
    /// Assemble a device descriptor from its individual parts.
    pub(crate) fn new(
        device_id: String,
        group_id: Option<String>,
        kind: MediaDeviceInfoKind,
        label: String,
        device: Box<dyn std::any::Any>,
    ) -> Self {
        Self { device_id, group_id, kind, label, device }
    }

    /// Identifier of the device.
    pub fn device_id(&self) -> &str {
        self.device_id.as_str()
    }

    /// Group identifier, when the backend reported one.
    pub fn group_id(&self) -> Option<&str> {
        self.group_id.as_deref()
    }

    /// Input/output category of the device.
    pub fn kind(&self) -> MediaDeviceInfoKind {
        self.kind
    }

    /// Human-readable device label.
    pub fn label(&self) -> &str {
        self.label.as_str()
    }

    /// Consume `self`, handing back the opaque backend-specific handle.
    pub(crate) fn device(self) -> Box<dyn std::any::Any> {
        self.device
    }
}