use std::error::Error;
use std::sync::Mutex;

use crate::context::{AudioContextState, BaseAudioContext, ConcreteBaseAudioContext};
use crate::events::{EventDispatch, EventHandler, EventType};
use crate::io::{
    self, enumerate_devices, AudioBackendManager, ControlThreadInit, RenderThreadInit,
};
use crate::media::{MediaElement, MediaStream};
use crate::message::ControlMessage;
use crate::node::{self, ChannelConfigOptions};
use crate::{AudioRenderCapacity, Event};
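
/// Check if the provided `sink_id` is available for playback.
///
/// The empty string (default device) and `"none"` (render without an audio output
/// device) are always considered valid; any other value must match the `device_id` of
/// one of the devices returned by `enumerate_devices`.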
fn is_valid_sink_id(sink_id: &str) -> bool {
if sink_id.is_empty() || sink_id == "none" {
true
} else {
enumerate_devices()
.into_iter()
.any(|d| d.device_id() == sink_id)
}
}
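
/// Identify the type of playback, which affects tradeoffs between audio output
/// latency and power/CPU consumption.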
#[derive(Copy, Clone, Debug)]
pub enum AudioContextLatencyCategory {
    /// Balance audio output latency and power consumption
    Balanced,
    /// Provide the lowest audio output latency possible without glitching (default)
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency
    Playback,
    /// Specify the number of seconds of latency; this is not guaranteed to be applied,
    /// as it depends on the capabilities of the audio hardware
    Custom(f64),
}
impl Default for AudioContextLatencyCategory {
fn default() -> Self {
Self::Interactive
}
}
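
/// Hint for the render quantum size of the `AudioContext`.
///
/// Only the default render quantum size (128 frames per render call) is currently
/// available; the enum is `#[non_exhaustive]` so further options can be added later.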
#[derive(Copy, Clone, Debug)]
#[non_exhaustive]
pub enum AudioContextRenderSizeCategory {
Default,
}
impl Default for AudioContextRenderSizeCategory {
fn default() -> Self {
Self::Default
}
}
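
/// Specifies the options to use when constructing an `AudioContext`.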
#[derive(Clone, Debug, Default)]
pub struct AudioContextOptions {
    /// Identify the type of playback, which affects tradeoffs between audio output latency and power consumption
    pub latency_hint: AudioContextLatencyCategory,
    /// Sample rate of the audio context in Hz; when `None` the backend picks a suitable rate
    pub sample_rate: Option<f32>,
    /// The audio output device: `""` for the default device, `"none"` to render without an output device, or a specific `device_id`
    pub sink_id: String,
    /// Hint for the render quantum size
    pub render_size_hint: AudioContextRenderSizeCategory,
}
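
/// This interface represents an audio graph whose destination node is routed to a
/// real-time output device that produces a signal directed at the user.
///
/// # Example
///
/// A minimal sketch of playing a tone, assuming the crate is imported as
/// `web_audio_api` and relying on the oscillator and destination APIs exposed by the
/// `BaseAudioContext` trait:
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, BaseAudioContext};
/// use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};
///
/// // set up the audio context with default options (output to the default device)
/// let context = AudioContext::default();
///
/// // connect an oscillator to the speakers and start it
/// let mut osc = context.create_oscillator();
/// osc.connect(&context.destination());
/// osc.start();
/// ```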
#[allow(clippy::module_name_repetitions)]
pub struct AudioContext {
    /// Represents the underlying `BaseAudioContext` concrete implementation
    base: ConcreteBaseAudioContext,
    /// Audio backend providing the output stream and play/pause functionality
    backend_manager: Mutex<Box<dyn AudioBackendManager>>,
    /// Provider for rendering performance metrics
    render_capacity: AudioRenderCapacity,
    /// Initializer for the render thread, kept so the backend can be rebuilt on sink changes
    render_thread_init: RenderThreadInit,
}
impl BaseAudioContext for AudioContext {
fn base(&self) -> &ConcreteBaseAudioContext {
&self.base
}
}
impl Default for AudioContext {
fn default() -> Self {
Self::new(AudioContextOptions::default())
}
}
impl AudioContext {
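    /// Creates and returns a new `AudioContext` object.
    ///
    /// # Panics
    ///
    /// Panics when an invalid `sink_id` is provided in the `AudioContextOptions`.
    ///
    /// # Example
    ///
    /// A sketch of requesting a playback-oriented latency hint, assuming the crate is
    /// imported as `web_audio_api`:
    ///
    /// ```no_run
    /// use web_audio_api::context::{
    ///     AudioContext, AudioContextLatencyCategory, AudioContextOptions,
    /// };
    ///
    /// // favour smooth playback over low output latency, keep all other defaults
    /// let options = AudioContextOptions {
    ///     latency_hint: AudioContextLatencyCategory::Playback,
    ///     ..AudioContextOptions::default()
    /// };
    /// let context = AudioContext::new(options);
    /// ```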
#[allow(clippy::needless_pass_by_value)]
#[must_use]
pub fn new(options: AudioContextOptions) -> Self {
if !is_valid_sink_id(&options.sink_id) {
panic!("NotFoundError: invalid sinkId {:?}", options.sink_id);
}
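
        // Set up the communication channels between this control thread handle and the
        // render thread, then spawn the audio backend for the requested output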
let (control_thread_init, render_thread_init) = io::thread_init();
let backend = io::build_output(options, render_thread_init.clone());
let ControlThreadInit {
frames_played,
ctrl_msg_send,
load_value_recv,
event_send,
event_recv,
} = control_thread_init;
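
        // Construct the audio graph on the control thread and hand it over to the
        // render thread via a `Startup` control message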
let graph = crate::render::graph::Graph::new();
let message = crate::message::ControlMessage::Startup { graph };
ctrl_msg_send.send(message).unwrap();
let base = ConcreteBaseAudioContext::new(
backend.sample_rate(),
backend.number_of_channels(),
frames_played,
ctrl_msg_send,
Some((event_send, event_recv)),
false,
);
base.set_state(AudioContextState::Running);
        let render_capacity = AudioRenderCapacity::new(base.clone(), load_value_recv);
Self {
base,
backend_manager: Mutex::new(backend),
render_capacity,
render_thread_init,
}
}
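
    /// This represents the number of seconds of processing latency incurred by the
    /// `AudioContext` passing an audio buffer from the audio graph to the audio
    /// subsystem.
    ///
    /// This implementation reports a fixed value of `0.0`; the actual latency is not
    /// measured.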
#[allow(clippy::unused_self)]
#[must_use]
pub fn base_latency(&self) -> f64 {
0.
}
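
    /// The estimation in seconds of audio output latency, i.e. the interval between
    /// the time the backend requests a block of audio and the time at which the first
    /// sample of that block is processed by the audio output device.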
#[must_use]
#[allow(clippy::missing_panics_doc)]
pub fn output_latency(&self) -> f64 {
self.backend_manager.lock().unwrap().output_latency()
}
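
    /// Identifier of the current audio output device.
    ///
    /// The empty string denotes the default output device.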
#[allow(clippy::missing_panics_doc)]
pub fn sink_id(&self) -> String {
self.backend_manager.lock().unwrap().sink_id().to_owned()
}
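
    /// Update the current audio output device.
    ///
    /// The provided `sink_id` must be the empty string (default device), `"none"`
    /// (render the graph without an audio output device), or the `device_id` of one of
    /// the available output devices.
    ///
    /// This function operates synchronously and might block the current thread while
    /// the render thread is restarted on the new device.
    ///
    /// # Errors
    ///
    /// Returns an error (`NotFoundError`) when the provided `sink_id` does not match
    /// any available device.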
#[allow(clippy::needless_collect, clippy::missing_panics_doc)]
pub fn set_sink_id_sync(&self, sink_id: String) -> Result<(), Box<dyn Error>> {
        if self.sink_id() == sink_id {
            return Ok(());
        }

        if !is_valid_sink_id(&sink_id) {
            Err(format!("NotFoundError: invalid sinkId {sink_id}"))?;
        }
let mut backend_manager_guard = self.backend_manager.lock().unwrap();
let original_state = self.state();
if original_state == AudioContextState::Closed {
return Ok(());
}
        // Flag the context as suspended while the backend is swapped
        self.base().set_state(AudioContextState::Suspended);

        // Acquire an exclusive lock on the control message sender so no other control
        // messages interleave while the render thread is restarted
        let ctrl_msg_send = self.base.lock_control_msg_sender();

        // Flush the control message channel and cache any pending messages; they are
        // replayed once the new render thread is up
        let mut pending_msgs: Vec<_> = self.render_thread_init.ctrl_msg_recv.try_iter().collect();

        // Acquire the audio graph from the current render thread
        let graph = if matches!(pending_msgs.first(), Some(ControlMessage::Startup { .. })) {
            // Edge case: the previous backend never processed its `Startup` message
            // (e.g. it was suspended for its entire lifetime), so the graph can be
            // taken directly from the cached messages
            let msg = pending_msgs.remove(0);
            match msg {
                ControlMessage::Startup { graph } => graph,
                _ => unreachable!(),
            }
        } else {
            // Ask the current render thread to shut down and hand back the graph
            let (graph_send, graph_recv) = crossbeam_channel::bounded(1);
            let message = ControlMessage::Shutdown { sender: graph_send };
            ctrl_msg_send.send(message).unwrap();
            if original_state == AudioContextState::Suspended {
                // Wake up the render thread so it can handle the shutdown message
                backend_manager_guard.resume();
            }
            graph_recv.recv().unwrap()
        };
        // Build a new backend for the requested sink, reusing the existing render
        // thread channels and preserving the sample rate of the current context
        let options = AudioContextOptions {
            sample_rate: Some(self.sample_rate()),
            latency_hint: AudioContextLatencyCategory::default(),
            sink_id,
            render_size_hint: AudioContextRenderSizeCategory::default(),
        };
        *backend_manager_guard = io::build_output(options, self.render_thread_init.clone());

        if original_state == AudioContextState::Suspended {
            // The context was suspended before the sink change, so suspend the freshly
            // started backend as well
            backend_manager_guard.suspend();
        }

        // Hand the audio graph over to the new render thread, restore the original
        // state, then replay the cached control messages
        let message = ControlMessage::Startup { graph };
        ctrl_msg_send.send(message).unwrap();

        if original_state == AudioContextState::Running {
            self.base().set_state(AudioContextState::Running);
        }

        pending_msgs
            .into_iter()
            .for_each(|m| self.base().send_control_msg(m).unwrap());

        // Release the backend lock before notifying listeners of the sink change
        drop(backend_manager_guard);

        let _ = self.base.send_event(EventDispatch::sink_change());
Ok(())
}
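
    /// Register a callback to run when the audio output device has changed.
    ///
    /// Only a single event handler is active at any time; calling this method again
    /// replaces the previously registered handler.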
pub fn set_onsinkchange<F: FnMut(Event) + Send + 'static>(&self, mut callback: F) {
let callback = move |_| {
callback(Event {
                type_: "sinkchange",
})
};
self.base().set_event_handler(
EventType::SinkChange,
EventHandler::Multiple(Box::new(callback)),
);
}
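
    /// Unset the callback to run when the audio output device has changed.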
pub fn clear_onsinkchange(&self) {
self.base().clear_event_handler(EventType::SinkChange);
}
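
    /// Suspends the progression of time in the audio context, temporarily halting
    /// audio hardware access.
    ///
    /// This function operates synchronously and might block the current thread.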
pub fn suspend_sync(&self) {
if self.backend_manager.lock().unwrap().suspend() {
self.base().set_state(AudioContextState::Suspended);
}
}
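
    /// Resumes the progression of time in an audio context that has previously been
    /// suspended.
    ///
    /// This function operates synchronously and might block the current thread.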
pub fn resume_sync(&self) {
if self.backend_manager.lock().unwrap().resume() {
self.base().set_state(AudioContextState::Running);
}
}
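
    /// Closes the `AudioContext`, releasing the audio backend and the system resources
    /// it was using.
    ///
    /// This function operates synchronously and might block the current thread.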
pub fn close_sync(&self) {
self.backend_manager.lock().unwrap().close();
self.render_capacity.stop();
self.base().set_state(AudioContextState::Closed);
}
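
    /// Creates a `MediaStreamAudioSourceNode` from a `MediaStream`.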
#[must_use]
pub fn create_media_stream_source<M: MediaStream>(
&self,
media: M,
) -> node::MediaStreamAudioSourceNode {
let opts = node::MediaStreamAudioSourceOptions {
media_stream: media,
};
node::MediaStreamAudioSourceNode::new(self, opts)
}
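
    /// Creates a `MediaStreamAudioDestinationNode`.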
#[must_use]
pub fn create_media_stream_destination(&self) -> node::MediaStreamAudioDestinationNode {
let opts = ChannelConfigOptions::default();
node::MediaStreamAudioDestinationNode::new(self, opts)
}
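
    /// Creates a `MediaElementAudioSourceNode` from a `MediaElement`.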
#[must_use]
pub fn create_media_element_source(
&self,
media_element: &mut MediaElement,
) -> node::MediaElementAudioSourceNode {
let opts = node::MediaElementAudioSourceOptions { media_element };
node::MediaElementAudioSourceNode::new(self, opts)
}
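
    /// Returns the `AudioRenderCapacity` instance associated with this context, which
    /// reports metrics on the load of the render thread.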
#[must_use]
pub fn render_capacity(&self) -> &AudioRenderCapacity {
&self.render_capacity
}
}