//! The `AudioContext` type and constructor options
use crate::context::{AudioContextState, BaseAudioContext, ConcreteBaseAudioContext};
use crate::media::MediaStream;
use crate::node::{self, ChannelConfigOptions};
use crate::SampleRate;
use std::sync::atomic::AtomicU64;
use std::sync::Arc;
#[cfg(not(test))]
use std::sync::Mutex;
#[cfg(not(test))]
use crate::io;
#[cfg(not(test))]
use cpal::{traits::StreamTrait, Stream};
/// Identify the type of playback, which affects tradeoffs
/// between audio output latency and power consumption
#[derive(Clone, Debug)]
pub enum AudioContextLatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching. This is the default.
    Interactive,
    /// Prioritize sustained playback without interruption
    /// over audio output latency. Lowest power consumption.
    Playback,
    /// Specify the number of seconds of latency.
    /// This latency is not guaranteed to be applied:
    /// it depends on the audio hardware capabilities.
    Specific(f64),
}
impl Default for AudioContextLatencyCategory {
fn default() -> Self {
Self::Interactive
}
}
/// Specify the playback configuration for the [`AudioContext`] constructor.
///
/// All fields are optional and will default to the value best suited for interactive playback on
/// your hardware configuration.
///
/// Check the documentation of the [`AudioContext` constructor](AudioContext::new) for usage
/// instructions.
#[derive(Clone, Debug, Default)]
pub struct AudioContextOptions {
    /// Identify the type of playback, which affects
    /// tradeoffs between audio output latency and power consumption
    pub latency_hint: AudioContextLatencyCategory,
    /// Sample rate of the audio context and audio output hardware.
    /// `None` lets the backend choose (the test build falls back to 44_100 Hz).
    pub sample_rate: Option<u32>,
}
/// This interface represents an audio graph whose `AudioDestinationNode` is routed to a real-time
/// output device that produces a signal directed at the user.
// the naming comes from the web audio specification
#[allow(clippy::module_name_repetitions)]
pub struct AudioContext {
    /// represents the underlying `BaseAudioContext`
    base: ConcreteBaseAudioContext,
    /// cpal stream (play/pause functionality), wrapped in a `Mutex` so the
    /// `*_sync` control methods can operate through `&self`.
    /// Becomes `None` after `close_sync` takes (and thereby drops) the stream.
    #[cfg(not(test))] // in tests, do not set up a cpal Stream
    stream: Mutex<Option<Stream>>,
}
impl BaseAudioContext for AudioContext {
    /// Expose the underlying `ConcreteBaseAudioContext`, giving `AudioContext`
    /// access to all methods provided by the `BaseAudioContext` trait.
    fn base(&self) -> &ConcreteBaseAudioContext {
        &self.base
    }
}
impl Default for AudioContext {
    /// Construct an `AudioContext` with default [`AudioContextOptions`]
    /// (interactive latency hint, backend-chosen sample rate).
    fn default() -> Self {
        Self::new(AudioContextOptions::default())
    }
}
impl AudioContext {
    /// Creates and returns a new `AudioContext` object.
    ///
    /// This will play live audio on the default output device.
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, AudioContextLatencyCategory, AudioContextOptions};
    ///
    /// // Request a sample rate of 44.1 kHz and default latency (buffer size 128, if available)
    /// let opts = AudioContextOptions {
    ///     sample_rate: Some(44100),
    ///     latency_hint: AudioContextLatencyCategory::Interactive,
    /// };
    ///
    /// // Setup the audio context that will emit to your speakers
    /// let context = AudioContext::new(opts);
    ///
    /// // Alternatively, use the default constructor to get the best settings for your hardware
    /// // let context = AudioContext::default();
    /// ```
    #[allow(clippy::needless_pass_by_value)]
    #[cfg(not(test))]
    #[must_use]
    pub fn new(options: AudioContextOptions) -> Self {
        // track number of frames - synced from render thread to control thread
        let frames_played = Arc::new(AtomicU64::new(0));
        let frames_played_clone = frames_played.clone();

        // Build the cpal output stream; the returned `config` reflects what
        // the hardware actually granted, which may differ from `options`.
        let (stream, config, sender) = io::build_output(frames_played_clone, options);
        let number_of_channels = usize::from(config.channels);
        let sample_rate = SampleRate(config.sample_rate.0);

        let base = ConcreteBaseAudioContext::new(
            sample_rate,
            number_of_channels,
            frames_played,
            sender,
            // NOTE(review): flag meaning not visible here — presumably the
            // "offline" marker; confirm against ConcreteBaseAudioContext::new
            false,
        );
        // cpal streams start playing as soon as they are built, so the
        // context is immediately Running
        base.set_state(AudioContextState::Running);

        Self {
            base,
            stream: Mutex::new(Some(stream)),
        }
    }

    /// Test-build constructor: same signature as the real one, but does not
    /// open a cpal stream (no audio hardware is touched under `cfg(test)`).
    #[cfg(test)] // in tests, do not set up a cpal Stream
    #[allow(clippy::must_use_candidate)]
    #[allow(clippy::needless_pass_by_value)]
    pub fn new(options: AudioContextOptions) -> Self {
        // fall back to 44.1 kHz when no sample rate was requested
        let sample_rate = SampleRate(options.sample_rate.unwrap_or(44_100));
        let number_of_channels = 2;
        // the receiver end is dropped immediately: messages sent to the
        // (absent) render thread go nowhere in tests
        let (sender, _receiver) = crossbeam_channel::unbounded();
        let frames_played = Arc::new(AtomicU64::new(0));
        let base = ConcreteBaseAudioContext::new(
            sample_rate,
            number_of_channels,
            frames_played,
            sender,
            false,
        );
        base.set_state(AudioContextState::Running);
        Self { base }
    }

    /// Suspends the progression of time in the audio context.
    ///
    /// This will temporarily halt audio hardware access and reduce CPU/battery usage in the
    /// process.
    ///
    /// This function operates synchronously and might block the current thread. An async version
    /// is currently not implemented.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    // false positive due to #[cfg(not(test))]
    #[allow(clippy::missing_const_for_fn, clippy::unused_self)]
    pub fn suspend_sync(&self) {
        // no-op in tests and on an already-closed context (stream is None)
        #[cfg(not(test))] // in tests, do not set up a cpal Stream
        if let Some(s) = self.stream.lock().unwrap().as_ref() {
            if let Err(e) = s.pause() {
                panic!("Error suspending cpal stream: {:?}", e);
            }
            // only transition to Suspended once the stream actually paused
            self.base.set_state(AudioContextState::Suspended);
        }
    }

    /// Resumes the progression of time in an audio context that has previously been
    /// suspended/paused.
    ///
    /// This function operates synchronously and might block the current thread. An async version
    /// is currently not implemented.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    // false positive due to #[cfg(not(test))]
    #[allow(clippy::missing_const_for_fn, clippy::unused_self)]
    pub fn resume_sync(&self) {
        // no-op in tests and on an already-closed context (stream is None)
        #[cfg(not(test))] // in tests, do not set up a cpal Stream
        if let Some(s) = self.stream.lock().unwrap().as_ref() {
            if let Err(e) = s.play() {
                panic!("Error resuming cpal stream: {:?}", e);
            }
            // only transition to Running once the stream actually resumed
            self.base.set_state(AudioContextState::Running);
        }
    }

    /// Closes the `AudioContext`, releasing the system resources being used.
    ///
    /// This will not automatically release all `AudioContext`-created objects, but will suspend
    /// the progression of the currentTime, and stop processing audio data.
    ///
    /// This function operates synchronously and might block the current thread. An async version
    /// is currently not implemented.
    ///
    /// # Panics
    ///
    /// Will panic when this function is called multiple times
    // false positive due to #[cfg(not(test))]
    #[allow(clippy::missing_const_for_fn, clippy::unused_self)]
    pub fn close_sync(&self) {
        // NOTE(review): `Option::take` on an empty Option is a no-op, so a
        // second call does not panic here (only a poisoned mutex would) —
        // the documented panic above may be outdated; confirm.
        #[cfg(not(test))] // in tests, do not set up a cpal Stream
        self.stream.lock().unwrap().take(); // will Drop

        self.base.set_state(AudioContextState::Closed);
    }

    /// Creates a `MediaStreamAudioSourceNode` from a [`MediaStream`]
    #[must_use]
    pub fn create_media_stream_source<M: MediaStream>(
        &self,
        media: M,
    ) -> node::MediaStreamAudioSourceNode {
        let opts = node::MediaStreamAudioSourceOptions {
            media_stream: media,
        };
        node::MediaStreamAudioSourceNode::new(self.base(), opts)
    }

    /// Creates a `MediaStreamAudioDestinationNode` with default channel configuration
    #[must_use]
    pub fn create_media_stream_destination(&self) -> node::MediaStreamAudioDestinationNode {
        let opts = ChannelConfigOptions::default();
        node::MediaStreamAudioDestinationNode::new(self.base(), opts)
    }
}