// web_audio_api/context/online.rs
1//! The `AudioContext` type and constructor options
2use std::error::Error;
3use std::sync::atomic::{AtomicBool, Ordering};
4use std::sync::{Arc, Mutex};
5
6use crate::context::{AudioContextState, BaseAudioContext, ConcreteBaseAudioContext};
7use crate::events::{EventDispatch, EventHandler, EventLoop, EventPayload, EventType};
8use crate::io::{self, AudioBackendManager, ControlThreadInit, NoneBackend, RenderThreadInit};
9use crate::media_devices::{enumerate_devices_sync, MediaDeviceInfoKind};
10use crate::media_streams::{MediaStream, MediaStreamTrack};
11use crate::message::{ControlMessage, OneshotNotify};
12use crate::node::{self, AudioNodeOptions};
13use crate::render::graph::Graph;
14use crate::MediaElement;
15use crate::{AudioPlaybackStats, AudioRenderCapacity, Event};
16
17use futures_channel::oneshot;
18
19/// Check if the provided sink_id is available for playback
20///
21/// It should be "", "none" or a valid output `sinkId` returned from [`enumerate_devices_sync`]
22fn is_valid_sink_id(sink_id: &str) -> bool {
23    if sink_id.is_empty() || sink_id == "none" {
24        true
25    } else {
26        enumerate_devices_sync()
27            .into_iter()
28            .filter(|d| d.kind() == MediaDeviceInfoKind::AudioOutput)
29            .any(|d| d.device_id() == sink_id)
30    }
31}
32
/// Errors raised while constructing an [`AudioContext`] or changing its output sink
#[derive(Debug)]
enum AudioContextError {
    /// The requested sink id does not match any available audio output device
    SinkNotFound { sink_id: String },
    /// The audio backend failed to create or start the output stream
    Backend { error: io::AudioBackendError },
}
38
impl std::fmt::Display for AudioContextError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Error strings are prefixed with DOMException-style names ("NotFoundError",
        // "InvalidStateError") to mirror the Web Audio API error vocabulary.
        match self {
            Self::SinkNotFound { sink_id } => {
                write!(f, "NotFoundError - Invalid sinkId: {sink_id:?}")
            }
            Self::Backend { error } => write!(f, "InvalidStateError - {error}"),
        }
    }
}
49
impl Error for AudioContextError {}

// Allows `?` on backend operations inside `try_new_inner` to auto-wrap into `Backend`
impl From<io::AudioBackendError> for AudioContextError {
    fn from(error: io::AudioBackendError) -> Self {
        Self::Backend { error }
    }
}
57
/// Identify the type of playback, which affects tradeoffs
/// between audio output latency and power consumption
#[derive(Copy, Clone, Debug, Default)]
pub enum AudioContextLatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching. This is the default.
    #[default]
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency.
    ///
    /// Lowest power consumption.
    Playback,
    /// Specify the number of seconds of latency
    ///
    /// This latency is not guaranteed to be applied, it depends on the audio hardware capabilities
    Custom(f64),
}
76
/// This allows users to ask for a particular render quantum size.
///
/// Currently, only the default value is available
// Doc comments lead, attributes follow, and the two previously-split `#[derive]`
// attributes are merged into one, per Rust convention.
#[derive(Copy, Clone, Debug, Default)]
#[non_exhaustive]
pub enum AudioContextRenderSizeCategory {
    /// The default value of 128 frames
    #[default]
    Default,
}
88
89/// Specify the playback configuration for the [`AudioContext`] constructor.
90///
91/// All fields are optional and will default to the value best suited for interactive playback on
92/// your hardware configuration.
93///
94/// For future compatibility, it is best to construct a default implementation of this struct and
95/// set the fields you would like to override:
96/// ```
97/// use web_audio_api::context::AudioContextOptions;
98///
99/// // Request a sample rate of 44.1 kHz, leave other fields to their default values
100/// let opts = AudioContextOptions {
101///     sample_rate: Some(44100.),
102///     ..AudioContextOptions::default()
103/// };
104#[derive(Clone, Debug, Default)]
105pub struct AudioContextOptions {
106    /// Identify the type of playback, which affects tradeoffs between audio output latency and
107    /// power consumption.
108    pub latency_hint: AudioContextLatencyCategory,
109
110    /// Sample rate of the audio context and audio output hardware. Use `None` for a default value.
111    pub sample_rate: Option<f32>,
112
113    /// The audio output device
114    /// - use `""` for the default audio output device
115    /// - use `"none"` to process the audio graph without playing through an audio output device.
116    /// - use `"sinkId"` to use the specified audio sink id, obtained with [`enumerate_devices_sync`]
117    pub sink_id: String,
118
119    /// Option to request a default, optimized or specific render quantum size. It is a hint that might not be honored.
120    pub render_size_hint: AudioContextRenderSizeCategory,
121}
122
123/// This interface represents an audio graph whose `AudioDestinationNode` is routed to a real-time
124/// output device that produces a signal directed at the user.
125// the naming comes from the web audio specification
126#[allow(clippy::module_name_repetitions)]
127pub struct AudioContext {
128    /// represents the underlying `BaseAudioContext`
129    base: ConcreteBaseAudioContext,
130    /// audio backend (play/pause functionality)
131    backend_manager: Mutex<Box<dyn AudioBackendManager>>,
132    /// Provider for rendering performance metrics
133    render_capacity: AudioRenderCapacity,
134    /// Provider for playback statistics
135    playback_stats: AudioPlaybackStats,
136    /// true while the render thread has not yet processed its initial Startup message
137    startup_pending: std::sync::Arc<AtomicBool>,
138    /// Initializer for the render thread (when restart is required)
139    render_thread_init: RenderThreadInit,
140}
141
// Manual Debug implementation: surface the user-facing properties and elide the
// remaining internal fields via `finish_non_exhaustive`.
impl std::fmt::Debug for AudioContext {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AudioContext")
            .field("sink_id", &self.sink_id())
            .field("base_latency", &self.base_latency())
            .field("output_latency", &self.output_latency())
            .field("base", &self.base())
            .finish_non_exhaustive()
    }
}
152
impl Drop for AudioContext {
    fn drop(&mut self) {
        // Continue playing the stream if the AudioContext goes out of scope
        if self.state() == AudioContextState::Running {
            // Swap the live backend for an inert placeholder, then deliberately leak the
            // original box so its Drop never runs and the audio stream keeps playing.
            let tombstone = Box::new(NoneBackend::void());
            let original = std::mem::replace(self.backend_manager.get_mut().unwrap(), tombstone);
            Box::leak(original);
        }
    }
}
163
// All shared context functionality (node factories, state, currentTime, ...) is provided
// by the `BaseAudioContext` trait through this accessor.
impl BaseAudioContext for AudioContext {
    fn base(&self) -> &ConcreteBaseAudioContext {
        &self.base
    }
}
169
// A default AudioContext plays on the default output device with default options;
// like `new`, this panics when the output stream cannot be set up.
impl Default for AudioContext {
    fn default() -> Self {
        Self::new(AudioContextOptions::default())
    }
}
175
176impl AudioContext {
    /// Creates and returns a new `AudioContext` object.
    ///
    /// This will play live audio on the default output device.
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, AudioContextOptions};
    ///
    /// // Request a sample rate of 44.1 kHz and default latency (buffer size 128, if available)
    /// let opts = AudioContextOptions {
    ///     sample_rate: Some(44100.),
    ///     ..AudioContextOptions::default()
    /// };
    ///
    /// // Setup the audio context that will emit to your speakers
    /// let context = AudioContext::new(opts);
    ///
    /// // Alternatively, use the default constructor to get the best settings for your hardware
    /// // let context = AudioContext::default();
    /// ```
    ///
    /// # Panics
    ///
    /// The `AudioContext` constructor will panic when an invalid `sinkId` is provided in the
    /// `AudioContextOptions`, or when the selected audio backend cannot create or start the output
    /// stream. Use [`Self::try_new`] to handle these errors without panicking.
    #[must_use]
    pub fn new(options: AudioContextOptions) -> Self {
        // Delegate to the fallible constructor; surface any error as a panic
        Self::try_new_inner(options).unwrap_or_else(|e| panic!("{e}"))
    }
206
    /// Creates and returns a new `AudioContext` object.
    ///
    /// This will play live audio on the requested output device and returns backend errors instead
    /// of panicking when the stream cannot be created.
    ///
    /// # Errors
    ///
    /// Returns an error when the sink id is invalid or when the selected audio backend cannot
    /// create or start the output stream.
    pub fn try_new(options: AudioContextOptions) -> Result<Self, Box<dyn Error>> {
        // Erase the concrete `AudioContextError` type into the boxed trait object callers expect
        Self::try_new_inner(options).map_err(Into::into)
    }
219
    /// Shared fallible constructor: validates the sink, spawns the render thread and event
    /// loop, and assembles the `AudioContext`.
    fn try_new_inner(options: AudioContextOptions) -> Result<Self, AudioContextError> {
        // https://webaudio.github.io/web-audio-api/#validating-sink-identifier
        if !is_valid_sink_id(&options.sink_id) {
            return Err(AudioContextError::SinkNotFound {
                sink_id: options.sink_id,
            });
        }

        // Set up the audio output thread
        let (control_thread_init, render_thread_init) = io::thread_init();
        // Keep a handle on the "Startup not yet processed" flag for suspend bookkeeping
        let startup_pending = Arc::clone(&render_thread_init.startup_pending);
        let backend = io::build_output(options, render_thread_init.clone())?;

        let ControlThreadInit {
            state,
            frames_played,
            stats,
            ctrl_msg_send,
            event_send,
            event_recv,
        } = control_thread_init;

        // Construct the audio Graph and hand it to the render thread
        let (node_id_producer, node_id_consumer) = llq::Queue::new().split();
        let graph = Graph::new(node_id_producer);
        let message = ControlMessage::Startup { graph };
        ctrl_msg_send.send(message).unwrap();

        // Set up the event loop thread that handles the events spawned by the render thread
        let event_loop = EventLoop::new(event_recv);

        // Put everything together in the BaseAudioContext (shared with offline context)
        let base = ConcreteBaseAudioContext::new(
            backend.sample_rate(),
            backend.number_of_channels(),
            state,
            frames_played,
            ctrl_msg_send,
            event_send,
            event_loop.clone(),
            false,
            node_id_consumer,
        );

        // Setup AudioRenderCapacity for this context
        let render_capacity = AudioRenderCapacity::new(base.clone(), stats.clone());
        let playback_stats = AudioPlaybackStats::new(base.clone(), stats);

        // As the final step, spawn a thread for the event loop. If we do this earlier we may miss
        // event handling of the initial events that are emitted right after render thread
        // construction.
        event_loop.run_in_thread();

        Ok(Self {
            base,
            backend_manager: Mutex::new(backend),
            render_capacity,
            playback_stats,
            startup_pending,
            render_thread_init,
        })
    }
282
    /// This represents the number of seconds of processing latency incurred by
    /// the `AudioContext` passing the audio from the `AudioDestinationNode`
    /// to the audio subsystem.
    // We don't do any buffering between rendering the audio and sending
    // it to the audio subsystem, so this value is zero. (see Gecko)
    #[allow(clippy::unused_self)]
    #[must_use]
    pub fn base_latency(&self) -> f64 {
        0.
    }
293
    /// The estimation in seconds of audio output latency, i.e., the interval
    /// between the time the UA requests the host system to play a buffer and
    /// the time at which the first sample in the buffer is actually processed
    /// by the audio output device.
    ///
    /// Panics with an `InvalidStateError` message when the backend cannot
    /// report its output latency.
    #[must_use]
    #[allow(clippy::missing_panics_doc)]
    pub fn output_latency(&self) -> f64 {
        self.try_output_latency()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"))
    }
304
    /// The estimation in seconds of audio output latency.
    ///
    /// # Errors
    ///
    /// Returns an error when the selected audio backend cannot query the output latency.
    fn try_output_latency(&self) -> Result<f64, Box<dyn Error>> {
        Ok(self.backend_manager.lock().unwrap().output_latency()?)
    }
313
    /// Identifier or the information of the current audio output device.
    ///
    /// The initial value is `""`, which means the default audio output device.
    #[allow(clippy::missing_panics_doc)]
    pub fn sink_id(&self) -> String {
        // Queried from the live backend so it reflects any prior `set_sink_id_sync` call
        self.backend_manager.lock().unwrap().sink_id().to_owned()
    }
321
    /// Returns an [`AudioRenderCapacity`] instance associated with an AudioContext.
    #[must_use]
    pub fn render_capacity(&self) -> AudioRenderCapacity {
        self.render_capacity.clone()
    }
327
    /// Returns an [`AudioPlaybackStats`] instance associated with this `AudioContext`.
    #[must_use]
    pub fn playback_stats(&self) -> AudioPlaybackStats {
        self.playback_stats.clone()
    }
333
    /// Update the current audio output device.
    ///
    /// The provided `sink_id` string must match a device id as returned by
    /// [`enumerate_devices_sync`].
    ///
    /// Supplying `"none"` for the `sink_id` will process the audio graph without playing through an
    /// audio output device.
    ///
    /// This function operates synchronously and might block the current thread. An async version
    /// is currently not implemented.
    ///
    /// # Errors
    ///
    /// Returns an error when the sink id is invalid, or when the audio backend fails to close the
    /// old stream or open the new one.
    #[allow(clippy::needless_collect, clippy::missing_panics_doc)]
    pub fn set_sink_id_sync(&self, sink_id: String) -> Result<(), Box<dyn Error>> {
        log::debug!("SinkChange requested");
        if self.sink_id() == sink_id {
            log::debug!("SinkChange: no-op");
            return Ok(()); // sink is already active
        }

        if !is_valid_sink_id(&sink_id) {
            Err(format!("NotFoundError: invalid sinkId {sink_id}"))?;
        };

        log::debug!("SinkChange: locking backend manager");
        let mut backend_manager_guard = self.backend_manager.lock().unwrap();
        let original_state = self.state();
        if original_state == AudioContextState::Closed {
            log::debug!("SinkChange: context is closed");
            return Ok(());
        }

        // Acquire exclusive lock on ctrl msg sender
        log::debug!("SinkChange: locking message channel");
        let ctrl_msg_send = self.base.lock_control_msg_sender();

        // Flush out the ctrl msg receiver, cache
        let mut pending_msgs: Vec<_> = self.render_thread_init.ctrl_msg_recv.try_iter().collect();

        // Acquire the active audio graph from the current render thread, shutting it down
        let graph = if matches!(pending_msgs.first(), Some(ControlMessage::Startup { .. })) {
            // Handle the edge case where the previous backend was suspended for its entire lifetime.
            // In this case, the `Startup` control message was never processed.
            log::debug!("SinkChange: recover unstarted graph");

            let msg = pending_msgs.remove(0);
            match msg {
                ControlMessage::Startup { graph } => graph,
                _ => unreachable!(),
            }
        } else {
            // Acquire the audio graph from the current render thread, shutting it down
            log::debug!("SinkChange: recover graph from render thread");

            let (graph_send, graph_recv) = crossbeam_channel::bounded(1);
            let message = ControlMessage::CloseAndRecycle { sender: graph_send };
            ctrl_msg_send.send(message).unwrap();
            if original_state == AudioContextState::Suspended {
                // We must wake up the render thread to be able to handle the shutdown.
                // No new audio will be produced because it will receive the shutdown command first.
                backend_manager_guard.resume()?;
            }
            graph_recv.recv().unwrap()
        };

        log::debug!("SinkChange: closing audio stream");
        backend_manager_guard.close()?;

        // hotswap the backend
        let options = AudioContextOptions {
            sample_rate: Some(self.sample_rate()),
            latency_hint: AudioContextLatencyCategory::default(), // todo reuse existing setting
            sink_id,
            render_size_hint: AudioContextRenderSizeCategory::default(), // todo reuse existing setting
        };
        log::debug!("SinkChange: starting audio stream");
        *backend_manager_guard = io::build_output(options, self.render_thread_init.clone())?;

        // if the previous backend state was suspend, suspend the new one before shipping the graph
        if original_state == AudioContextState::Suspended {
            log::debug!("SinkChange: suspending audio stream");
            backend_manager_guard.suspend()?;
        }

        // send the audio graph to the new render thread
        let message = ControlMessage::Startup { graph };
        ctrl_msg_send.send(message).unwrap();

        // flush the cached msgs
        pending_msgs
            .into_iter()
            .for_each(|m| self.base().send_control_msg(m));

        // explicitly release the lock to prevent concurrent render threads
        drop(backend_manager_guard);

        // trigger event when all the work is done
        let _ = self.base.send_event(EventDispatch::sink_change());

        log::debug!("SinkChange: done");
        Ok(())
    }
433
434    /// Register callback to run when the audio sink has changed
435    ///
436    /// Only a single event handler is active at any time. Calling this method multiple times will
437    /// override the previous event handler.
438    pub fn set_onsinkchange<F: FnMut(Event) + Send + 'static>(&self, mut callback: F) {
439        let callback = move |_| {
440            callback(Event {
441                type_: "sinkchange",
442            })
443        };
444
445        self.base().set_event_handler(
446            EventType::SinkChange,
447            EventHandler::Multiple(Box::new(callback)),
448        );
449    }
450
    /// Unset the callback to run when the audio sink has changed
    pub fn clear_onsinkchange(&self) {
        self.base().clear_event_handler(EventType::SinkChange);
    }
455
    /// Collect diagnostics from the control thread and the render thread, then invoke
    /// `callback` once with the combined report as a `String`.
    #[allow(clippy::missing_panics_doc)]
    #[doc(hidden)] // Method signature might change in the future
    pub fn run_diagnostics<F: Fn(String) + Send + 'static>(&self, callback: F) {
        // Pre-fill the buffer with control-thread info; the render thread appends its part
        let mut buffer = Vec::with_capacity(32 * 1024);
        {
            let backend = self.backend_manager.lock().unwrap();
            use std::io::Write;
            writeln!(&mut buffer, "backend: {}", backend.name()).ok();
            writeln!(&mut buffer, "sink id: {}", backend.sink_id()).ok();
            writeln!(
                &mut buffer,
                "output latency: {:.6}",
                backend.output_latency().unwrap_or(0.)
            )
            .ok();
        }
        // Wrap the user callback so it receives the completed buffer as UTF-8 text
        let callback = move |v| match v {
            EventPayload::Diagnostics(v) => {
                let s = String::from_utf8(v).unwrap();
                callback(s);
            }
            _ => unreachable!(),
        };

        self.base().set_event_handler(
            EventType::Diagnostics,
            EventHandler::Once(Box::new(callback)),
        );

        self.base()
            .send_control_msg(ControlMessage::RunDiagnostics { buffer });
    }
488
    /// Suspends the progression of time in the audio context.
    ///
    /// This will temporarily halt audio hardware access and reducing CPU/battery usage in the
    /// process.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    pub async fn suspend(&self) {
        // Don't lock the backend manager because we can't hold it across the await point
        log::debug!("Suspend called");

        let state = self.state();
        if state == AudioContextState::Closed {
            log::debug!("Suspend no-op - context is closed");
            return;
        }

        // A non-running context may still need suspending when its Startup message has not
        // been processed yet, hence the extra `startup_pending` check.
        if state != AudioContextState::Running && !self.startup_pending.load(Ordering::Acquire) {
            log::debug!("Suspend no-op - context is not running");
            return;
        }

        // Pause rendering via a control message
        let (sender, receiver) = oneshot::channel();
        let notify = OneshotNotify::Async(sender);
        self.base
            .suspend_control_msgs(ControlMessage::Suspend { notify });

        // Wait for the render thread to have processed the suspend message.
        // The AudioContextState will be updated by the render thread.
        log::debug!("Suspending audio graph, waiting for signal..");
        receiver.await.unwrap();

        // Then ask the audio host to suspend the stream
        log::debug!("Suspended audio graph. Suspending audio stream..");
        self.backend_manager
            .lock()
            .unwrap()
            .suspend()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

        log::debug!("Suspended audio stream");
    }
536
    /// Resumes the progression of time in an audio context that has previously been
    /// suspended/paused.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    pub async fn resume(&self) {
        let (sender, receiver) = oneshot::channel();

        {
            // Lock the backend manager mutex to avoid concurrent calls
            log::debug!("Resume called, locking backend manager");
            let backend_manager_guard = self.backend_manager.lock().unwrap();

            if self.state() != AudioContextState::Suspended {
                log::debug!("Resume no-op - context is not suspended");
                return;
            }

            // Ask the audio host to resume the stream
            backend_manager_guard
                .resume()
                .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

            // Then, ask to resume rendering via a control message
            log::debug!("Resumed audio stream, waking audio graph");
            let notify = OneshotNotify::Async(sender);
            self.base
                .resume_control_msgs(ControlMessage::Resume { notify });

            // Drop the Mutex guard so we won't hold it across an await point
        }

        // Wait for the render thread to have processed the resume message
        // The AudioContextState will be updated by the render thread.
        receiver.await.unwrap();
        log::debug!("Resumed audio graph");
    }
578
    /// Closes the `AudioContext`, releasing the system resources being used.
    ///
    /// This will not automatically release all `AudioContext`-created objects, but will suspend
    /// the progression of the currentTime, and stop processing audio data.
    ///
    /// # Panics
    ///
    /// Will panic when this function is called multiple times
    pub async fn close(&self) {
        // Don't lock the backend manager because we can't hold it across the await point
        log::debug!("Close called");

        if self.state() == AudioContextState::Closed {
            log::debug!("Close no-op - context is already closed");
            return;
        }

        // Stop AudioRenderCapacity before closing so no capacity events are queued during shutdown.
        self.render_capacity.stop();

        if self.state() == AudioContextState::Running {
            // First, stop rendering via a control message
            let (sender, receiver) = oneshot::channel();
            let notify = OneshotNotify::Async(sender);
            self.base.send_control_msg(ControlMessage::Close { notify });

            // Wait for the render thread to have processed the suspend message.
            // The AudioContextState will be updated by the render thread.
            log::debug!("Suspending audio graph, waiting for signal..");
            receiver.await.unwrap();
        } else {
            // if the context is not running, change the state manually
            self.base.set_state(AudioContextState::Closed);
        }

        // Then ask the audio host to close the stream
        log::debug!("Suspended audio graph. Closing audio stream..");
        self.backend_manager
            .lock()
            .unwrap()
            .close()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

        log::debug!("Closed audio stream");
    }
624
    /// Suspends the progression of time in the audio context.
    ///
    /// This will temporarily halt audio hardware access and reducing CPU/battery usage in the
    /// process.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has stopped processing.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    pub fn suspend_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Suspend_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        let state = self.state();
        if state == AudioContextState::Closed {
            log::debug!("Suspend_sync no-op - context is closed");
            return;
        }

        // A non-running context may still need suspending when its Startup message has not
        // been processed yet, hence the extra `startup_pending` check.
        if state != AudioContextState::Running && !self.startup_pending.load(Ordering::Acquire) {
            log::debug!("Suspend_sync no-op - context is not running");
            return;
        }

        // Pause rendering via a control message
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let notify = OneshotNotify::Sync(sender);
        self.base
            .suspend_control_msgs(ControlMessage::Suspend { notify });

        // Wait for the render thread to have processed the suspend message.
        // The AudioContextState will be updated by the render thread.
        log::debug!("Suspending audio graph, waiting for signal..");
        receiver.recv().ok();

        // Then ask the audio host to suspend the stream
        log::debug!("Suspended audio graph. Suspending audio stream..");
        backend_manager_guard
            .suspend()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

        log::debug!("Suspended audio stream");
    }
674
    /// Resumes the progression of time in an audio context that has previously been
    /// suspended/paused.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has started processing again.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
    pub fn resume_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Resume_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        if self.state() != AudioContextState::Suspended {
            log::debug!("Resume no-op - context is not suspended");
            return;
        }

        // Ask the audio host to resume the stream
        backend_manager_guard
            .resume()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

        // Then, ask to resume rendering via a control message
        log::debug!("Resumed audio stream, waking audio graph");
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let notify = OneshotNotify::Sync(sender);
        self.base
            .resume_control_msgs(ControlMessage::Resume { notify });

        // Wait for the render thread to have processed the resume message
        // The AudioContextState will be updated by the render thread.
        receiver.recv().ok();
        log::debug!("Resumed audio graph");
    }
714
    /// Closes the `AudioContext`, releasing the system resources being used.
    ///
    /// This will not automatically release all `AudioContext`-created objects, but will suspend
    /// the progression of the currentTime, and stop processing audio data.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has stopped processing.
    ///
    /// # Panics
    ///
    /// Will panic when this function is called multiple times
    pub fn close_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Close_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        if self.state() == AudioContextState::Closed {
            log::debug!("Close no-op - context is already closed");
            return;
        }

        // Stop AudioRenderCapacity before closing so no capacity events are queued during shutdown.
        self.render_capacity.stop();

        // First, stop rendering via a control message
        if self.state() == AudioContextState::Running {
            let (sender, receiver) = crossbeam_channel::bounded(0);
            let notify = OneshotNotify::Sync(sender);
            self.base.send_control_msg(ControlMessage::Close { notify });

            // Wait for the render thread to have processed the suspend message.
            // The AudioContextState will be updated by the render thread.
            log::debug!("Suspending audio graph, waiting for signal..");
            receiver.recv().ok();
        } else {
            // if the context is not running, change the state manually
            self.base.set_state(AudioContextState::Closed);
        }

        // Then ask the audio host to close the stream
        log::debug!("Suspended audio graph. Closing audio stream..");
        backend_manager_guard
            .close()
            .unwrap_or_else(|e| panic!("InvalidStateError - {e}"));

        log::debug!("Closed audio stream");
    }
762
763    /// Creates a [`MediaStreamAudioSourceNode`](node::MediaStreamAudioSourceNode) from a
764    /// [`MediaStream`]
765    #[must_use]
766    pub fn create_media_stream_source(
767        &self,
768        media: &MediaStream,
769    ) -> node::MediaStreamAudioSourceNode {
770        let opts = node::MediaStreamAudioSourceOptions {
771            media_stream: media,
772        };
773        node::MediaStreamAudioSourceNode::new(self, opts)
774    }
775
776    /// Creates a [`MediaStreamAudioDestinationNode`](node::MediaStreamAudioDestinationNode)
777    #[must_use]
778    pub fn create_media_stream_destination(&self) -> node::MediaStreamAudioDestinationNode {
779        let opts = AudioNodeOptions::default();
780        node::MediaStreamAudioDestinationNode::new(self, opts)
781    }
782
783    /// Creates a [`MediaStreamTrackAudioSourceNode`](node::MediaStreamTrackAudioSourceNode) from a
784    /// [`MediaStreamTrack`]
785    #[must_use]
786    pub fn create_media_stream_track_source(
787        &self,
788        media: &MediaStreamTrack,
789    ) -> node::MediaStreamTrackAudioSourceNode {
790        let opts = node::MediaStreamTrackAudioSourceOptions {
791            media_stream_track: media,
792        };
793        node::MediaStreamTrackAudioSourceNode::new(self, opts)
794    }
795
796    /// Creates a [`MediaElementAudioSourceNode`](node::MediaElementAudioSourceNode) from a
797    /// [`MediaElement`]
798    #[must_use]
799    pub fn create_media_element_source(
800        &self,
801        media_element: &mut MediaElement,
802    ) -> node::MediaElementAudioSourceNode {
803        let opts = node::MediaElementAudioSourceOptions { media_element };
804        node::MediaElementAudioSourceNode::new(self, opts)
805    }
806}
807
#[cfg(test)]
mod tests {
    use super::*;
    use futures::executor;

    // These tests use sink_id "none": the context spins up a render thread with
    // a dummy output, so state transitions and time progression are real but no
    // audio device is required.

    #[test]
    fn test_suspend_resume_close() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };

        // construct with 'none' sink_id
        let context = AudioContext::new(options);

        // Ensure startup has been processed before testing suspend/resume transitions.
        executor::block_on(context.resume());
        assert_eq!(context.state(), AudioContextState::Running);

        executor::block_on(context.suspend());
        assert_eq!(context.state(), AudioContextState::Suspended);
        let time1 = context.current_time();
        assert!(time1 >= 0.);

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));
        let time2 = context.current_time();
        assert_eq!(time1, time2); // no progression of time

        executor::block_on(context.resume());
        assert_eq!(context.state(), AudioContextState::Running);

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));

        let time3 = context.current_time();
        assert!(time3 > time2); // time is progressing

        executor::block_on(context.close());
        assert_eq!(context.state(), AudioContextState::Closed);

        let time4 = context.current_time();

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));

        let time5 = context.current_time();
        assert_eq!(time5, time4); // no progression of time
    }

    // Suspending before the initial startup has completed must still leave the
    // context in the Suspended state with a frozen clock.
    #[test]
    fn test_suspend_during_startup() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };

        let context = AudioContext::new(options);

        executor::block_on(context.suspend());
        assert_eq!(context.state(), AudioContextState::Suspended);

        let time1 = context.current_time();
        std::thread::sleep(std::time::Duration::from_millis(5));
        let time2 = context.current_time();
        assert_eq!(time1, time2);
    }

    // Same as above but via the blocking suspend_sync API.
    #[test]
    fn test_suspend_sync_during_startup() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };

        let context = AudioContext::new(options);

        context.suspend_sync();
        assert_eq!(context.state(), AudioContextState::Suspended);

        let time1 = context.current_time();
        std::thread::sleep(std::time::Duration::from_millis(5));
        let time2 = context.current_time();
        assert_eq!(time1, time2);
    }

    // Compile-time helper: accepts only values that are Send + Sync.
    fn require_send_sync<T: Send + Sync>(_: T) {}

    // The futures returned by suspend/resume/close must be usable across
    // threads (e.g. awaited on a multi-threaded executor).
    #[test]
    fn test_all_futures_thread_safe() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };
        let context = AudioContext::new(options);

        require_send_sync(context.suspend());
        require_send_sync(context.resume());
        require_send_sync(context.close());
    }

    // The panicking constructor must reject a sink id that no device reports.
    #[test]
    #[should_panic]
    fn test_invalid_sink_id() {
        let options = AudioContextOptions {
            sink_id: "invalid".into(),
            ..AudioContextOptions::default()
        };
        let _ = AudioContext::new(options);
    }

    // The fallible constructor must surface the same condition as a
    // NotFoundError (see AudioContextError::SinkNotFound's Display impl).
    #[test]
    fn test_try_new_invalid_sink_id() {
        let options = AudioContextOptions {
            sink_id: "invalid".into(),
            ..AudioContextOptions::default()
        };

        let error = AudioContext::try_new(options).unwrap_err();
        assert_eq!(
            error.to_string(),
            "NotFoundError - Invalid sinkId: \"invalid\""
        );
    }
}