web_audio_api/context/online.rs

//! The `AudioContext` type and constructor options
use std::error::Error;
use std::sync::Mutex;

use crate::context::{AudioContextState, BaseAudioContext, ConcreteBaseAudioContext};
use crate::events::{EventDispatch, EventHandler, EventLoop, EventPayload, EventType};
use crate::io::{self, AudioBackendManager, ControlThreadInit, NoneBackend, RenderThreadInit};
use crate::media_devices::{enumerate_devices_sync, MediaDeviceInfoKind};
use crate::media_streams::{MediaStream, MediaStreamTrack};
use crate::message::{ControlMessage, OneshotNotify};
use crate::node::{self, AudioNodeOptions};
use crate::render::graph::Graph;
use crate::MediaElement;
use crate::{AudioRenderCapacity, Event};

use futures_channel::oneshot;

/// Check if the provided sink_id is available for playback
///
/// It should be "", "none" or a valid output `sinkId` returned from [`enumerate_devices_sync`]
fn is_valid_sink_id(sink_id: &str) -> bool {
    if sink_id.is_empty() || sink_id == "none" {
        true
    } else {
        enumerate_devices_sync()
            .into_iter()
            .filter(|d| d.kind() == MediaDeviceInfoKind::AudioOutput)
            .any(|d| d.device_id() == sink_id)
    }
}

/// Identify the type of playback, which affects tradeoffs
/// between audio output latency and power consumption
#[derive(Copy, Clone, Debug)]
pub enum AudioContextLatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching. This is the default.
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency.
    ///
    /// Lowest power consumption.
    Playback,
    /// Specify the number of seconds of latency
    ///
    /// This latency is not guaranteed to be applied; it depends on the audio hardware capabilities
    Custom(f64),
}

impl Default for AudioContextLatencyCategory {
    fn default() -> Self {
        Self::Interactive
    }
}

#[derive(Copy, Clone, Debug)]
#[non_exhaustive]
/// This allows users to ask for a particular render quantum size.
///
/// Currently, only the default value is available
pub enum AudioContextRenderSizeCategory {
    /// The default value of 128 frames
    Default,
}

impl Default for AudioContextRenderSizeCategory {
    fn default() -> Self {
        Self::Default
    }
}

/// Specify the playback configuration for the [`AudioContext`] constructor.
///
/// All fields are optional and will default to the value best suited for interactive playback on
/// your hardware configuration.
///
/// For future compatibility, it is best to construct a default implementation of this struct and
/// set the fields you would like to override:
/// ```
/// use web_audio_api::context::AudioContextOptions;
///
/// // Request a sample rate of 44.1 kHz, leave other fields to their default values
/// let opts = AudioContextOptions {
///     sample_rate: Some(44100.),
///     ..AudioContextOptions::default()
/// };
/// ```
#[derive(Clone, Debug, Default)]
pub struct AudioContextOptions {
    /// Identify the type of playback, which affects tradeoffs between audio output latency and
    /// power consumption.
    pub latency_hint: AudioContextLatencyCategory,

    /// Sample rate of the audio context and audio output hardware. Use `None` for a default value.
    pub sample_rate: Option<f32>,

    /// The audio output device
    /// - use `""` for the default audio output device
    /// - use `"none"` to process the audio graph without playing through an audio output device.
    /// - use `"sinkId"` to use the specified audio sink id, obtained with [`enumerate_devices_sync`]
    pub sink_id: String,

    /// Option to request a default, optimized or specific render quantum size. It is a hint that
    /// might not be honored.
    pub render_size_hint: AudioContextRenderSizeCategory,
}

/// This interface represents an audio graph whose `AudioDestinationNode` is routed to a real-time
/// output device that produces a signal directed at the user.
// the naming comes from the web audio specification
#[allow(clippy::module_name_repetitions)]
pub struct AudioContext {
    /// represents the underlying `BaseAudioContext`
    base: ConcreteBaseAudioContext,
    /// audio backend (play/pause functionality)
    backend_manager: Mutex<Box<dyn AudioBackendManager>>,
    /// Provider for rendering performance metrics
    render_capacity: AudioRenderCapacity,
    /// Initializer for the render thread (when restart is required)
    render_thread_init: RenderThreadInit,
}

impl std::fmt::Debug for AudioContext {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AudioContext")
            .field("sink_id", &self.sink_id())
            .field("base_latency", &self.base_latency())
            .field("output_latency", &self.output_latency())
            .field("base", &self.base())
            .finish_non_exhaustive()
    }
}

impl Drop for AudioContext {
    fn drop(&mut self) {
        // Continue playing the stream if the AudioContext goes out of scope
        if self.state() == AudioContextState::Running {
            let tombstone = Box::new(NoneBackend::void());
            let original = std::mem::replace(self.backend_manager.get_mut().unwrap(), tombstone);
            Box::leak(original);
        }
    }
}

impl BaseAudioContext for AudioContext {
    fn base(&self) -> &ConcreteBaseAudioContext {
        &self.base
    }
}

impl Default for AudioContext {
    fn default() -> Self {
        Self::new(AudioContextOptions::default())
    }
}

impl AudioContext {
    /// Creates and returns a new `AudioContext` object.
    ///
    /// This will play live audio on the default output device.
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, AudioContextOptions};
    ///
    /// // Request a sample rate of 44.1 kHz and default latency (buffer size 128, if available)
    /// let opts = AudioContextOptions {
    ///     sample_rate: Some(44100.),
    ///     ..AudioContextOptions::default()
    /// };
    ///
    /// // Set up the audio context that will emit to your speakers
    /// let context = AudioContext::new(opts);
    ///
    /// // Alternatively, use the default constructor to get the best settings for your hardware
    /// // let context = AudioContext::default();
    /// ```
    ///
    /// # Panics
    ///
    /// The `AudioContext` constructor will panic when an invalid `sinkId` is provided in the
    /// `AudioContextOptions`. In a future version, a `try_new` constructor will be introduced that
    /// never panics.
    #[must_use]
    pub fn new(options: AudioContextOptions) -> Self {
        // https://webaudio.github.io/web-audio-api/#validating-sink-identifier
        assert!(
            is_valid_sink_id(&options.sink_id),
            "NotFoundError - Invalid sinkId: {:?}",
            options.sink_id
        );

        // Set up the audio output thread
        let (control_thread_init, render_thread_init) = io::thread_init();
        let backend = io::build_output(options, render_thread_init.clone());

        let ControlThreadInit {
            state,
            frames_played,
            ctrl_msg_send,
            load_value_recv,
            event_send,
            event_recv,
        } = control_thread_init;

        // Construct the audio Graph and hand it to the render thread
        let (node_id_producer, node_id_consumer) = llq::Queue::new().split();
        let graph = Graph::new(node_id_producer);
        let message = ControlMessage::Startup { graph };
        ctrl_msg_send.send(message).unwrap();

        // Set up the event loop thread that handles the events spawned by the render thread
        let event_loop = EventLoop::new(event_recv);

        // Put everything together in the BaseAudioContext (shared with the offline context)
        let base = ConcreteBaseAudioContext::new(
            backend.sample_rate(),
            backend.number_of_channels(),
            state,
            frames_played,
            ctrl_msg_send,
            event_send,
            event_loop.clone(),
            false,
            node_id_consumer,
        );

        // Set up the AudioRenderCapacity for this context
        let base_clone = base.clone();
        let render_capacity = AudioRenderCapacity::new(base_clone, load_value_recv);

        // As the final step, spawn a thread for the event loop. If we do this earlier we may miss
        // event handling of the initial events that are emitted right after render thread
        // construction.
        event_loop.run_in_thread();

        Self {
            base,
            backend_manager: Mutex::new(backend),
            render_capacity,
            render_thread_init,
        }
    }

    /// This represents the number of seconds of processing latency incurred by
    /// the `AudioContext` passing the audio from the `AudioDestinationNode`
    /// to the audio subsystem.
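    ///
    /// A small usage sketch (this implementation does no extra buffering, so the value is a
    /// constant zero):
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// assert_eq!(context.base_latency(), 0.);
    /// ```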
    // We don't do any buffering between rendering the audio and sending
    // it to the audio subsystem, so this value is zero. (see Gecko)
    #[allow(clippy::unused_self)]
    #[must_use]
    pub fn base_latency(&self) -> f64 {
        0.
    }

    /// The estimation in seconds of audio output latency, i.e., the interval
    /// between the time the UA requests the host system to play a buffer and
    /// the time at which the first sample in the buffer is actually processed
    /// by the audio output device.
    #[must_use]
    #[allow(clippy::missing_panics_doc)]
    pub fn output_latency(&self) -> f64 {
        self.backend_manager.lock().unwrap().output_latency()
    }

    /// Identifier of the current audio output device.
    ///
    /// The initial value is `""`, which means the default audio output device.
    #[allow(clippy::missing_panics_doc)]
    pub fn sink_id(&self) -> String {
        self.backend_manager.lock().unwrap().sink_id().to_owned()
    }

    /// Returns an [`AudioRenderCapacity`] instance associated with an AudioContext.
    #[must_use]
    pub fn render_capacity(&self) -> AudioRenderCapacity {
        self.render_capacity.clone()
    }

    /// Update the current audio output device.
    ///
    /// The provided `sink_id` string must match a device identifier returned from
    /// [`enumerate_devices_sync`].
    ///
    /// Supplying `"none"` for the `sink_id` will process the audio graph without playing through an
    /// audio output device.
    ///
    /// This function operates synchronously and might block the current thread. An async version
    /// is currently not implemented.
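    ///
    /// A minimal sketch of switching to headless processing:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// // keep processing the audio graph, but detach from the output device
    /// context.set_sink_id_sync("none".to_string()).unwrap();
    /// ```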
    #[allow(clippy::needless_collect, clippy::missing_panics_doc)]
    pub fn set_sink_id_sync(&self, sink_id: String) -> Result<(), Box<dyn Error>> {
        log::debug!("SinkChange requested");
        if self.sink_id() == sink_id {
            log::debug!("SinkChange: no-op");
            return Ok(()); // sink is already active
        }

        if !is_valid_sink_id(&sink_id) {
            Err(format!("NotFoundError: invalid sinkId {sink_id}"))?;
        };

        log::debug!("SinkChange: locking backend manager");
        let mut backend_manager_guard = self.backend_manager.lock().unwrap();
        let original_state = self.state();
        if original_state == AudioContextState::Closed {
            log::debug!("SinkChange: context is closed");
            return Ok(());
        }

        // Acquire exclusive lock on ctrl msg sender
        log::debug!("SinkChange: locking message channel");
        let ctrl_msg_send = self.base.lock_control_msg_sender();

        // Flush out the ctrl msg receiver and cache the pending messages
        let mut pending_msgs: Vec<_> = self.render_thread_init.ctrl_msg_recv.try_iter().collect();

        // Acquire the active audio graph from the current render thread, shutting it down
        let graph = if matches!(pending_msgs.first(), Some(ControlMessage::Startup { .. })) {
            // Handle the edge case where the previous backend was suspended for its entire lifetime.
            // In this case, the `Startup` control message was never processed.
            log::debug!("SinkChange: recover unstarted graph");

            let msg = pending_msgs.remove(0);
            match msg {
                ControlMessage::Startup { graph } => graph,
                _ => unreachable!(),
            }
        } else {
            // Acquire the audio graph from the current render thread, shutting it down
            log::debug!("SinkChange: recover graph from render thread");

            let (graph_send, graph_recv) = crossbeam_channel::bounded(1);
            let message = ControlMessage::CloseAndRecycle { sender: graph_send };
            ctrl_msg_send.send(message).unwrap();
            if original_state == AudioContextState::Suspended {
                // We must wake up the render thread to be able to handle the shutdown.
                // No new audio will be produced because it will receive the shutdown command first.
                backend_manager_guard.resume();
            }
            graph_recv.recv().unwrap()
        };

        log::debug!("SinkChange: closing audio stream");
        backend_manager_guard.close();

        // hotswap the backend
        let options = AudioContextOptions {
            sample_rate: Some(self.sample_rate()),
            latency_hint: AudioContextLatencyCategory::default(), // todo reuse existing setting
            sink_id,
            render_size_hint: AudioContextRenderSizeCategory::default(), // todo reuse existing setting
        };
        log::debug!("SinkChange: starting audio stream");
        *backend_manager_guard = io::build_output(options, self.render_thread_init.clone());

        // if the previous backend state was suspended, suspend the new one before shipping the graph
        if original_state == AudioContextState::Suspended {
            log::debug!("SinkChange: suspending audio stream");
            backend_manager_guard.suspend();
        }

        // send the audio graph to the new render thread
        let message = ControlMessage::Startup { graph };
        ctrl_msg_send.send(message).unwrap();

        // flush the cached msgs
        pending_msgs
            .into_iter()
            .for_each(|m| self.base().send_control_msg(m));

        // explicitly release the lock to prevent concurrent render threads
        drop(backend_manager_guard);

        // trigger event when all the work is done
        let _ = self.base.send_event(EventDispatch::sink_change());

        log::debug!("SinkChange: done");
        Ok(())
    }

    /// Register callback to run when the audio sink has changed
    ///
    /// Only a single event handler is active at any time. Calling this method multiple times will
    /// override the previous event handler.
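    ///
    /// A short sketch of registering a handler:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// context.set_onsinkchange(|_event| {
    ///     println!("audio output device has changed");
    /// });
    /// ```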
    pub fn set_onsinkchange<F: FnMut(Event) + Send + 'static>(&self, mut callback: F) {
        let callback = move |_| {
            callback(Event {
                type_: "sinkchange",
            })
        };

        self.base().set_event_handler(
            EventType::SinkChange,
            EventHandler::Multiple(Box::new(callback)),
        );
    }

    /// Unset the callback to run when the audio sink has changed
    pub fn clear_onsinkchange(&self) {
        self.base().clear_event_handler(EventType::SinkChange);
    }

    #[allow(clippy::missing_panics_doc)]
    #[doc(hidden)] // Method signature might change in the future
    pub fn run_diagnostics<F: Fn(String) + Send + 'static>(&self, callback: F) {
        let mut buffer = Vec::with_capacity(32 * 1024);
        {
            let backend = self.backend_manager.lock().unwrap();
            use std::io::Write;
            writeln!(&mut buffer, "backend: {}", backend.name()).ok();
            writeln!(&mut buffer, "sink id: {}", backend.sink_id()).ok();
            writeln!(
                &mut buffer,
                "output latency: {:.6}",
                backend.output_latency()
            )
            .ok();
        }
        let callback = move |v| match v {
            EventPayload::Diagnostics(v) => {
                let s = String::from_utf8(v).unwrap();
                callback(s);
            }
            _ => unreachable!(),
        };

        self.base().set_event_handler(
            EventType::Diagnostics,
            EventHandler::Once(Box::new(callback)),
        );

        self.base()
            .send_control_msg(ControlMessage::RunDiagnostics { buffer });
    }

    /// Suspends the progression of time in the audio context.
    ///
    /// This will temporarily halt audio hardware access and reduce CPU/battery usage in the
    /// process.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
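    ///
    /// A brief sketch; the returned future can be driven by any executor, here
    /// `futures::executor::block_on` as used in the crate's tests:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// // halt the progression of time in the audio graph
    /// futures::executor::block_on(context.suspend());
    /// ```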
    pub async fn suspend(&self) {
        // Don't lock the backend manager because we can't hold it across the await point
        log::debug!("Suspend called");

        if self.state() != AudioContextState::Running {
            log::debug!("Suspend no-op - context is not running");
            return;
        }

        // Pause rendering via a control message
        let (sender, receiver) = oneshot::channel();
        let notify = OneshotNotify::Async(sender);
        self.base
            .send_control_msg(ControlMessage::Suspend { notify });

        // Wait for the render thread to have processed the suspend message.
        // The AudioContextState will be updated by the render thread.
        log::debug!("Suspending audio graph, waiting for signal..");
        receiver.await.unwrap();

        // Then ask the audio host to suspend the stream
        log::debug!("Suspended audio graph. Suspending audio stream..");
        self.backend_manager.lock().unwrap().suspend();

        log::debug!("Suspended audio stream");
    }

    /// Resumes the progression of time in an audio context that has previously been
    /// suspended/paused.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
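    ///
    /// A brief sketch, continuing from a suspended context:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// futures::executor::block_on(context.suspend());
    /// // wake the audio graph up again
    /// futures::executor::block_on(context.resume());
    /// ```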
    pub async fn resume(&self) {
        let (sender, receiver) = oneshot::channel();

        {
            // Lock the backend manager mutex to avoid concurrent calls
            log::debug!("Resume called, locking backend manager");
            let backend_manager_guard = self.backend_manager.lock().unwrap();

            if self.state() != AudioContextState::Suspended {
                log::debug!("Resume no-op - context is not suspended");
                return;
            }

            // Ask the audio host to resume the stream
            backend_manager_guard.resume();

            // Then, ask to resume rendering via a control message
            log::debug!("Resumed audio stream, waking audio graph");
            let notify = OneshotNotify::Async(sender);
            self.base
                .send_control_msg(ControlMessage::Resume { notify });

            // Drop the Mutex guard so we won't hold it across an await point
        }

        // Wait for the render thread to have processed the resume message.
        // The AudioContextState will be updated by the render thread.
        receiver.await.unwrap();
        log::debug!("Resumed audio graph");
    }

    /// Closes the `AudioContext`, releasing the system resources being used.
    ///
    /// This will not automatically release all `AudioContext`-created objects, but will suspend
    /// the progression of the currentTime, and stop processing audio data.
    ///
    /// # Panics
    ///
    /// Will panic when this function is called multiple times
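    ///
    /// A brief sketch; after this call the progression of `currentTime` halts permanently:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// // release the audio hardware and stop processing audio data
    /// futures::executor::block_on(context.close());
    /// ```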
    pub async fn close(&self) {
        // Don't lock the backend manager because we can't hold it across the await point
        log::debug!("Close called");

        if self.state() == AudioContextState::Closed {
            log::debug!("Close no-op - context is already closed");
            return;
        }

        if self.state() == AudioContextState::Running {
            // First, stop rendering via a control message
            let (sender, receiver) = oneshot::channel();
            let notify = OneshotNotify::Async(sender);
            self.base.send_control_msg(ControlMessage::Close { notify });

            // Wait for the render thread to have processed the close message.
            // The AudioContextState will be updated by the render thread.
            log::debug!("Closing audio graph, waiting for signal..");
            receiver.await.unwrap();
        } else {
            // if the context is not running, change the state manually
            self.base.set_state(AudioContextState::Closed);
        }

        // Then ask the audio host to close the stream
        log::debug!("Closed audio graph. Closing audio stream..");
        self.backend_manager.lock().unwrap().close();

        // Stop the AudioRenderCapacity collection thread
        self.render_capacity.stop();

        log::debug!("Closed audio stream");
    }

    /// Suspends the progression of time in the audio context.
    ///
    /// This will temporarily halt audio hardware access and reduce CPU/battery usage in the
    /// process.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has stopped processing.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
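    ///
    /// A brief sketch:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// // blocks until the render thread has acknowledged the suspension
    /// context.suspend_sync();
    /// ```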
    pub fn suspend_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Suspend_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        if self.state() != AudioContextState::Running {
            log::debug!("Suspend_sync no-op - context is not running");
            return;
        }

        // Pause rendering via a control message
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let notify = OneshotNotify::Sync(sender);
        self.base
            .send_control_msg(ControlMessage::Suspend { notify });

        // Wait for the render thread to have processed the suspend message.
        // The AudioContextState will be updated by the render thread.
        log::debug!("Suspending audio graph, waiting for signal..");
        receiver.recv().ok();

        // Then ask the audio host to suspend the stream
        log::debug!("Suspended audio graph. Suspending audio stream..");
        backend_manager_guard.suspend();

        log::debug!("Suspended audio stream");
    }

    /// Resumes the progression of time in an audio context that has previously been
    /// suspended/paused.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has started processing again.
    ///
    /// # Panics
    ///
    /// Will panic if:
    ///
    /// * The audio device is not available
    /// * For a `BackendSpecificError`
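    ///
    /// A brief sketch (see also [`AudioContext::suspend_sync`]):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, AudioContextState, BaseAudioContext};
    ///
    /// let context = AudioContext::default();
    /// context.suspend_sync();
    /// context.resume_sync();
    /// assert_eq!(context.state(), AudioContextState::Running);
    /// ```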
    pub fn resume_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Resume_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        if self.state() != AudioContextState::Suspended {
            log::debug!("Resume_sync no-op - context is not suspended");
            return;
        }

        // Ask the audio host to resume the stream
        backend_manager_guard.resume();

        // Then, ask to resume rendering via a control message
        log::debug!("Resumed audio stream, waking audio graph");
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let notify = OneshotNotify::Sync(sender);
        self.base
            .send_control_msg(ControlMessage::Resume { notify });

        // Wait for the render thread to have processed the resume message.
        // The AudioContextState will be updated by the render thread.
        receiver.recv().ok();
        log::debug!("Resumed audio graph");
    }

    /// Closes the `AudioContext`, releasing the system resources being used.
    ///
    /// This will not automatically release all `AudioContext`-created objects, but will suspend
    /// the progression of the currentTime, and stop processing audio data.
    ///
    /// This function operates synchronously and blocks the current thread until the audio thread
    /// has stopped processing.
    ///
    /// # Panics
    ///
    /// Will panic when this function is called multiple times
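    ///
    /// A brief sketch:
    ///
    /// ```no_run
    /// use web_audio_api::context::AudioContext;
    ///
    /// let context = AudioContext::default();
    /// // blocks until the audio stream has shut down; calling it twice panics
    /// context.close_sync();
    /// ```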
    pub fn close_sync(&self) {
        // Lock the backend manager mutex to avoid concurrent calls
        log::debug!("Close_sync called, locking backend manager");
        let backend_manager_guard = self.backend_manager.lock().unwrap();

        if self.state() == AudioContextState::Closed {
            log::debug!("Close_sync no-op - context is already closed");
            return;
        }

        // First, stop rendering via a control message
        if self.state() == AudioContextState::Running {
            let (sender, receiver) = crossbeam_channel::bounded(0);
            let notify = OneshotNotify::Sync(sender);
            self.base.send_control_msg(ControlMessage::Close { notify });

            // Wait for the render thread to have processed the close message.
            // The AudioContextState will be updated by the render thread.
            log::debug!("Closing audio graph, waiting for signal..");
            receiver.recv().ok();
        } else {
            // if the context is not running, change the state manually
            self.base.set_state(AudioContextState::Closed);
        }

        // Then ask the audio host to close the stream
        log::debug!("Closed audio graph. Closing audio stream..");
        backend_manager_guard.close();

        // Stop the AudioRenderCapacity collection thread
        self.render_capacity.stop();

        log::debug!("Closed audio stream");
    }

    /// Creates a [`MediaStreamAudioSourceNode`](node::MediaStreamAudioSourceNode) from a
    /// [`MediaStream`]
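    ///
    /// A short sketch, assuming an input stream obtained from the `media_devices` helper
    /// [`get_user_media_sync`](crate::media_devices::get_user_media_sync):
    ///
    /// ```no_run
    /// use web_audio_api::context::{AudioContext, BaseAudioContext};
    /// use web_audio_api::media_devices::{get_user_media_sync, MediaStreamConstraints};
    /// use web_audio_api::node::AudioNode;
    ///
    /// let context = AudioContext::default();
    /// // request an input stream from the default microphone
    /// let mic = get_user_media_sync(MediaStreamConstraints::Audio);
    /// // feed the microphone into the audio graph and route it to the speakers
    /// let source = context.create_media_stream_source(&mic);
    /// source.connect(&context.destination());
    /// ```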
    #[must_use]
    pub fn create_media_stream_source(
        &self,
        media: &MediaStream,
    ) -> node::MediaStreamAudioSourceNode {
        let opts = node::MediaStreamAudioSourceOptions {
            media_stream: media,
        };
        node::MediaStreamAudioSourceNode::new(self, opts)
    }

    /// Creates a [`MediaStreamAudioDestinationNode`](node::MediaStreamAudioDestinationNode)
    #[must_use]
    pub fn create_media_stream_destination(&self) -> node::MediaStreamAudioDestinationNode {
        let opts = AudioNodeOptions::default();
        node::MediaStreamAudioDestinationNode::new(self, opts)
    }

    /// Creates a [`MediaStreamTrackAudioSourceNode`](node::MediaStreamTrackAudioSourceNode) from a
    /// [`MediaStreamTrack`]
    #[must_use]
    pub fn create_media_stream_track_source(
        &self,
        media: &MediaStreamTrack,
    ) -> node::MediaStreamTrackAudioSourceNode {
        let opts = node::MediaStreamTrackAudioSourceOptions {
            media_stream_track: media,
        };
        node::MediaStreamTrackAudioSourceNode::new(self, opts)
    }

    /// Creates a [`MediaElementAudioSourceNode`](node::MediaElementAudioSourceNode) from a
    /// [`MediaElement`]
    #[must_use]
    pub fn create_media_element_source(
        &self,
        media_element: &mut MediaElement,
    ) -> node::MediaElementAudioSourceNode {
        let opts = node::MediaElementAudioSourceOptions { media_element };
        node::MediaElementAudioSourceNode::new(self, opts)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use futures::executor;

    #[test]
    fn test_suspend_resume_close() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };

        // construct with the 'none' sink_id
        let context = AudioContext::new(options);

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));

        executor::block_on(context.suspend());
        assert_eq!(context.state(), AudioContextState::Suspended);
        let time1 = context.current_time();
        assert!(time1 >= 0.);

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));
        let time2 = context.current_time();
        assert_eq!(time1, time2); // no progression of time

        executor::block_on(context.resume());
        assert_eq!(context.state(), AudioContextState::Running);

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));

        let time3 = context.current_time();
        assert!(time3 > time2); // time is progressing

        executor::block_on(context.close());
        assert_eq!(context.state(), AudioContextState::Closed);

        let time4 = context.current_time();

        // allow some time to progress
        std::thread::sleep(std::time::Duration::from_millis(1));

        let time5 = context.current_time();
        assert_eq!(time5, time4); // no progression of time
    }

    fn require_send_sync<T: Send + Sync>(_: T) {}

    #[test]
    fn test_all_futures_thread_safe() {
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };
        let context = AudioContext::new(options);

        require_send_sync(context.suspend());
        require_send_sync(context.resume());
        require_send_sync(context.close());
    }

    #[test]
    #[should_panic]
    fn test_invalid_sink_id() {
        let options = AudioContextOptions {
            sink_id: "invalid".into(),
            ..AudioContextOptions::default()
        };
        let _ = AudioContext::new(options);
    }
}