firewheel_core/node.rs

use core::ops::Range;
use core::time::Duration;
use core::{any::Any, fmt::Debug, hash::Hash, num::NonZeroU32};

#[cfg(not(feature = "std"))]
use bevy_platform::prelude::{Box, Vec};

use crate::dsp::buffer::ChannelBuffer;
use crate::log::RealtimeLogger;
use crate::{
    channel_config::{ChannelConfig, ChannelCount},
    clock::{DurationSamples, InstantSamples, InstantSeconds},
    dsp::declick::DeclickValues,
    event::{NodeEvent, NodeEventType, ProcEvents},
    ConnectedMask, SilenceMask, StreamInfo,
};

pub mod dummy;

#[cfg(feature = "scheduled_events")]
use crate::clock::EventInstant;

#[cfg(feature = "musical_transport")]
use crate::clock::{InstantMusical, MusicalTransport};

/// A globally unique identifier for a node.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "bevy_reflect", reflect(opaque))]
pub struct NodeID(pub thunderdome::Index);

impl NodeID {
    pub const DANGLING: Self = Self(thunderdome::Index::DANGLING);
}

impl Default for NodeID {
    fn default() -> Self {
        Self::DANGLING
    }
}

/// Information about an [`AudioNode`].
///
/// This struct enforces the use of the builder pattern for future-proofing, as
/// more fields are likely to be added in the future.
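///
/// # Example
///
/// A minimal sketch of building node info (illustrative only; the channel
/// counts are placeholders, and `MyCustomState` is a hypothetical type
/// defined by your node):
///
/// ```ignore
/// let info = AudioNodeInfo::new()
///     .debug_name("my_node")
///     .channel_config(ChannelConfig {
///         num_inputs: ChannelCount::ZERO,
///         num_outputs: ChannelCount::ZERO,
///     })
///     .call_update_method(true)
///     .custom_state(MyCustomState::default());
/// ```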
#[derive(Debug)]
pub struct AudioNodeInfo {
    debug_name: &'static str,
    channel_config: ChannelConfig,
    call_update_method: bool,
    custom_state: Option<Box<dyn Any>>,
    latency_frames: u32,
}

impl AudioNodeInfo {
    /// Construct a new [`AudioNodeInfo`] builder struct.
    pub const fn new() -> Self {
        Self {
            debug_name: "unnamed",
            channel_config: ChannelConfig {
                num_inputs: ChannelCount::ZERO,
                num_outputs: ChannelCount::ZERO,
            },
            call_update_method: false,
            custom_state: None,
            latency_frames: 0,
        }
    }

    /// A unique name for this type of node, used for debugging purposes.
    pub const fn debug_name(mut self, debug_name: &'static str) -> Self {
        self.debug_name = debug_name;
        self
    }

    /// The channel configuration of this node.
    ///
    /// By default this has a channel configuration with zero input and output
    /// channels.
    ///
    /// WARNING: Audio nodes *MUST* either completely fill all output buffers
    /// with data, or return [`ProcessStatus::ClearAllOutputs`]/[`ProcessStatus::Bypass`].
    /// Failing to do this will result in audio glitches.
    pub const fn channel_config(mut self, channel_config: ChannelConfig) -> Self {
        self.channel_config = channel_config;
        self
    }

    /// Set to `true` if this node wishes to have the Firewheel context call
    /// [`AudioNode::update`] on every update cycle.
    ///
    /// By default this is set to `false`.
    pub const fn call_update_method(mut self, call_update_method: bool) -> Self {
        self.call_update_method = call_update_method;
        self
    }

    /// Custom `!Send` state that can be stored in the Firewheel context and accessed
    /// by the user.
    ///
    /// The user accesses this state via `FirewheelCtx::node_state` and
    /// `FirewheelCtx::node_state_mut`.
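    ///
    /// # Example
    ///
    /// A sketch of storing custom state (illustrative only; `SharedState` is
    /// a hypothetical user-defined type):
    ///
    /// ```ignore
    /// AudioNodeInfo::new()
    ///     .debug_name("my_node")
    ///     .custom_state(SharedState::default())
    /// ```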
    pub fn custom_state<T: 'static>(mut self, custom_state: T) -> Self {
        self.custom_state = Some(Box::new(custom_state));
        self
    }

    /// Set the latency of this node in frames (samples in a single channel of audio).
    ///
    /// By default this is set to `0`.
    pub const fn latency_frames(mut self, latency_frames: u32) -> Self {
        self.latency_frames = latency_frames;
        self
    }
}

impl Default for AudioNodeInfo {
    fn default() -> Self {
        Self::new()
    }
}

impl From<AudioNodeInfo> for AudioNodeInfoInner {
    fn from(value: AudioNodeInfo) -> Self {
        AudioNodeInfoInner {
            debug_name: value.debug_name,
            channel_config: value.channel_config,
            call_update_method: value.call_update_method,
            custom_state: value.custom_state,
            latency_frames: value.latency_frames,
        }
    }
}

/// Information about an [`AudioNode`]. Used internally by the Firewheel context.
#[derive(Debug)]
pub struct AudioNodeInfoInner {
    pub debug_name: &'static str,
    pub channel_config: ChannelConfig,
    pub call_update_method: bool,
    pub custom_state: Option<Box<dyn Any>>,
    pub latency_frames: u32,
}

/// A trait representing a node in a Firewheel audio graph.
///
/// # Notes about ECS
///
/// In order to be friendlier to ECSs (entity component systems), it is encouraged
/// that any struct implementing this trait be POD (plain ol' data). If you want your
/// audio node to be usable in the Bevy game engine, also derive
/// `bevy_ecs::prelude::Component`. (You can hide this derive behind a feature flag
/// by using `#[cfg_attr(feature = "bevy", derive(bevy_ecs::prelude::Component))]`).
///
/// # Audio Node Lifecycle
///
/// 1. The user constructs the node as POD or from a custom constructor method for
/// that node.
/// 2. The user adds the node to the graph using `FirewheelCtx::add_node`. If the
/// node has any custom configuration, then the user passes that configuration to this
/// method as well. In this method, the Firewheel context calls [`AudioNode::info`] to
/// get information about the node. The node can also store any custom state in the
/// [`AudioNodeInfo`] struct.
/// 3. At this point the user may now call `FirewheelCtx::node_state` and
/// `FirewheelCtx::node_state_mut` to retrieve the node's custom state.
/// 4. If [`AudioNodeInfo::call_update_method`] was set to `true`, then
/// [`AudioNode::update`] will be called every time the Firewheel context updates.
/// The node's custom state is also accessible in this method.
/// 5. When the Firewheel context is ready for the node to start processing data,
/// it calls [`AudioNode::construct_processor`] to retrieve the realtime
/// [`AudioNodeProcessor`] counterpart of the node. This processor counterpart is
/// then sent to the audio thread.
/// 6. The Firewheel processor calls [`AudioNodeProcessor::process`] whenever there
/// is a new block of audio data to process.
/// > WARNING: Audio nodes *MUST* either completely fill all output buffers
/// with data, or return [`ProcessStatus::ClearAllOutputs`]/[`ProcessStatus::Bypass`].
/// Failing to do this will result in audio glitches.
/// 7. (Graceful shutdown)
///
///     7a. The Firewheel processor calls [`AudioNodeProcessor::stream_stopped`].
/// The processor is then sent back to the main thread.
///
///     7b. If a new audio stream is started, then the context will call
/// [`AudioNodeProcessor::new_stream`] on the main thread, and then send the
/// processor back to the audio thread for processing.
///
///     7c. If the Firewheel context is dropped before a new stream is started, then
/// both the node and the processor counterpart are dropped.
/// 8. (Audio thread crashes or stops unexpectedly) - The node's processor counterpart
/// may or may not be dropped. The user may try to create a new audio stream, in which
/// case [`AudioNode::construct_processor`] might be called again. If a second processor
/// instance cannot be created, then the node may panic.
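///
/// # Example
///
/// A minimal sketch of an implementation (illustrative only; this node has
/// no channels and simply lets the engine clear its outputs):
///
/// ```ignore
/// struct NoopNode;
///
/// impl AudioNode for NoopNode {
///     type Configuration = EmptyConfig;
///
///     fn info(&self, _config: &Self::Configuration) -> AudioNodeInfo {
///         AudioNodeInfo::new()
///             .debug_name("noop")
///             .channel_config(ChannelConfig {
///                 num_inputs: ChannelCount::ZERO,
///                 num_outputs: ChannelCount::ZERO,
///             })
///     }
///
///     fn construct_processor(
///         &self,
///         _config: &Self::Configuration,
///         _cx: ConstructProcessorContext,
///     ) -> impl AudioNodeProcessor {
///         NoopProcessor
///     }
/// }
///
/// struct NoopProcessor;
///
/// impl AudioNodeProcessor for NoopProcessor {
///     fn process(
///         &mut self,
///         _info: &ProcInfo,
///         _buffers: ProcBuffers,
///         _events: &mut ProcEvents,
///         _extra: &mut ProcExtra,
///     ) -> ProcessStatus {
///         // Let the engine clear all output buffers for us.
///         ProcessStatus::ClearAllOutputs
///     }
/// }
/// ```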
pub trait AudioNode {
    /// A type representing this constructor's configuration.
    ///
    /// This is intended as a one-time configuration to be used
    /// when constructing an audio node. When no configuration
    /// is required, [`EmptyConfig`] should be used.
    type Configuration: Default;

    /// Get information about this node.
    ///
    /// This method is only called once after the node is added to the audio graph.
    fn info(&self, configuration: &Self::Configuration) -> AudioNodeInfo;

    /// Construct a realtime processor for this node.
    ///
    /// * `configuration` - The custom configuration of this node.
    /// * `cx` - A context for interacting with the Firewheel context. This context
    /// also includes information about the audio stream.
    fn construct_processor(
        &self,
        configuration: &Self::Configuration,
        cx: ConstructProcessorContext,
    ) -> impl AudioNodeProcessor;

    /// If [`AudioNodeInfo::call_update_method`] was set to `true`, then the Firewheel
    /// context will call this method on every update cycle.
    ///
    /// * `configuration` - The custom configuration of this node.
    /// * `cx` - A context for interacting with the Firewheel context.
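    ///
    /// # Example
    ///
    /// A sketch of reading back the custom state stored via
    /// [`AudioNodeInfo::custom_state`] (illustrative only; `SharedState` is a
    /// hypothetical user-defined type):
    ///
    /// ```ignore
    /// fn update(&mut self, _config: &Self::Configuration, mut cx: UpdateContext) {
    ///     if let Some(state) = cx.custom_state_mut::<SharedState>() {
    ///         // Inspect or mutate the shared state here.
    ///     }
    /// }
    /// ```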
    fn update(&mut self, configuration: &Self::Configuration, cx: UpdateContext) {
        let _ = configuration;
        let _ = cx;
    }
}

/// A context for [`AudioNode::construct_processor`].
pub struct ConstructProcessorContext<'a> {
    /// The ID of this audio node.
    pub node_id: NodeID,
    /// Information about the running audio stream.
    pub stream_info: &'a StreamInfo,
    custom_state: &'a mut Option<Box<dyn Any>>,
}

impl<'a> ConstructProcessorContext<'a> {
    pub fn new(
        node_id: NodeID,
        stream_info: &'a StreamInfo,
        custom_state: &'a mut Option<Box<dyn Any>>,
    ) -> Self {
        Self {
            node_id,
            stream_info,
            custom_state,
        }
    }

    /// Get an immutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state<T: 'static>(&self) -> Option<&T> {
        self.custom_state
            .as_ref()
            .and_then(|s| s.downcast_ref::<T>())
    }

    /// Get a mutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state_mut<T: 'static>(&mut self) -> Option<&mut T> {
        self.custom_state
            .as_mut()
            .and_then(|s| s.downcast_mut::<T>())
    }
}

/// A context for [`AudioNode::update`].
pub struct UpdateContext<'a> {
    /// The ID of this audio node.
    pub node_id: NodeID,
    /// Information about the running audio stream. If no audio stream is running,
    /// then this will be `None`.
    pub stream_info: Option<&'a StreamInfo>,
    custom_state: &'a mut Option<Box<dyn Any>>,
    event_queue: &'a mut Vec<NodeEvent>,
}

impl<'a> UpdateContext<'a> {
    pub fn new(
        node_id: NodeID,
        stream_info: Option<&'a StreamInfo>,
        custom_state: &'a mut Option<Box<dyn Any>>,
        event_queue: &'a mut Vec<NodeEvent>,
    ) -> Self {
        Self {
            node_id,
            stream_info,
            custom_state,
            event_queue,
        }
    }

    /// Queue an event to send to this node's processor counterpart.
    pub fn queue_event(&mut self, event: NodeEventType) {
        self.event_queue.push(NodeEvent {
            node_id: self.node_id,
            #[cfg(feature = "scheduled_events")]
            time: None,
            event,
        });
    }

    /// Queue an event to send to this node's processor counterpart at a certain time.
    ///
    /// # Performance
    ///
    /// Note that for most nodes that handle scheduled events, this will split the
    /// buffer into chunks and process each chunk separately. If two events are
    /// scheduled too close to one another in time, then the resulting chunk may be
    /// too small for the audio processing to be fully vectorized.
    #[cfg(feature = "scheduled_events")]
    pub fn schedule_event(&mut self, event: NodeEventType, time: EventInstant) {
        self.event_queue.push(NodeEvent {
            node_id: self.node_id,
            time: Some(time),
            event,
        });
    }

    /// Get an immutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state<T: 'static>(&self) -> Option<&T> {
        self.custom_state
            .as_ref()
            .and_then(|s| s.downcast_ref::<T>())
    }

    /// Get a mutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state_mut<T: 'static>(&mut self) -> Option<&mut T> {
        self.custom_state
            .as_mut()
            .and_then(|s| s.downcast_mut::<T>())
    }
}

/// An empty constructor configuration.
///
/// This should be preferred over `()` because it implements
/// Bevy's `Component` trait, making the
/// [`AudioNode`] implementor trivially Bevy-compatible.
#[derive(Debug, Default, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy", derive(bevy_ecs::prelude::Component))]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
pub struct EmptyConfig;

/// A type-erased dyn-compatible [`AudioNode`].
pub trait DynAudioNode {
    /// Get information about this node.
    ///
    /// This method is only called once after the node is added to the audio graph.
    fn info(&self) -> AudioNodeInfo;

    /// Construct a realtime processor for this node.
    ///
    /// * `cx` - A context for interacting with the Firewheel context. This context
    /// also includes information about the audio stream.
    fn construct_processor(&self, cx: ConstructProcessorContext) -> Box<dyn AudioNodeProcessor>;

    /// If [`AudioNodeInfo::call_update_method`] was set to `true`, then the Firewheel
    /// context will call this method on every update cycle.
    ///
    /// * `cx` - A context for interacting with the Firewheel context.
    fn update(&mut self, cx: UpdateContext) {
        let _ = cx;
    }
}

/// Pairs constructors with their configurations.
///
/// This is useful for type-erasing an [`AudioNode`].
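///
/// # Example
///
/// A sketch of type-erasing a node (illustrative only; `MyNode` is a
/// hypothetical [`AudioNode`] implementor):
///
/// ```ignore
/// let node = Constructor::new(MyNode::default(), None);
/// let type_erased: Box<dyn DynAudioNode> = Box::new(node);
/// ```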
pub struct Constructor<T, C> {
    constructor: T,
    configuration: C,
}

impl<T: AudioNode> Constructor<T, T::Configuration> {
    pub fn new(constructor: T, configuration: Option<T::Configuration>) -> Self {
        Self {
            constructor,
            configuration: configuration.unwrap_or_default(),
        }
    }
}

impl<T: AudioNode> DynAudioNode for Constructor<T, T::Configuration> {
    fn info(&self) -> AudioNodeInfo {
        self.constructor.info(&self.configuration)
    }

    fn construct_processor(&self, cx: ConstructProcessorContext) -> Box<dyn AudioNodeProcessor> {
        Box::new(
            self.constructor
                .construct_processor(&self.configuration, cx),
        )
    }

    fn update(&mut self, cx: UpdateContext) {
        self.constructor.update(&self.configuration, cx);
    }
}

/// The trait describing the realtime processor counterpart to an
/// audio node.
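///
/// # Example
///
/// A sketch of a `process` implementation that copies its inputs to its
/// outputs with a fixed gain (illustrative only; it assumes matching input
/// and output channel counts, and a real node would also handle its events
/// and silence hints):
///
/// ```ignore
/// struct GainProcessor {
///     gain: f32,
/// }
///
/// impl AudioNodeProcessor for GainProcessor {
///     fn process(
///         &mut self,
///         info: &ProcInfo,
///         buffers: ProcBuffers,
///         _events: &mut ProcEvents,
///         _extra: &mut ProcExtra,
///     ) -> ProcessStatus {
///         for (out_ch, in_ch) in buffers.outputs.iter_mut().zip(buffers.inputs.iter()) {
///             for (out_s, in_s) in out_ch[..info.frames]
///                 .iter_mut()
///                 .zip(in_ch[..info.frames].iter())
///             {
///                 *out_s = *in_s * self.gain;
///             }
///         }
///
///         // Every output buffer was completely filled with (possibly
///         // non-silent) data.
///         ProcessStatus::outputs_not_silent()
///     }
/// }
/// ```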
pub trait AudioNodeProcessor: 'static + Send {
    /// Process the given block of audio. Only process data in the
    /// buffers up to [`ProcInfo::frames`] frames.
    ///
    /// WARNING: The node *MUST* either completely fill all output buffers
    /// with data, or return [`ProcessStatus::ClearAllOutputs`]/[`ProcessStatus::Bypass`].
    /// Failing to do this will result in audio glitches.
    ///
    /// * `info` - Information about this processing block.
    /// * `buffers` - The buffers of data to process.
    /// * `events` - A list of events for this node to process.
    /// * `extra` - Additional buffers and utilities.
    fn process(
        &mut self,
        info: &ProcInfo,
        buffers: ProcBuffers,
        events: &mut ProcEvents,
        extra: &mut ProcExtra,
    ) -> ProcessStatus;

    /// Called when the audio stream has been stopped.
    fn stream_stopped(&mut self, logger: &mut RealtimeLogger) {
        let _ = logger;
    }

    /// Called when a new audio stream has been started after a previous
    /// call to [`AudioNodeProcessor::stream_stopped`].
    ///
    /// Note, this method gets called on the main thread, not the audio
    /// thread, so it is safe to allocate/deallocate here.
    fn new_stream(&mut self, stream_info: &StreamInfo) {
        let _ = stream_info;
    }
}

pub const NUM_SCRATCH_BUFFERS: usize = 8;

/// The buffers used in [`AudioNodeProcessor::process`].
pub struct ProcBuffers<'a, 'b> {
    /// The audio input buffers.
    ///
    /// The number of channels will always equal the [`ChannelConfig::num_inputs`]
    /// value that was returned in [`AudioNode::info`].
    ///
    /// Each channel slice will have a length of [`ProcInfo::frames`].
    pub inputs: &'a [&'b [f32]],

    /// The audio output buffers.
    ///
    /// WARNING: The node *MUST* either completely fill all output buffers
    /// with data, or return [`ProcessStatus::ClearAllOutputs`]/[`ProcessStatus::Bypass`].
    /// Failing to do this will result in audio glitches.
    ///
    /// The number of channels will always equal the [`ChannelConfig::num_outputs`]
    /// value that was returned in [`AudioNode::info`].
    ///
    /// Each channel slice will have a length of [`ProcInfo::frames`].
    ///
    /// These buffers may contain junk data.
    pub outputs: &'a mut [&'b mut [f32]],
}

/// Extra buffers and utilities for [`AudioNodeProcessor::process`].
pub struct ProcExtra {
    /// A list of extra scratch buffers that can be used for processing.
    /// This removes the need for nodes to allocate their own scratch buffers.
    /// Each buffer has a length of [`StreamInfo::max_block_frames`]. These
    /// buffers are shared across all nodes, so assume that they contain junk
    /// data.
    pub scratch_buffers: ChannelBuffer<f32, NUM_SCRATCH_BUFFERS>,

    /// A buffer of values that linearly ramp up/down between `0.0` and `1.0`
    /// which can be used to implement efficient declicking when
    /// pausing/resuming/stopping.
    pub declick_values: DeclickValues,

    /// A realtime-safe logger helper.
    pub logger: RealtimeLogger,
}

/// Information for [`AudioNodeProcessor::process`].
pub struct ProcInfo {
    /// The number of frames (samples in a single channel of audio) in
    /// this processing block.
    ///
    /// Not to be confused with video frames.
    pub frames: usize,

    /// An optional optimization hint on which input channels contain
    /// all zeros (silence). The first bit (`0b1`) is the first channel,
    /// the second bit is the second channel, and so on.
    pub in_silence_mask: SilenceMask,

    /// An optional optimization hint on which output channels contain
    /// all zeros (silence). The first bit (`0b1`) is the first channel,
    /// the second bit is the second channel, and so on.
    pub out_silence_mask: SilenceMask,

    /// An optional hint on which input channels are connected to other
    /// nodes in the graph.
    pub in_connected_mask: ConnectedMask,

    /// An optional hint on which output channels are connected to other
    /// nodes in the graph.
    pub out_connected_mask: ConnectedMask,

    /// The sample rate of the audio stream in samples per second.
    pub sample_rate: NonZeroU32,

    /// The reciprocal of the sample rate. This can be used to avoid a
    /// division and improve performance.
    pub sample_rate_recip: f64,

    /// The current time of the audio clock at the first frame in this
    /// processing block, equal to the total number of frames (samples in
    /// a single channel of audio) that have been processed since this
    /// Firewheel context was first started.
    ///
    /// Note, this value does *NOT* account for any output underflows
    /// (underruns) that may have occurred.
    ///
    /// Note, generally this value will always count up, but there may be
    /// a few edge cases that cause this value to be less than that of the
    /// previous block, such as when the sample rate of the stream has been changed.
    pub clock_samples: InstantSamples,

    /// The duration between when the stream was started and when the
    /// Firewheel processor's `process` method was called.
    ///
    /// Note, this clock is not as accurate as the audio clock.
    pub duration_since_stream_start: Duration,

    /// Flags indicating the current status of the audio stream.
    pub stream_status: StreamStatus,

    /// If an output underflow (underrun) occurred, then this will contain
    /// an estimate for the number of frames (samples in a single channel
    /// of audio) that were dropped.
    ///
    /// This can be used to correct the timing of events if desired.
    ///
    /// Note, this is just an estimate, and may not always be perfectly
    /// accurate.
    ///
    /// If an underrun did not occur, then this will be `0`.
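    ///
    /// # Example
    ///
    /// A sketch of detecting an underrun and reading this estimate
    /// (illustrative only):
    ///
    /// ```ignore
    /// if info.stream_status.contains(StreamStatus::OUTPUT_UNDERFLOW) {
    ///     // Optionally compensate event timing by `info.dropped_frames` frames.
    /// }
    /// ```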
    pub dropped_frames: u32,

    /// Information about the musical transport.
    ///
    /// This will be `None` if no musical transport is currently active,
    /// or if the transport is currently paused.
    #[cfg(feature = "musical_transport")]
    pub transport_info: Option<TransportInfo>,
}

impl ProcInfo {
    /// The current time of the audio clock at the first frame in this
    /// processing block, equal to the total number of seconds of data that
    /// have been processed since this Firewheel context was first started.
    ///
    /// Note, this value does *NOT* account for any output underflows
    /// (underruns) that may have occurred.
    ///
    /// Note, generally this value will always count up, but there may be
    /// a few edge cases that cause this value to be less than that of the
    /// previous block, such as when the sample rate of the stream has been changed.
    pub fn clock_seconds(&self) -> InstantSeconds {
        self.clock_samples
            .to_seconds(self.sample_rate, self.sample_rate_recip)
    }

    /// Get the current time of the audio clock in frames as a range for this
    /// processing block.
    pub fn clock_samples_range(&self) -> Range<InstantSamples> {
        self.clock_samples..self.clock_samples + DurationSamples(self.frames as i64)
    }

    /// Get the current time of the audio clock in seconds as a range for this
    /// processing block.
    pub fn clock_seconds_range(&self) -> Range<InstantSeconds> {
        self.clock_seconds()
            ..(self.clock_samples + DurationSamples(self.frames as i64))
                .to_seconds(self.sample_rate, self.sample_rate_recip)
    }

    /// Get the playhead of the transport at the first frame in this processing
    /// block.
    ///
    /// If there is no active transport, or if the transport is not currently
    /// playing, then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn playhead(&self) -> Option<InstantMusical> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.samples_to_musical(
                        self.clock_samples,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                        self.sample_rate_recip,
                    )
                })
        })
    }

    /// Get the playhead of the transport as a range for this processing
    /// block.
    ///
    /// If there is no active transport, or if the transport is not currently
    /// playing, then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn playhead_range(&self) -> Option<Range<InstantMusical>> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.samples_to_musical(
                        self.clock_samples,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                        self.sample_rate_recip,
                    )
                        ..transport_info.transport.samples_to_musical(
                            self.clock_samples + DurationSamples(self.frames as i64),
                            start_clock_samples,
                            transport_info.speed_multiplier,
                            self.sample_rate,
                            self.sample_rate_recip,
                        )
                })
        })
    }

    /// Returns `true` if there is a transport and that transport is playing,
    /// `false` otherwise.
    #[cfg(feature = "musical_transport")]
    pub fn transport_is_playing(&self) -> bool {
        self.transport_info
            .as_ref()
            .map(|t| t.playing())
            .unwrap_or(false)
    }

    /// Converts the given musical time to the corresponding time in samples.
    ///
    /// If there is no musical transport or the transport is not currently playing,
    /// then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn musical_to_samples(&self, musical: InstantMusical) -> Option<InstantSamples> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.musical_to_samples(
                        musical,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                    )
                })
        })
    }
}

#[cfg(feature = "musical_transport")]
pub struct TransportInfo {
    /// The current transport.
    pub transport: MusicalTransport,

    /// The instant that `MusicalTime::ZERO` occurred in units of
    /// `InstantSamples`.
    ///
    /// If the transport is not currently playing, then this will be `None`.
    pub start_clock_samples: Option<InstantSamples>,

    /// The beats per minute at the first frame of this process block.
    ///
    /// (The `speed_multiplier` has already been applied to this value.)
    pub beats_per_minute: f64,

    /// A multiplier for the playback speed of the transport. A value of `1.0`
    /// means no change in speed, a value less than `1.0` means a decrease in
    /// speed, and a value greater than `1.0` means an increase in speed.
    pub speed_multiplier: f64,
}

#[cfg(feature = "musical_transport")]
impl TransportInfo {
    /// Whether or not the transport is currently playing (`true`) or paused
    /// (`false`).
    pub const fn playing(&self) -> bool {
        self.start_clock_samples.is_some()
    }
}

bitflags::bitflags! {
    /// Flags indicating the current status of the audio stream.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct StreamStatus: u32 {
        /// Some input data was discarded because of an overflow condition
        /// at the audio driver.
        const INPUT_OVERFLOW = 0b01;

        /// The output buffer ran low, likely producing a break in the
        /// output sound. (This is also known as an "underrun".)
        const OUTPUT_UNDERFLOW = 0b10;
    }
}

/// The status of processing buffers in an audio node.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessStatus {
    /// No output buffers were modified. If this is returned, then
    /// the engine will automatically clear all output buffers
    /// for you as efficiently as possible.
    #[default]
    ClearAllOutputs,
    /// No output buffers were modified. If this is returned, then
    /// the engine will automatically copy the input buffers to
    /// their corresponding output buffers for you as efficiently
    /// as possible.
    Bypass,
    /// All output buffers were filled with data.
    ///
    /// WARNING: The node must completely fill all audio output buffers
    /// with data when returning this process status.
    /// Failing to do so will result in audio glitches.
    ///
    /// WARNING: Incorrectly marking a channel as containing silence
    /// when it doesn't will result in audio glitches. Please take
    /// great care when using this, or preferably just use
    /// [`ProcessStatus::outputs_not_silent()`] instead.
    OutputsModified { out_silence_mask: SilenceMask },
}

impl ProcessStatus {
    /// All output buffers were filled with non-silence.
    ///
    /// WARNING: The node must completely fill all audio output buffers
    /// with data when returning this process status.
    /// Failing to do so will result in audio glitches.
    pub const fn outputs_not_silent() -> Self {
        Self::OutputsModified {
            out_silence_mask: SilenceMask::NONE_SILENT,
        }
    }

    /// All output buffers were filled with data.
    ///
    /// WARNING: The node must completely fill all audio output buffers
    /// with data when returning this process status.
    /// Failing to do so will result in audio glitches.
    ///
    /// WARNING: Incorrectly marking a channel as containing silence
    /// when it doesn't will result in audio glitches. Please take
    /// great care when using this, or preferably just use
    /// [`ProcessStatus::outputs_not_silent()`] instead.
    pub const fn outputs_modified(out_silence_mask: SilenceMask) -> Self {
        Self::OutputsModified { out_silence_mask }
    }
}