firewheel_core/node.rs
use core::ops::Range;
use core::time::Duration;
use core::{any::Any, fmt::Debug, hash::Hash, num::NonZeroU32};

#[cfg(not(feature = "std"))]
use bevy_platform::prelude::{Box, Vec};

use crate::dsp::buffer::ChannelBuffer;
use crate::log::RealtimeLogger;
use crate::{
    channel_config::{ChannelConfig, ChannelCount},
    clock::{DurationSamples, InstantSamples, InstantSeconds},
    dsp::declick::DeclickValues,
    event::{NodeEvent, NodeEventType, ProcEvents},
    ConnectedMask, SilenceMask, StreamInfo,
};

pub mod dummy;

#[cfg(feature = "scheduled_events")]
use crate::clock::EventInstant;

#[cfg(feature = "musical_transport")]
use crate::clock::{InstantMusical, MusicalTransport};

/// A globally unique identifier for a node.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "bevy_reflect", reflect(opaque))]
pub struct NodeID(pub thunderdome::Index);

impl NodeID {
    pub const DANGLING: Self = Self(thunderdome::Index::DANGLING);
}

impl Default for NodeID {
    fn default() -> Self {
        Self::DANGLING
    }
}

/// Information about an [`AudioNode`].
///
/// This struct enforces the use of the builder pattern for future-proofing,
/// as more fields are likely to be added in the future.
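///
/// # Example
///
/// A minimal sketch of building node info inside [`AudioNode::info`]. The
/// node name and stereo channel counts are illustrative assumptions (this
/// assumes `ChannelCount` exposes a `STEREO` constant):
///
/// ```ignore
/// AudioNodeInfo::new()
///     .debug_name("example_gain")
///     .channel_config(ChannelConfig {
///         num_inputs: ChannelCount::STEREO,
///         num_outputs: ChannelCount::STEREO,
///     })
///     // Only request `update` calls if the node actually needs them.
///     .call_update_method(false)
/// ```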
#[derive(Debug)]
pub struct AudioNodeInfo {
    debug_name: &'static str,
    channel_config: ChannelConfig,
    call_update_method: bool,
    custom_state: Option<Box<dyn Any>>,
    latency_frames: u32,
}

impl AudioNodeInfo {
    /// Construct a new [`AudioNodeInfo`] builder struct.
    pub const fn new() -> Self {
        Self {
            debug_name: "unnamed",
            channel_config: ChannelConfig {
                num_inputs: ChannelCount::ZERO,
                num_outputs: ChannelCount::ZERO,
            },
            call_update_method: false,
            custom_state: None,
            latency_frames: 0,
        }
    }

    /// A unique name for this type of node, used for debugging purposes.
    pub const fn debug_name(mut self, debug_name: &'static str) -> Self {
        self.debug_name = debug_name;
        self
    }

    /// The channel configuration of this node.
    ///
    /// By default this has a channel configuration with zero input and output
    /// channels.
    pub const fn channel_config(mut self, channel_config: ChannelConfig) -> Self {
        self.channel_config = channel_config;
        self
    }

    /// Set to `true` if this node wishes to have the Firewheel context call
    /// [`AudioNode::update`] on every update cycle.
    ///
    /// By default this is set to `false`.
    pub const fn call_update_method(mut self, call_update_method: bool) -> Self {
        self.call_update_method = call_update_method;
        self
    }

    /// Custom `!Send` state that can be stored in the Firewheel context and accessed
    /// by the user.
    ///
    /// The user accesses this state via `FirewheelCtx::node_state` and
    /// `FirewheelCtx::node_state_mut`.
    pub fn custom_state<T: 'static>(mut self, custom_state: T) -> Self {
        self.custom_state = Some(Box::new(custom_state));
        self
    }

    /// Set the latency of this node in frames (samples in a single channel of audio).
    ///
    /// By default this is set to `0`.
    pub const fn latency_frames(mut self, latency_frames: u32) -> Self {
        self.latency_frames = latency_frames;
        self
    }
}

impl Default for AudioNodeInfo {
    fn default() -> Self {
        Self::new()
    }
}

impl From<AudioNodeInfo> for AudioNodeInfoInner {
    fn from(value: AudioNodeInfo) -> Self {
        AudioNodeInfoInner {
            debug_name: value.debug_name,
            channel_config: value.channel_config,
            call_update_method: value.call_update_method,
            custom_state: value.custom_state,
            latency_frames: value.latency_frames,
        }
    }
}

/// Information about an [`AudioNode`]. Used internally by the Firewheel context.
#[derive(Debug)]
pub struct AudioNodeInfoInner {
    pub debug_name: &'static str,
    pub channel_config: ChannelConfig,
    pub call_update_method: bool,
    pub custom_state: Option<Box<dyn Any>>,
    pub latency_frames: u32,
}

/// A trait representing a node in a Firewheel audio graph.
///
/// # Notes about ECS
///
/// In order to be friendlier to ECSs (entity component systems), it is encouraged
/// that any struct implementing this trait be POD (plain ol' data). If you want your
/// audio node to be usable in the Bevy game engine, also derive
/// `bevy_ecs::prelude::Component`. (You can hide this derive behind a feature flag
/// by using `#[cfg_attr(feature = "bevy", derive(bevy_ecs::prelude::Component))]`.)
///
/// # Audio Node Lifecycle
///
/// 1. The user constructs the node as POD or from a custom constructor method for
///    that node.
/// 2. The user adds the node to the graph using `FirewheelCtx::add_node`. If the
///    node has any custom configuration, then the user passes that configuration to
///    this method as well. In this method, the Firewheel context calls
///    [`AudioNode::info`] to get information about the node. The node can also store
///    any custom state in the [`AudioNodeInfo`] struct.
/// 3. At this point the user may call `FirewheelCtx::node_state` and
///    `FirewheelCtx::node_state_mut` to retrieve the node's custom state.
/// 4. If [`AudioNodeInfo::call_update_method`] was set to `true`, then
///    [`AudioNode::update`] will be called every time the Firewheel context updates.
///    The node's custom state is also accessible in this method.
/// 5. When the Firewheel context is ready for the node to start processing data,
///    it calls [`AudioNode::construct_processor`] to retrieve the realtime
///    [`AudioNodeProcessor`] counterpart of the node. This processor counterpart is
///    then sent to the audio thread.
/// 6. The Firewheel processor calls [`AudioNodeProcessor::process`] whenever there
///    is a new block of audio data to process.
/// 7. (Graceful shutdown)
///
///    7a. The Firewheel processor calls [`AudioNodeProcessor::stream_stopped`].
///    The processor is then sent back to the main thread.
///
///    7b. If a new audio stream is started, then the context will call
///    [`AudioNodeProcessor::new_stream`] on the main thread, and then send the
///    processor back to the audio thread for processing.
///
///    7c. If the Firewheel context is dropped before a new stream is started, then
///    both the node and the processor counterpart are dropped.
/// 8. (Audio thread crashes or stops unexpectedly) - The node's processor counterpart
///    may or may not be dropped. The user may try to create a new audio stream, in
///    which case [`AudioNode::construct_processor`] might be called again. If a second
///    processor instance cannot be created, then the node may panic.
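///
/// # Example
///
/// A minimal sketch of an implementation, assuming a hypothetical `GainNode`
/// with a matching `GainProcessor` (a processor sketch is shown in the
/// [`AudioNodeProcessor`] docs). The names, fields, and stereo channel counts
/// are illustrative only:
///
/// ```ignore
/// #[derive(Debug, Clone, Copy)]
/// struct GainNode {
///     /// The linear gain to apply to the signal.
///     gain: f32,
/// }
///
/// impl AudioNode for GainNode {
///     type Configuration = EmptyConfig;
///
///     fn info(&self, _config: &Self::Configuration) -> AudioNodeInfo {
///         AudioNodeInfo::new()
///             .debug_name("gain")
///             .channel_config(ChannelConfig {
///                 num_inputs: ChannelCount::STEREO,
///                 num_outputs: ChannelCount::STEREO,
///             })
///     }
///
///     fn construct_processor(
///         &self,
///         _config: &Self::Configuration,
///         _cx: ConstructProcessorContext,
///     ) -> impl AudioNodeProcessor {
///         GainProcessor { gain: self.gain }
///     }
/// }
/// ```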
pub trait AudioNode {
    /// A type representing this constructor's configuration.
    ///
    /// This is intended as a one-time configuration to be used
    /// when constructing an audio node. When no configuration
    /// is required, [`EmptyConfig`] should be used.
    type Configuration: Default;

    /// Get information about this node.
    ///
    /// This method is only called once after the node is added to the audio graph.
    fn info(&self, configuration: &Self::Configuration) -> AudioNodeInfo;

    /// Construct a realtime processor for this node.
    ///
    /// * `configuration` - The custom configuration of this node.
    /// * `cx` - A context for interacting with the Firewheel context. This context
    ///   also includes information about the audio stream.
    fn construct_processor(
        &self,
        configuration: &Self::Configuration,
        cx: ConstructProcessorContext,
    ) -> impl AudioNodeProcessor;

    /// If [`AudioNodeInfo::call_update_method`] was set to `true`, then the Firewheel
    /// context will call this method on every update cycle.
    ///
    /// * `configuration` - The custom configuration of this node.
    /// * `cx` - A context for interacting with the Firewheel context.
    fn update(&mut self, configuration: &Self::Configuration, cx: UpdateContext) {
        let _ = configuration;
        let _ = cx;
    }
}

/// A context for [`AudioNode::construct_processor`].
pub struct ConstructProcessorContext<'a> {
    /// The ID of this audio node.
    pub node_id: NodeID,
    /// Information about the running audio stream.
    pub stream_info: &'a StreamInfo,
    custom_state: &'a mut Option<Box<dyn Any>>,
}

impl<'a> ConstructProcessorContext<'a> {
    pub fn new(
        node_id: NodeID,
        stream_info: &'a StreamInfo,
        custom_state: &'a mut Option<Box<dyn Any>>,
    ) -> Self {
        Self {
            node_id,
            stream_info,
            custom_state,
        }
    }

    /// Get an immutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state<T: 'static>(&self) -> Option<&T> {
        self.custom_state
            .as_ref()
            .and_then(|s| s.downcast_ref::<T>())
    }

    /// Get a mutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state_mut<T: 'static>(&mut self) -> Option<&mut T> {
        self.custom_state
            .as_mut()
            .and_then(|s| s.downcast_mut::<T>())
    }
}

/// A context for [`AudioNode::update`].
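///
/// # Example
///
/// A sketch of using this context from within [`AudioNode::update`], assuming
/// the node stored a hypothetical `GainNodeState` (with an `update_count`
/// field) via [`AudioNodeInfo::custom_state`]:
///
/// ```ignore
/// fn update(&mut self, _config: &Self::Configuration, mut cx: UpdateContext) {
///     if let Some(state) = cx.custom_state_mut::<GainNodeState>() {
///         // Inspect or mutate the shared custom state here.
///         state.update_count += 1;
///     }
/// }
/// ```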
pub struct UpdateContext<'a> {
    /// The ID of this audio node.
    pub node_id: NodeID,
    /// Information about the running audio stream. If no audio stream is running,
    /// then this will be `None`.
    pub stream_info: Option<&'a StreamInfo>,
    custom_state: &'a mut Option<Box<dyn Any>>,
    event_queue: &'a mut Vec<NodeEvent>,
}

impl<'a> UpdateContext<'a> {
    pub fn new(
        node_id: NodeID,
        stream_info: Option<&'a StreamInfo>,
        custom_state: &'a mut Option<Box<dyn Any>>,
        event_queue: &'a mut Vec<NodeEvent>,
    ) -> Self {
        Self {
            node_id,
            stream_info,
            custom_state,
            event_queue,
        }
    }

    /// Queue an event to send to this node's processor counterpart.
    pub fn queue_event(&mut self, event: NodeEventType) {
        self.event_queue.push(NodeEvent {
            node_id: self.node_id,
            #[cfg(feature = "scheduled_events")]
            time: None,
            event,
        });
    }

    /// Queue an event to send to this node's processor counterpart, at a certain time.
    ///
    /// # Performance
    ///
    /// Note that for most nodes that handle scheduled events, this will split the
    /// buffer into chunks and process each chunk separately. If two events are
    /// scheduled too close to one another in time, then the resulting chunk may be
    /// too small for the audio processing to be fully vectorized.
    #[cfg(feature = "scheduled_events")]
    pub fn schedule_event(&mut self, event: NodeEventType, time: EventInstant) {
        self.event_queue.push(NodeEvent {
            node_id: self.node_id,
            time: Some(time),
            event,
        });
    }

    /// Get an immutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state<T: 'static>(&self) -> Option<&T> {
        self.custom_state
            .as_ref()
            .and_then(|s| s.downcast_ref::<T>())
    }

    /// Get a mutable reference to the custom state that was created in
    /// [`AudioNodeInfo::custom_state`].
    pub fn custom_state_mut<T: 'static>(&mut self) -> Option<&mut T> {
        self.custom_state
            .as_mut()
            .and_then(|s| s.downcast_mut::<T>())
    }
}

/// An empty constructor configuration.
///
/// This should be preferred over `()` because it derives Bevy's `Component`
/// trait when the `bevy` feature is enabled, making the [`AudioNode`]
/// implementor trivially Bevy-compatible.
#[derive(Debug, Default, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy", derive(bevy_ecs::prelude::Component))]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
pub struct EmptyConfig;

/// A type-erased dyn-compatible [`AudioNode`].
pub trait DynAudioNode {
    /// Get information about this node.
    ///
    /// This method is only called once after the node is added to the audio graph.
    fn info(&self) -> AudioNodeInfo;

    /// Construct a realtime processor for this node.
    ///
    /// * `cx` - A context for interacting with the Firewheel context. This context
    ///   also includes information about the audio stream.
    fn construct_processor(&self, cx: ConstructProcessorContext) -> Box<dyn AudioNodeProcessor>;

    /// If [`AudioNodeInfo::call_update_method`] was set to `true`, then the Firewheel
    /// context will call this method on every update cycle.
    ///
    /// * `cx` - A context for interacting with the Firewheel context.
    fn update(&mut self, cx: UpdateContext) {
        let _ = cx;
    }
}

/// Pairs constructors with their configurations.
///
/// This is useful for type-erasing an [`AudioNode`].
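///
/// # Example
///
/// A sketch of type-erasing a node, assuming a hypothetical `GainNode` that
/// implements [`AudioNode`]:
///
/// ```ignore
/// let node = GainNode { gain: 0.5 };
/// // Passing `None` falls back to `GainNode::Configuration::default()`.
/// let erased: Box<dyn DynAudioNode> = Box::new(Constructor::new(node, None));
/// ```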
pub struct Constructor<T, C> {
    constructor: T,
    configuration: C,
}

impl<T: AudioNode> Constructor<T, T::Configuration> {
    pub fn new(constructor: T, configuration: Option<T::Configuration>) -> Self {
        Self {
            constructor,
            configuration: configuration.unwrap_or_default(),
        }
    }
}

impl<T: AudioNode> DynAudioNode for Constructor<T, T::Configuration> {
    fn info(&self) -> AudioNodeInfo {
        self.constructor.info(&self.configuration)
    }

    fn construct_processor(&self, cx: ConstructProcessorContext) -> Box<dyn AudioNodeProcessor> {
        Box::new(
            self.constructor
                .construct_processor(&self.configuration, cx),
        )
    }

    fn update(&mut self, cx: UpdateContext) {
        self.constructor.update(&self.configuration, cx);
    }
}

/// The trait describing the realtime processor counterpart to an
/// audio node.
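///
/// # Example
///
/// A sketch of a simple gain processor, reusing the hypothetical
/// `GainProcessor` from the [`AudioNode`] example. A real processor would
/// also handle incoming events and smooth parameter changes:
///
/// ```ignore
/// struct GainProcessor {
///     gain: f32,
/// }
///
/// impl AudioNodeProcessor for GainProcessor {
///     fn process(
///         &mut self,
///         info: &ProcInfo,
///         buffers: ProcBuffers,
///         _events: &mut ProcEvents,
///         _extra: &mut ProcExtra,
///     ) -> ProcessStatus {
///         for (out_ch, in_ch) in buffers.outputs.iter_mut().zip(buffers.inputs.iter()) {
///             for (out_s, in_s) in out_ch[..info.frames].iter_mut().zip(&in_ch[..info.frames]) {
///                 *out_s = *in_s * self.gain;
///             }
///         }
///
///         // Channels that were silent on input stay silent after applying a gain.
///         ProcessStatus::outputs_modified(info.in_silence_mask)
///     }
/// }
/// ```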
pub trait AudioNodeProcessor: 'static + Send {
    /// Process the given block of audio. Only process data in the
    /// buffers up to [`ProcInfo::frames`].
    ///
    /// The node *MUST* either return `ProcessStatus::ClearAllOutputs`
    /// or fill all output buffers with data.
    ///
    /// If any output buffers contain all zeros up to [`ProcInfo::frames`]
    /// (silent), then mark those channels as silent in the silence mask
    /// returned in [`ProcessStatus::OutputsModified`].
    ///
    /// * `info` - Information about this processing block.
    /// * `buffers` - The buffers of data to process.
    /// * `events` - A list of events for this node to process.
    /// * `extra` - Additional buffers and utilities.
    fn process(
        &mut self,
        info: &ProcInfo,
        buffers: ProcBuffers,
        events: &mut ProcEvents,
        extra: &mut ProcExtra,
    ) -> ProcessStatus;

    /// Called when the audio stream has been stopped.
    fn stream_stopped(&mut self, logger: &mut RealtimeLogger) {
        let _ = logger;
    }

    /// Called when a new audio stream has been started after a previous
    /// call to [`AudioNodeProcessor::stream_stopped`].
    ///
    /// Note that this method is called on the main thread, not the audio
    /// thread, so it is safe to allocate and deallocate here.
    fn new_stream(&mut self, stream_info: &StreamInfo) {
        let _ = stream_info;
    }
}

pub const NUM_SCRATCH_BUFFERS: usize = 8;

/// The buffers used in [`AudioNodeProcessor::process`]
pub struct ProcBuffers<'a, 'b> {
    /// The audio input buffers.
    ///
    /// The number of channels will always equal the [`ChannelConfig::num_inputs`]
    /// value that was returned in [`AudioNode::info`].
    ///
    /// Each channel slice will have a length of [`ProcInfo::frames`].
    pub inputs: &'a [&'b [f32]],

    /// The audio output buffers.
    ///
    /// The number of channels will always equal the [`ChannelConfig::num_outputs`]
    /// value that was returned in [`AudioNode::info`].
    ///
    /// Each channel slice will have a length of [`ProcInfo::frames`].
    ///
    /// These buffers may contain junk data.
    pub outputs: &'a mut [&'b mut [f32]],
}

/// Extra buffers and utilities for [`AudioNodeProcessor::process`]
pub struct ProcExtra {
    /// A list of extra scratch buffers that can be used for processing.
    /// This removes the need for nodes to allocate their own scratch buffers.
    /// Each buffer has a length of [`StreamInfo::max_block_frames`]. These
    /// buffers are shared across all nodes, so assume that they contain junk
    /// data.
    pub scratch_buffers: ChannelBuffer<f32, NUM_SCRATCH_BUFFERS>,

    /// A buffer of values that linearly ramp up/down between `0.0` and `1.0`
    /// which can be used to implement efficient declicking when
    /// pausing/resuming/stopping.
    pub declick_values: DeclickValues,

    /// A realtime-safe logger helper.
    pub logger: RealtimeLogger,
}

/// Information for [`AudioNodeProcessor::process`]
pub struct ProcInfo {
    /// The number of frames (samples in a single channel of audio) in
    /// this processing block.
    ///
    /// Not to be confused with video frames.
    pub frames: usize,

    /// An optional optimization hint on which input channels contain
    /// all zeros (silence). The first bit (`0b1`) is the first channel,
    /// the second bit is the second channel, and so on.
    pub in_silence_mask: SilenceMask,

    /// An optional optimization hint on which output channels contain
    /// all zeros (silence). The first bit (`0b1`) is the first channel,
    /// the second bit is the second channel, and so on.
    pub out_silence_mask: SilenceMask,

    /// An optional hint on which input channels are connected to other
    /// nodes in the graph.
    pub in_connected_mask: ConnectedMask,

    /// An optional hint on which output channels are connected to other
    /// nodes in the graph.
    pub out_connected_mask: ConnectedMask,

    /// The sample rate of the audio stream in samples per second.
    pub sample_rate: NonZeroU32,

    /// The reciprocal of the sample rate. This can be used to avoid a
    /// division and improve performance.
    pub sample_rate_recip: f64,

    /// The current time of the audio clock at the first frame in this
    /// processing block, equal to the total number of frames (samples in
    /// a single channel of audio) that have been processed since this
    /// Firewheel context was first started.
    ///
    /// Note that this value does *NOT* account for any output underflows
    /// (underruns) that may have occurred.
    ///
    /// Note that while this value generally always counts up, there are
    /// a few edge cases that can cause it to be less than it was in the
    /// previous block, such as when the sample rate of the stream has
    /// changed.
    pub clock_samples: InstantSamples,

    /// The duration between when the stream was started and when the
    /// Firewheel processor's `process` method was called.
    ///
    /// Note that this clock is not as accurate as the audio clock.
    pub duration_since_stream_start: Duration,

    /// Flags indicating the current status of the audio stream
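    ///
    /// A sketch of checking this field for an underrun; what (if anything) a
    /// node does in response is up to the node:
    ///
    /// ```ignore
    /// if info.stream_status.contains(StreamStatus::OUTPUT_UNDERFLOW) {
    ///     // `info.dropped_frames` estimates how many frames were lost.
    /// }
    /// ```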
    pub stream_status: StreamStatus,

    /// If an output underflow (underrun) occurred, then this will contain
    /// an estimate of the number of frames (samples in a single channel
    /// of audio) that were dropped.
    ///
    /// This can be used to correct the timing of events if desired.
    ///
    /// Note that this is just an estimate, and it may not always be
    /// perfectly accurate.
    ///
    /// If an underrun did not occur, then this will be `0`.
    pub dropped_frames: u32,

    /// Information about the musical transport.
    ///
    /// This will be `None` if no musical transport is currently active,
    /// or if the transport is currently paused.
    #[cfg(feature = "musical_transport")]
    pub transport_info: Option<TransportInfo>,
}

impl ProcInfo {
    /// The current time of the audio clock at the first frame in this
    /// processing block, equal to the total number of seconds of data that
    /// have been processed since this Firewheel context was first started.
    ///
    /// Note that this value does *NOT* account for any output underflows
    /// (underruns) that may have occurred.
    ///
    /// Note that while this value generally always counts up, there are
    /// a few edge cases that can cause it to be less than it was in the
    /// previous block, such as when the sample rate of the stream has
    /// changed.
    pub fn clock_seconds(&self) -> InstantSeconds {
        self.clock_samples
            .to_seconds(self.sample_rate, self.sample_rate_recip)
    }

    /// Get the current time of the audio clock in frames as a range for this
    /// processing block.
    pub fn clock_samples_range(&self) -> Range<InstantSamples> {
        self.clock_samples..self.clock_samples + DurationSamples(self.frames as i64)
    }

    /// Get the current time of the audio clock in seconds as a range for this
    /// processing block.
    pub fn clock_seconds_range(&self) -> Range<InstantSeconds> {
        self.clock_seconds()
            ..(self.clock_samples + DurationSamples(self.frames as i64))
                .to_seconds(self.sample_rate, self.sample_rate_recip)
    }

    /// Get the playhead of the transport at the first frame in this processing
    /// block.
    ///
    /// If there is no active transport, or if the transport is not currently
    /// playing, then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn playhead(&self) -> Option<InstantMusical> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.samples_to_musical(
                        self.clock_samples,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                        self.sample_rate_recip,
                    )
                })
        })
    }

    /// Get the playhead of the transport as a range for this processing
    /// block.
    ///
    /// If there is no active transport, or if the transport is not currently
    /// playing, then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn playhead_range(&self) -> Option<Range<InstantMusical>> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.samples_to_musical(
                        self.clock_samples,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                        self.sample_rate_recip,
                    )
                        ..transport_info.transport.samples_to_musical(
                            self.clock_samples + DurationSamples(self.frames as i64),
                            start_clock_samples,
                            transport_info.speed_multiplier,
                            self.sample_rate,
                            self.sample_rate_recip,
                        )
                })
        })
    }

    /// Returns `true` if there is a transport and that transport is playing,
    /// `false` otherwise.
    #[cfg(feature = "musical_transport")]
    pub fn transport_is_playing(&self) -> bool {
        self.transport_info
            .as_ref()
            .map(|t| t.playing())
            .unwrap_or(false)
    }

    /// Converts the given musical time to the corresponding time in samples.
    ///
    /// If there is no musical transport or the transport is not currently playing,
    /// then this will return `None`.
    #[cfg(feature = "musical_transport")]
    pub fn musical_to_samples(&self, musical: InstantMusical) -> Option<InstantSamples> {
        self.transport_info.as_ref().and_then(|transport_info| {
            transport_info
                .start_clock_samples
                .map(|start_clock_samples| {
                    transport_info.transport.musical_to_samples(
                        musical,
                        start_clock_samples,
                        transport_info.speed_multiplier,
                        self.sample_rate,
                    )
                })
        })
    }
}


#[cfg(feature = "musical_transport")]
pub struct TransportInfo {
    /// The current transport.
    pub transport: MusicalTransport,

    /// The instant that `InstantMusical::ZERO` occurred, in units of
    /// `InstantSamples`.
    ///
    /// If the transport is not currently playing, then this will be `None`.
    pub start_clock_samples: Option<InstantSamples>,

    /// The beats per minute at the first frame of this process block.
    ///
    /// (The `speed_multiplier` has already been applied to this value.)
    pub beats_per_minute: f64,

    /// A multiplier for the playback speed of the transport. A value of `1.0`
    /// means no change in speed, a value less than `1.0` means a decrease in
    /// speed, and a value greater than `1.0` means an increase in speed.
    pub speed_multiplier: f64,
}

#[cfg(feature = "musical_transport")]
impl TransportInfo {
    /// Whether or not the transport is currently playing (true) or paused
    /// (false).
    pub const fn playing(&self) -> bool {
        self.start_clock_samples.is_some()
    }
}


bitflags::bitflags! {
    /// Flags indicating the current status of the audio stream
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct StreamStatus: u32 {
        /// Some input data was discarded because of an overflow condition
        /// at the audio driver.
        const INPUT_OVERFLOW = 0b01;

        /// The output buffer ran low, likely producing a break in the
        /// output sound. (This is also known as an "underrun").
        const OUTPUT_UNDERFLOW = 0b10;
    }
}

/// The status of processing buffers in an audio node.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessStatus {
    /// No output buffers were modified. If this is returned, then
    /// the engine will automatically clear all output buffers
    /// for you as efficiently as possible.
    #[default]
    ClearAllOutputs,
    /// No output buffers were modified. If this is returned, then
    /// the engine will automatically copy the input buffers to
    /// their corresponding output buffers for you as efficiently
    /// as possible.
    Bypass,
    /// All output buffers were filled with data.
    OutputsModified { out_silence_mask: SilenceMask },
}

impl ProcessStatus {
    /// All output buffers were filled with non-silence.
    pub const fn outputs_not_silent() -> Self {
        Self::OutputsModified {
            out_silence_mask: SilenceMask::NONE_SILENT,
        }
    }

    /// All output buffers were filled with data.
    pub const fn outputs_modified(out_silence_mask: SilenceMask) -> Self {
        Self::OutputsModified { out_silence_mask }
    }
}