//! pp-audiograph 0.2.0 — realtime audio processing graph library.
//!
//! Documentation for the channel-based audio buffer types.
use crate::{AudioGraphError, channel::ChannelSelection, sample::Sample};
use std::collections::HashMap;

/// Number of frames (samples per channel) in a buffer, as a newtype over `usize`.
///
/// The inner value is public; use `frame_size.0` to read the raw count.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FrameSize(pub usize);

/// Trait representing a buffer of audio samples organized by channels
///
/// Audiograph expects audio to be organized in a channel-based format where each channel stores its samples
/// in contiguous memory. Implementations for the most common use cases are provided (see [`MultiChannelBuffer`]
/// for owned buffers and [`MultiChannelBufferView`] for non-owning views), but you can implement this trait for
/// your own custom buffer types as needed.
pub trait AudioBuffer<T: Sample> {
    /// Returns the number of channels in the buffer
    fn num_channels(&self) -> usize;

    /// Returns the number of frames (samples per channel) in the buffer
    fn num_frames(&self) -> FrameSize;

    /// Returns a slice of samples for the specified channel index, or `None` if the index is out of bounds
    fn channel(&self, index: usize) -> Option<&[T]>;

    /// Returns a mutable slice of samples for the specified channel index, or `None` if the index is out of bounds
    fn channel_mut(&mut self, index: usize) -> Option<&mut [T]>;

    /// Clears the buffer, setting all samples to zero
    fn clear(&mut self);

    /// Interleaves the audio buffer into the provided output slice
    ///
    /// Returns the number of samples written to the output buffer or an error if the output buffer is too small.
    fn copy_to_interleaved(&self, output: &mut [T]) -> Result<usize, AudioGraphError> {
        // Hoist the (potentially dynamically dispatched) accessor calls out of
        // the per-sample loops — this is expected to run on the audio thread.
        let num_channels = self.num_channels();
        let num_samples = num_channels * self.num_frames().0;
        if output.len() < num_samples {
            return Err("Output buffer is too small");
        }

        for channel in 0..num_channels {
            let src_channel = self
                .channel(channel)
                .expect("indices below num_channels() must resolve to a channel");
            // Write this channel's samples at stride `num_channels`, starting
            // at offset `channel`; zipping bounds the write to the source length.
            for (dst, &sample) in output[channel..]
                .iter_mut()
                .step_by(num_channels)
                .zip(src_channel.iter())
            {
                *dst = sample;
            }
        }

        Ok(num_samples)
    }

    /// Copies interleaved audio data from the input slice into the deinterleaved buffer format
    ///
    /// Returns the number of frames processed, or an error if the number of channels or the size
    /// of the input buffer exceeds the capacity of this buffer.
    /// Clears any remaining channels if num_channels is less than the number of channels in this buffer.
    /// Errors out if the provided input buffer size is not a multiple of the provided number of channels.
    fn copy_from_interleaved(
        &mut self,
        input: &[T],
        num_channels: usize,
    ) -> Result<FrameSize, AudioGraphError> {
        if num_channels > self.num_channels() {
            return Err("Input channel count exceeds buffer channel count");
        }

        // Zero channels means nothing to copy; wipe everything and report zero frames.
        if num_channels == 0 {
            self.clear();
            return Ok(FrameSize(0));
        }

        if !input.len().is_multiple_of(num_channels) {
            return Err("Input buffer size must be a multiple of the number of channels");
        }

        let max_num_samples = num_channels * self.num_frames().0;
        if input.len() > max_num_samples {
            return Err("Input buffer is too large");
        }

        let num_frames_processed = input.len() / num_channels;

        for channel in 0..num_channels {
            let dst_channel = self
                .channel_mut(channel)
                .expect("indices below num_channels() must resolve to a channel");
            // Pick every `num_channels`-th sample starting at `channel`; zipping
            // avoids per-sample bounds checks on the destination slice.
            for (dst, &sample) in dst_channel
                .iter_mut()
                .zip(input.iter().skip(channel).step_by(num_channels))
            {
                *dst = sample;
            }
        }

        // Silence the channels the input did not provide, as documented above.
        for channel in num_channels..self.num_channels() {
            let dst_channel = self
                .channel_mut(channel)
                .expect("indices below num_channels() must resolve to a channel");
            for sample in dst_channel.iter_mut() {
                *sample = T::zero();
            }
        }

        Ok(FrameSize(num_frames_processed))
    }

    /// Sums channels from another buffer into this buffer, optionally using a channel selection to specify which channels to sum.
    ///
    /// Only channels present in both buffers are mixed; each pair of channel
    /// slices is summed element-wise up to the shorter slice's length.
    fn add(&mut self, other: &dyn AudioBuffer<T>, channel_selection: &Option<ChannelSelection>) {
        // Channels beyond either buffer's range cannot be mixed.
        let num_shared = self.num_channels().min(other.num_channels());
        if let Some(selection) = channel_selection {
            let mut filtered_selection = selection.clone();
            // Restrict the selection to channels both buffers actually have.
            filtered_selection.clamp(num_shared);
            for channel in filtered_selection.iter() {
                let src = other
                    .channel(channel)
                    .expect("clamped selection indices must resolve to a channel");
                let dst = self
                    .channel_mut(channel)
                    .expect("clamped selection indices must resolve to a channel");
                dst.iter_mut().zip(src.iter()).for_each(|(a, b)| {
                    *a += *b;
                });
            }
        } else {
            for channel in 0..num_shared {
                let src = other
                    .channel(channel)
                    .expect("indices below num_channels() must resolve to a channel");
                let dst = self
                    .channel_mut(channel)
                    .expect("indices below num_channels() must resolve to a channel");
                dst.iter_mut().zip(src.iter()).for_each(|(a, b)| {
                    *a += *b;
                });
            }
        }
    }
}

/// Implementation of [`AudioBuffer`] that owns the samples
pub struct MultiChannelBuffer<T: Sample> {
    // One boxed slice of samples per channel; each slice holds `num_frames.0` samples.
    channels: Vec<Box<[T]>>,
    // Frame count shared by every channel.
    num_frames: FrameSize,
}

impl<T: Sample> MultiChannelBuffer<T> {
    /// Construct a new multi channel buffer by pre-allocating internal buffers with zeros
    pub fn new(num_channels: usize, num_frames: FrameSize) -> Self {
        // Allocate one zero-filled boxed slice per channel.
        let channels = (0..num_channels)
            .map(|_| vec![T::zero(); num_frames.0].into_boxed_slice())
            .collect();
        Self {
            channels,
            num_frames,
        }
    }
}

impl<T: Sample> AudioBuffer<T> for MultiChannelBuffer<T> {
    fn num_channels(&self) -> usize {
        self.channels.len()
    }

    fn num_frames(&self) -> FrameSize {
        self.num_frames
    }

    fn channel(&self, index: usize) -> Option<&[T]> {
        self.channels.get(index).map(|slice| &slice[..])
    }

    fn channel_mut(&mut self, index: usize) -> Option<&mut [T]> {
        self.channels.get_mut(index).map(|slice| &mut slice[..])
    }

    /// Overwrites every sample in every channel with zero.
    fn clear(&mut self) {
        self.channels
            .iter_mut()
            .flat_map(|slice| slice.iter_mut())
            .for_each(|sample| *sample = T::zero());
    }
}

/// Non-owning view into a channel-based collection of audio samples
///
/// Useful for zero-copy processing of immutable (input) audio data. Example:
///
/// ```ignore
/// fn channel_based_callback(data: &[&[f32]]) {
///     let buffer_view = MultiChannelBufferView::new(data, FrameSize(data[0].len()));
/// }
/// ```
pub struct MultiChannelBufferView<'a, T: Sample> {
    // Borrowed per-channel sample slices; one entry per channel.
    channels: &'a [&'a [T]],
    // Frame count shared by every channel.
    num_frames: FrameSize,
}

impl<'a, T: Sample> MultiChannelBufferView<'a, T> {
    /// Wraps existing per-channel sample slices in a read-only view.
    /// Performs no allocation.
    pub fn new(channels: &'a [&'a [T]], num_frames: FrameSize) -> Self {
        Self {
            channels,
            num_frames,
        }
    }
}

impl<T: Sample> AudioBuffer<T> for MultiChannelBufferView<'_, T> {
    fn num_channels(&self) -> usize {
        self.channels.len()
    }

    fn num_frames(&self) -> FrameSize {
        self.num_frames
    }

    fn channel(&self, index: usize) -> Option<&[T]> {
        // `&[T]` is Copy, so a checked lookup plus `copied` yields the slice directly.
        self.channels.get(index).copied()
    }

    /// Always returns None as this is an immutable view type
    fn channel_mut(&mut self, _index: usize) -> Option<&mut [T]> {
        None
    }

    /// No-op: the underlying sample data is immutable.
    fn clear(&mut self) {}
}

/// Non-owning mutable view into a channel-based collection of audio samples.
///
/// Useful for zero-copy processing of mutable (output) audio data. Example:
///
/// ```ignore
/// fn channel_based_callback<'a>(data: &'a mut [&'a mut [f32]]) {
///     let mut mutable_buffer_view = MultiChannelBufferViewMut::new(data, FrameSize(data[0].len()));
/// }
/// ```
pub struct MultiChannelBufferViewMut<'a, T: Sample> {
    // Borrowed mutable per-channel sample slices; one entry per channel.
    channels: &'a mut [&'a mut [T]],
    // Frame count shared by every channel.
    num_frames: FrameSize,
}

impl<'a, T: Sample> MultiChannelBufferViewMut<'a, T> {
    /// Wraps existing mutable per-channel sample slices in a view.
    /// Performs no allocation.
    ///
    /// # Panics
    ///
    /// Panics if `channels` is empty.
    pub fn new(channels: &'a mut [&'a mut [T]], num_frames: FrameSize) -> Self {
        assert!(!channels.is_empty());
        Self {
            channels,
            num_frames,
        }
    }
}

impl<T: Sample> AudioBuffer<T> for MultiChannelBufferViewMut<'_, T> {
    fn num_channels(&self) -> usize {
        self.channels.len()
    }

    fn num_frames(&self) -> FrameSize {
        self.num_frames
    }

    fn channel(&self, index: usize) -> Option<&[T]> {
        self.channels.get(index).map(|slice| &slice[..])
    }

    fn channel_mut(&mut self, index: usize) -> Option<&mut [T]> {
        self.channels.get_mut(index).map(|slice| &mut slice[..])
    }

    /// Overwrites every sample in every channel with zero.
    fn clear(&mut self) {
        self.channels
            .iter_mut()
            .flat_map(|slice| slice.iter_mut())
            .for_each(|sample| *sample = T::zero());
    }
}

/// Immutable AudioBuffer view that remaps channel indices
///
/// TODO: Define and use a Rewire type here and in the rewire graph
pub struct RewiredBufferView<'a, T: Sample> {
    // The underlying buffer whose channels are exposed under remapped indices.
    pub buffer: &'a dyn AudioBuffer<T>,
    // Maps the view's channel index to the wrapped buffer's channel index.
    pub rewire: &'a HashMap<usize, usize>,
}

impl<T: Sample> AudioBuffer<T> for RewiredBufferView<'_, T> {
    /// One past the highest remapped index, or 0 when the rewire map is empty.
    fn num_channels(&self) -> usize {
        self.rewire.keys().copied().max().map_or(0, |max| max + 1)
    }

    fn num_frames(&self) -> FrameSize {
        self.buffer.num_frames()
    }

    /// Resolves `index` through the rewire map before delegating to the wrapped buffer.
    fn channel(&self, index: usize) -> Option<&[T]> {
        let source_channel = *self.rewire.get(&index)?;
        self.buffer.channel(source_channel)
    }

    /// Always returns None: the wrapped buffer is held behind an immutable reference.
    fn channel_mut(&mut self, _index: usize) -> Option<&mut [T]> {
        None
    }

    fn clear(&mut self) {
        panic!("Cannot clear an immutable buffer view");
    }
}