// Augmented Audio: Audio libraries and applications
// Copyright (c) 2022 Pedro Tacla Yamada
//
// The MIT License (MIT)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//! Provides abstractions for implementing:
//!
//! * Audio processing nodes
//! * MIDI processing nodes
//! * Audio buffers
//!
//! An audio processor implemented with these traits may work with multiple sample types, audio
//! buffer types and audio processing back-ends.
//!
//! Start looking at [AudioProcessor], then have a look at [AudioBuffer] and [MidiEventHandler].
//!
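//! # Example
//! A minimal sketch of driving a processor over an interleaved buffer, using only items
//! re-exported by this crate ([`SilenceAudioProcessor`], [`InterleavedAudioBuffer`] and
//! [`AudioProcessorSettings`]); the crate name in the `use` line is assumed to be
//! `audio_processor_traits`.
//!
//! ```rust,ignore
//! use audio_processor_traits::{
//!     AudioProcessor, AudioProcessorSettings, InterleavedAudioBuffer, SilenceAudioProcessor,
//! };
//!
//! // Build a processor and prepare it with the stream settings.
//! let mut processor = SilenceAudioProcessor::<f32>::new();
//! processor.prepare(AudioProcessorSettings::default());
//!
//! // Wrap an interleaved `f32` slice (2 channels, 512 frames) and process it in-place.
//! let mut samples = vec![1.0_f32; 2 * 512];
//! let mut buffer = InterleavedAudioBuffer::new(2, &mut samples[..]);
//! processor.process(&mut buffer);
//! ```
//!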
//! # Running the audio processors as CLIs, GUIs, VSTs or CPAL threads
//! See [`audio_processor_standalone`]
//!
//! # Running a graph of audio processors
//! See [`audio_processor_graph`]
//!
//! # AudioProcessor implementations
//! See: <https://github.com/yamadapc/augmented-audio/>
//!
use std::marker::PhantomData;

pub use num;
pub use num::Float;
pub use num::Zero;

pub use atomic_float::{AtomicF32, AtomicF64};
pub use audio_buffer::{AudioBuffer, InterleavedAudioBuffer, OwnedAudioBuffer, VecAudioBuffer};
pub use midi::{MidiEventHandler, MidiMessageLike, NoopMidiEventHandler};
pub use simple_processor::{BufferProcessor, SimpleAudioProcessor};

/// Atomic `f32`/`f64` implementations with `num` trait implementations
pub mod atomic_float;
/// Provides an abstraction for audio buffers that works for [`cpal`] and [`vst`] layouts
pub mod audio_buffer;
/// Provides an abstraction for MIDI processing that works for stand-alone and [`vst`] events
pub mod midi;
/// Parameters for [`AudioProcessor`]
pub mod parameters;
/// Simpler audio processor trait, ingesting sample by sample
pub mod simple_processor;

/// Options provided to the audio-processor through [`AudioProcessor::prepare`] before `process` is called.
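///
/// For example, settings for a 48 kHz stereo stream processed in 256-sample blocks (a sketch
/// using only this struct's own constructor and getters):
///
/// ```rust,ignore
/// let settings = AudioProcessorSettings::new(48000.0, 2, 2, 256);
/// assert_eq!(settings.sample_rate(), 48000.0);
/// assert_eq!(settings.block_size(), 256);
/// ```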
#[derive(Clone, PartialEq, Debug, Copy)]
pub struct AudioProcessorSettings {
    pub sample_rate: f32,
    pub input_channels: usize,
    pub output_channels: usize,
    pub block_size: usize,
}

impl Default for AudioProcessorSettings {
    fn default() -> Self {
        Self::new(44100.0, 2, 2, 512)
    }
}

impl AudioProcessorSettings {
    pub fn new(
        sample_rate: f32,
        input_channels: usize,
        output_channels: usize,
        block_size: usize,
    ) -> Self {
        AudioProcessorSettings {
            sample_rate,
            input_channels,
            output_channels,
            block_size,
        }
    }

    /// The sample rate in samples/second as a floating point number
    pub fn sample_rate(&self) -> f32 {
        self.sample_rate
    }

    /// The number of input channels
    pub fn input_channels(&self) -> usize {
        self.input_channels
    }

    /// The number of output channels
    pub fn output_channels(&self) -> usize {
        self.output_channels
    }

    /// The number of samples which will be provided on each `process` call
    pub fn block_size(&self) -> usize {
        self.block_size
    }
}

impl AudioProcessorSettings {
    pub fn set_sample_rate(&mut self, sample_rate: f32) {
        self.sample_rate = sample_rate;
    }

    pub fn set_input_channels(&mut self, input_channels: usize) {
        self.input_channels = input_channels;
    }

    pub fn set_output_channels(&mut self, output_channels: usize) {
        self.output_channels = output_channels;
    }

    pub fn set_block_size(&mut self, block_size: usize) {
        self.block_size = block_size;
    }
}

/// Represents an audio processing node.
///
/// Implementors should define the `SampleType` the node will work over. See some [examples here](https://github.com/yamadapc/augmented-audio/tree/master/crates/augmented/application/audio-processor-standalone/examples).
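///
/// For instance, a gain processor might look like the sketch below; the buffer is traversed with
/// [`AudioBuffer::slice_mut`], which visits every sample of every channel:
///
/// ```rust,ignore
/// use audio_processor_traits::{AudioBuffer, AudioProcessor, AudioProcessorSettings};
///
/// struct GainProcessor {
///     gain: f32,
/// }
///
/// impl AudioProcessor for GainProcessor {
///     type SampleType = f32;
///
///     fn prepare(&mut self, _settings: AudioProcessorSettings) {
///         // Pre-compute / allocate here rather than in `process`.
///     }
///
///     fn process<BufferType: AudioBuffer<SampleType = Self::SampleType>>(
///         &mut self,
///         data: &mut BufferType,
///     ) {
///         for sample in data.slice_mut() {
///             *sample *= self.gain;
///         }
///     }
/// }
/// ```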
pub trait AudioProcessor {
    type SampleType;

    /// Prepare for playback based on current audio settings
    fn prepare(&mut self, _settings: AudioProcessorSettings) {}

    /// Process a block of samples by mutating the input `AudioBuffer`
    fn process<BufferType: AudioBuffer<SampleType = Self::SampleType>>(
        &mut self,
        data: &mut BufferType,
    );
}

/// Auto-implemented object-safe version of the audio-processor trait.
///
/// Audio-processors with `SampleType = f32` automatically implement this trait and can be used as
/// trait objects operating over interleaved `f32` slices.
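///
/// A sketch of boxing a processor from this crate and driving it with a raw slice:
///
/// ```rust,ignore
/// use audio_processor_traits::{AudioProcessorSettings, SilenceAudioProcessor, SliceAudioProcessor};
///
/// // Any `AudioProcessor<SampleType = f32>` can be boxed as a `SliceAudioProcessor`.
/// let mut processor: Box<dyn SliceAudioProcessor> = Box::new(SilenceAudioProcessor::<f32>::new());
/// processor.prepare_slice(AudioProcessorSettings::default());
///
/// // 2 interleaved channels, 512 frames.
/// let mut samples = vec![0.0_f32; 2 * 512];
/// processor.process_slice(2, &mut samples[..]);
/// ```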
pub trait SliceAudioProcessor {
    fn prepare_slice(&mut self, _settings: AudioProcessorSettings) {}
    fn process_slice(&mut self, num_channels: usize, data: &mut [f32]);
}

impl<Processor> SliceAudioProcessor for Processor
where
    Processor: AudioProcessor<SampleType = f32>,
{
    fn prepare_slice(&mut self, settings: AudioProcessorSettings) {
        <Processor as AudioProcessor>::prepare(self, settings);
    }

    fn process_slice(&mut self, num_channels: usize, data: &mut [f32]) {
        let mut buffer = InterleavedAudioBuffer::new(num_channels, data);
        <Processor as AudioProcessor>::process(self, &mut buffer);
    }
}

/// Auto-implemented object-safe version of the audio-processor trait.
///
/// Given a concrete buffer type, audio-processors can be used as trait objects through this trait.
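///
/// A sketch of dynamic dispatch over a caller-chosen buffer type (`run_processor` is a
/// hypothetical helper used only for illustration):
///
/// ```rust,ignore
/// use audio_processor_traits::{AudioBuffer, ObjectAudioProcessor};
///
/// fn run_processor<BufferType: AudioBuffer<SampleType = f32>>(
///     processor: &mut dyn ObjectAudioProcessor<BufferType>,
///     buffer: &mut BufferType,
/// ) {
///     processor.process_obj(buffer);
/// }
/// ```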
pub trait ObjectAudioProcessor<BufferType> {
    fn prepare_obj(&mut self, _settings: AudioProcessorSettings) {}
    fn process_obj(&mut self, data: &mut BufferType);
}

impl<SampleType, BufferType, Processor> ObjectAudioProcessor<BufferType> for Processor
where
    SampleType: Float + Send,
    BufferType: AudioBuffer<SampleType = SampleType>,
    Processor: AudioProcessor<SampleType = SampleType>,
{
    fn prepare_obj(&mut self, settings: AudioProcessorSettings) {
        <Processor as AudioProcessor>::prepare(self, settings);
    }

    fn process_obj(&mut self, data: &mut BufferType) {
        <Processor as AudioProcessor>::process(self, data);
    }
}

/// An audio-processor which doesn't do any work.
pub struct NoopAudioProcessor<SampleType>(PhantomData<SampleType>);

impl<SampleType> Default for NoopAudioProcessor<SampleType> {
    fn default() -> Self {
        Self::new()
    }
}

impl<SampleType> NoopAudioProcessor<SampleType> {
    pub fn new() -> Self {
        NoopAudioProcessor(PhantomData)
    }
}

impl<SampleType: Send + Copy> SimpleAudioProcessor for NoopAudioProcessor<SampleType> {
    type SampleType = SampleType;
    fn s_process_frame(&mut self, _frame: &mut [SampleType]) {}
}

/// An audio-processor which mutes all channels by overwriting the buffer with silence.
pub struct SilenceAudioProcessor<SampleType>(PhantomData<SampleType>);

impl<SampleType> SilenceAudioProcessor<SampleType> {
    pub fn new() -> Self {
        SilenceAudioProcessor(PhantomData)
    }
}

impl<SampleType> Default for SilenceAudioProcessor<SampleType> {
    fn default() -> Self {
        Self::new()
    }
}

impl<SampleType: Float + Send> AudioProcessor for SilenceAudioProcessor<SampleType> {
    type SampleType = SampleType;

    fn process<BufferType: AudioBuffer<SampleType = Self::SampleType>>(
        &mut self,
        output: &mut BufferType,
    ) {
        for sample in output.slice_mut() {
            *sample = SampleType::zero();
        }
    }
}