//! Dynamic audio player based on a fixed sample stream
//!
//! This crate provides a dynamic audio player that can play an audio sample stream coming
//! from an external generating source, such as an emulator.
//!
//! The [`AudioPlayer`] acts as an audio stream player that plays the samples as they come,
//! and resamples the audio if the generated sample rate is not supported by the audio device.
//!
//! # Supported sample types
//! For now, we rely on the [`rubato`] crate for resampling; it provides the [`Sample`] trait, which is implemented for:
//! - [`f32`]
//! - [`f64`]
//!
//! # Example
//!
//! Here's an example of how to use the `AudioPlayer`:
//! ```rust,no_run
//! # use dynwave::{AudioPlayer, BufferSize};
//! // create a player whose buffer can hold 1 second worth of samples
//! // (choose the size based on how fast you generate samples; a smaller buffer means lower latency)
//! let mut player = AudioPlayer::<f32>::new(44100, BufferSize::OneSecond).unwrap();
//!
//! // Start playing the audio
//! player.play().unwrap();
//!
//! // generate audio samples (can be done in an emulation loop for example)
//! let samples = generate_samples();
//! player.queue(&samples);
//!
//! // pause the audio
//! player.pause().unwrap();
//!
//! # fn generate_samples() -> Vec<f32> {
//! #     vec![0.0; 1]
//! # }
//! ```
pub mod error;

use cpal::{
    traits::{DeviceTrait, HostTrait, StreamTrait},
    SizedSample,
};
use error::{AudioPlayerError, PlayError};
use ringbuf::{HeapProducer, HeapRb};
use rubato::{FftFixedInOut, Resampler, Sample};

struct AudioResampler<T: Sample> {
    resampler: FftFixedInOut<T>,
    pre_resampled_buffer: Vec<T>,
    pre_resampled_split_buffers: [Vec<T>; 2],
    resample_process_buffers: [Vec<T>; 2],
    resampled_buffer: Vec<T>,
}

impl<T: Sample + SizedSample> AudioResampler<T> {
    fn new(input_rate: usize, output_rate: usize) -> Result<Self, AudioPlayerError> {
        let resampler = FftFixedInOut::<T>::new(
            input_rate,
            output_rate,
            // the number of samples for one video frame at 60 FPS
            input_rate / 60,
            2,
        )?;

        Ok(Self {
            resampler,
            pre_resampled_buffer: Vec::new(),
            pre_resampled_split_buffers: [Vec::new(), Vec::new()],
            resample_process_buffers: [Vec::new(), Vec::new()],
            resampled_buffer: Vec::new(),
        })
    }

    fn resample_into_producer(&mut self, data: &[T], producer: &mut HeapProducer<T>) {
        // helper to split interleaved samples into separate per-channel vectors
        fn read_frames<T: Copy>(inbuffer: &[T], n_frames: usize, outputs: &mut [Vec<T>]) {
            for output in outputs.iter_mut() {
                output.clear();
                output.reserve(n_frames);
            }
            let mut value: T;
            let mut inbuffer_iter = inbuffer.iter();
            for _ in 0..n_frames {
                for output in outputs.iter_mut() {
                    value = *inbuffer_iter.next().unwrap();
                    output.push(value);
                }
            }
        }

        /// Helper to merge per-channel buffers into a single interleaved vector;
        /// the number of channels is the length of the `waves` slice
        fn write_frames<T: Copy>(waves: &[Vec<T>], outbuffer: &mut Vec<T>) {
            let nbr = waves[0].len();
            for frame in 0..nbr {
                for wave in waves.iter() {
                    outbuffer.push(wave[frame]);
                }
            }
        }

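        // accumulate the new interleaved samples, then consume them in fixed-size chunks:
        // split into channels, resample, re-interleave, and push into the ring buffer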
        self.pre_resampled_buffer.extend_from_slice(data);
        // process all complete chunks; after appending a large batch of data
        // there might be more than one chunk's worth of unprocessed audio
        loop {
            let frames = self.resampler.input_frames_next();

            if self.pre_resampled_buffer.len() < frames * 2 {
                return;
            }

            // only read the needed frames
            read_frames(
                &self.pre_resampled_buffer,
                frames,
                &mut self.pre_resampled_split_buffers,
            );

            self.resample_process_buffers[0].clear();
            self.resample_process_buffers[1].clear();

            let output_frames = self.resampler.output_frames_next();
            self.resample_process_buffers[0].resize(output_frames, T::EQUILIBRIUM);
            self.resample_process_buffers[1].resize(output_frames, T::EQUILIBRIUM);

            self.resampler
                .process_into_buffer(
                    &self.pre_resampled_split_buffers,
                    &mut self.resample_process_buffers,
                    None,
                )
                .unwrap();

            // interleave the resampled channels and push them into the ring buffer
            if self.resampled_buffer.len() < output_frames * 2 {
                self.resampled_buffer
                    .reserve(output_frames * 2 - self.resampled_buffer.len());
            }
            self.resampled_buffer.clear();
            write_frames(&self.resample_process_buffers, &mut self.resampled_buffer);

            producer.push_slice(&self.resampled_buffer);

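            // drop the interleaved input samples that were just consumed, keeping the remainder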
            self.pre_resampled_buffer = self.pre_resampled_buffer.split_off(frames * 2);
        }
    }
}

/// The `BufferSize` enum represents the amount of audio samples that can be stored in the buffer.
/// Limiting the number of samples in the buffer is crucial for minimizing audio delay during playback.
///
/// We will use `emulation` as an example to refer to the process of generating audio samples.
///
/// Minimizing the buffer size helps reduce audio delay, for example for audio coming from an emulator.
/// This is because the emulation speed does not always perfectly
/// match the audio playback speed (e.g., 44100Hz).
///
/// A smaller buffer size can help maintain better synchronization,
/// but it may cause noise or other issues on slower machines.
/// This can occur if the emulation process is slow, or if a CPU-intensive
/// process starts while the emulator is running.
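///
/// # Example
///
/// As a rough sketch, a manually sized buffer for about 100 ms of stereo audio at
/// 44100 Hz can be computed from the sample rate and the channel count (the player
/// always uses 2 channels):
/// ```rust
/// # use dynwave::BufferSize;
/// // 44100 samples per second / 10 = 100 ms per channel, times 2 channels
/// let buffer_size = BufferSize::Samples(44100 / 10 * 2);
/// ```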
#[derive(Debug, Clone, Copy, Default)]
pub enum BufferSize {
    #[default]
    /// 1/4 second worth of samples
    QuarterSecond,
    /// 1/2 second worth of samples
    HalfSecond,
    /// 1 second worth of samples
    OneSecond,
    /// Number of samples to store
    /// Be careful: you have to calculate this manually based on the sample rate and channel count
    Samples(usize),
}

impl BufferSize {
    /// Returns the number of samples in the buffer
    #[inline]
    #[must_use]
    fn store_for_samples(&self, sample_rate: usize, channels: usize) -> usize {
        match self {
            Self::QuarterSecond => sample_rate / 4 * channels,
            Self::HalfSecond => sample_rate / 2 * channels,
            Self::OneSecond => sample_rate * channels,
            Self::Samples(alternative_samples) => *alternative_samples,
        }
    }
}

/// The `AudioPlayer` struct represents an audio player that can play audio samples stream
/// coming from an external generating source, such as an emulator.
///
/// The `AudioPlayer` may resample the audio if the generated sample rate is not supported by the audio device,
/// which may cause a slight performance hit due to the resampling process. If the machine supports the input sample rate,
/// no resampling will be done, and the audio samples will be used as is.
///
/// # Example
///
/// Here's an example of how to use the `AudioPlayer`:
/// ```rust,no_run
/// # use dynwave::{AudioPlayer, BufferSize};
/// // create a player whose buffer can hold 1 second worth of samples
/// // (choose the size based on how fast you generate samples; a smaller buffer means lower latency)
/// let mut player = AudioPlayer::<f32>::new(44100, BufferSize::OneSecond).unwrap();
///
/// // Start playing the audio
/// player.play().unwrap();
///
/// // generate audio samples (can be done in an emulation loop for example)
/// let samples = generate_samples();
/// player.queue(&samples);
///
/// // pause the audio
/// player.pause().unwrap();
///
/// # fn generate_samples() -> Vec<f32> {
/// #     vec![0.0; 1]
/// # }
/// ```
pub struct AudioPlayer<T: Sample> {
    buffer_producer: HeapProducer<T>,
    resampler: Option<AudioResampler<T>>,
    output_stream: cpal::Stream,
}

impl<T: Sample + SizedSample> AudioPlayer<T> {
    /// Creates a new instance of `AudioPlayer`.
    ///
    /// # Parameters
    /// * `sample_rate`: The sample rate of the audio player in Hz. Common values are `44100` or `48000`.
    /// * `buffer_size`: The size of the buffer that will store the audio samples. See [`BufferSize`] for options.
    ///
    /// # Returns
    /// Might return an `Error` if:
    /// - No output device is found
    /// - The output device does not support dual channel
    /// - Some error happened with the device backend
    /// - Could not create the audio stream
    ///
    /// Check [`AudioPlayerError`] for more information about the possible errors.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// # use dynwave::{AudioPlayer, BufferSize};
    /// let sample_rate = 44100;
    /// let buffer_size = BufferSize::HalfSecond;
    /// let player = AudioPlayer::<f32>::new(sample_rate, buffer_size).unwrap();
    /// ```
    ///
    /// This example creates a new `AudioPlayer` with a sample rate of 44100 Hz and a buffer size of half a second.
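    ///
    /// For explicit error handling instead of `unwrap`, a minimal sketch could look like
    /// this (the `dynwave::error` import path is assumed from this crate's `error` module):
    /// ```rust,no_run
    /// # use dynwave::{AudioPlayer, BufferSize, error::AudioPlayerError};
    /// match AudioPlayer::<f32>::new(44100, BufferSize::HalfSecond) {
    ///     // the player was created successfully and can be played/queued
    ///     Ok(_player) => {}
    ///     // no default audio output device was found on this machine
    ///     Err(AudioPlayerError::NoOutputDevice) => eprintln!("no audio output device found"),
    ///     // any other device/backend error
    ///     Err(_) => eprintln!("failed to initialize the audio player"),
    /// }
    /// ```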
    pub fn new(sample_rate: u32, buffer_size: BufferSize) -> Result<Self, AudioPlayerError> {
        let host = cpal::default_host();
        let output_device = host
            .default_output_device()
            .ok_or(AudioPlayerError::NoOutputDevice)?;

        let sample_rate = cpal::SampleRate(sample_rate);

        let conf = output_device
            .supported_output_configs()?
            .collect::<Vec<_>>();

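        // look for an output configuration that supports the requested sample rate directly,
        // so that no resampling is needed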
        let mut found_conf = false;

        for c in &conf {
            // must have 2 channels and the <T> sample format
            // (almost all devices should have at least one configuration with these)
            if c.channels() == 2
                && c.sample_format() == T::FORMAT
                && c.min_sample_rate() <= sample_rate
                && c.max_sample_rate() >= sample_rate
            {
                found_conf = true;
                break;
            }
        }

        let (output_sample_rate, resampler) = if found_conf {
            (sample_rate, None)
        } else {
            let def_conf = output_device.default_output_config()?;

            if def_conf.channels() != 2 || def_conf.sample_format() != T::FORMAT {
                eprintln!("No supported configuration found for audio device, please open an issue in github `Amjad50/dynwave`\n\
                      list of supported configurations: {:#?}", conf);
                return Err(AudioPlayerError::DualChannelNotSupported);
            }

            (
                def_conf.sample_rate(),
                Some(AudioResampler::new(
                    sample_rate.0 as usize,
                    def_conf.sample_rate().0 as usize,
                )?),
            )
        };

        let config = cpal::StreamConfig {
            channels: 2,
            sample_rate: output_sample_rate,
            buffer_size: cpal::BufferSize::Default,
        };

        let ring_buffer_len = buffer_size.store_for_samples(output_sample_rate.0 as usize, 2);
        let buffer = HeapRb::new(ring_buffer_len);
        let (buffer_producer, mut buffer_consumer) = buffer.split();

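        // the output callback fills the device buffer from the ring buffer,
        // outputting silence (`T::EQUILIBRIUM`) whenever not enough samples have been queued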
        let output_data_fn = move |data: &mut [T], _: &cpal::OutputCallbackInfo| {
            for sample in data {
                *sample = buffer_consumer.pop().unwrap_or(T::EQUILIBRIUM);
            }
        };

        let output_stream =
            output_device.build_output_stream(&config, output_data_fn, Self::err_fn, None)?;

        Ok(Self {
            buffer_producer,
            output_stream,
            resampler,
        })
    }

    /// Start the player
    ///
    /// If the player is playing and the buffer runs empty (all queued samples have been played without adding more data), a popping sound might be heard.
    ///
    /// Might return an `Error` if:
    /// - The device associated with the stream is no longer available
    /// - Some error happened with the device backend
    ///
    /// Check [`PlayError`] for more information about the possible errors.
    pub fn play(&self) -> Result<(), PlayError> {
        self.output_stream.play().map_err(|e| e.into())
    }

    /// Pause the player
    ///
    /// Might return an `Error` if:
    /// - The device associated with the stream is no longer available
    /// - Some error happened with the device backend
    ///
    /// Check [`PlayError`] for more information about the possible errors.
    pub fn pause(&self) -> Result<(), PlayError> {
        self.output_stream.pause().map_err(|e| e.into())
    }

    /// Queues audio samples to be played.
    ///
    /// The `queue` function takes a slice of audio samples and adds them to the buffer. If a `resampler` is present,
    /// it resamples the audio data before adding it to the buffer.
    ///
    /// If the buffer is full, the function will drop the audio samples that don't fit in the buffer and won't block.
    ///
    /// If the player is playing, the audio samples will be played immediately, and if the buffer is emptied, popping sound might be heard.
    ///
    /// # Parameters
    /// * `data`: A slice of audio samples to be played.
    ///
    /// # Example
    /// ```rust,no_run
    /// # use dynwave::{AudioPlayer, BufferSize};
    /// let sample_rate = 44100;
    /// let buffer_size = BufferSize::HalfSecond;
    /// let mut player = AudioPlayer::new(sample_rate, buffer_size).unwrap();
    /// let samples = vec![0.5, 0.7, 0.9, 1.0, 0.9, 0.7, 0.5, 0.3, 0.1];
    /// player.queue(&samples);
    /// ```
    /// This example creates a new `AudioPlayer` with a sample rate of 44100 Hz and a buffer size of half a second, and queues some audio samples.
    pub fn queue(&mut self, data: &[T]) {
        if let Some(resampler) = &mut self.resampler {
            resampler.resample_into_producer(data, &mut self.buffer_producer);
        } else {
            // no resampling
            self.buffer_producer.push_slice(data);
        }
    }

    fn err_fn(err: cpal::StreamError) {
        eprintln!("an error occurred on audio stream: {}", err);
    }
}