// spectrusty_core/audio.rs
1/*
2 Copyright (C) 2020-2022 Rafal Michalski
3
4 This file is part of SPECTRUSTY, a Rust library for building emulators.
5
6 For the full copyright notice, see the lib.rs file.
7*/
8//! # Audio API.
9mod sample;
10
11use core::ops::{Deref, DerefMut};
12use core::marker::PhantomData;
13
14use crate::clock::{VFrameTs, VideoTs};
15use crate::video::VideoFrame;
16pub use sample::{
17 AudioSample,
18 FromSample,
19 IntoSample,
20 SampleDelta,
21 MulNorm
22};
23pub use crate::clock::FTs;
24
25/// A trait for interfacing Bandwidth-Limited Pulse Buffer implementations by square-wave audio generators.
26///
27/// The perfect square wave can be represented as an infinite sum of sinusoidal waves. The problem is
28/// that the frequency of those waves tends to infinity. The digitalization of sound is limited by the
29/// finite sample frequency and the maximum frequency that can be sampled is called the [Nyquist frequency].
30///
31/// When sampling of a square wave is naively implemented it produces a perceptible, unpleasant aliasing noise.
32///
33/// Square waves that sound "clear" should be constructed from a limited number of sinusoidal waves, but the
34/// computation of such a wave could be costly.
35///
36/// However thanks to the [Hard Sync] technique it is not necessary. Instead, a precomputed pattern is being
37/// applied to each "pulse step". The implementation of this is called the Bandwidth-Limited Pulse Buffer
38/// in short `Blimp` or `Blep`.
39///
40/// The audio stream is being produced in frames by the `Blep` implementation. First, the pulse steps are
41/// being added, then the frame is being finalized and after that, the audio samples can be generated from
42/// the collected pulses.
43///
44/// The way audio samples are being generated is outside of the scope of the `Blep` implementation. This trait
45/// only defines an interface for adding pulse steps and finalizing frames.
46///
47/// [Nyquist frequency]: https://en.wikipedia.org/wiki/Nyquist_frequency
48/// [Hard Sync]: https://www.cs.cmu.edu/~eli/papers/icmc01-hardsync.pdf
pub trait Blep {
    /// A type for sample ∆ amplitudes (pulse height).
    type SampleDelta: SampleDelta;
    /// This method allows the `Blep` implementation to reserve enough memory for the audio
    /// frame with an additional margin and to set up a sample time rate and other internals.
    ///
    /// This method should not be called again unless any of the provided parameters changes.
    ///
    /// * `sample_rate` is a number of output audio samples per second.
    /// * `ts_rate` is a number of time units per second which are being used as input time stamps.
    /// * `frame_ts` is a duration of a single frame measured in time units specified with `ts_rate`.
    /// * `margin_ts` specifies a largest required margin in time units for frame duration fluctuations.
    ///
    /// Each frame's duration may fluctuate randomly as long as it's not constantly growing nor shrinking
    /// and on average equals to `frame_ts`.
    ///
    /// Specifically `frame_ts` + `margin_ts` - `frame start` specifies the largest value of a time stamp
    /// that can be passed to [Blep::add_step] or [Blep::end_frame].
    /// `frame_ts` - `margin_ts` - `frame start` specifies the smallest value of a time stamp
    /// that can be passed to [Blep::end_frame].
    ///
    /// The smallest possible time stamp value that can be passed to [Blep::add_step] is a `frame start`
    /// time stamp. It starts at 0 after calling this method and is modified by the `timestamp` value passed
    /// to the last [Blep::end_frame] to `timestamp` - `frame_ts`. In other words, the next frame starts
    /// when the previous ends minus frame duration.
    fn ensure_frame_time(&mut self, sample_rate: u32, ts_rate: f64, frame_ts: FTs, margin_ts: FTs);
    /// This method is being used to add square-wave pulse steps within a boundary of a single frame.
    ///
    /// * `channel` specifies an output audio channel.
    /// * `timestamp` specifies the time stamp of the pulse.
    /// * `delta` specifies the pulse height (∆ amplitude).
    ///
    /// The implementation may panic if `timestamp` boundary limits are not upheld.
    fn add_step(&mut self, channel: usize, timestamp: FTs, delta: Self::SampleDelta);
    /// Finalizes audio frame.
    ///
    /// Some frames can end a little later or earlier and this method should allow for such flexibility.
    ///
    /// `timestamp` specifies when the frame should be finalized marking the timestamp of the next frame.
    ///
    /// Returns the number of samples that will be produced, single channel wise.
    ///
    /// The caller must ensure that no pulse step should be generated with a time stamp past `timestamp`
    /// given here.
    ///
    /// The implementation may panic if this requirement is not upheld.
    fn end_frame(&mut self, timestamp: FTs) -> usize;
}
97
/// A wrapper [Blep] implementation that filters pulses' ∆ amplitude before sending them to the
/// underlying implementation.
///
/// Every pulse delta passed to [Blep::add_step] is multiplied (via [MulNorm]) by `filter`
/// before being forwarded downstream; all other calls are delegated unchanged.
///
/// `BlepAmpFilter` may be used to adjust generated audio volume dynamically.
///
/// *NOTE*: To emulate a linear volume control, `filter` value should be scaled
/// [logarithmically](https://www.dr-lex.be/info-stuff/volumecontrols.html).
pub struct BlepAmpFilter<B: Blep> {
    /// A normalized filter value in the range `[0.0, 1.0]` (floats) or `[0, int::max_value()]` (integers).
    pub filter: B::SampleDelta,
    /// A downstream [Blep] implementation.
    pub blep: B,
}
/// A wrapper [Blep] implementation that redirects extra channels to a stereo [Blep]
/// as a monophonic channel.
///
/// Requires a downstream [Blep] implementation that provides at least 2 audio channels.
/// ```text
/// BlepStereo channel       Blep impl channel
///     0  -----------------------> 0
///     1  -----------------------> 1
///  >= 2  ---- * mono_filter ----> 0
///                           \---> 1
/// ```
pub struct BlepStereo<B: Blep> {
    /// A monophonic filter value in the range `[0.0, 1.0]` (floats) or `[0, int::max_value()]` (integers).
    pub mono_filter: B::SampleDelta,
    /// A downstream [Blep] implementation.
    pub blep: B,
}
128
/// A digital level to a sample amplitude conversion trait.
pub trait AmpLevels<T: Copy> {
    /// This method should return the appropriate digital sample amplitude for the given `level`.
    ///
    /// The best approximation is `a*(level/max_level*b).exp()` according to
    /// [this document](https://www.dr-lex.be/info-stuff/volumecontrols.html).
    ///
    /// *Please note* that most callbacks use only a limited number of bits in the `level`.
    fn amp_level(level: u32) -> T;
}
139
/// A common trait for controllers rendering square-wave audio pulses.
///
/// This trait defines common methods to interface [Blep] implementations.
pub trait AudioFrame<B: Blep> {
    /// Sets up the [Blep] time rate and ensures there is enough room for the single frame's audio data.
    ///
    /// * `sample_rate` is the number of samples per second of the rendered audio,
    /// * `cpu_hz` is the number of the emulated CPU cycles (T-states) per second.
    fn ensure_audio_frame_time(&self, blep: &mut B, sample_rate: u32, cpu_hz: f64);
    /// Returns a timestamp to be passed to [Blep] to end the frame.
    ///
    /// # Panics
    /// Panics if the current frame execution didn't get near the end of the frame.
    /// To check if you can actually call this method, invoke [FrameState::is_frame_over][crate::chip::FrameState::is_frame_over].
    fn get_audio_frame_end_time(&self) -> FTs;
    /// Calls [Blep::end_frame] to finalize the frame and prepare it for rendition.
    ///
    /// Returns a number of samples ready to be rendered in a single channel.
    ///
    /// # Panics
    /// Panics if the current frame execution didn't get near the end of the frame.
    /// To check if you can actually call this method, invoke [FrameState::is_frame_over][crate::chip::FrameState::is_frame_over].
    #[inline]
    fn end_audio_frame(&self, blep: &mut B) -> usize {
        // Delegates to the downstream Blep using this controller's own end-of-frame timestamp.
        blep.end_frame(self.get_audio_frame_end_time())
    }
}
167
/// A trait for controllers generating audio pulses from the EAR/MIC output.
pub trait EarMicOutAudioFrame<B: Blep> {
    /// Renders EAR/MIC output as square-wave pulses via [Blep] interface.
    ///
    /// Provide [AmpLevels] that can handle `level` values from 0 to 3 (2-bits).
    /// ```text
    ///   EAR  MIC  level
    ///    0    0     0
    ///    0    1     1
    ///    1    0     2
    ///    1    1     3
    /// ```
    /// `channel` - target [Blep] audio channel.
    fn render_earmic_out_audio_frame<V: AmpLevels<B::SampleDelta>>(&self, blep: &mut B, channel: usize);
}
183
/// A trait for controllers generating audio pulses from the EAR input.
pub trait EarInAudioFrame<B: Blep> {
    /// Renders EAR input as square-wave pulses via [Blep] interface.
    ///
    /// Provide [AmpLevels] that can handle `level` values from 0 to 1 (1-bit).
    /// `channel` - target [Blep] audio channel.
    fn render_ear_in_audio_frame<V: AmpLevels<B::SampleDelta>>(&self, blep: &mut B, channel: usize);
}
192
/*
Measured voltages of the ZX Spectrum EAR/MIC output, by issue:

Value output to bit: 4  3 | Iss 2  Iss 3 | Iss 2 V  Iss 3 V
                     1  1 |   1      1   |   3.79     3.70
                     1  0 |   1      1   |   3.66     3.56
                     0  1 |   1      0   |   0.73     0.66
                     0  0 |   0      0   |   0.39     0.34
*/
/// Amplitude levels for the combined 2-bit EAR/MIC output level, normalized to the Issue 3 maximum (3.70 V).
pub const AMPS_EAR_MIC: [f32; 4] = [0.34/3.70, 0.66/3.70, 3.56/3.70, 1.0];
/// Amplitude levels for a 2-bit level where only the EAR OUT bit matters (MIC bit ignored).
pub const AMPS_EAR_OUT: [f32; 4] = [0.34/3.70, 0.34/3.70, 1.0, 1.0];
/// Amplitude levels for the 1-bit EAR input level.
pub const AMPS_EAR_IN: [f32; 2] = [0.34/3.70, 0.66/3.70];

/// [AMPS_EAR_MIC] scaled to the full positive `i32` range.
pub const AMPS_EAR_MIC_I32: [i32; 4] = [0x0bc3_1d10, 0x16d5_1a60, 0x7b28_20ff, 0x7fff_ffff];
/// [AMPS_EAR_OUT] scaled to the full positive `i32` range.
pub const AMPS_EAR_OUT_I32: [i32; 4] = [0x0bc3_1d10, 0x0bc3_1d10, 0x7fff_ffff, 0x7fff_ffff];
/// [AMPS_EAR_IN] scaled to the full positive `i32` range.
pub const AMPS_EAR_IN_I32: [i32; 2] = [0x0bc3_1d10, 0x16d5_1a60];

/// [AMPS_EAR_MIC] scaled to the full positive `i16` range.
pub const AMPS_EAR_MIC_I16: [i16; 4] = [0x0bc3, 0x16d5, 0x7b27, 0x7fff];
/// [AMPS_EAR_OUT] scaled to the full positive `i16` range.
pub const AMPS_EAR_OUT_I16: [i16; 4] = [0x0bc3, 0x0bc3, 0x7fff, 0x7fff];
/// [AMPS_EAR_IN] scaled to the full positive `i16` range.
pub const AMPS_EAR_IN_I16: [i16; 2] = [0x0bc3, 0x16d5];
211
/// Implements [AmpLevels] trait, useful when rendering combined EAR OUT and MIC OUT audio signal.
///
/// Uses 2 lowest bits of a given `level`.
///
/// This is a zero-sized marker type; [PhantomData] only ties it to the sample type `T`.
#[derive(Clone, Default, Debug)]
pub struct EarMicAmps4<T>(PhantomData<T>);
/// Implements [AmpLevels] trait, useful when rendering EAR OUT audio ignoring MIC OUT signal.
///
/// Uses 2 lowest bits of a given `level`, but ignores the lowest bit.
///
/// This is a zero-sized marker type; [PhantomData] only ties it to the sample type `T`.
#[derive(Clone, Default, Debug)]
pub struct EarOutAmps4<T>(PhantomData<T>);
/// Implements [AmpLevels] trait, useful when rendering EAR IN audio.
///
/// Uses only one bit of a given `level`.
///
/// This is a zero-sized marker type; [PhantomData] only ties it to the sample type `T`.
#[derive(Clone, Default, Debug)]
pub struct EarInAmps2<T>(PhantomData<T>);
227
// Generates, for each `[$ty, $ear_mic, $ear_out, $ear_in]` tuple, the three
// [AmpLevels] implementations (EarMicAmps4, EarOutAmps4, EarInAmps2) for the
// sample type `$ty`, each indexing into its corresponding amplitude table
// with the masked low bits of `level`.
macro_rules! impl_amp_levels {
    ($([$ty:ty, $ear_mic:ident, $ear_out:ident, $ear_in:ident]),*) => { $(
        impl AmpLevels<$ty> for EarMicAmps4<$ty> {
            #[inline(always)]
            fn amp_level(level: u32) -> $ty {
                // both EAR and MIC bits select the amplitude
                $ear_mic[(level & 3) as usize]
            }
        }

        impl AmpLevels<$ty> for EarOutAmps4<$ty> {
            #[inline(always)]
            fn amp_level(level: u32) -> $ty {
                // the table itself ignores the MIC (lowest) bit
                $ear_out[(level & 3) as usize]
            }
        }

        impl AmpLevels<$ty> for EarInAmps2<$ty> {
            #[inline(always)]
            fn amp_level(level: u32) -> $ty {
                // single-bit EAR IN level
                $ear_in[(level & 1) as usize]
            }
        }
    )* };
}
impl_amp_levels!([f32, AMPS_EAR_MIC, AMPS_EAR_OUT, AMPS_EAR_IN],
                 [i32, AMPS_EAR_MIC_I32, AMPS_EAR_OUT_I32, AMPS_EAR_IN_I32],
                 [i16, AMPS_EAR_MIC_I16, AMPS_EAR_OUT_I16, AMPS_EAR_IN_I16]);
255
256impl<B: Blep> BlepAmpFilter<B> {
257 pub fn build(filter: B::SampleDelta) -> impl FnOnce(B) -> Self
258 {
259 move |blep| Self::new(filter, blep)
260 }
261
262 pub fn new(filter: B::SampleDelta, blep: B) -> Self {
263 BlepAmpFilter { blep, filter }
264 }
265}
266
267impl<B: Blep> BlepStereo<B> {
268 pub fn build(mono_filter: B::SampleDelta) -> impl FnOnce(B) -> Self {
269 move |blep| Self::new(mono_filter, blep)
270 }
271
272 pub fn new(mono_filter: B::SampleDelta, blep: B) -> Self {
273 BlepStereo { blep, mono_filter }
274 }
275}
276
277impl<B: Blep> Deref for BlepAmpFilter<B> {
278 type Target = B;
279 fn deref(&self) -> &B {
280 &self.blep
281 }
282}
283
284impl<B: Blep> DerefMut for BlepAmpFilter<B> {
285 fn deref_mut(&mut self) -> &mut B {
286 &mut self.blep
287 }
288}
289
290impl<B: Blep> Deref for BlepStereo<B> {
291 type Target = B;
292 fn deref(&self) -> &B {
293 &self.blep
294 }
295}
296
297impl<B: Blep> DerefMut for BlepStereo<B> {
298 fn deref_mut(&mut self) -> &mut B {
299 &mut self.blep
300 }
301}
302
303impl<B> Blep for BlepAmpFilter<B>
304 where B: Blep, B::SampleDelta: MulNorm + SampleDelta
305{
306 type SampleDelta = B::SampleDelta;
307
308 #[inline]
309 fn ensure_frame_time(&mut self, sample_rate: u32, ts_rate: f64, frame_ts: FTs, margin_ts: FTs) {
310 self.blep.ensure_frame_time(sample_rate, ts_rate, frame_ts, margin_ts)
311 }
312 #[inline]
313 fn end_frame(&mut self, timestamp: FTs) -> usize {
314 self.blep.end_frame(timestamp)
315 }
316 #[inline]
317 fn add_step(&mut self, channel: usize, timestamp: FTs, delta: Self::SampleDelta) {
318 self.blep.add_step(channel, timestamp, delta.mul_norm(self.filter))
319 }
320}
321
322impl<B> Blep for BlepStereo<B>
323 where B: Blep, B::SampleDelta: MulNorm + SampleDelta
324{
325 type SampleDelta = B::SampleDelta;
326 #[inline]
327
328 fn ensure_frame_time(&mut self, sample_rate: u32, ts_rate: f64, frame_ts: FTs, margin_ts: FTs) {
329 self.blep.ensure_frame_time(sample_rate, ts_rate, frame_ts, margin_ts)
330 }
331 #[inline]
332 fn end_frame(&mut self, timestamp: FTs) -> usize {
333 self.blep.end_frame(timestamp)
334 }
335 #[inline]
336 fn add_step(&mut self, channel: usize, timestamp: FTs, delta: B::SampleDelta) {
337 match channel {
338 0|1 => self.blep.add_step(channel, timestamp, delta),
339 _ => {
340 let delta = delta.mul_norm(self.mono_filter);
341 self.blep.add_step(0, timestamp, delta);
342 self.blep.add_step(1, timestamp, delta);
343 }
344 }
345 }
346}
347
348/// A helper method for rendering square-wave audio from slices containing updates of audio
349/// digital levels, sorted by time encoded in [VideoTs] time stamps.
350pub fn render_audio_frame_vts<VF,VL,L,A,T>(
351 prev_state: u8,
352 end_ts: Option<VFrameTs<VF>>,
353 changes: &[T],
354 blep: &mut A, channel: usize
355 )
356 where VF: VideoFrame,
357 VL: AmpLevels<L>,
358 L: SampleDelta,
359 A: Blep<SampleDelta=L>,
360 T: Copy, (VideoTs, u8): From<T>,
361{
362 let mut last_vol = VL::amp_level(prev_state.into());
363 for &tsd in changes.iter() {
364 let (ts, state) = tsd.into();
365 let vts: VFrameTs<_> = ts.into();
366 if let Some(end_ts) = end_ts {
367 if vts >= end_ts { // TODO >= or >
368 break
369 }
370 }
371 let next_vol = VL::amp_level(state.into());
372 if let Some(delta) = last_vol.sample_delta(next_vol) {
373 let timestamp = vts.into_tstates();
374 blep.add_step(channel, timestamp, delta);
375 last_vol = next_vol;
376 }
377 }
378}
379
380/// A helper method for rendering square-wave audio from slices containing updates of audio
381/// digital levels, sorted by T-state counter value.
382pub fn render_audio_frame_ts<VL,L,A,T>(
383 prev_state: u8,
384 end_ts: Option<FTs>,
385 changes: &[T],
386 blep: &mut A,
387 channel: usize
388 )
389 where VL: AmpLevels<L>,
390 L: SampleDelta,
391 A: Blep<SampleDelta=L>,
392 T: Copy, (FTs, u8): From<T>,
393{
394 let mut last_vol = VL::amp_level(prev_state.into());
395 for &tsd in changes.iter() {
396 let (ts, state) = tsd.into();
397 if let Some(end_ts) = end_ts {
398 if ts >= end_ts { // TODO >= or >
399 break
400 }
401 }
402 // print!("{}:{} ", state, ts);
403 let next_vol = VL::amp_level(state.into());
404 if let Some(delta) = last_vol.sample_delta(next_vol) {
405 blep.add_step(channel, ts, delta);
406 last_vol = next_vol;
407 }
408 }
409}