// coreaudio/audio_unit/stream_format.rs
//! A rustification of the `AudioStreamBasicDescription` type.
//!
//! Find the original `AudioStreamBasicDescription` reference [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Reference/CoreAudioDataTypesRef/#//apple_ref/c/tdef/AudioStreamBasicDescription).

use objc2_core_audio_types::AudioStreamBasicDescription;

use super::audio_format::AudioFormat;
use super::audio_format::LinearPcmFlags;
use super::SampleFormat;
use crate::error::{self, Error};
11
12/// A representation of the AudioStreamBasicDescription specifically for use with the AudioUnit API.
13///
14/// By using a type specific to the audio unit API, we can remove a lot of unnecessary boilerplate
15/// that is normally associated with the AudioStreamBasicDescription.
16///
17/// Seeing as `LinearPCM` data (the `AudioFormat` used by the `AudioUnit` API) implies a single
18/// frame per packet, we can infer many of the fields in an ASBD from the sample type.
19///
20/// `bytes_per_packet = size_of::<S>()`
21/// `bytes_per_frame = size_of::<S>()`
22/// `frames_per_packet` = 1
23/// `bits_per_channel = size_of::<S>()` / channels_per_frame * 8
24///
25/// > A *packet* is a collection of one or more contiguous frames. In linear PCM audio, a packet is
26/// > always a single frame.
27///
28/// [from *Core Audio Overview*](https://developer.apple.com/library/ios/documentation/MusicAudio/Conceptual/CoreAudioOverview/WhatisCoreAudio/WhatisCoreAudio.html)
29///
30/// > The canonical formats in Core Audio are as follows:
31/// >
32/// > - iOS input and output: Linear PCM with 16-bit integer samples.
33/// > - iOS audio units and other audio processing: Noninterleaved linear PCM with 8.24-bit
34/// > fixed-point samples
35/// > - Mac input and output: Linear PCM with 32-bit floating point samples.
36/// > - Mac audio units and other audio processing: Noninterleaved linear PCM with 32-bit floating
37/// > point samples.
38#[derive(Copy, Clone, Debug)]
39pub struct StreamFormat {
40 /// The number of frames of audio data per second used to represent a signal.
41 pub sample_rate: f64,
42 /// The sample format used to represent the audio data.
43 ///
44 /// In OS X, Core Audio expects audio data to be in native-endian, 32-bit floating-point,
45 /// linear PCM format.
46 ///
47 /// iOS uses integer and fixed-point audio data. The result is faster calculations and less
48 /// battery drain when processing audio. iOS provides a Converter audio unit and inclues the
49 /// interfaces from Audio Converter Services (TODO: look into exposing this).
50 pub sample_format: SampleFormat,
51 /// The format flags for the given StreamFormat.
52 pub flags: super::audio_format::LinearPcmFlags,
53 /// The number of channels.
54 pub channels: u32,
55}
56
57impl StreamFormat {
58 /// Convert an AudioStreamBasicDescription into a StreamFormat.
59 ///
60 /// Note: `audio_unit::StreamFormat` exclusively uses the `LinearPCM` `AudioFormat`. This is as
61 /// specified in the documentation:
62 ///
63 /// > Specify kAudioFormatLinearPCM for the mFormatID field. Audio units use uncompressed audio
64 /// > data, so this is the correct format identifier to use whenever you work with audio units.
65 ///
66 /// [*Audio Unit Hosting Guide for iOS*](https://developer.apple.com/library/ios/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/AudioUnitHostingFundamentals/AudioUnitHostingFundamentals.html)
67 ///
68 /// Returns an `Error` if the `AudioFormat` inferred by the ASBD is not `LinearPCM`.
69 ///
70 /// Returns an `Error` if the sample format of the asbd cannot be matched to a format supported by SampleFormat.
71 #[allow(non_snake_case)]
72 pub fn from_asbd(asbd: AudioStreamBasicDescription) -> Result<StreamFormat, Error> {
73 const NOT_SUPPORTED: Error = Error::AudioUnit(error::audio_unit::Error::FormatNotSupported);
74
75 let AudioStreamBasicDescription {
76 mSampleRate,
77 mFormatID,
78 mFormatFlags,
79 mBytesPerFrame: _,
80 mChannelsPerFrame,
81 mBitsPerChannel,
82 ..
83 } = asbd;
84
85 // Retrieve the LinearPCM flags.
86 let flags = match AudioFormat::from_format_and_flag(mFormatID, Some(mFormatFlags)) {
87 Some(AudioFormat::LinearPCM(flags)) => flags,
88 _ => return Err(NOT_SUPPORTED),
89 };
90
91 // Determine the `SampleFormat` to use.
92 let sample_format =
93 match SampleFormat::from_flags_and_bits_per_sample(flags, mBitsPerChannel) {
94 Some(sample_format) => sample_format,
95 None => return Err(NOT_SUPPORTED),
96 };
97 let channels = mChannelsPerFrame;
98 Ok(StreamFormat {
99 sample_rate: mSampleRate,
100 flags,
101 sample_format,
102 channels,
103 })
104 }
105
106 /// Convert a StreamFormat into an AudioStreamBasicDescription.
107 /// Note that this function assumes that only packed formats are used.
108 /// This only affects I24, since all other formats supported by `StreamFormat`
109 /// are always packed.
110 pub fn to_asbd(self) -> AudioStreamBasicDescription {
111 let StreamFormat {
112 sample_rate,
113 flags,
114 sample_format,
115 channels,
116 } = self;
117
118 let (format, maybe_flag) =
119 AudioFormat::LinearPCM(flags | LinearPcmFlags::IS_PACKED).as_format_and_flag();
120
121 let flag = maybe_flag.unwrap_or(u32::MAX - 2147483647);
122
123 let non_interleaved = flags.contains(LinearPcmFlags::IS_NON_INTERLEAVED);
124 let bytes_per_frame = if non_interleaved {
125 sample_format.size_in_bytes() as u32
126 } else {
127 sample_format.size_in_bytes() as u32 * channels
128 };
129 const FRAMES_PER_PACKET: u32 = 1;
130 let bytes_per_packet = bytes_per_frame * FRAMES_PER_PACKET;
131 let bits_per_channel = sample_format.size_in_bits();
132
133 AudioStreamBasicDescription {
134 mSampleRate: sample_rate,
135 mFormatID: format,
136 mFormatFlags: flag,
137 mBytesPerPacket: bytes_per_packet,
138 mFramesPerPacket: FRAMES_PER_PACKET,
139 mBytesPerFrame: bytes_per_frame,
140 mChannelsPerFrame: channels,
141 mBitsPerChannel: bits_per_channel,
142 mReserved: 0,
143 }
144 }
145}