objc2_audio_toolbox/generated/
AudioQueue.rs

1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::cell::UnsafeCell;
4use core::ffi::*;
5use core::marker::{PhantomData, PhantomPinned};
6use core::ptr::NonNull;
7#[cfg(feature = "dispatch2")]
8use dispatch2::*;
9use objc2::__framework_prelude::*;
10#[cfg(feature = "objc2-core-audio-types")]
11use objc2_core_audio_types::*;
12#[cfg(feature = "objc2-core-foundation")]
13use objc2_core_foundation::*;
14
15use crate::*;
16
/// A value that uniquely identifies an audio queue property.
///
/// Property IDs are four-character codes packed into a `u32` (for example,
/// `kAudioQueueProperty_IsRunning` is `0x6171726e`, i.e. `'aqrn'`).
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuepropertyid?language=objc)
pub type AudioQueuePropertyID = u32;

/// A value that uniquely identifies an audio queue parameter.
///
/// See the `kAudioQueueParam_*` constants for the defined parameters.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueparameterid?language=objc)
pub type AudioQueueParameterID = u32;

/// A value for an audio queue parameter.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueparametervalue?language=objc)
pub type AudioQueueParameterValue = f32;
31
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/opaqueaudioqueue?language=objc)
// Opaque "extern type" pattern: the zero-sized `inner` field gives the struct
// no constructible layout in Rust, and the marker field makes it `!Send`,
// `!Sync` and `!Unpin`, so values can only exist behind raw pointers.
#[repr(C)]
#[derive(Debug)]
pub struct OpaqueAudioQueue {
    inner: [u8; 0],
    _p: UnsafeCell<PhantomData<(*const UnsafeCell<()>, PhantomPinned)>>,
}

// SAFETY: A pointer to this type is encoded as a pointer to an opaque struct
// (`^{OpaqueAudioQueue=}`), matching the Objective-C runtime encoding.
unsafe impl RefEncode for OpaqueAudioQueue {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Encoding::Struct("OpaqueAudioQueue", &[]));
}

/// An opaque data type that represents an audio queue.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueref?language=objc)
pub type AudioQueueRef = *mut OpaqueAudioQueue;
48
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/opaqueaudioqueuetimeline?language=objc)
// Opaque "extern type" pattern: zero-sized, `!Send`/`!Sync`/`!Unpin`; only
// ever handled behind raw pointers (see `AudioQueueTimelineRef`).
#[repr(C)]
#[derive(Debug)]
pub struct OpaqueAudioQueueTimeline {
    inner: [u8; 0],
    _p: UnsafeCell<PhantomData<(*const UnsafeCell<()>, PhantomPinned)>>,
}

// SAFETY: A pointer to this type is encoded as a pointer to an opaque struct
// (`^{OpaqueAudioQueueTimeline=}`), matching the Objective-C runtime encoding.
unsafe impl RefEncode for OpaqueAudioQueueTimeline {
    const ENCODING_REF: Encoding =
        Encoding::Pointer(&Encoding::Struct("OpaqueAudioQueueTimeline", &[]));
}

/// An opaque data type that represents an audio queue timeline.
///
/// You can use this object to observe any overloads in the audio device associated with the
/// audio queue. A timeline object receives notifications of discontinuities in the audio
/// hardware's sample timeline--for instance, a period of silence when sound was expected.
/// Causes of discontinuities include changes in the device state or processing overloads.
/// See Technical Q&A QA1467 for a discussion of Core Audio overload warnings. These
/// warnings indicate you are taking too long to process audio data and the system has cut
/// you off. You query a timeline object by passing it as a parameter to
/// AudioQueueGetCurrentTime, which reports whether a discontinuity has occurred.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuetimelineref?language=objc)
pub type AudioQueueTimelineRef = *mut OpaqueAudioQueueTimeline;
77
/// The specified audio queue buffer does not belong to this audio queue.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidbuffer?language=objc)
pub const kAudioQueueErr_InvalidBuffer: OSStatus = -66687;
/// The audio queue buffer is empty (its mAudioDataByteSize is 0).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_bufferempty?language=objc)
pub const kAudioQueueErr_BufferEmpty: OSStatus = -66686;
/// The function cannot act on the audio queue because it is being asynchronously disposed of.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_disposalpending?language=objc)
pub const kAudioQueueErr_DisposalPending: OSStatus = -66685;
/// The specified property ID is invalid.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidproperty?language=objc)
pub const kAudioQueueErr_InvalidProperty: OSStatus = -66684;
/// The size of the specified property value is invalid.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidpropertysize?language=objc)
pub const kAudioQueueErr_InvalidPropertySize: OSStatus = -66683;
/// The specified parameter ID is invalid.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidparameter?language=objc)
pub const kAudioQueueErr_InvalidParameter: OSStatus = -66682;
/// The audio queue has encountered a problem and cannot start.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_cannotstart?language=objc)
pub const kAudioQueueErr_CannotStart: OSStatus = -66681;
/// The specified audio hardware device could not be located.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invaliddevice?language=objc)
pub const kAudioQueueErr_InvalidDevice: OSStatus = -66680;
/// The audio queue buffer cannot be disposed of while it is enqueued.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_bufferinqueue?language=objc)
pub const kAudioQueueErr_BufferInQueue: OSStatus = -66679;
/// The operation is not valid for the audio queue's current run state
/// (for example, it requires a stopped queue but the queue is running).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidrunstate?language=objc)
pub const kAudioQueueErr_InvalidRunState: OSStatus = -66678;
/// The operation is not valid for this type of queue
/// (for example, it requires an output queue but was called on an input queue).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidqueuetype?language=objc)
pub const kAudioQueueErr_InvalidQueueType: OSStatus = -66677;
/// You do not have the required permissions to perform the requested operation.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_permissions?language=objc)
pub const kAudioQueueErr_Permissions: OSStatus = -66676;
/// The specified property value is invalid.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidpropertyvalue?language=objc)
pub const kAudioQueueErr_InvalidPropertyValue: OSStatus = -66675;
/// AudioQueuePrime timed out before the requested number of sample frames could be prepared.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_primetimedout?language=objc)
pub const kAudioQueueErr_PrimeTimedOut: OSStatus = -66674;
/// The requested codec could not be found.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_codecnotfound?language=objc)
pub const kAudioQueueErr_CodecNotFound: OSStatus = -66673;
/// The codec could not be accessed.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidcodecaccess?language=objc)
pub const kAudioQueueErr_InvalidCodecAccess: OSStatus = -66672;
/// The audio queue has become invalid (for example, because the media services daemon exited).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_queueinvalidated?language=objc)
pub const kAudioQueueErr_QueueInvalidated: OSStatus = -66671;
/// An audio queue supports only one processing tap at a time.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_toomanytaps?language=objc)
pub const kAudioQueueErr_TooManyTaps: OSStatus = -66670;
/// A processing-tap call was made outside the context of the tap's callback.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidtapcontext?language=objc)
pub const kAudioQueueErr_InvalidTapContext: OSStatus = -66669;
/// During recording, data was lost because there was no enqueued buffer to store it in.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_recordunderrun?language=objc)
pub const kAudioQueueErr_RecordUnderrun: OSStatus = -66668;
/// The processing tap's flags do not specify a valid tap type
/// (see `AudioQueueProcessingTapFlags`).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidtaptype?language=objc)
pub const kAudioQueueErr_InvalidTapType: OSStatus = -66667;
/// A buffer was enqueued while it was already enqueued.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_bufferenqueuedtwice?language=objc)
pub const kAudioQueueErr_BufferEnqueuedTwice: OSStatus = -66666;
/// The audio queue cannot start at this time; see Apple's documentation for details.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_cannotstartyet?language=objc)
pub const kAudioQueueErr_CannotStartYet: OSStatus = -66665;
/// Buffers cannot be enqueued during a Reset, Stop, or Dispose operation.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_enqueueduringreset?language=objc)
pub const kAudioQueueErr_EnqueueDuringReset: OSStatus = -66632;
/// The operation requires the queue to be in offline mode but it is not, or vice versa.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueerr_invalidofflinemode?language=objc)
pub const kAudioQueueErr_InvalidOfflineMode: OSStatus = -66626;
128
/// value is UInt32
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_isrunning?language=objc)
pub const kAudioQueueProperty_IsRunning: AudioQueuePropertyID = 0x6171726e; // 'aqrn'
/// value is Float64
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueuedeviceproperty_samplerate?language=objc)
pub const kAudioQueueDeviceProperty_SampleRate: AudioQueuePropertyID = 0x61717372; // 'aqsr'
/// value is UInt32
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueuedeviceproperty_numberchannels?language=objc)
pub const kAudioQueueDeviceProperty_NumberChannels: AudioQueuePropertyID = 0x61716463; // 'aqdc'
/// value is CFStringRef
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_currentdevice?language=objc)
pub const kAudioQueueProperty_CurrentDevice: AudioQueuePropertyID = 0x61716364; // 'aqcd'
/// value is void* (codec-specific magic cookie data)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_magiccookie?language=objc)
pub const kAudioQueueProperty_MagicCookie: AudioQueuePropertyID = 0x61716d63; // 'aqmc'
/// value is UInt32
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_maximumoutputpacketsize?language=objc)
pub const kAudioQueueProperty_MaximumOutputPacketSize: AudioQueuePropertyID = 0x786f7073; // 'xops'
/// value is AudioStreamBasicDescription
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_streamdescription?language=objc)
pub const kAudioQueueProperty_StreamDescription: AudioQueuePropertyID = 0x61716674; // 'aqft'
/// value is AudioChannelLayout
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_channellayout?language=objc)
pub const kAudioQueueProperty_ChannelLayout: AudioQueuePropertyID = 0x6171636c; // 'aqcl'
/// value is UInt32 (0 or 1)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_enablelevelmetering?language=objc)
pub const kAudioQueueProperty_EnableLevelMetering: AudioQueuePropertyID = 0x61716d65; // 'aqme'
/// value is an array of AudioQueueLevelMeterState, one per channel (linear scale)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_currentlevelmeter?language=objc)
pub const kAudioQueueProperty_CurrentLevelMeter: AudioQueuePropertyID = 0x61716d76; // 'aqmv'
/// value is an array of AudioQueueLevelMeterState, one per channel (decibels)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_currentlevelmeterdb?language=objc)
pub const kAudioQueueProperty_CurrentLevelMeterDB: AudioQueuePropertyID = 0x61716d64; // 'aqmd'
/// value is UInt32
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_decodebuffersizeframes?language=objc)
pub const kAudioQueueProperty_DecodeBufferSizeFrames: AudioQueuePropertyID = 0x64636266; // 'dcbf'
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_convertererror?language=objc)
pub const kAudioQueueProperty_ConverterError: AudioQueuePropertyID = 0x71637665; // 'qcve'
/// value is UInt32 (0 or 1)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_enabletimepitch?language=objc)
pub const kAudioQueueProperty_EnableTimePitch: AudioQueuePropertyID = 0x715f7470; // 'q_tp'
/// value is UInt32; one of the `kAudioQueueTimePitchAlgorithm_*` constants
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_timepitchalgorithm?language=objc)
pub const kAudioQueueProperty_TimePitchAlgorithm: AudioQueuePropertyID = 0x71747061; // 'qtpa'
/// value is UInt32 (1 = bypassed, 0 = active)
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_timepitchbypass?language=objc)
pub const kAudioQueueProperty_TimePitchBypass: AudioQueuePropertyID = 0x71747062; // 'qtpb'
163
/// Highest quality time/pitch algorithm; can vary pitch and rate independently.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueuetimepitchalgorithm_spectral?language=objc)
pub const kAudioQueueTimePitchAlgorithm_Spectral: u32 = 0x73706563; // 'spec'
/// Modest quality time/pitch algorithm; can vary pitch and rate independently.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueuetimepitchalgorithm_timedomain?language=objc)
pub const kAudioQueueTimePitchAlgorithm_TimeDomain: u32 = 0x7469646f; // 'tido'
/// High quality; rate and pitch are linked (varispeed playback).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueuetimepitchalgorithm_varispeed?language=objc)
pub const kAudioQueueTimePitchAlgorithm_Varispeed: u32 = 0x76737064; // 'vspd'

/// value is an array of AudioQueueChannelAssignment, one per channel
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueproperty_channelassignments?language=objc)
pub const kAudioQueueProperty_ChannelAssignments: AudioQueuePropertyID = 0x61716361; // 'aqca'

/// Playback volume, from 0.0 (silence) through 1.0 (full volume).
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueparam_volume?language=objc)
pub const kAudioQueueParam_Volume: AudioQueueParameterID = 1;
/// Playback rate, from 0.5 through 2.0; effective only when time/pitch
/// processing is enabled on the queue.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueparam_playrate?language=objc)
pub const kAudioQueueParam_PlayRate: AudioQueueParameterID = 2;
/// Pitch adjustment in cents (-2400 through +2400); effective only when
/// time/pitch processing is enabled with a pitch-capable algorithm.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueparam_pitch?language=objc)
pub const kAudioQueueParam_Pitch: AudioQueueParameterID = 3;
/// Seconds over which subsequent volume changes are ramped.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueparam_volumeramptime?language=objc)
pub const kAudioQueueParam_VolumeRampTime: AudioQueueParameterID = 4;
/// Stereo panning, from -1.0 (full left) through +1.0 (full right); 0.0 is centered.
///
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/kaudioqueueparam_pan?language=objc)
pub const kAudioQueueParam_Pan: AudioQueueParameterID = 13;
184
/// Flags used in conjunction with processing taps
///
/// In the flags passed to AudioQueueProcessingTapNew, either the PreEffects
/// or PostEffects flag must be set, but not both.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueprocessingtapflags?language=objc)
// NS_OPTIONS
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AudioQueueProcessingTapFlags(pub u32);
bitflags::bitflags! {
    impl AudioQueueProcessingTapFlags: u32 {
        /// Signifies that the processing tap is inserted before any effects.
        /// Passed to AudioQueueProcessingTapNew and to the callback.
        #[doc(alias = "kAudioQueueProcessingTap_PreEffects")]
        const PreEffects = 1<<0;
        /// Signifies that the processing tap is inserted after any effects.
        /// Passed to AudioQueueProcessingTapNew and to the callback.
        #[doc(alias = "kAudioQueueProcessingTap_PostEffects")]
        const PostEffects = 1<<1;
        /// Signifies that the processing tap is a siphon; it does not call
        /// GetSourceAudio. The callback instead receives the source audio
        /// and may not modify it. Passed to AudioQueueProcessingTapNew and to the callback.
        #[doc(alias = "kAudioQueueProcessingTap_Siphon")]
        const Siphon = 1<<2;
        /// Signifies that the source audio is the beginning of a continuous stream,
        /// i.e. following the beginning or resumption of playback or recording.
        /// Returned from GetSourceAudio.
        #[doc(alias = "kAudioQueueProcessingTap_StartOfStream")]
        const StartOfStream = 1<<8;
        /// Signifies that the source audio is past the end of stream. This happens when
        /// the audio queue is being stopped asynchronously and has finished playing
        /// all of its data. Returned from GetSourceAudio and should be propagated
        /// on return from the callback.
        #[doc(alias = "kAudioQueueProcessingTap_EndOfStream")]
        const EndOfStream = 1<<9;
    }
}

// SAFETY: `repr(transparent)` over a `u32`, so it has `u32`'s encoding.
unsafe impl Encode for AudioQueueProcessingTapFlags {
    const ENCODING: Encoding = u32::ENCODING;
}

unsafe impl RefEncode for AudioQueueProcessingTapFlags {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
237
/// Defines a buffer of audio data to be managed by an audio queue.
///
/// Each audio queue has an associated set of audio queue buffers. You can request that a
/// queue allocate buffers using the AudioQueueAllocateBuffer function and dispose of them
/// using the AudioQueueFreeBuffer function.
///
/// You may also use AudioQueueAllocateBufferWithPacketDescriptions to allocate buffers
/// with space for AudioPacketDescriptions, as used in VBR formats. The
/// mPacketDescriptionCapacity, mPacketDescriptions, and mPacketDescriptionCount
/// fields may only be used with buffers allocated with this function.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuebuffer?language=objc)
#[cfg(feature = "objc2-core-audio-types")]
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AudioQueueBuffer {
    /// The size of the buffer, in bytes. This size is set when the buffer is allocated and
    /// cannot be changed.
    pub mAudioDataBytesCapacity: u32,
    /// A pointer to the audio data in the buffer. Although you can write data to this buffer,
    /// you cannot make it point to another address.
    pub mAudioData: NonNull<c_void>,
    /// The number of bytes of valid audio data in the buffer. You set this value when providing
    /// data for playback; the audio queue sets this value when recording data from a recording
    /// queue.
    pub mAudioDataByteSize: u32,
    /// A value you may specify to identify the buffer when it is passed back in recording or
    /// playback callback functions.
    pub mUserData: *mut c_void,
    /// The maximum number of packet descriptions that can be stored in mPacketDescriptions.
    pub mPacketDescriptionCapacity: u32,
    /// An array of AudioStreamPacketDescriptions associated with the buffer.
    pub mPacketDescriptions: *const AudioStreamPacketDescription,
    /// The number of valid packet descriptions in the buffer. You set this value when providing
    /// buffers for playback; the audio queue sets this value when returning buffers from
    /// a recording queue.
    pub mPacketDescriptionCount: u32,
}

// SAFETY: The encoding lists the fields in declaration order, matching the
// `repr(C)` layout of the struct above.
#[cfg(feature = "objc2-core-audio-types")]
unsafe impl Encode for AudioQueueBuffer {
    const ENCODING: Encoding = Encoding::Struct(
        "AudioQueueBuffer",
        &[
            <u32>::ENCODING,
            <NonNull<c_void>>::ENCODING,
            <u32>::ENCODING,
            <*mut c_void>::ENCODING,
            <u32>::ENCODING,
            <*const AudioStreamPacketDescription>::ENCODING,
            <u32>::ENCODING,
        ],
    );
}

#[cfg(feature = "objc2-core-audio-types")]
unsafe impl RefEncode for AudioQueueBuffer {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
305
/// A pointer to an AudioQueueBuffer.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuebufferref?language=objc)
#[cfg(feature = "objc2-core-audio-types")]
pub type AudioQueueBufferRef = *mut AudioQueueBuffer;
311
/// Specifies a value for an audio queue parameter.
///
/// Two ways are available to supply an audio queue with parameters:
///
/// - Provide one or more parameters by calling the AudioQueueEnqueueBufferWithParameters
/// function. In this case, the parameters are applied to the specified buffer when it is
/// played.
///
/// - Assign a parameter value immediately to an audio queue by calling the
/// AudioQueueSetParameter function.
///
/// Note that the AudioQueueGetParameter function always returns the actual value of the
/// parameter.
///
/// In macOS v10.5, audio queues have one parameter available: kAudioQueueParam_Volume,
/// which controls the queue's playback volume.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueparameterevent?language=objc)
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AudioQueueParameterEvent {
    /// The parameter.
    pub mID: AudioQueueParameterID,
    /// The value of the specified parameter.
    pub mValue: AudioQueueParameterValue,
}

// SAFETY: The encoding lists the fields in declaration order, matching the
// `repr(C)` layout of the struct above.
unsafe impl Encode for AudioQueueParameterEvent {
    const ENCODING: Encoding = Encoding::Struct(
        "AudioQueueParameterEvent",
        &[
            <AudioQueueParameterID>::ENCODING,
            <AudioQueueParameterValue>::ENCODING,
        ],
    );
}

unsafe impl RefEncode for AudioQueueParameterEvent {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
355
/// Specifies the current level metering information for one channel of an audio queue.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuelevelmeterstate?language=objc)
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AudioQueueLevelMeterState {
    /// The audio channel's average RMS power.
    pub mAveragePower: f32,
    /// The audio channel's peak RMS power.
    pub mPeakPower: f32,
}

// SAFETY: The encoding lists the two `f32` fields in declaration order,
// matching the `repr(C)` layout of the struct above.
unsafe impl Encode for AudioQueueLevelMeterState {
    const ENCODING: Encoding = Encoding::Struct(
        "AudioQueueLevelMeterState",
        &[<f32>::ENCODING, <f32>::ENCODING],
    );
}

unsafe impl RefEncode for AudioQueueLevelMeterState {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
380
/// [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/opaqueaudioqueueprocessingtap?language=objc)
// Opaque "extern type" pattern: zero-sized, `!Send`/`!Sync`/`!Unpin`; only
// ever handled behind raw pointers (see `AudioQueueProcessingTapRef`).
#[repr(C)]
#[derive(Debug)]
pub struct OpaqueAudioQueueProcessingTap {
    inner: [u8; 0],
    _p: UnsafeCell<PhantomData<(*const UnsafeCell<()>, PhantomPinned)>>,
}

// SAFETY: A pointer to this type is encoded as a pointer to an opaque struct
// (`^{OpaqueAudioQueueProcessingTap=}`), matching the Objective-C runtime encoding.
unsafe impl RefEncode for OpaqueAudioQueueProcessingTap {
    const ENCODING_REF: Encoding =
        Encoding::Pointer(&Encoding::Struct("OpaqueAudioQueueProcessingTap", &[]));
}

/// An object for intercepting and processing audio within an audio queue.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueprocessingtapref?language=objc)
pub type AudioQueueProcessingTapRef = *mut OpaqueAudioQueueProcessingTap;
398
/// Specifies an audio device channel to which the queue will play or from which
/// it will record.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuechannelassignment?language=objc)
#[cfg(feature = "objc2-core-foundation")]
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AudioQueueChannelAssignment {
    /// On iOS, this is a port UID obtained from AVAudioSession. On macOS, this is the UID
    /// obtained from an AudioDeviceID.
    pub mDeviceUID: NonNull<CFString>,
    /// The 1-based index of the channel.
    pub mChannelNumber: u32,
}

// SAFETY: The encoding lists the fields in declaration order, matching the
// `repr(C)` layout of the struct above.
#[cfg(feature = "objc2-core-foundation")]
unsafe impl Encode for AudioQueueChannelAssignment {
    const ENCODING: Encoding = Encoding::Struct(
        "AudioQueueChannelAssignment",
        &[<NonNull<CFString>>::ENCODING, <u32>::ENCODING],
    );
}

#[cfg(feature = "objc2-core-foundation")]
unsafe impl RefEncode for AudioQueueChannelAssignment {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
428
/// Defines a pointer to a block that is called when a playback audio
/// queue has finished taking data from a buffer.
///
/// A playback buffer callback is invoked when the audio queue has finished with the data to
/// be played and the buffer is available to your application for reuse. Your application
/// might want to immediately refill and re-enqueue the completed buffer at this time.
///
/// Note: this is a raw block pointer and may be null.
///
/// Parameter `inAQ`: The audio queue that invoked the callback.
///
/// Parameter `inBuffer`: The audio queue buffer made available by the audio queue.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueoutputcallbackblock?language=objc)
#[cfg(all(feature = "block2", feature = "objc2-core-audio-types"))]
pub type AudioQueueOutputCallbackBlock =
    *mut block2::DynBlock<dyn Fn(AudioQueueRef, AudioQueueBufferRef)>;
445
/// Defines a pointer to a block that is called when a recording audio
/// queue has finished filling a buffer.
///
/// You specify a recording buffer callback when calling AudioQueueNewInput. Your callback
/// is invoked each time the recording audio queue has filled a buffer with input data.
/// Typically, your callback should write the audio queue buffer's data to a file or other
/// buffer, and then re-queue the audio queue buffer to receive more data.
///
/// Note: this is a raw block pointer and may be null.
///
/// Parameter `inAQ`: The audio queue that invoked the callback.
///
/// Parameter `inBuffer`: An audio queue buffer, newly filled by the audio queue, containing the new audio data
/// your callback needs to write.
///
/// Parameter `inStartTime`: A pointer to an audio time stamp structure corresponding to the first sample contained
/// in the buffer. This contains the sample time of the first sample in the buffer.
///
/// Parameter `inNumberPacketDescriptions`: The number of audio packets contained in the data provided to the callback
///
/// Parameter `inPacketDescs`: For compressed formats which require packet descriptions, the packet descriptions
/// produced by the encoder for the incoming buffer.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueinputcallbackblock?language=objc)
#[cfg(all(feature = "block2", feature = "objc2-core-audio-types"))]
pub type AudioQueueInputCallbackBlock = *mut block2::DynBlock<
    dyn Fn(
        AudioQueueRef,
        AudioQueueBufferRef,
        NonNull<AudioTimeStamp>,
        u32,
        *const AudioStreamPacketDescription,
    ),
>;
479
/// Defines a pointer to a callback function that is called when a playback audio
/// queue has finished taking data from a buffer.
///
/// A playback buffer callback is invoked when the audio queue has finished with the data to
/// be played and the buffer is available to your application for reuse. Your application
/// might want to immediately refill and re-enqueue the completed buffer at this time.
///
/// The `Option` wrapper represents a nullable C function pointer
/// (`None` corresponds to `NULL`).
///
/// Parameter `inUserData`: The value specified by the inUserData parameter of the AudioQueueNewOutput function.
///
/// Parameter `inAQ`: The audio queue that invoked the callback.
///
/// Parameter `inBuffer`: The audio queue buffer made available by the audio queue.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueoutputcallback?language=objc)
#[cfg(feature = "objc2-core-audio-types")]
pub type AudioQueueOutputCallback =
    Option<unsafe extern "C-unwind" fn(*mut c_void, AudioQueueRef, AudioQueueBufferRef)>;
498
/// Defines a pointer to a callback function that is called when a recording audio
/// queue has finished filling a buffer.
///
/// You specify a recording buffer callback when calling AudioQueueNewInput. Your callback
/// is invoked each time the recording audio queue has filled a buffer with input data.
/// Typically, your callback should write the audio queue buffer's data to a file or other
/// buffer, and then re-queue the audio queue buffer to receive more data.
///
/// The `Option` wrapper represents a nullable C function pointer
/// (`None` corresponds to `NULL`).
///
/// Parameter `inUserData`: The value you've specified in the inUserData parameter of the AudioQueueNewInput
/// function.
///
/// Parameter `inAQ`: The audio queue that invoked the callback.
///
/// Parameter `inBuffer`: An audio queue buffer, newly filled by the audio queue, containing the new audio data
/// your callback needs to write.
///
/// Parameter `inStartTime`: A pointer to an audio time stamp structure corresponding to the first sample contained
/// in the buffer. This contains the sample time of the first sample in the buffer.
///
/// Parameter `inNumberPacketDescriptions`: The number of audio packets contained in the data provided to the callback
///
/// Parameter `inPacketDescs`: For compressed formats which require packet descriptions, the packet descriptions
/// produced by the encoder for the incoming buffer.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueinputcallback?language=objc)
#[cfg(feature = "objc2-core-audio-types")]
pub type AudioQueueInputCallback = Option<
    unsafe extern "C-unwind" fn(
        *mut c_void,
        AudioQueueRef,
        AudioQueueBufferRef,
        NonNull<AudioTimeStamp>,
        u32,
        *const AudioStreamPacketDescription,
    ),
>;
536
/// Defines a pointer to a callback function that is called when a specified
/// property changes value.
///
/// You assign a property listener callback when calling AudioQueueAddPropertyListener.
///
/// The `Option` wrapper represents a nullable C function pointer
/// (`None` corresponds to `NULL`).
///
/// Parameter `inUserData`: A pointer to the data specified by the inUserData parameter of the
/// AudioQueueAddPropertyListener function.
///
/// Parameter `inAQ`: The audio queue that invoked the callback.
///
/// Parameter `inID`: The ID of the property that invoked the callback.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueuepropertylistenerproc?language=objc)
pub type AudioQueuePropertyListenerProc =
    Option<unsafe extern "C-unwind" fn(*mut c_void, AudioQueueRef, AudioQueuePropertyID)>;
553
/// A function called when an audio queue has data to be processed by its tap
///
/// A processing callback is invoked when the audio queue has data that can be processed by a given
/// tap.
///
/// The audio queue will call the processing callback when it has sufficient data to provide for
/// processing.
///
/// In the case of a siphoning tap, the callback function can inspect the audio data in ioData, but
/// should not otherwise modify it. The callback should not call
/// AudioQueueProcessingTapGetSourceAudio.
///
/// A non-siphoning callback should call AudioQueueProcessingTapGetSourceAudio to request from the
/// queue as much source data as it needs in order to produce the requested number of output
/// samples. When the callback requests source data it may receive less data than it requests.
///
/// In the case of a tap on an audio output queue, the tap must emit the exact number of sample
/// frames that the queue requests. In normal circumstances, the tap's requests for source data will
/// be satisfied (as the client running the audio queue is also providing the queue with the audio
/// source material). If there is insufficient source data available (this is indicated by the
/// outNumberFrames from the GetSource call), then the processing tap should deal as best as it can;
/// it can either return less data than was requested or insert silence, noise, etc. itself. If it
/// returns less data than requested, the hosting audio queue will fill in the remainder with
/// silence.
///
/// In the case of a tap on an audio input queue, the tap may provide back less audio data than is
/// being requested. Typically this will occur because the tap will ask for source data that is not
/// available at this time (the audio input hasn't arrived yet), so the tap should cache the source
/// data that it needs and return as many processed samples as it can. If the processing tap falls
/// behind and is not providing data quickly enough silence will be generated in the data provided
/// to the client (and there is no signal about this either).
///
/// A processing tap executes in a semi-real-time context, so the general limitations for real-time
/// processing apply. Avoid using API's which may block. In particular, it is not safe to call the
/// audio queue on which the tap was installed, with the exceptions of
/// AudioQueueProcessingTapGetSourceAudio and AudioQueueProcessingTapGetQueueTime.
///
/// In normal operation the source data will be continuous from the last time the callback was
/// called and the processed samples should be continuous from the previous samples returned. If
/// there is any discontinuity between the last samples provided for processing the audio queue will
/// set the bit for kAudioQueueProcessing_StartOfStream in the inFlags. After a discontinuity the
/// first sample that the processing tap outputs should correspond to the first sample that was
/// provided in the source samples (so a reset and then consequent process serves to re-anchor a
/// relationship between the processing tap's source and processed samples). In this case the
/// processing tap will typically discard any previous state (for instance, if a processing tap was
/// adding a reverb to a signal, then the discontinuity flag would act the same as AudioUnitReset;
/// any previous source information in the processing tap should be discarded).
///
/// The caller is responsible for absorbing any processing delays. For example, if the processing is
/// to be done by an audio unit that reports a processing latency, then the caller should remove
/// those latency samples from the audio unit's rendering and not return them to the audio queue.
///
/// The processing tap is able to operate on the provided source data in place (that is, it can do
/// "in place processing") and return pointers to that buffer rather than its own. This works in a
/// similar way as AudioUnit render operations.
///
/// When an output audio queue is being stopped asynchronously, the processing tap will see the
/// kAudioQueueProcessingTap_EndOfStream bit set on return from GetSourceAudio, and is responsible
/// for propagating this bit from the callback when its processing has reached this point.
///
/// A processing tap will NEVER see the same source data again, so, it should keep its own copy if
/// it needs to keep it for further reference past the duration of this call. It also cannot assume
/// that the pointers to the source data that it retrieves will remain valid AFTER the processing
/// tap has executed.
///
/// The processing tap should ensure that the data pointers it provides in outProcessedData remain
/// valid until the tap is executed again.
///
/// A processing tap is destroyed implicitly when its audio queue is disposed. It may also be
/// removed explicitly, via AudioQueueProcessingTapDispose.
///
///
/// Parameter `inClientData`: the client data pointer passed to AudioQueueProcessingTapNew
///
/// Parameter `inAQTap`: The tap for this callback.
///
/// Parameter `inNumberFrames`: The requested number of sample frames to be rendered.
///
/// Parameter `ioFlags`: On entry, the flags passed at construction time are provided. On exit,
/// the start/end of stream flags should be set when appropriate.
///
/// Parameter `ioTimeStamp`: On an input audio queue, the timestamp must be returned from this function.
/// On an output audio queue, the callback is provided a continuous timestamp.
///
/// Parameter `outNumberFrames`: The number of frames of audio data provided in the processed data. Can be 0.
///
/// Parameter `ioData`: For non-siphoning taps, on entry, the buffer pointers are null and the lengths
/// are zero. On exit, they should contain the tap's output.
///
/// Siphoning taps receive valid buffers which they must not alter.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/audiotoolbox/audioqueueprocessingtapcallback?language=objc)
#[cfg(feature = "objc2-core-audio-types")]
pub type AudioQueueProcessingTapCallback = Option<
    unsafe extern "C-unwind" fn(
        // The fn-pointer parameters are positional; they correspond, in order,
        // to the documented parameter names:
        NonNull<c_void>,                       // inClientData
        AudioQueueProcessingTapRef,            // inAQTap
        u32,                                   // inNumberFrames
        NonNull<AudioTimeStamp>,               // ioTimeStamp
        NonNull<AudioQueueProcessingTapFlags>, // ioFlags
        NonNull<u32>,                          // outNumberFrames
        NonNull<AudioBufferList>,              // ioData
    ),
>;
658
extern "C-unwind" {
    /// Creates a new audio queue for playing audio data.
    ///
    /// To create a playback audio queue, you allocate buffers, then queue buffers (using
    /// AudioQueueEnqueueBuffer). The callback receives buffers and typically queues them again.
    /// To schedule a buffer for playback, providing parameter and start time information, call
    /// AudioQueueEnqueueBufferWithParameters.
    ///
    ///
    /// Parameter `inFormat`: A pointer to a structure describing the format of the audio data to be played. For
    /// linear PCM, only interleaved formats are supported. Compressed formats are supported.
    ///
    /// Parameter `inCallbackProc`: A pointer to a callback function to be called when the audio queue has finished playing
    /// a buffer.
    ///
    /// Parameter `inUserData`: A value or pointer to data that you specify to be passed to the callback function.
    ///
    /// Parameter `inCallbackRunLoop`: The event loop on which inCallbackProc is to be called. If you specify NULL, the
    /// callback is called on one of the audio queue's internal threads.
    ///
    /// Parameter `inCallbackRunLoopMode`: The run loop mode in which to call the callback. Typically, you pass
    /// kCFRunLoopCommonModes. (NULL also specifies kCFRunLoopCommonModes). Other
    /// possibilities are implementation specific. You can choose to create your own thread with
    /// your own run loops. For more information on run loops, see Run Loops or CFRunLoop
    /// Reference.
    ///
    /// Parameter `inFlags`: Reserved for future use. Pass 0.
    ///
    /// Parameter `outAQ`: On return, this variable contains a pointer to the newly created playback audio queue
    /// object.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_format` must point to a valid `AudioStreamBasicDescription`, and `out_aq` must be
    /// valid for writing an `AudioQueueRef`.
    #[cfg(all(feature = "objc2-core-audio-types", feature = "objc2-core-foundation"))]
    pub fn AudioQueueNewOutput(
        in_format: NonNull<AudioStreamBasicDescription>,
        in_callback_proc: AudioQueueOutputCallback,
        in_user_data: *mut c_void,
        in_callback_run_loop: Option<&CFRunLoop>,
        in_callback_run_loop_mode: Option<&CFString>,
        in_flags: u32,
        out_aq: NonNull<AudioQueueRef>,
    ) -> OSStatus;
}
702
extern "C-unwind" {
    /// Creates a new audio queue for recording audio data.
    ///
    /// Outline of how to use the queue for input:
    ///
    /// - create input queue
    /// - allocate buffers
    /// - enqueue buffers (AudioQueueEnqueueBuffer, not with parameters, no packet descriptions)
    /// - the callback receives buffers and re-enqueues them
    ///
    ///
    /// Parameter `inFormat`: A pointer to a structure describing the format of the audio data to be recorded. For
    /// linear PCM, only interleaved formats are supported. Compressed formats are supported.
    ///
    /// Parameter `inCallbackProc`: A pointer to a callback function to be called when the audio queue has finished filling
    /// a buffer.
    ///
    /// Parameter `inUserData`: A value or pointer to data that you specify to be passed to the callback function.
    ///
    /// Parameter `inCallbackRunLoop`: The event loop on which inCallbackProc is to be called. If you specify NULL, the
    /// callback is called on one of the audio queue's internal threads.
    ///
    /// Parameter `inCallbackRunLoopMode`: The run loop mode in which to call the callback. Typically, you pass
    /// kCFRunLoopCommonModes. (NULL also specifies kCFRunLoopCommonModes). Other
    /// possibilities are implementation specific. You can choose to create your own thread with
    /// your own run loops. For more information on run loops, see Run Loops or CFRunLoop
    /// Reference.
    ///
    /// Parameter `inFlags`: Reserved for future use. Pass 0.
    ///
    /// Parameter `outAQ`: On return, this variable contains a pointer to the newly created recording audio queue
    /// object.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_format` must point to a valid `AudioStreamBasicDescription`, and `out_aq` must be
    /// valid for writing an `AudioQueueRef`.
    #[cfg(all(feature = "objc2-core-audio-types", feature = "objc2-core-foundation"))]
    pub fn AudioQueueNewInput(
        in_format: NonNull<AudioStreamBasicDescription>,
        in_callback_proc: AudioQueueInputCallback,
        in_user_data: *mut c_void,
        in_callback_run_loop: Option<&CFRunLoop>,
        in_callback_run_loop_mode: Option<&CFString>,
        in_flags: u32,
        out_aq: NonNull<AudioQueueRef>,
    ) -> OSStatus;
}
748
extern "C-unwind" {
    /// Creates a new audio queue for playing audio data.
    ///
    /// To create a playback audio queue, you allocate buffers, then queue buffers (using
    /// AudioQueueEnqueueBuffer). The callback receives buffers and typically queues them again.
    /// To schedule a buffer for playback, providing parameter and start time information, call
    /// AudioQueueEnqueueBufferWithParameters.
    ///
    ///
    /// Parameter `outAQ`: On return, this variable contains a pointer to the newly created playback audio queue
    /// object.
    ///
    /// Parameter `inFormat`: A pointer to a structure describing the format of the audio data to be played. For
    /// linear PCM, only interleaved formats are supported. Compressed formats are supported.
    ///
    /// Parameter `inFlags`: Reserved for future use. Pass 0.
    ///
    /// Parameter `inCallbackDispatchQueue`: The dispatch queue from which inCallbackBlock is to be called.
    ///
    /// Parameter `inCallbackBlock`: A pointer to a callback block to be called when the audio queue has finished playing
    /// a buffer.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `out_aq` must be valid for writing an `AudioQueueRef`, and `in_format` must point to a
    /// valid `AudioStreamBasicDescription`.
    #[cfg(all(
        feature = "block2",
        feature = "dispatch2",
        feature = "objc2-core-audio-types"
    ))]
    pub fn AudioQueueNewOutputWithDispatchQueue(
        out_aq: NonNull<AudioQueueRef>,
        in_format: NonNull<AudioStreamBasicDescription>,
        in_flags: u32,
        in_callback_dispatch_queue: &DispatchQueue,
        in_callback_block: AudioQueueOutputCallbackBlock,
    ) -> OSStatus;
}
785
extern "C-unwind" {
    /// Creates a new audio queue for recording audio data.
    ///
    /// Outline of how to use the queue for input:
    ///
    /// - create input queue
    /// - allocate buffers
    /// - enqueue buffers (AudioQueueEnqueueBuffer, not with parameters, no packet descriptions)
    /// - the callback receives buffers and re-enqueues them
    ///
    ///
    /// Parameter `outAQ`: On return, this variable contains a pointer to the newly created recording audio queue
    /// object.
    ///
    /// Parameter `inFormat`: A pointer to a structure describing the format of the audio data to be recorded. For
    /// linear PCM, only interleaved formats are supported. Compressed formats are supported.
    ///
    /// Parameter `inFlags`: Reserved for future use. Pass 0.
    ///
    /// Parameter `inCallbackDispatchQueue`: The dispatch queue from which inCallbackBlock is to be called.
    ///
    /// Parameter `inCallbackBlock`: A pointer to a callback block to be called when the audio queue has finished filling
    /// a buffer.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `out_aq` must be valid for writing an `AudioQueueRef`, and `in_format` must point to a
    /// valid `AudioStreamBasicDescription`.
    #[cfg(all(
        feature = "block2",
        feature = "dispatch2",
        feature = "objc2-core-audio-types"
    ))]
    pub fn AudioQueueNewInputWithDispatchQueue(
        out_aq: NonNull<AudioQueueRef>,
        in_format: NonNull<AudioStreamBasicDescription>,
        in_flags: u32,
        in_callback_dispatch_queue: &DispatchQueue,
        in_callback_block: AudioQueueInputCallbackBlock,
    ) -> OSStatus;
}
824
825/// Disposes an existing audio queue.
826///
827/// Disposing of the audio queue also disposes of all its resources, including its buffers.
828///
829///
830/// Parameter `inAQ`: The audio queue you want to dispose of
831///
832/// Parameter `inImmediate`: If you pass true, the audio queue is disposed of immediately (that is, synchronously).
833/// If you pass false, disposal does not take place until all enqueued buffers are
834/// processed. Whether you call AudioQueueDispose synchronously or asynchronously, you can
835/// no longer interact with the queue, and the queue no longer invokes any callbacks to your
836/// application after the function returns.
837///
838/// Note that if AudioQueueDispose is called from a buffer completion callback or property
839/// listener, you may receive further callbacks afterwards.
840///
841/// Returns: An OSStatus result code.
842#[inline]
843pub unsafe extern "C-unwind" fn AudioQueueDispose(
844    in_aq: AudioQueueRef,
845    in_immediate: bool,
846) -> OSStatus {
847    extern "C-unwind" {
848        fn AudioQueueDispose(in_aq: AudioQueueRef, in_immediate: Boolean) -> OSStatus;
849    }
850    unsafe { AudioQueueDispose(in_aq, in_immediate as _) }
851}
852
extern "C-unwind" {
    /// Asks an audio queue to allocate a buffer.
    ///
    /// Once allocated, the pointer to the buffer and the buffer's size are fixed and cannot be
    /// changed. The mAudioDataByteSize field in the audio queue buffer structure,
    /// AudioQueueBuffer, is initially set to 0.
    ///
    ///
    /// Parameter `inAQ`: The audio queue you want to allocate a buffer.
    ///
    /// Parameter `inBufferByteSize`: The desired size of the new buffer, in bytes. An appropriate buffer size depends on the
    /// processing you will perform on the data as well as on the audio data format.
    ///
    /// Parameter `outBuffer`: On return, points to the newly created audio buffer. The mAudioDataByteSize field in the
    /// audio queue buffer structure, AudioQueueBuffer, is initially set to 0.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_buffer` must be valid for
    /// writing an `AudioQueueBufferRef`.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueAllocateBuffer(
        in_aq: AudioQueueRef,
        in_buffer_byte_size: u32,
        out_buffer: NonNull<AudioQueueBufferRef>,
    ) -> OSStatus;
}
877
extern "C-unwind" {
    /// Asks an audio queue to allocate a buffer with space for packet descriptions.
    ///
    /// Once allocated, the pointer to the buffer and the buffer's size are fixed and cannot be
    /// changed. The mAudioDataByteSize field in the audio queue buffer structure,
    /// AudioQueueBuffer, is initially set to 0.
    ///
    ///
    /// Parameter `inAQ`: The audio queue you want to allocate a buffer.
    ///
    /// Parameter `inBufferByteSize`: The desired size of the new buffer, in bytes. An appropriate buffer size depends on the
    /// processing you will perform on the data as well as on the audio data format.
    ///
    /// Parameter `inNumberPacketDescriptions`: The desired capacity of the packet description array in the new buffer.
    ///
    /// Parameter `outBuffer`: On return, points to the newly created audio buffer. The mAudioDataByteSize field in the
    /// audio queue buffer structure, AudioQueueBuffer, is initially set to 0.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_buffer` must be valid for
    /// writing an `AudioQueueBufferRef`.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueAllocateBufferWithPacketDescriptions(
        in_aq: AudioQueueRef,
        in_buffer_byte_size: u32,
        in_number_packet_descriptions: u32,
        out_buffer: NonNull<AudioQueueBufferRef>,
    ) -> OSStatus;
}
905
extern "C-unwind" {
    /// Disposes of an audio queue buffer.
    ///
    /// This function disposes of the buffer allocated by AudioQueueAllocateBuffer. Disposing of
    /// an audio queue also automatically disposes of any associated buffers and timeline
    /// objects. Call this function only if you want to dispose of a particular buffer while
    /// continuing to use an audio queue. You can dispose of buffers only when the associated
    /// queue is stopped (that is, not processing audio data).
    ///
    ///
    /// Parameter `inAQ`: The queue from which the buffer was allocated.
    ///
    /// Parameter `inBuffer`: The buffer to be disposed.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `in_buffer` must be a buffer
    /// allocated from that queue.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueFreeBuffer(in_aq: AudioQueueRef, in_buffer: AudioQueueBufferRef) -> OSStatus;
}
924
extern "C-unwind" {
    /// Assigns a buffer to an audio queue for recording or playback.
    ///
    /// If the buffer was allocated with AudioQueueAllocateBufferWithPacketDescriptions,
    /// the client should provide packet descriptions in the buffer's mPacketDescriptions
    /// and mPacketDescriptionCount fields rather than in inPacketDescs and
    /// inNumPacketDescs, which should be NULL and 0, respectively, in this case.
    ///
    /// For an input queue, pass 0 and NULL for inNumPacketDescs and inPacketDescs,
    /// respectively. Your callback will receive packet descriptions owned by the audio queue.
    ///
    ///
    /// Parameter `inAQ`: The audio queue you are assigning the buffer to.
    ///
    /// Parameter `inBuffer`: The buffer to queue (that is, to be recorded into or played from).
    ///
    /// Parameter `inNumPacketDescs`: The number of packet descriptions pointed to by the inPacketDescs pointer. Applicable
    /// only for output queues and required only for variable-bit-rate (VBR) audio formats. Pass
    /// 0 for input queues (no packet descriptions are required).
    ///
    /// Parameter `inPacketDescs`: An array of packet descriptions. Applicable only for output queues and required only for
    /// variable-bit-rate (VBR) audio formats. Pass NULL for input queues (no packet
    /// descriptions are required).
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `in_packet_descs` may be null;
    /// otherwise it must point to `in_num_packet_descs` valid packet descriptions.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueEnqueueBuffer(
        in_aq: AudioQueueRef,
        in_buffer: AudioQueueBufferRef,
        in_num_packet_descs: u32,
        in_packet_descs: *const AudioStreamPacketDescription,
    ) -> OSStatus;
}
958
extern "C-unwind" {
    /// Assigns a buffer to an audio queue for playback, providing parameters
    /// and start time information.
    ///
    /// You can exert some control of the buffer queue by using this function. You can assign
    /// audio queue settings that are in effect carried by an audio queue buffer as you enqueue
    /// it. Hence, these changes only take effect when an audio queue buffer begins playing.
    ///
    /// This function queues a buffer for playback only, not for recording. Audio queues for
    /// recording have no parameters, do not support variable-bit-rate (VBR) formats (which
    /// might require trimming), and have a different way to handle timing. When queued for
    /// playback, the buffer must contain the audio data to be played back. See
    /// AudioQueueEnqueueBuffer for details on queuing a buffer for recording.
    ///
    /// If the buffer was allocated with AudioQueueAllocateBufferWithPacketDescriptions,
    /// the client should provide packet descriptions in the buffer's mPacketDescriptions
    /// and mPacketDescriptionCount fields rather than in inPacketDescs and
    /// inNumPacketDescs, which should be NULL and 0, respectively, in this case.
    ///
    ///
    /// Parameter `inAQ`: The audio queue associated with the buffer.
    ///
    /// Parameter `inBuffer`: The buffer to be played from.
    ///
    /// Parameter `inNumPacketDescs`: The number of packet descriptions pointed to by the inPacketDescs parameter. Required
    /// only for variable-bit-rate (VBR) audio formats. Pass 0 if no packet descriptions are
    /// required.
    ///
    /// Parameter `inPacketDescs`: A pointer to an array of audio stream packet descriptions. Required only for VBR audio
    /// formats. Pass NULL if no packet descriptions are required.
    ///
    /// Parameter `inTrimFramesAtStart`: The number of priming frames to skip at the start of the buffer.
    ///
    /// Parameter `inTrimFramesAtEnd`: The number of frames to skip at the end of the buffer.
    ///
    /// Parameter `inNumParamValues`: The number of parameter values pointed to by the inParamValues parameter.
    ///
    /// Parameter `inParamValues`: An array of parameter values. (In macOS v10.5, there is only one parameter,
    /// kAudioQueueParam_Volume.) These values are set before buffer playback and cannot be
    /// changed while the buffer is playing. How accurately changes in parameters can be
    /// scheduled depends on the size of the buffer. If there are no parameters to set
    /// (inNumParamValues = 0), pass NULL.
    ///
    /// Parameter `inStartTime`: A pointer to a structure containing the desired start time for playing the buffer. If
    /// you specify the time using the mSampleTime field of the AudioTimeStamp structure, the
    /// sample time is relative to the time the queue started. If you pass NULL for the start
    /// time, the buffer starts immediately after the previously queued buffer, or as soon as
    /// possible if no buffers are queued ahead of it. Buffers are played in the order they are
    /// queued. If multiple buffers are queued, their times must be in ascending order or NULL;
    /// otherwise, an error occurs. The start time indicates when the actual audio data in the
    /// buffer is to be played (that is, the trim frames are not counted).
    ///
    /// Note: When specifying a start time for a buffer, if the buffer is not the first enqueued
    /// since AudioQueueStop or AudioQueueReset, it is normally necessary to call AudioQueueFlush
    /// before AudioQueueEnqueueBufferWithParameters.
    ///
    /// Parameter `outActualStartTime`: On return, points to an AudioTimeStamp structure indicating when the buffer will
    /// actually play.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `in_packet_descs`, `in_param_values`,
    /// `in_start_time` and `out_actual_start_time` may each be null; otherwise they must be
    /// valid for the accesses described above.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueEnqueueBufferWithParameters(
        in_aq: AudioQueueRef,
        in_buffer: AudioQueueBufferRef,
        in_num_packet_descs: u32,
        in_packet_descs: *const AudioStreamPacketDescription,
        in_trim_frames_at_start: u32,
        in_trim_frames_at_end: u32,
        in_num_param_values: u32,
        in_param_values: *const AudioQueueParameterEvent,
        in_start_time: *const AudioTimeStamp,
        out_actual_start_time: *mut AudioTimeStamp,
    ) -> OSStatus;
}
1033
extern "C-unwind" {
    /// Begins playing or recording audio.
    ///
    /// If the audio hardware is not already running, this function starts it.
    ///
    ///
    /// Parameter `inAQ`: The audio queue to start.
    ///
    /// Parameter `inStartTime`: A pointer to the time at which the audio queue should start. If you specify the time
    /// using the mSampleTime field of the AudioTimeStamp structure, the sample time is
    /// referenced to the sample frame timeline of the associated audio device. May be NULL.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `in_start_time` may be null; otherwise
    /// it must point to a valid `AudioTimeStamp`.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueStart(in_aq: AudioQueueRef, in_start_time: *const AudioTimeStamp) -> OSStatus;
}
1050
extern "C-unwind" {
    /// Begins decoding buffers in preparation for playback.
    ///
    /// This function begins decoding buffers in preparation for playback. It returns when at
    /// least the number of audio sample frames are decoded and ready to play or when all
    /// enqueued buffers have been completely decoded. To ensure that a buffer has been decoded
    /// and is completely ready for playback, before playback:
    ///
    /// 1.  Call AudioQueueEnqueueBuffer.
    /// 2.  Call AudioQueuePrime, which waits if you pass 0 to have a default number of
    /// frames decoded.
    /// 3.  Call AudioQueueStart.
    ///
    /// Calls to AudioQueuePrime following AudioQueueStart/AudioQueuePrime, and before
    /// AudioQueueReset/AudioQueueStop, will have no useful effect. In this situation,
    /// outNumberOfFramesPrepared will not have a useful return value.
    ///
    ///
    /// Parameter `inAQ`: The audio queue to be primed.
    ///
    /// Parameter `inNumberOfFramesToPrepare`: The number of frames to decode before returning. Pass 0 to decode all enqueued buffers.
    ///
    /// Parameter `outNumberOfFramesPrepared`: If not NULL, on return, a pointer to the number of frames actually decoded and prepared
    /// for playback.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `out_number_of_frames_prepared` may be
    /// null; otherwise it must be valid for writing a `u32`.
    pub fn AudioQueuePrime(
        in_aq: AudioQueueRef,
        in_number_of_frames_to_prepare: u32,
        out_number_of_frames_prepared: *mut u32,
    ) -> OSStatus;
}
1083
1084/// Stops playing or recording audio.
1085///
1086/// This function resets the audio queue and stops the audio hardware associated with the
1087/// queue if it is not in use by other audio services. Synchronous stops occur immediately,
1088/// regardless of previously buffered audio data. Asynchronous stops occur after all queued
1089/// buffers have been played or recorded.
1090///
1091///
1092/// Parameter `inAQ`: The audio queue to stop.
1093///
1094/// Parameter `inImmediate`: If you pass true, the stop request occurs immediately (that is, synchronously), and the
1095/// function returns when the audio queue has stopped. Buffer callbacks are invoked during
1096/// the stopping. If you pass false, the function returns immediately, but the queue does
1097/// not stop until all its queued buffers are played or filled (that is, the stop occurs
1098/// asynchronously). Buffer callbacks are invoked as necessary until the queue actually
1099/// stops. Also, a playback audio queue callback calls this function when there is no more
1100/// audio to play.
1101///
1102/// Note that when stopping immediately, all pending buffer callbacks are normally invoked
1103/// during the process of stopping. But if the calling thread is responding to a buffer
1104/// callback, then it is possible for additional buffer callbacks to occur after
1105/// AudioQueueStop returns.
1106///
1107/// Returns: An OSStatus result code.
1108#[inline]
1109pub unsafe extern "C-unwind" fn AudioQueueStop(
1110    in_aq: AudioQueueRef,
1111    in_immediate: bool,
1112) -> OSStatus {
1113    extern "C-unwind" {
1114        fn AudioQueueStop(in_aq: AudioQueueRef, in_immediate: Boolean) -> OSStatus;
1115    }
1116    unsafe { AudioQueueStop(in_aq, in_immediate as _) }
1117}
1118
extern "C-unwind" {
    /// Pauses audio playback or recording.
    ///
    /// Pausing the queue does not affect buffers or reset the audio queue. To resume playback
    /// or recording using the audio queue, call AudioQueueStart.
    ///
    ///
    /// Parameter `inAQ`: The queue to be paused.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference.
    pub fn AudioQueuePause(in_aq: AudioQueueRef) -> OSStatus;
}
1131
extern "C-unwind" {
    /// Resets the audio queue's decoder state.
    ///
    /// After all queued buffers have been played, the function cleans up all decoder state
    /// information. You must call this function following a sequence of buffers of encoded
    /// audio; otherwise, some of the audio might not play in the next set of queued buffers.
    /// The only time it is not necessary to call AudioQueueFlush is following AudioQueueStop
    /// with inImmediate=false. (This action internally calls AudioQueueFlush.)
    ///
    /// Also, you might wish to call this function before calling AudioQueueStop depending on
    /// whether you want to stop immediately regardless of what has played or whether you want
    /// to ensure that all buffered data and all data that is in the middle of processing gets
    /// recorded or played before stopping.
    ///
    ///
    /// Parameter `inAQ`: The audio queue to be flushed.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference.
    pub fn AudioQueueFlush(in_aq: AudioQueueRef) -> OSStatus;
}
1153
extern "C-unwind" {
    /// Resets an audio queue.
    ///
    /// This function immediately resets an audio queue, flushes any queued buffer, removes all
    /// buffers from previously scheduled use, and resets any decoder and digital signal
    /// processing (DSP) state information. It also invokes callbacks for any flushed buffers.
    /// If you queue any buffers after calling this function, processing does not occur until
    /// the decoder and DSP state information is reset. Hence, a discontinuity (that is, a
    /// "glitch") might occur.
    ///
    /// Note that when resetting, all pending buffer callbacks are normally invoked
    /// during the process of resetting. But if the calling thread is responding to a buffer
    /// callback, then it is possible for additional buffer callbacks to occur after
    /// AudioQueueReset returns.
    ///
    ///
    /// Parameter `inAQ`: The audio queue to reset.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference.
    pub fn AudioQueueReset(in_aq: AudioQueueRef) -> OSStatus;
}
1176
extern "C-unwind" {
    /// Obtains an audio queue parameter value.
    ///
    /// You can access the current parameter values for an audio queue at any time with this
    /// function.
    ///
    ///
    /// Parameter `inAQ`: The audio queue whose parameter value you want to obtain.
    ///
    /// Parameter `inParamID`: The ID of the parameter you want to obtain. In macOS v10.5, audio queues have one
    /// parameter available: kAudioQueueParam_Volume, which controls the queue's playback
    /// volume.
    ///
    /// Parameter `outValue`: On return, points to the current value of the specified parameter.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_value` must be valid for
    /// writing an `AudioQueueParameterValue`.
    pub fn AudioQueueGetParameter(
        in_aq: AudioQueueRef,
        in_param_id: AudioQueueParameterID,
        out_value: NonNull<AudioQueueParameterValue>,
    ) -> OSStatus;
}
1199
extern "C-unwind" {
    /// Sets an audio queue parameter value.
    ///
    /// Parameter `inAQ`: The audio queue whose parameter value you want to set.
    ///
    /// Parameter `inParamID`: The ID of the parameter you want to set.
    ///
    /// Parameter `inValue`: The parameter value to set.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference.
    pub fn AudioQueueSetParameter(
        in_aq: AudioQueueRef,
        in_param_id: AudioQueueParameterID,
        in_value: AudioQueueParameterValue,
    ) -> OSStatus;
}
1216
extern "C-unwind" {
    /// Obtains an audio queue property value.
    ///
    /// Parameter `inAQ`: The audio queue whose property value you want to obtain.
    ///
    /// Parameter `inID`: The ID of the property you want to obtain. See "Audio Queue Property IDs."
    ///
    /// Parameter `outData`: On return, points to the desired property value.
    ///
    /// Parameter `ioDataSize`: A pointer to the size of the property data. On input, points to the maximum bytes of
    /// space the caller expects to receive. On return, points to the actual data size.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `out_data` must point to a
    /// writable buffer of at least `*io_data_size` bytes, and `io_data_size` must
    /// point to a writable `u32`.
    pub fn AudioQueueGetProperty(
        in_aq: AudioQueueRef,
        in_id: AudioQueuePropertyID,
        out_data: NonNull<c_void>,
        io_data_size: NonNull<u32>,
    ) -> OSStatus;
}
1237
extern "C-unwind" {
    /// Sets an audio queue property value.
    ///
    /// Parameter `inAQ`: The audio queue whose property value you want to set.
    ///
    /// Parameter `inID`: The ID of the property you want to set. See "Audio Queue Property IDs" for the various
    /// audio queue properties.
    ///
    /// Parameter `inData`: A pointer to the property value to set.
    ///
    /// Parameter `inDataSize`: The size of the property data.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `in_data` must point to at
    /// least `in_data_size` readable bytes holding a value of the type the property
    /// expects.
    pub fn AudioQueueSetProperty(
        in_aq: AudioQueueRef,
        in_id: AudioQueuePropertyID,
        in_data: NonNull<c_void>,
        in_data_size: u32,
    ) -> OSStatus;
}
1258
extern "C-unwind" {
    /// Obtains the size of an audio queue property.
    ///
    /// Parameter `inAQ`: The audio queue containing the property value whose size you want to obtain.
    ///
    /// Parameter `inID`: The ID of the property value whose size you want to obtain. See "Audio Queue Property
    /// IDs" for possible values.
    ///
    /// Parameter `outDataSize`: On return, points to the size of the specified property value.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_data_size` must point
    /// to a writable `u32`.
    pub fn AudioQueueGetPropertySize(
        in_aq: AudioQueueRef,
        in_id: AudioQueuePropertyID,
        out_data_size: NonNull<u32>,
    ) -> OSStatus;
}
1276
extern "C-unwind" {
    /// Adds a listener callback for a property.
    ///
    /// This callback is used to act upon a change in an audio queue property such as
    /// kAudioQueueProperty_IsRunning. For instance, if your application has a user interface
    /// with a Play/Stop button, and kAudioQueueProperty_IsRunning changes, you need to update
    /// your button.
    ///
    ///
    /// Parameter `inAQ`: The audio queue that owns the property you want to assign the listener callback to.
    ///
    /// Parameter `inID`: The ID of the property to which you want to assign a listener callback. See "Audio Queue Property IDs".
    ///
    /// Parameter `inProc`: The listener callback to be called when the property value changes.
    ///
    /// Parameter `inUserData`: A value to be passed to the listener callback when it is called.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `in_user_data` must remain
    /// valid for as long as the listener can be invoked.
    pub fn AudioQueueAddPropertyListener(
        in_aq: AudioQueueRef,
        in_id: AudioQueuePropertyID,
        in_proc: AudioQueuePropertyListenerProc,
        in_user_data: *mut c_void,
    ) -> OSStatus;
}
1302
extern "C-unwind" {
    /// Removes a listener callback for a property.
    ///
    /// Parameter `inAQ`: The audio queue that owns the property from which you want to remove a listener.
    ///
    /// Parameter `inID`: The ID of the property from which you want to remove a listener.
    ///
    /// Parameter `inProc`: The listener being removed.
    ///
    /// Parameter `inUserData`: The same inUserData value that was previously passed to AudioQueueAddPropertyListener.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference; `in_proc` and `in_user_data`
    /// must match a previously registered listener.
    pub fn AudioQueueRemovePropertyListener(
        in_aq: AudioQueueRef,
        in_id: AudioQueuePropertyID,
        in_proc: AudioQueuePropertyListenerProc,
        in_user_data: *mut c_void,
    ) -> OSStatus;
}
1322
extern "C-unwind" {
    /// Creates a timeline object.
    ///
    /// You need to instantiate a timeline object if you want to know about any timeline
    /// discontinuities. See AudioQueueGetCurrentTime for more details.
    ///
    ///
    /// Parameter `inAQ`: The audio queue to associate with the new timeline object.
    ///
    /// Parameter `outTimeline`: On return, points to the newly created timeline object.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_timeline` must point
    /// to a writable `AudioQueueTimelineRef`.
    pub fn AudioQueueCreateTimeline(
        in_aq: AudioQueueRef,
        out_timeline: NonNull<AudioQueueTimelineRef>,
    ) -> OSStatus;
}
1340
extern "C-unwind" {
    /// Disposes of a timeline object.
    ///
    /// Disposing of an audio queue automatically disposes of any associated timeline objects.
    /// Call this function only if you want to dispose of a timeline object and not the audio
    /// queue associated with it.
    ///
    ///
    /// Parameter `inAQ`: The audio queue associated with the timeline object you want to dispose of.
    ///
    /// Parameter `inTimeline`: The timeline object to dispose of.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` and `in_timeline` must be valid references; `in_timeline` must not be
    /// used after this call returns.
    pub fn AudioQueueDisposeTimeline(
        in_aq: AudioQueueRef,
        in_timeline: AudioQueueTimelineRef,
    ) -> OSStatus;
}
1359
extern "C-unwind" {
    /// Obtains the current audio queue time.
    ///
    /// You must specify a timeline object if you want to be notified about any timeline
    /// discontinuities in the outTimelineDiscontinuity parameter. If you don't care about
    /// discontinuities, pass NULL in the inTimeLine and outTimelineDiscontinuity parameters.
    ///
    ///
    /// Parameter `inAQ`: The audio queue whose current time you want to obtain.
    ///
    /// Parameter `inTimeline`: The audio queue timeline object to which any timeline discontinuities are reported. May
    /// be NULL.
    ///
    /// Parameter `outTimeStamp`: On return, points to an audio timestamp structure containing the current audio queue
    /// time. The mSampleTime field is in terms of the audio queue's sample rate, and relative
    /// to the time at which the queue has started or will start.
    ///
    /// Parameter `outTimelineDiscontinuity`: Can be NULL. On return, only set to true or false if the inTimeLine parameter is not
    /// NULL. Set to true if a discontinuity has occurred in the sample timeline of the audio
    /// queue. For instance, the device's sample rate changed and a gap occurred in playback or
    /// recording, or the audio queue was unable to prepare and playback in time because it was
    /// late.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `out_time_stamp` and
    /// `out_timeline_discontinuity` must each be either null or point to writable
    /// memory of the corresponding type.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueGetCurrentTime(
        in_aq: AudioQueueRef,
        in_timeline: AudioQueueTimelineRef,
        out_time_stamp: *mut AudioTimeStamp,
        out_timeline_discontinuity: *mut Boolean,
    ) -> OSStatus;
}
1392
extern "C-unwind" {
    /// Obtains the current time of the audio device associated with an audio queue.
    ///
    /// If the audio device associated with the audio queue is not running, the only valid field
    /// in the audio timestamp structure is mHostTime. This result differentiates the action of
    /// this function from that of the AudioDeviceGetCurrentTime function, (declared in
    /// AudioHardware.h) which returns an error if the audio device is not running.
    ///
    ///
    /// Parameter `inAQ`: The audio queue whose audio device is to be queried.
    ///
    /// Parameter `outTimeStamp`: A pointer to a structure that, on return, contains the current time of the audio device
    /// associated with the audio queue.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `out_time_stamp` must point
    /// to a writable `AudioTimeStamp`.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueDeviceGetCurrentTime(
        in_aq: AudioQueueRef,
        out_time_stamp: NonNull<AudioTimeStamp>,
    ) -> OSStatus;
}
1414
extern "C-unwind" {
    /// Converts the time in the time base of the associated audio device from one
    /// representation to another.
    ///
    /// This function converts from one time representation to another (for example, from sample
    /// time to host time or vice versa):
    ///
    /// - Sample time is the absolute sample frame time. Sample numbers are the count of the samples
    /// on the audio device.
    /// - Host time is the time base of the host machine such as the time of the bus clock on the CPU.
    ///
    /// The mSampleTime field in the AudioTimestamp structure (described in Core Audio Data
    /// Types Reference) is always in device time, not in audio queue time. Audio queue time is
    /// relative to the audio queue's start time. The associated audio device has to be running
    /// for the AudioQueueDeviceTranslateTime function to provide a result.
    ///
    ///
    /// Parameter `inAQ`: The queue whose audio device is to perform the requested time translation.
    ///
    /// Parameter `inTime`: A pointer to a structure containing the time to be translated.
    ///
    /// Parameter `outTime`: On entry, mFlags indicate the desired translations. On exit, mFlags indicates which
    /// of the requested translated fields were successfully populated.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference; `in_time` must point to a
    /// readable `AudioTimeStamp` and `out_time` to a writable one.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueDeviceTranslateTime(
        in_aq: AudioQueueRef,
        in_time: NonNull<AudioTimeStamp>,
        out_time: NonNull<AudioTimeStamp>,
    ) -> OSStatus;
}
1447
extern "C-unwind" {
    /// Obtains an audio device's start time that is closest to a requested start time.
    ///
    /// Parameter `inAQ`: The audio queue whose device's nearest start time you want to obtain.
    ///
    /// Parameter `ioRequestedStartTime`: On entry, points to the requested start time. On return, points to the actual start time.
    ///
    /// Parameter `inFlags`: Reserved for future use. Pass 0.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, and `io_requested_start_time`
    /// must point to a readable and writable `AudioTimeStamp`.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueDeviceGetNearestStartTime(
        in_aq: AudioQueueRef,
        io_requested_start_time: NonNull<AudioTimeStamp>,
        in_flags: u32,
    ) -> OSStatus;
}
1465
extern "C-unwind" {
    /// Specify an audio format to which the queue will perform subsequent offline rendering,
    /// or disable offline rendering.
    ///
    /// An output queue's audio playback can be redirected for capture to an audio file,
    /// to support an export function, for example. AudioQueueSetOfflineRenderFormat switches
    /// a queue between normal and offline rendering modes.
    ///
    ///
    /// Parameter `inAQ`: The output queue whose offline rendering mode is to be changed.
    ///
    /// Parameter `inFormat`: The desired format for offline rendering. Pass NULL to disable offline rendering and return the
    /// queue to normal output to an audio device. This format must be linear PCM and (if not mono)
    /// interleaved.
    ///
    /// Parameter `inLayout`: The desired channel layout for offline rendering; also NULL when disabling offline rendering.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference. `in_format` and `in_layout`
    /// must each be either null or point to a valid, readable structure of the
    /// corresponding type.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueSetOfflineRenderFormat(
        in_aq: AudioQueueRef,
        in_format: *const AudioStreamBasicDescription,
        in_layout: *const AudioChannelLayout,
    ) -> OSStatus;
}
1491
extern "C-unwind" {
    /// Obtain a buffer of audio output from a queue in offline rendering mode.
    ///
    /// Parameter `inAQ`: The output queue from which to obtain output.
    ///
    /// Parameter `inTimestamp`: The point in time corresponding to the beginning of the output buffer. Only mSampleTime
    /// is used. mFlags must include kAudioTimeStampSampleTimeValid.
    ///
    /// Parameter `ioBuffer`: The buffer into which the queue will render.
    ///
    /// Parameter `inNumberFrames`: The number of frames of audio to render. Note that fewer frames than requested may be returned.
    /// This can happen if insufficient data was enqueued.
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid audio queue reference, `in_timestamp` must point to a
    /// readable `AudioTimeStamp`, and `io_buffer` must be a valid buffer allocated
    /// for this queue.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueOfflineRender(
        in_aq: AudioQueueRef,
        in_timestamp: NonNull<AudioTimeStamp>,
        io_buffer: AudioQueueBufferRef,
        in_number_frames: u32,
    ) -> OSStatus;
}
1514
extern "C-unwind" {
    /// Create a new processing tap
    ///
    /// This function creates a processing tap on a given audio queue. A
    /// processing tap can only be established (or removed) on an audio queue that is
    /// stopped (paused is not sufficient). The processing tap will then be used to
    /// process either decoded data in the case of an output queue, or input data
    /// (before it is encoded) in the case of an input queue.
    ///
    /// The processing is performed on audio either before or after any effects or other
    /// processing (varispeed, etc) is applied by the audio queue, depending on inFlags.
    ///
    ///
    /// Parameter `inAQ`: The audio queue from which to create the processing tap
    ///
    /// Parameter `inCallback`: A callback which the queue will call to process the audio
    ///
    /// Parameter `inClientData`: Client data provided to the callback
    ///
    /// Parameter `inFlags`: Flags that are used to control aspects of the processing tap.
    /// Valid flags are:
    /// - kAudioQueueProcessingTap_PreEffects: processing is done before any
    /// further effects are applied by the audio queue to the audio
    /// - kAudioQueueProcessingTap_PostEffects: processing is done after all
    /// processing is done, including that of other taps.
    /// - kAudioQueueProcessingTap_Siphon
    ///
    /// Parameter `outMaxFrames`: The maximum number of sample frames that can be requested of a processing
    /// tap at any one time. Typically this will be approximately 50 msec of audio
    /// (2048 samples at 44.1 kHz)
    ///
    /// Parameter `outProcessingFormat`: The format in which the client will receive the audio data to be processed.
    /// This will always be the same sample rate as the client format and usually
    /// the same number of channels as the client format of the audio queue. (NOTE:
    /// the number of channels may be different in some cases if the client format
    /// has some channel count restrictions, for instance the client provides 5.1
    /// AAC, but the decoder can only produce stereo). The channel order, if the
    /// same as the client format, will be the same as the client channel order. If
    /// the channel count is changed, it will be to either 1 (mono) or 2 (stereo, in
    /// which case the first channel is left, the second right).
    ///
    /// If the data is not in a convenient format for the client to process in, then
    /// the client should convert the data to and from that format. This is the most
    /// efficient mechanism to use (as the audio queue can choose a format that is
    /// most efficient from its playback (or recording) requirement).
    ///
    /// Parameter `outAQTap`: The processing tap object.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq` must be a valid, stopped audio queue. `in_client_data` must remain
    /// valid for as long as the callback can be invoked. The three out-pointers must
    /// point to writable memory of the corresponding types.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueProcessingTapNew(
        in_aq: AudioQueueRef,
        in_callback: AudioQueueProcessingTapCallback,
        in_client_data: *mut c_void,
        in_flags: AudioQueueProcessingTapFlags,
        out_max_frames: NonNull<u32>,
        out_processing_format: NonNull<AudioStreamBasicDescription>,
        out_aq_tap: NonNull<AudioQueueProcessingTapRef>,
    ) -> OSStatus;
}
1578
extern "C-unwind" {
    /// Dispose a processing tap object
    ///
    /// As with AudioQueueProcessingTapNew(), this call can only be made on an
    /// audio queue that is stopped (paused is not sufficient)
    ///
    ///
    /// Parameter `inAQTap`: The processing tap to dispose.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// `in_aq_tap` must be a valid processing tap reference and must not be used
    /// after this call returns.
    pub fn AudioQueueProcessingTapDispose(in_aq_tap: AudioQueueProcessingTapRef) -> OSStatus;
}
1592
extern "C-unwind" {
    /// Used by a processing tap to retrieve source audio.
    ///
    /// This function may only be called from the processing tap's callback.
    ///
    ///
    /// Parameter `inAQTap`: the processing tap
    ///
    /// Parameter `inNumberFrames`: the number of frames the processing tap requires for its processing
    ///
    /// Parameter `ioTimeStamp`: On an input audio queue, the timestamp is returned from this function.
    /// On an output audio queue, the caller must provide a continuous timestamp.
    ///
    /// Parameter `outFlags`: flags to describe state about the input requested, e.g.
    /// discontinuity/complete
    ///
    /// Parameter `outNumberFrames`: the number of source frames that have been provided by the parent audio
    /// queue. This can be less than the number of requested frames specified in
    /// inNumberFrames
    ///
    /// Parameter `ioData`: the audio buffer list which will contain the source data. The audio queue owns
    /// the buffer pointers if NULL pointers were provided (recommended). In this case
    /// the source buffers are only valid for the duration of the processing tap
    /// callback. If the buffer pointers are non-NULL, then they must be big enough to
    /// hold inNumberFrames, and the audio queue will copy its source data into those
    /// buffers.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// Must be called only from within the processing tap's callback. All pointer
    /// parameters must point to valid, writable memory of the corresponding types,
    /// and `io_data`'s buffer pointers must satisfy the size contract described
    /// above.
    #[cfg(feature = "objc2-core-audio-types")]
    pub fn AudioQueueProcessingTapGetSourceAudio(
        in_aq_tap: AudioQueueProcessingTapRef,
        in_number_frames: u32,
        io_time_stamp: NonNull<AudioTimeStamp>,
        out_flags: NonNull<AudioQueueProcessingTapFlags>,
        out_number_frames: NonNull<u32>,
        io_data: NonNull<AudioBufferList>,
    ) -> OSStatus;
}
1632
extern "C-unwind" {
    /// Used by a processing tap to retrieve the queue's current time.
    ///
    /// This function may only be called from the processing tap's callback, and only
    /// for audio output queues. It must be called after calling
    /// AudioQueueProcessingTapGetSourceAudio().
    ///
    ///
    /// Parameter `inAQTap`: the processing tap
    ///
    /// Parameter `outQueueSampleTime`: the current sample time of the audio queue. This will appear to be stationary
    /// if the queue is paused.
    ///
    /// Parameter `outQueueFrameCount`: the number of sample frames of queue time corresponding to the current chunk of
    /// audio being processed by the tap. This will differ from the frame count passed
    /// to the tap if the queue's playback rate is currently other than 1.0, due to the
    /// use of time compression/expansion. The frame count can also be 0 if the queue is
    /// paused.
    ///
    ///
    /// Returns: An OSStatus result code.
    ///
    /// # Safety
    ///
    /// Must be called only from within an output queue's processing tap callback,
    /// after `AudioQueueProcessingTapGetSourceAudio`. Both out-pointers must point to
    /// writable memory of the corresponding types.
    pub fn AudioQueueProcessingTapGetQueueTime(
        in_aq_tap: AudioQueueProcessingTapRef,
        out_queue_sample_time: NonNull<f64>,
        out_queue_frame_count: NonNull<u32>,
    ) -> OSStatus;
}