objc2-avf-audio 0.3.2

Bindings to the AVFAudio framework
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-audio-toolbox")]
#[cfg(not(target_os = "watchos"))]
use objc2_audio_toolbox::*;
#[cfg(feature = "objc2-core-audio-types")]
use objc2_core_audio_types::*;
use objc2_foundation::*;

use crate::*;

/// A block which will be called by AVAudioEngine's render call when operating in the manual
/// rendering mode, to get input data as needed.
///
/// Parameter `inNumberOfFrames`: The number of frames required to complete the request. You may supply either this many
/// frames or none.
///
/// Returns: An AudioBufferList containing data to be rendered, or null if no data is available.
/// The data in the returned buffer must not be cleared or re-filled until the input block is
/// called again or the rendering has finished.
/// The format of the returned buffer must match the format specified when registering the
/// block.
///
/// If you are out of data and return null or fewer than the requested number of frames, that
/// data will not be used for rendering. The engine will try to render from other active
/// sources in the processing graph, and will inform about the input node's status in the error
/// returned from its render call.
///
/// Note that when the engine is configured to operate in
/// `AVAudioEngineManualRenderingModeRealtime`, this block will be called from a realtime
/// context. Care should be taken not to make any blocking call (e.g. calling libdispatch,
/// blocking on a mutex, allocating memory etc.) which may cause an overload at the lower layers.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionodeinputblock?language=objc)
#[cfg(all(
    feature = "AVAudioTypes",
    feature = "block2",
    feature = "objc2-core-audio-types"
))]
pub type AVAudioIONodeInputBlock =
    *mut block2::DynBlock<dyn Fn(AVAudioFrameCount) -> *const AudioBufferList>;
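
/// Illustrative usage sketch (not part of the generated bindings): one way to build a block of
/// the shape `AVAudioIONodeInputBlock` expects, using `block2::RcBlock`. The resulting block can
/// be handed (as a raw pointer) to `setManualRenderingInputPCMFormat:inputBlock:` further down in
/// this file. The `AudioBufferList` pointer is assumed to be allocated, filled and kept valid by
/// the caller between render calls; the helper name `make_input_block` is hypothetical.
#[cfg(all(
    feature = "AVAudioTypes",
    feature = "block2",
    feature = "objc2-core-audio-types"
))]
pub fn make_input_block(
    buffer_list: *const AudioBufferList,
) -> block2::RcBlock<dyn Fn(AVAudioFrameCount) -> *const AudioBufferList> {
    // Hand the same buffer back on every render call; a real implementation would refill it
    // with fresh samples, or return null when no data is available.
    block2::RcBlock::new(move |_requested_frames: AVAudioFrameCount| buffer_list)
}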

/// Types of speech activity events.
///
/// - `Started`: speech activity has started.
/// - `Ended`: speech activity has ended.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingspeechactivityevent?language=objc)
// NS_ENUM
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingSpeechActivityEvent(pub NSInteger);
impl AVAudioVoiceProcessingSpeechActivityEvent {
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityStarted")]
    pub const Started: Self = Self(0);
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityEnded")]
    pub const Ended: Self = Self(1);
}

unsafe impl Encode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING: Encoding = NSInteger::ENCODING;
}

unsafe impl RefEncode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}

/// Ducking level applied to other (i.e. non-voice) audio by the AVAudio voice processing AU.
///
/// - `Default`: the default ducking level applied to other audio for typical voice chat.
/// - `Min`: minimum ducking of other audio.
/// - `Mid`: medium ducking of other audio.
/// - `Max`: maximum ducking of other audio.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckinglevel?language=objc)
// NS_ENUM
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingLevel(pub NSInteger);
impl AVAudioVoiceProcessingOtherAudioDuckingLevel {
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelDefault")]
    pub const Default: Self = Self(0);
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMin")]
    pub const Min: Self = Self(10);
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMid")]
    pub const Mid: Self = Self(20);
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMax")]
    pub const Max: Self = Self(30);
}

unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING: Encoding = NSInteger::ENCODING;
}

unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}

/// The configuration for ducking other (i.e. non-voice) audio.
///
/// - `enableAdvancedDucking`: enables advanced ducking, which ducks other audio based on the
///   presence of voice activity from local and/or remote chat participants.
/// - `duckingLevel`: the ducking level applied to other audio.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckingconfiguration?language=objc)
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    pub enableAdvancedDucking: Bool,
    pub duckingLevel: AVAudioVoiceProcessingOtherAudioDuckingLevel,
}

unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING: Encoding = Encoding::Struct(
        "AVAudioVoiceProcessingOtherAudioDuckingConfiguration",
        &[
            <Bool>::ENCODING,
            <AVAudioVoiceProcessingOtherAudioDuckingLevel>::ENCODING,
        ],
    );
}

unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
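
/// Illustrative sketch (not part of the generated bindings): a ducking configuration that turns
/// on advanced ducking and applies the maximum level. It can be assigned via `AVAudioInputNode`'s
/// `setVoiceProcessingOtherAudioDuckingConfiguration` further down in this file; the constant
/// name is hypothetical.
pub const AGGRESSIVE_DUCKING: AVAudioVoiceProcessingOtherAudioDuckingConfiguration =
    AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
        // Duck other audio based on detected voice activity rather than unconditionally.
        enableAdvancedDucking: Bool::YES,
        duckingLevel: AVAudioVoiceProcessingOtherAudioDuckingLevel::Max,
    };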

extern_class!(
    /// Base class for a node that performs audio input or output in the engine.
    ///
    /// When the engine is configured to render to/from an audio device, on macOS, AVAudioInputNode
    /// and AVAudioOutputNode communicate with the system's default input and output devices.
    /// On iOS, they communicate with the devices appropriate to the app's AVAudioSession category
    /// and other configuration, also considering the user's actions such as
    /// connecting/disconnecting external devices.
    ///
    /// In the manual rendering mode, the AVAudioInputNode and AVAudioOutputNode perform the input
    /// and output in the engine, in response to the client's requests.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionode?language=objc)
    #[unsafe(super(AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioIONode;
);

#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioIONode {}
);

#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        /// The presentation or hardware latency, applicable when the engine is rendering to/from an
        /// audio device.
        ///
        /// This corresponds to kAudioDevicePropertyLatency and kAudioStreamPropertyLatency.
        /// See `<CoreAudio/AudioHardwareBase.h>`.
        #[unsafe(method(presentationLatency))]
        #[unsafe(method_family = none)]
        pub unsafe fn presentationLatency(&self) -> NSTimeInterval;

        #[cfg(feature = "objc2-audio-toolbox")]
        #[cfg(not(target_os = "watchos"))]
        /// The node's underlying AudioUnit, if any.
        ///
        /// This is only necessary for certain advanced usages.
        #[unsafe(method(audioUnit))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioUnit(&self) -> AudioUnit;

        /// Indicates whether voice processing is enabled.
        #[unsafe(method(isVoiceProcessingEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingEnabled(&self) -> bool;

        /// Enable or disable voice processing on the IO node.
        ///
        /// Parameter `enabled`: Whether voice processing is to be enabled.
        ///
        /// Parameter `outError`: On exit, if the IO node cannot enable or disable voice processing, a description of the error.
        ///
        /// Returns: YES for success
        ///
        /// If enabled, the input node does signal processing on the incoming audio (removing from
        /// the incoming audio any audio that is played from the device at a given time).
        /// Disabling this mode on either of the IO nodes automatically disables it on the other IO node.
        ///
        /// Voice processing requires both input and output nodes to be in the voice processing mode.
        /// Enabling this mode on either of the IO nodes automatically enables it on the other IO node.
        /// Voice processing is only supported when the engine is rendering to the audio device and not
        /// in the manual rendering mode.
        /// Voice processing can only be enabled or disabled when the engine is in a stopped state.
        ///
        /// The output format of the input node and the input format of the output node have to be
        /// the same and they can only be changed when the engine is in a stopped state.
        #[unsafe(method(setVoiceProcessingEnabled:error:_))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingEnabled_error(
            &self,
            enabled: bool,
        ) -> Result<(), Retained<NSError>>;
    );
}
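
/// Illustrative sketch (not part of the generated bindings): enabling voice processing on an
/// I/O node. As described above, this only succeeds while the engine is stopped and rendering
/// to/from an audio device, and enabling it on one I/O node also enables it on the other; the
/// helper name is hypothetical.
#[cfg(feature = "AVAudioNode")]
pub unsafe fn enable_voice_processing(node: &AVAudioIONode) -> Result<(), Retained<NSError>> {
    unsafe {
        // On failure this propagates the NSError describing why the node could not switch modes.
        node.setVoiceProcessingEnabled_error(true)?;
        debug_assert!(node.isVoiceProcessingEnabled());
    }
    Ok(())
}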

/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}

extern_class!(
    /// A node that performs audio input in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio input.
    /// When the engine is operating in manual rendering mode, this node can be used to supply
    /// the input data to the engine.
    ///
    /// This node has one element.
    /// The format of the input scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the format of the PCM audio data that the node will supply to the engine, in the
    /// manual rendering mode (see `setManualRenderingInputPCMFormat:inputBlock:`)
    ///
    /// When rendering from an audio device, the input node does not support format conversion.
    /// Hence the format of the output scope must be the same as that of the input, as must the
    /// formats of all the nodes connected in the input node chain.
    ///
    /// In the manual rendering mode, the format of the output scope is initially the same as that
    /// of the input, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioinputnode?language=objc)
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioInputNode;
);

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudio3DMixing for AVAudioInputNode {}
);

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudioMixing for AVAudioInputNode {}
);

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudioStereoMixing for AVAudioInputNode {}
);

#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioInputNode {}
);

#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[cfg(all(
            feature = "AVAudioFormat",
            feature = "AVAudioTypes",
            feature = "block2",
            feature = "objc2-core-audio-types"
        ))]
        /// Supply the data through the input node to the engine operating in the manual rendering mode.
        ///
        /// Parameter `format`: The format of the PCM audio data the block will supply to the engine
        ///
        /// Parameter `block`: The block the engine will call on the input node to get the audio to send to the output,
        /// when operating in the manual rendering mode. See `AVAudioIONodeInputBlock` for more details
        ///
        /// Returns: YES for success
        ///
        /// This block must be set if the input node is being used when the engine is operating in
        /// manual rendering mode.
        /// Switching the engine to render to/from an audio device invalidates any previously set block,
        /// and makes this method ineffective.
        ///
        /// # Safety
        ///
        /// `block` must be a valid pointer.
        #[unsafe(method(setManualRenderingInputPCMFormat:inputBlock:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setManualRenderingInputPCMFormat_inputBlock(
            &self,
            format: &AVAudioFormat,
            block: AVAudioIONodeInputBlock,
        ) -> bool;

        /// Bypass all processing for the microphone uplink done by the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingBypassed))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingBypassed(&self) -> bool;

        /// Setter for [`isVoiceProcessingBypassed`][Self::isVoiceProcessingBypassed].
        #[unsafe(method(setVoiceProcessingBypassed:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingBypassed(&self, voice_processing_bypassed: bool);

        /// Enable automatic gain control on the processed microphone uplink signal.
        /// Enabled by default.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingAGCEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingAGCEnabled(&self) -> bool;

        /// Setter for [`isVoiceProcessingAGCEnabled`][Self::isVoiceProcessingAGCEnabled].
        #[unsafe(method(setVoiceProcessingAGCEnabled:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingAGCEnabled(&self, voice_processing_agc_enabled: bool);

        /// Mutes the input of the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingInputMuted))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingInputMuted(&self) -> bool;

        /// Setter for [`isVoiceProcessingInputMuted`][Self::isVoiceProcessingInputMuted].
        #[unsafe(method(setVoiceProcessingInputMuted:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingInputMuted(&self, voice_processing_input_muted: bool);

        #[cfg(feature = "block2")]
        /// Register a listener to be notified when a speech activity event occurs while the input is muted.
        ///
        /// Parameter `listenerBlock`: The block the engine will call when a speech activity event occurs while the input is muted.
        /// Passing nil will remove an already set block.
        ///
        /// Returns: YES for success
        ///
        /// Continuous presence or absence of speech activity during mute will not cause redundant notifications.
        /// To use this API, the mute is expected to be implemented via `voiceProcessingInputMuted`.
        #[unsafe(method(setMutedSpeechActivityEventListener:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMutedSpeechActivityEventListener(
            &self,
            listener_block: Option<
                &block2::DynBlock<dyn Fn(AVAudioVoiceProcessingSpeechActivityEvent)>,
            >,
        ) -> bool;

        /// The configuration of ducking other (i.e. non-voice) audio
        ///
        /// Configures the ducking of other (i.e. non-voice) audio, including advanced ducking enablement and ducking level.
        /// In general, when other audio is played during voice chat, applying a higher level of ducking could increase the intelligibility of the voice chat.
        /// If not set, the default ducking configuration is to disable advanced ducking, with a ducking level set to AVAudioVoiceProcessingOtherAudioDuckingLevelDefault.
        #[unsafe(method(voiceProcessingOtherAudioDuckingConfiguration))]
        #[unsafe(method_family = none)]
        pub unsafe fn voiceProcessingOtherAudioDuckingConfiguration(
            &self,
        ) -> AVAudioVoiceProcessingOtherAudioDuckingConfiguration;

        /// Setter for [`voiceProcessingOtherAudioDuckingConfiguration`][Self::voiceProcessingOtherAudioDuckingConfiguration].
        #[unsafe(method(setVoiceProcessingOtherAudioDuckingConfiguration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingOtherAudioDuckingConfiguration(
            &self,
            voice_processing_other_audio_ducking_configuration: AVAudioVoiceProcessingOtherAudioDuckingConfiguration,
        );
    );
}
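
/// Illustrative sketch (not part of the generated bindings): muting the input through the voice
/// processing unit and listening for speech activity while muted, e.g. to surface a "you are
/// speaking while muted" hint. Voice processing is assumed to already be enabled on the I/O
/// nodes; the helper name is hypothetical.
#[cfg(all(feature = "AVAudioNode", feature = "block2"))]
pub unsafe fn mute_with_speech_hint(input_node: &AVAudioInputNode) -> bool {
    unsafe {
        // The muted-speech listener is documented to work with a mute implemented via
        // `voiceProcessingInputMuted`.
        input_node.setVoiceProcessingInputMuted(true);
        // Called by the engine when speech starts or ends while the input is muted; the
        // framework is expected to copy the block, so it need not be kept alive here.
        let listener = block2::RcBlock::new(|event: AVAudioVoiceProcessingSpeechActivityEvent| {
            match event {
                // React here, e.g. notify the UI that the user is speaking while muted.
                AVAudioVoiceProcessingSpeechActivityEvent::Started => {}
                _ => {}
            }
        });
        input_node.setMutedSpeechActivityEventListener(Some(&*listener))
    }
}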

/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}

extern_class!(
    /// A node that performs audio output in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio output.
    /// When the engine is operating in manual rendering mode, this node performs output in
    /// response to the client's requests.
    ///
    /// This node has one element.
    /// The format of the output scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the engine's manual rendering mode output format (see
    /// `AVAudioEngine(manualRenderingFormat)`), in the manual rendering mode
    ///
    /// The format of the input scope is initially the same as that of the
    /// output, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiooutputnode?language=objc)
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioOutputNode;
);

#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioOutputNode {}
);

#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
    );
}

/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}