// objc2_avf_audio/generated/AVAudioIONode.rs
1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5use objc2::__framework_prelude::*;
6#[cfg(feature = "objc2-audio-toolbox")]
7#[cfg(not(target_os = "watchos"))]
8use objc2_audio_toolbox::*;
9#[cfg(feature = "objc2-core-audio-types")]
10use objc2_core_audio_types::*;
11use objc2_foundation::*;
12
13use crate::*;
14
/// A block which will be called by AVAudioEngine's render call when operating in the manual
/// rendering mode, to get input data as needed.
///
/// Parameter `inNumberOfFrames`: The number of frames required to complete the request. You may supply either this many
/// frames or none.
///
/// Returns: An AudioBufferList containing data to be rendered, or null if no data is available.
/// The data in the returned buffer must not be cleared or re-filled until the input block is
/// called again or the rendering has finished.
/// The format of the returned buffer must match the format specified when registering the
/// block.
///
/// If you are out of data and return null or less than the requested number of frames, this
/// data will not be used for rendering. The engine will try to render from other active
/// sources in the processing graph, and will inform about the input node's status in the error
/// returned from its render call.
///
/// Note that when the engine is configured to operate in
/// `AVAudioEngineManualRenderingModeRealtime`, this block will be called from a realtime
/// context. Care should be taken not to make any blocking call (e.g. calling libdispatch,
/// blocking on a mutex, allocating memory etc.) which may cause an overload at the lower layers.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionodeinputblock?language=objc)
#[cfg(all(
    feature = "AVAudioTypes",
    feature = "block2",
    feature = "objc2-core-audio-types"
))]
pub type AVAudioIONodeInputBlock =
    *mut block2::DynBlock<dyn Fn(AVAudioFrameCount) -> *const AudioBufferList>;
45
/// Types of speech activity events.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingspeechactivityevent?language=objc)
// NS_ENUM — modelled as a newtype over NSInteger rather than a Rust enum so that
// unknown values received from the framework remain representable.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingSpeechActivityEvent(pub NSInteger);
impl AVAudioVoiceProcessingSpeechActivityEvent {
    /// Speech activity has started.
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityStarted")]
    pub const Started: Self = Self(0);
    /// Speech activity has ended.
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityEnded")]
    pub const Ended: Self = Self(1);
}
63
// SAFETY: The type is `#[repr(transparent)]` over `NSInteger`, so it shares
// NSInteger's Objective-C type encoding.
unsafe impl Encode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING: Encoding = NSInteger::ENCODING;
}
67
// SAFETY: A reference to this type is encoded as a pointer to its own encoding.
unsafe impl RefEncode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
71
/// Ducking level applied to other (i.e. non-voice) audio by AVAudio voice processing AU.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckinglevel?language=objc)
// NS_ENUM — newtype over NSInteger; the discrete values (0, 10, 20, 30) mirror
// the Objective-C constants and leave room for future intermediate levels.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingLevel(pub NSInteger);
impl AVAudioVoiceProcessingOtherAudioDuckingLevel {
    /// Default ducking level applied to other audio for typical voice chat.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelDefault")]
    pub const Default: Self = Self(0);
    /// Minimum ducking applied to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMin")]
    pub const Min: Self = Self(10);
    /// Medium ducking applied to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMid")]
    pub const Mid: Self = Self(20);
    /// Maximum ducking applied to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMax")]
    pub const Max: Self = Self(30);
}
94
// SAFETY: The type is `#[repr(transparent)]` over `NSInteger`, so it shares
// NSInteger's Objective-C type encoding.
unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING: Encoding = NSInteger::ENCODING;
}
98
// SAFETY: A reference to this type is encoded as a pointer to its own encoding.
unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
102
/// The configuration of ducking other (i.e. non-voice) audio.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckingconfiguration?language=objc)
// `#[repr(C)]` — field order and layout must match the Objective-C struct
// exactly, as instances cross the FFI boundary by value.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    // Enables advanced ducking which ducks other audio based on the presence of
    // voice activity from local and/or remote chat participants.
    pub enableAdvancedDucking: Bool,
    // Ducking level applied to other audio.
    pub duckingLevel: AVAudioVoiceProcessingOtherAudioDuckingLevel,
}
117
// SAFETY: The struct is `#[repr(C)]` and the encoding lists its two fields in
// declaration order, matching the Objective-C struct layout.
unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING: Encoding = Encoding::Struct(
        "AVAudioVoiceProcessingOtherAudioDuckingConfiguration",
        &[
            <Bool>::ENCODING,
            <AVAudioVoiceProcessingOtherAudioDuckingLevel>::ENCODING,
        ],
    );
}
127
// SAFETY: A reference to this type is encoded as a pointer to its own encoding.
unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
131
extern_class!(
    /// Base class for a node that performs audio input or output in the engine.
    ///
    /// When the engine is configured to render to/from an audio device, on macOS, AVAudioInputNode
    /// and AVAudioOutputNode communicate with the system's default input and output devices.
    /// On iOS, they communicate with the devices appropriate to the app's AVAudioSession category
    /// and other configuration, also considering the user's actions such as
    /// connecting/disconnecting external devices.
    ///
    /// In the manual rendering mode, the AVAudioInputNode and AVAudioOutputNode perform the input
    /// and output in the engine, in response to client's request.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionode?language=objc)
    // Inheritance chain: AVAudioIONode -> AVAudioNode -> NSObject.
    #[unsafe(super(AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioIONode;
);
150
// AVAudioIONode, like all NSObject subclasses, conforms to NSObjectProtocol.
#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioIONode {}
);
155
#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        /// The presentation or hardware latency, applicable when the engine is rendering to/from an
        /// audio device.
        ///
        /// This corresponds to kAudioDevicePropertyLatency and kAudioStreamPropertyLatency.
        /// See `<CoreAudio/AudioHardwareBase.h>`.
        #[unsafe(method(presentationLatency))]
        #[unsafe(method_family = none)]
        pub unsafe fn presentationLatency(&self) -> NSTimeInterval;

        #[cfg(feature = "objc2-audio-toolbox")]
        #[cfg(not(target_os = "watchos"))]
        /// The node's underlying AudioUnit, if any.
        ///
        /// This is only necessary for certain advanced usages.
        #[unsafe(method(audioUnit))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioUnit(&self) -> AudioUnit;

        /// Indicates whether voice processing is enabled.
        #[unsafe(method(isVoiceProcessingEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingEnabled(&self) -> bool;

        /// Enable or disable voice processing on the IO node.
        ///
        /// Parameter `enabled`: Whether voice processing is to be enabled.
        ///
        /// Parameter `outError`: On exit, if the IO node cannot enable or disable voice processing, a description of the error
        ///
        /// Returns: YES for success
        ///
        /// If enabled, the input node does signal processing on the incoming audio (taking out any
        /// of the audio that is played from the device at a given time from the incoming audio).
        /// Disabling this mode on either of the IO nodes automatically disables it on the other IO node.
        ///
        /// Voice processing requires both input and output nodes to be in the voice processing mode.
        /// Enabling this mode on either of the IO nodes automatically enables it on the other IO node.
        /// Voice processing is only supported when the engine is rendering to the audio device and not
        /// in the manual rendering mode.
        /// Voice processing can only be enabled or disabled when the engine is in a stopped state.
        ///
        /// The output format of the input node and the input format of the output node have to be
        /// the same and they can only be changed when the engine is in a stopped state.
        #[unsafe(method(setVoiceProcessingEnabled:error:_))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingEnabled_error(
            &self,
            enabled: bool,
        ) -> Result<(), Retained<NSError>>;
    );
}
212
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
226
extern_class!(
    /// A node that performs audio input in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio input.
    /// When the engine is operating in manual rendering mode, this node can be used to supply
    /// the input data to the engine.
    ///
    /// This node has one element.
    /// The format of the input scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the format of the PCM audio data that the node will supply to the engine, in the
    /// manual rendering mode (see `setManualRenderingInputPCMFormat:inputBlock:`)
    ///
    /// When rendering from an audio device, the input node does not support format conversion.
    /// Hence the format of the output scope must be same as that of the input, as well as the
    /// formats for all the nodes connected in the input node chain.
    ///
    /// In the manual rendering mode, the format of the output scope is initially the same as that
    /// of the input, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioinputnode?language=objc)
    // Inheritance chain: AVAudioInputNode -> AVAudioIONode -> AVAudioNode -> NSObject.
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioInputNode;
);
254
// Mixing-related protocol conformances; gated on the "AVAudioMixing" feature
// because the protocols are declared in that module.
#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudio3DMixing for AVAudioInputNode {}
);

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudioMixing for AVAudioInputNode {}
);

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
extern_conformance!(
    unsafe impl AVAudioStereoMixing for AVAudioInputNode {}
);

#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioInputNode {}
);
274
#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[cfg(all(
            feature = "AVAudioFormat",
            feature = "AVAudioTypes",
            feature = "block2",
            feature = "objc2-core-audio-types"
        ))]
        /// Supply the data through the input node to the engine operating in the manual rendering mode.
        ///
        /// Parameter `format`: The format of the PCM audio data the block will supply to the engine
        ///
        /// Parameter `block`: The block the engine will call on the input node to get the audio to send to the output,
        /// when operating in the manual rendering mode. See `AVAudioIONodeInputBlock` for more details
        ///
        /// Returns: YES for success
        ///
        /// This block must be set if the input node is being used when the engine is operating in
        /// manual rendering mode.
        /// Switching the engine to render to/from an audio device invalidates any previously set block,
        /// and makes this method ineffective.
        #[unsafe(method(setManualRenderingInputPCMFormat:inputBlock:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setManualRenderingInputPCMFormat_inputBlock(
            &self,
            format: &AVAudioFormat,
            block: AVAudioIONodeInputBlock,
        ) -> bool;

        /// Bypass all processing for microphone uplink done by the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingBypassed))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingBypassed(&self) -> bool;

        /// Setter for [`isVoiceProcessingBypassed`][Self::isVoiceProcessingBypassed].
        #[unsafe(method(setVoiceProcessingBypassed:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingBypassed(&self, voice_processing_bypassed: bool);

        /// Enable automatic gain control on the processed microphone uplink
        /// signal. Enabled by default.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingAGCEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingAGCEnabled(&self) -> bool;

        /// Setter for [`isVoiceProcessingAGCEnabled`][Self::isVoiceProcessingAGCEnabled].
        #[unsafe(method(setVoiceProcessingAGCEnabled:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingAGCEnabled(&self, voice_processing_agc_enabled: bool);

        /// Mutes the input of the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingInputMuted))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingInputMuted(&self) -> bool;

        /// Setter for [`isVoiceProcessingInputMuted`][Self::isVoiceProcessingInputMuted].
        #[unsafe(method(setVoiceProcessingInputMuted:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingInputMuted(&self, voice_processing_input_muted: bool);

        #[cfg(feature = "block2")]
        /// Register a listener to be notified when speech activity event occurs while the input is muted.
        ///
        /// Parameter `listenerBlock`: The block the engine will call when speech activity event occurs while the input is muted.
        /// Passing nil will remove an already set block.
        ///
        /// Returns: YES for success
        ///
        /// Continuous presence of or lack of speech activity during mute will not cause redundant notification.
        /// In order to use this API, it's expected to implement the mute via the voiceProcessingInputMuted.
        #[unsafe(method(setMutedSpeechActivityEventListener:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMutedSpeechActivityEventListener(
            &self,
            listener_block: Option<
                &block2::DynBlock<dyn Fn(AVAudioVoiceProcessingSpeechActivityEvent)>,
            >,
        ) -> bool;

        /// The configuration of ducking other (i.e. non-voice) audio
        ///
        /// Configures the ducking of other (i.e. non-voice) audio, including advanced ducking enablement and ducking level.
        /// In general, when other audio is played during voice chat, applying a higher level of ducking could increase the intelligibility of the voice chat.
        /// If not set, the default ducking configuration is to disable advanced ducking, with a ducking level set to AVAudioVoiceProcessingOtherAudioDuckingLevelDefault.
        #[unsafe(method(voiceProcessingOtherAudioDuckingConfiguration))]
        #[unsafe(method_family = none)]
        pub unsafe fn voiceProcessingOtherAudioDuckingConfiguration(
            &self,
        ) -> AVAudioVoiceProcessingOtherAudioDuckingConfiguration;

        /// Setter for [`voiceProcessingOtherAudioDuckingConfiguration`][Self::voiceProcessingOtherAudioDuckingConfiguration].
        #[unsafe(method(setVoiceProcessingOtherAudioDuckingConfiguration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingOtherAudioDuckingConfiguration(
            &self,
            voice_processing_other_audio_ducking_configuration: AVAudioVoiceProcessingOtherAudioDuckingConfiguration,
        );
    );
}
385
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
395
extern_class!(
    /// A node that performs audio output in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio output.
    /// When the engine is operating in manual rendering mode, this node performs output in
    /// response to client's requests.
    ///
    /// This node has one element.
    /// The format of the output scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the engine's manual rendering mode output format (see
    /// `AVAudioEngine(manualRenderingFormat)`), in the manual rendering mode
    ///
    /// The format of the input scope is initially the same as that of the
    /// output, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiooutputnode?language=objc)
    // Inheritance chain: AVAudioOutputNode -> AVAudioIONode -> AVAudioNode -> NSObject.
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioOutputNode;
);
419
// AVAudioOutputNode, like all NSObject subclasses, conforms to NSObjectProtocol.
#[cfg(feature = "AVAudioNode")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAudioOutputNode {}
);
424
#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
    );
}
433
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}