objc2_avf_audio/generated/
AVAudioIONode.rs

1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5use objc2::__framework_prelude::*;
6#[cfg(feature = "objc2-audio-toolbox")]
7#[cfg(not(target_os = "watchos"))]
8use objc2_audio_toolbox::*;
9#[cfg(feature = "objc2-core-audio-types")]
10use objc2_core_audio_types::*;
11use objc2_foundation::*;
12
13use crate::*;
14
/// A block which will be called by AVAudioEngine's render call when operating in the manual
/// rendering mode, to get input data as needed.
///
/// Parameter `inNumberOfFrames`: The number of frames required to complete the request. You may supply either this many
/// frames or none.
///
/// Returns: An AudioBufferList containing data to be rendered, or null if no data is available.
/// The data in the returned buffer must not be cleared or re-filled until the input block is
/// called again or the rendering has finished.
/// The format of the returned buffer must match the format specified when registering the
/// block.
///
/// If you are out of data and return null or less than the requested number of frames, this
/// data will not be used for rendering. The engine will try to render from other active
/// sources in the processing graph, and will inform about the input node's status in the error
/// returned from its render call.
///
/// Note that when the engine is configured to operate in
/// `AVAudioEngineManualRenderingModeRealtime`, this block will be called from a realtime
/// context. Care should be taken not to make any blocking call (e.g. calling libdispatch,
/// blocking on a mutex, allocating memory etc.) which may cause an overload at the lower layers.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionodeinputblock?language=objc)
#[cfg(all(
    feature = "AVAudioTypes",
    feature = "block2",
    feature = "objc2-core-audio-types"
))]
pub type AVAudioIONodeInputBlock =
    *mut block2::Block<dyn Fn(AVAudioFrameCount) -> *const AudioBufferList>;
45
/// Types of speech activity events.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingspeechactivityevent?language=objc)
// NS_ENUM
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingSpeechActivityEvent(pub NSInteger);
impl AVAudioVoiceProcessingSpeechActivityEvent {
    /// Speech activity has started.
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityStarted")]
    pub const Started: Self = Self(0);
    /// Speech activity has ended.
    #[doc(alias = "AVAudioVoiceProcessingSpeechActivityEnded")]
    pub const Ended: Self = Self(1);
}
63
// SAFETY: the type is a `#[repr(transparent)]` wrapper around NSInteger,
// so it shares NSInteger's Objective-C type encoding.
unsafe impl Encode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING: Encoding = NSInteger::ENCODING;
}

// Pointer-to-value encoding, derived from the value encoding above.
unsafe impl RefEncode for AVAudioVoiceProcessingSpeechActivityEvent {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
71
/// Ducking level applied to other (i.e. non-voice) audio by AVAudio voice processing AU.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckinglevel?language=objc)
// NS_ENUM
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingLevel(pub NSInteger);
impl AVAudioVoiceProcessingOtherAudioDuckingLevel {
    /// Default ducking level to other audio for typical voice chat.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelDefault")]
    pub const Default: Self = Self(0);
    /// Minimum ducking to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMin")]
    pub const Min: Self = Self(10);
    /// Medium ducking to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMid")]
    pub const Mid: Self = Self(20);
    /// Maximum ducking to other audio.
    #[doc(alias = "AVAudioVoiceProcessingOtherAudioDuckingLevelMax")]
    pub const Max: Self = Self(30);
}
94
// SAFETY: the type is a `#[repr(transparent)]` wrapper around NSInteger,
// so it shares NSInteger's Objective-C type encoding.
unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING: Encoding = NSInteger::ENCODING;
}

// Pointer-to-value encoding, derived from the value encoding above.
unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingLevel {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
102
/// The configuration of ducking other (i.e. non-voice) audio
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiovoiceprocessingotheraudioduckingconfiguration?language=objc)
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    /// Enables advanced ducking which ducks other audio based on the presence
    /// of voice activity from local and/or remote chat participants.
    pub enableAdvancedDucking: Bool,
    /// Ducking level of other audio.
    pub duckingLevel: AVAudioVoiceProcessingOtherAudioDuckingLevel,
}
117
// SAFETY: the struct is `#[repr(C)]`, and the encoding mirrors the C struct's
// name and field order (a Bool followed by an NSInteger-backed level).
unsafe impl Encode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING: Encoding = Encoding::Struct(
        "AVAudioVoiceProcessingOtherAudioDuckingConfiguration",
        &[
            <Bool>::ENCODING,
            <AVAudioVoiceProcessingOtherAudioDuckingLevel>::ENCODING,
        ],
    );
}

// Pointer-to-struct encoding, derived from the struct encoding above.
unsafe impl RefEncode for AVAudioVoiceProcessingOtherAudioDuckingConfiguration {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
131
extern_class!(
    /// Base class for a node that performs audio input or output in the engine.
    ///
    /// When the engine is configured to render to/from an audio device, on macOS, AVAudioInputNode
    /// and AVAudioOutputNode communicate with the system's default input and output devices.
    /// On iOS, they communicate with the devices appropriate to the app's AVAudioSession category
    /// and other configuration, also considering the user's actions such as
    /// connecting/disconnecting external devices.
    ///
    /// In the manual rendering mode, the AVAudioInputNode and AVAudioOutputNode perform the input
    /// and output in the engine, in response to client's request.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioionode?language=objc)
    // Subclassed in this file by `AVAudioInputNode` and `AVAudioOutputNode`.
    #[unsafe(super(AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioIONode;
);
150
// SAFETY: AVAudioIONode descends from NSObject and thus conforms to NSObjectProtocol.
#[cfg(feature = "AVAudioNode")]
unsafe impl NSObjectProtocol for AVAudioIONode {}
153
#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        /// The presentation or hardware latency, applicable when the engine is rendering to/from an
        /// audio device.
        ///
        /// This corresponds to kAudioDevicePropertyLatency and kAudioStreamPropertyLatency.
        /// See
        /// <CoreAudio
        /// /AudioHardwareBase.h>.
        #[unsafe(method(presentationLatency))]
        #[unsafe(method_family = none)]
        pub unsafe fn presentationLatency(&self) -> NSTimeInterval;

        #[cfg(feature = "objc2-audio-toolbox")]
        #[cfg(not(target_os = "watchos"))]
        /// The node's underlying AudioUnit, if any.
        ///
        /// This is only necessary for certain advanced usages.
        #[unsafe(method(audioUnit))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioUnit(&self) -> AudioUnit;

        /// Indicates whether voice processing is enabled.
        #[unsafe(method(isVoiceProcessingEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingEnabled(&self) -> bool;

        /// Enable or disable voice processing on the IO node.
        ///
        /// Parameter `enabled`: Whether voice processing is to be enabled.
        ///
        /// Parameter `outError`: On exit, if the IO node cannot enable or disable voice processing, a description of the error
        ///
        /// Returns: YES for success
        ///
        /// If enabled, the input node does signal processing on the incoming audio (taking out any
        /// of the audio that is played from the device at a given time from the incoming audio).
        /// Disabling this mode on either of the IO nodes automatically disables it on the other IO node.
        ///
        /// Voice processing requires both input and output nodes to be in the voice processing mode.
        /// Enabling this mode on either of the IO nodes automatically enables it on the other IO node.
        /// Voice processing is only supported when the engine is rendering to the audio device and not
        /// in the manual rendering mode.
        /// Voice processing can only be enabled or disabled when the engine is in a stopped state.
        ///
        /// The output format of the input node and the input format of the output node have to be
        /// the same and they can only be changed when the engine is in a stopped state.
        #[unsafe(method(setVoiceProcessingEnabled:error:_))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingEnabled_error(
            &self,
            enabled: bool,
        ) -> Result<(), Retained<NSError>>;
    );
}
210
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioIONode {
    extern_methods!(
        /// Plain `-init`; the node must still be attached to an engine before use.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Convenience combining `+alloc` and `-init`.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
224
extern_class!(
    /// A node that performs audio input in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio input.
    /// When the engine is operating in manual rendering mode, this node can be used to supply
    /// the input data to the engine.
    ///
    /// This node has one element.
    /// The format of the input scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the format of the PCM audio data that the node will supply to the engine, in the
    /// manual rendering mode (see `setManualRenderingInputPCMFormat:inputBlock:`)
    ///
    /// When rendering from an audio device, the input node does not support format conversion.
    /// Hence the format of the output scope must be the same as that of the input, as well as the
    /// formats for all the nodes connected in the input node chain.
    ///
    /// In the manual rendering mode, the format of the output scope is initially the same as that
    /// of the input, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudioinputnode?language=objc)
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioInputNode;
);
252
// Protocol conformances declared for AVAudioInputNode in the AVFAudio headers.
#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
unsafe impl AVAudio3DMixing for AVAudioInputNode {}

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
unsafe impl AVAudioMixing for AVAudioInputNode {}

#[cfg(all(feature = "AVAudioMixing", feature = "AVAudioNode"))]
unsafe impl AVAudioStereoMixing for AVAudioInputNode {}

// SAFETY: AVAudioInputNode descends from NSObject and thus conforms to NSObjectProtocol.
#[cfg(feature = "AVAudioNode")]
unsafe impl NSObjectProtocol for AVAudioInputNode {}
264
#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        /// Plain `-init`; the node must still be attached to an engine before use.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[cfg(all(
            feature = "AVAudioFormat",
            feature = "AVAudioTypes",
            feature = "block2",
            feature = "objc2-core-audio-types"
        ))]
        /// Supply the data through the input node to the engine operating in the manual rendering mode.
        ///
        /// Parameter `format`: The format of the PCM audio data the block will supply to the engine
        ///
        /// Parameter `block`: The block the engine will call on the input node to get the audio to send to the output,
        /// when operating in the manual rendering mode. See `AVAudioIONodeInputBlock` for more details
        ///
        /// Returns: YES for success
        ///
        /// This block must be set if the input node is being used when the engine is operating in
        /// manual rendering mode.
        /// Switching the engine to render to/from an audio device invalidates any previously set block,
        /// and makes this method ineffective.
        #[unsafe(method(setManualRenderingInputPCMFormat:inputBlock:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setManualRenderingInputPCMFormat_inputBlock(
            &self,
            format: &AVAudioFormat,
            block: AVAudioIONodeInputBlock,
        ) -> bool;

        /// Bypass all processing for microphone uplink done by the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingBypassed))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingBypassed(&self) -> bool;

        /// Setter for [`isVoiceProcessingBypassed`][Self::isVoiceProcessingBypassed].
        #[unsafe(method(setVoiceProcessingBypassed:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingBypassed(&self, voice_processing_bypassed: bool);

        /// Enable automatic gain control on the processed microphone uplink
        /// signal. Enabled by default.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingAGCEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingAGCEnabled(&self) -> bool;

        /// Setter for [`isVoiceProcessingAGCEnabled`][Self::isVoiceProcessingAGCEnabled].
        #[unsafe(method(setVoiceProcessingAGCEnabled:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingAGCEnabled(&self, voice_processing_agc_enabled: bool);

        /// Mutes the input of the voice processing unit.
        ///
        /// Querying this property when voice processing is disabled will return false.
        #[unsafe(method(isVoiceProcessingInputMuted))]
        #[unsafe(method_family = none)]
        pub unsafe fn isVoiceProcessingInputMuted(&self) -> bool;

        /// Setter for [`isVoiceProcessingInputMuted`][Self::isVoiceProcessingInputMuted].
        #[unsafe(method(setVoiceProcessingInputMuted:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingInputMuted(&self, voice_processing_input_muted: bool);

        #[cfg(feature = "block2")]
        /// Register a listener to be notified when speech activity event occurs while the input is muted.
        ///
        /// Parameter `listenerBlock`: The block the engine will call when speech activity event occurs while the input is muted.
        /// Passing nil will remove an already set block.
        ///
        /// Returns: YES for success
        ///
        /// Continuous presence of or lack of speech activity during mute will not cause redundant notification.
        /// In order to use this API, it's expected to implement the mute via the voiceProcessingInputMuted.
        #[unsafe(method(setMutedSpeechActivityEventListener:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMutedSpeechActivityEventListener(
            &self,
            listener_block: Option<
                &block2::Block<dyn Fn(AVAudioVoiceProcessingSpeechActivityEvent)>,
            >,
        ) -> bool;

        /// The configuration of ducking other (i.e. non-voice) audio
        ///
        /// Configures the ducking of other (i.e. non-voice) audio, including advanced ducking enablement and ducking level.
        /// In general, when other audio is played during voice chat, applying a higher level of ducking could increase the intelligibility of the voice chat.
        /// If not set, the default ducking configuration is to disable advanced ducking, with a ducking level set to AVAudioVoiceProcessingOtherAudioDuckingLevelDefault.
        #[unsafe(method(voiceProcessingOtherAudioDuckingConfiguration))]
        #[unsafe(method_family = none)]
        pub unsafe fn voiceProcessingOtherAudioDuckingConfiguration(
            &self,
        ) -> AVAudioVoiceProcessingOtherAudioDuckingConfiguration;

        /// Setter for [`voiceProcessingOtherAudioDuckingConfiguration`][Self::voiceProcessingOtherAudioDuckingConfiguration].
        #[unsafe(method(setVoiceProcessingOtherAudioDuckingConfiguration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVoiceProcessingOtherAudioDuckingConfiguration(
            &self,
            voice_processing_other_audio_ducking_configuration: AVAudioVoiceProcessingOtherAudioDuckingConfiguration,
        );
    );
}
375
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioInputNode {
    extern_methods!(
        /// Convenience combining `+alloc` and `-init`.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
385
extern_class!(
    /// A node that performs audio output in the engine.
    ///
    /// When the engine is rendering to/from an audio device, this node connects to the system's
    /// audio output.
    /// When the engine is operating in manual rendering mode, this node performs output in
    /// response to client's requests.
    ///
    /// This node has one element.
    /// The format of the output scope reflects:
    /// - the audio hardware sample rate and channel count, when connected to the hardware
    /// - the engine's manual rendering mode output format (see
    /// `AVAudioEngine(manualRenderingFormat)`), in the manual rendering mode
    ///
    /// The format of the input scope is initially the same as that of the
    /// output, but you may set it to a different format, in which case the node will convert.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfaudio/avaudiooutputnode?language=objc)
    #[unsafe(super(AVAudioIONode, AVAudioNode, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVAudioNode")]
    pub struct AVAudioOutputNode;
);
409
// SAFETY: AVAudioOutputNode descends from NSObject and thus conforms to NSObjectProtocol.
#[cfg(feature = "AVAudioNode")]
unsafe impl NSObjectProtocol for AVAudioOutputNode {}

#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        /// Plain `-init`; the node must still be attached to an engine before use.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
    );
}
421
/// Methods declared on superclass `NSObject`.
#[cfg(feature = "AVAudioNode")]
impl AVAudioOutputNode {
    extern_methods!(
        /// Convenience combining `+alloc` and `-init`.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}