// objc2_av_foundation/generated/AVSampleBufferAudioRenderer.rs
1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5use objc2::__framework_prelude::*;
6#[cfg(feature = "objc2-core-media")]
7use objc2_core_media::*;
8use objc2_foundation::*;
9
10use crate::*;
11
extern_class!(
    /// An `NSObject` subclass that renders enqueued audio sample buffers.
    ///
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorenderer?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVSampleBufferAudioRenderer;
);
18
// Protocol conformance for queued sample-buffer rendering; only available
// when the corresponding crate feature is enabled.
#[cfg(feature = "AVQueuedSampleBufferRendering")]
unsafe impl AVQueuedSampleBufferRendering for AVSampleBufferAudioRenderer {}

// All Objective-C classes conform to `NSObject` (the protocol).
unsafe impl NSObjectProtocol for AVSampleBufferAudioRenderer {}
23
impl AVSampleBufferAudioRenderer {
    extern_methods!(
        #[cfg(feature = "AVQueuedSampleBufferRendering")]
        /// The renderer's current queued-sample-buffer rendering status.
        #[unsafe(method(status))]
        #[unsafe(method_family = none)]
        pub unsafe fn status(&self) -> AVQueuedSampleBufferRenderingStatus;

        /// The error, if any, associated with the renderer.
        ///
        /// NOTE(review): presumably non-nil only when [`status`][Self::status]
        /// reports a failure — confirm against Apple's documentation.
        #[unsafe(method(error))]
        #[unsafe(method_family = none)]
        pub unsafe fn error(&self) -> Option<Retained<NSError>>;

        /// Specifies the unique ID of the Core Audio output device used to play audio.
        ///
        /// By default, the value of this property is nil, indicating that the default audio output device is used. Otherwise the value of this property is an NSString containing the unique ID of the Core Audio output device to be used for audio output.
        ///
        /// Core Audio's kAudioDevicePropertyDeviceUID is a suitable source of audio output device unique IDs.
        ///
        /// Modifying this property while the timebase's rate is not 0.0 may cause the rate to briefly change to 0.0.
        ///
        /// On macOS, the audio device clock may be used as the AVSampleBufferRenderSynchronizer's and all attached AVQueuedSampleBufferRendering's timebase's clocks. If the audioOutputDeviceUniqueID is modified, the clocks of all these timebases may also change.
        ///
        /// If multiple AVSampleBufferAudioRenderers with different values for audioOutputDeviceUniqueID are attached to the same AVSampleBufferRenderSynchronizer, audio may not stay in sync during playback. To avoid this, ensure that all synchronized AVSampleBufferAudioRenderers are using the same audio output device.
        #[unsafe(method(audioOutputDeviceUniqueID))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioOutputDeviceUniqueID(&self) -> Option<Retained<NSString>>;

        /// Setter for [`audioOutputDeviceUniqueID`][Self::audioOutputDeviceUniqueID].
        ///
        /// Pass `None` to restore the default audio output device.
        #[unsafe(method(setAudioOutputDeviceUniqueID:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioOutputDeviceUniqueID(
            &self,
            audio_output_device_unique_id: Option<&NSString>,
        );

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the processing algorithm used to manage audio pitch at varying rates.
        ///
        /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchSpectral, are defined in AVAudioProcessingSettings.h.
        ///
        /// The default value for applications linked on or after iOS 15.0 or macOS 12.0 is AVAudioTimePitchAlgorithmTimeDomain. For iOS versions prior to 15.0 the default value is AVAudioTimePitchAlgorithmLowQualityZeroLatency.
        /// For macOS versions prior to 12.0 the default value is AVAudioTimePitchAlgorithmSpectral.
        ///
        /// If the timebase's rate is not supported by the audioTimePitchAlgorithm, audio will be muted.
        ///
        /// Modifying this property while the timebase's rate is not 0.0 may cause the rate to briefly change to 0.0.
        #[unsafe(method(audioTimePitchAlgorithm))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
        #[unsafe(method(setAudioTimePitchAlgorithm:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioTimePitchAlgorithm(
            &self,
            audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
        );

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the source audio channel layouts allowed by the receiver for spatialization.
        ///
        /// Spatialization uses psychoacoustic methods to create a more immersive audio rendering when the content is played on specialized headphones and speaker arrangements. When an AVSampleBufferAudioRenderer's allowedAudioSpatializationFormats property is set to AVAudioSpatializationFormatMonoAndStereo the AVSampleBufferAudioRenderer will attempt to spatialize content tagged with a stereo channel layout, two-channel content with no layout specified as well as mono. It is considered incorrect to render a binaural recording with spatialization. A binaural recording is captured using two carefully placed microphones at each ear where the intent, when played on headphones, is to reproduce a naturally occurring spatial effect. Content tagged with a binaural channel layout will ignore this property value. When an AVSampleBufferAudioRenderer's allowedAudioSpatializationFormats property is set to AVAudioSpatializationFormatMultichannel the AVSampleBufferAudioRenderer will attempt to spatialize any decodable multichannel layout. Setting this property to AVAudioSpatializationFormatMonoStereoAndMultichannel indicates that the sender allows the AVSampleBufferAudioRenderer to spatialize any decodable mono, stereo or multichannel layout. This property is not observable. The default value for this property is AVAudioSpatializationFormatMultichannel.
        #[unsafe(method(allowedAudioSpatializationFormats))]
        #[unsafe(method_family = none)]
        pub unsafe fn allowedAudioSpatializationFormats(&self) -> AVAudioSpatializationFormats;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`allowedAudioSpatializationFormats`][Self::allowedAudioSpatializationFormats].
        #[unsafe(method(setAllowedAudioSpatializationFormats:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAllowedAudioSpatializationFormats(
            &self,
            allowed_audio_spatialization_formats: AVAudioSpatializationFormats,
        );
    );
}
100
/// Methods declared on superclass `NSObject`.
impl AVSampleBufferAudioRenderer {
    extern_methods!(
        /// Initializes a newly allocated renderer (`-init`).
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Allocates and initializes a new renderer in one step (`+new`).
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
113
/// AVSampleBufferAudioRendererVolumeControl.
impl AVSampleBufferAudioRenderer {
    extern_methods!(
        /// The renderer's current audio volume.
        ///
        /// NOTE(review): assumed to be a linear gain in the range 0.0..=1.0,
        /// like other AVFoundation volume properties — confirm against Apple's docs.
        #[unsafe(method(volume))]
        #[unsafe(method_family = none)]
        pub unsafe fn volume(&self) -> c_float;

        /// Setter for [`volume`][Self::volume].
        #[unsafe(method(setVolume:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVolume(&self, volume: c_float);

        /// Whether audio output from the renderer is currently muted.
        #[unsafe(method(isMuted))]
        #[unsafe(method_family = none)]
        pub unsafe fn isMuted(&self) -> bool;

        /// Setter for [`isMuted`][Self::isMuted].
        #[unsafe(method(setMuted:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMuted(&self, muted: bool);
    );
}
136
// Notification name exported by AVFoundation and resolved at link time.
extern "C" {
    /// A notification that fires whenever the receiver's enqueued media data has been flushed for a reason other than a call to the -flush method.
    ///
    /// The renderer may flush enqueued media data when the user routes playback to a new destination. The renderer may also flush enqueued media data when the playback rate of the attached AVSampleBufferRenderSynchronizer is changed (e.g. 1.0 -> 2.0 or 1.0 -> 0.0 -> 2.0), however no flush will occur for normal pauses (non-zero -> 0.0) and resumes (0.0 -> same non-zero rate as before).
    ///
    /// When an automatic flush occurs, the attached render synchronizer's timebase will remain running at its current rate. It is typically best to respond to this notification by enqueueing media data with timestamps starting at the timebase's current time. To the listener, this will sound similar to muting the audio for a short period of time. If it is more desirable to ensure that all audio is played than to keep the timeline moving, you may also stop the synchronizer, set the synchronizer's current time to the value of AVSampleBufferAudioRendererFlushTimeKey, start reenqueueing sample buffers with timestamps starting at that time, and restart the synchronizer. To the listener, this will sound similar to pausing the audio for a short period of time.
    ///
    /// This notification is delivered on an arbitrary thread. If sample buffers are being enqueued with the renderer concurrently with the receipt of this notification, it is possible that one or more sample buffers will remain enqueued in the renderer. This is generally undesirable, because the sample buffers that remain will likely have timestamps far ahead of the timebase's current time and so won't be rendered for some time. The best practice is to invoke the -flush method, in a manner that is serialized with enqueueing sample buffers, after receiving this notification and before resuming the enqueueing of sample buffers.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendererwasflushedautomaticallynotification?language=objc)
    pub static AVSampleBufferAudioRendererWasFlushedAutomaticallyNotification:
        &'static NSNotificationName;
}
150
// Notification name exported by AVFoundation and resolved at link time.
extern "C" {
    /// A notification that indicates the hardware configuration does not match the enqueued data format.
    ///
    /// The output configuration of the playback hardware might change during the playback session if other clients play content with different format. In such cases, if the media content format does not match the hardware configuration it would produce suboptimal rendering of the enqueued media data. When the framework detects such mismatch it will issue this notification, so the client can flush the renderer and re-enqueue the sample buffers from the current media playhead, which will configure the hardware based on the format of newly enqueued sample buffers.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendereroutputconfigurationdidchangenotification?language=objc)
    pub static AVSampleBufferAudioRendererOutputConfigurationDidChangeNotification:
        &'static NSNotificationName;
}
160
// Dictionary-key string exported by AVFoundation and resolved at link time.
extern "C" {
    /// The presentation timestamp of the first enqueued sample that was flushed.
    ///
    /// The value of this key is an NSValue wrapping a CMTime.
    ///
    /// NOTE(review): presumably found in the userInfo of the
    /// automatic-flush notification declared above — confirm against Apple's docs.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendererflushtimekey?language=objc)
    pub static AVSampleBufferAudioRendererFlushTimeKey: &'static NSString;
}
169
/// AVSampleBufferAudioRendererQueueManagement.
impl AVSampleBufferAudioRenderer {
    extern_methods!(
        #[cfg(all(feature = "block2", feature = "objc2-core-media"))]
        /// Flushes enqueued sample buffers with presentation time stamps later than or equal to the specified time.
        ///
        /// Parameter `time`: The source time; enqueued sample buffers with presentation time stamps at or after this time are flushed.
        ///
        /// Parameter `completionHandler`: A block that is invoked, possibly asynchronously, after the flush operation completes or fails.
        ///
        /// This method can be used to replace media data scheduled to be rendered in the future, without interrupting playback. One example of this is when the data that has already been enqueued is from a sequence of two songs and the second song is swapped for a new song. In this case, this method would be called with the time stamp of the first sample buffer from the second song. After the completion handler is executed with a YES parameter, media data may again be enqueued with timestamps at the specified time.
        ///
        /// If NO is provided to the completion handler, the flush did not succeed and the set of enqueued sample buffers remains unchanged. A flush can fail because the source time was too close to (or earlier than) the current time or because the current configuration of the receiver does not support flushing at a particular time. In these cases, the caller can choose to flush all enqueued media data by invoking the -flush method.
        #[unsafe(method(flushFromSourceTime:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn flushFromSourceTime_completionHandler(
            &self,
            time: CMTime,
            completion_handler: &block2::Block<dyn Fn(Bool)>,
        );
    );
}
189}