// objc2_av_foundation/generated/AVSampleBufferAudioRenderer.rs
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;

use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;

use crate::*;
11
12extern_class!(
13 /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorenderer?language=objc)
14 #[unsafe(super(NSObject))]
15 #[derive(Debug, PartialEq, Eq, Hash)]
16 pub struct AVSampleBufferAudioRenderer;
17);
18
#[cfg(feature = "AVQueuedSampleBufferRendering")]
extern_conformance!(
    unsafe impl AVQueuedSampleBufferRendering for AVSampleBufferAudioRenderer {}
);
23
24extern_conformance!(
25 unsafe impl NSObjectProtocol for AVSampleBufferAudioRenderer {}
26);
27
28impl AVSampleBufferAudioRenderer {
29 extern_methods!(
30 #[cfg(feature = "AVQueuedSampleBufferRendering")]
31 #[unsafe(method(status))]
32 #[unsafe(method_family = none)]
33 pub unsafe fn status(&self) -> AVQueuedSampleBufferRenderingStatus;
34
35 #[unsafe(method(error))]
36 #[unsafe(method_family = none)]
37 pub unsafe fn error(&self) -> Option<Retained<NSError>>;
38
39 /// Specifies the unique ID of the Core Audio output device used to play audio.
40 ///
41 /// By default, the value of this property is nil, indicating that the default audio output device is used. Otherwise the value of this property is an NSString containing the unique ID of the Core Audio output device to be used for audio output.
42 ///
43 /// Core Audio's kAudioDevicePropertyDeviceUID is a suitable source of audio output device unique IDs.
44 ///
45 /// Modifying this property while the timebase's rate is not 0.0 may cause the rate to briefly change to 0.0.
46 ///
47 /// On macOS, the audio device clock may be used as the AVSampleBufferRenderSynchronizer's and all attached AVQueuedSampleBufferRendering's timebase's clocks. If the audioOutputDeviceUniqueID is modified, the clocks of all these timebases may also change.
48 ///
49 /// If multiple AVSampleBufferAudioRenderers with different values for audioOutputDeviceUniqueID are attached to the same AVSampleBufferRenderSynchronizer, audio may not stay in sync during playback. To avoid this, ensure that all synchronized AVSampleBufferAudioRenderers are using the same audio output device.
50 #[unsafe(method(audioOutputDeviceUniqueID))]
51 #[unsafe(method_family = none)]
52 pub unsafe fn audioOutputDeviceUniqueID(&self) -> Option<Retained<NSString>>;
53
54 /// Setter for [`audioOutputDeviceUniqueID`][Self::audioOutputDeviceUniqueID].
55 ///
56 /// This is [copied][objc2_foundation::NSCopying::copy] when set.
57 #[unsafe(method(setAudioOutputDeviceUniqueID:))]
58 #[unsafe(method_family = none)]
59 pub unsafe fn setAudioOutputDeviceUniqueID(
60 &self,
61 audio_output_device_unique_id: Option<&NSString>,
62 );
63
64 #[cfg(feature = "AVAudioProcessingSettings")]
65 /// Indicates the processing algorithm used to manage audio pitch at varying rates.
66 ///
67 /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchSpectral, are defined in AVAudioProcessingSettings.h.
68 ///
69 /// The default value for applications linked on or after iOS 15.0 or macOS 12.0 is AVAudioTimePitchAlgorithmTimeDomain. For iOS versions prior to 15.0 the default value is AVAudioTimePitchAlgorithmLowQualityZeroLatency.
70 /// For macOS versions prior to 12.0 the default value is AVAudioTimePitchAlgorithmSpectral.
71 ///
72 /// If the timebase's rate is not supported by the audioTimePitchAlgorithm, audio will be muted.
73 ///
74 /// Modifying this property while the timebase's rate is not 0.0 may cause the rate to briefly change to 0.0.
75 #[unsafe(method(audioTimePitchAlgorithm))]
76 #[unsafe(method_family = none)]
77 pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;
78
79 #[cfg(feature = "AVAudioProcessingSettings")]
80 /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
81 ///
82 /// This is [copied][objc2_foundation::NSCopying::copy] when set.
83 #[unsafe(method(setAudioTimePitchAlgorithm:))]
84 #[unsafe(method_family = none)]
85 pub unsafe fn setAudioTimePitchAlgorithm(
86 &self,
87 audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
88 );
89
90 #[cfg(feature = "AVAudioProcessingSettings")]
91 /// Indicates the source audio channel layouts allowed by the receiver for spatialization.
92 ///
93 /// Spatialization uses psychoacoustic methods to create a more immersive audio rendering when the content is played on specialized headphones and speaker arrangements. When an AVSampleBufferAudioRenderer's allowedAudioSpatializationFormats property is set to AVAudioSpatializationFormatMonoAndStereo the AVSampleBufferAudioRenderer will attempt to spatialize content tagged with a stereo channel layout, two-channel content with no layout specified as well as mono. It is considered incorrect to render a binaural recording with spatialization. A binaural recording is captured using two carefully placed microphones at each ear where the intent, when played on headphones, is to reproduce a naturally occurring spatial effect. Content tagged with a binaural channel layout will ignore this property value. When an AVSampleBufferAudioRenderer's allowedAudioSpatializationFormats property is set to AVAudioSpatializationFormatMultichannel the AVSampleBufferAudioRenderer will attempt to spatialize any decodable multichannel layout. Setting this property to AVAudioSpatializationFormatMonoStereoAndMultichannel indicates that the sender allows the AVSampleBufferAudioRenderer to spatialize any decodable mono, stereo or multichannel layout. This property is not observable. The default value for this property is AVAudioSpatializationFormatMultichannel.
94 #[unsafe(method(allowedAudioSpatializationFormats))]
95 #[unsafe(method_family = none)]
96 pub unsafe fn allowedAudioSpatializationFormats(&self) -> AVAudioSpatializationFormats;
97
98 #[cfg(feature = "AVAudioProcessingSettings")]
99 /// Setter for [`allowedAudioSpatializationFormats`][Self::allowedAudioSpatializationFormats].
100 #[unsafe(method(setAllowedAudioSpatializationFormats:))]
101 #[unsafe(method_family = none)]
102 pub unsafe fn setAllowedAudioSpatializationFormats(
103 &self,
104 allowed_audio_spatialization_formats: AVAudioSpatializationFormats,
105 );
106 );
107}
108
109/// Methods declared on superclass `NSObject`.
110impl AVSampleBufferAudioRenderer {
111 extern_methods!(
112 #[unsafe(method(init))]
113 #[unsafe(method_family = init)]
114 pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
115
116 #[unsafe(method(new))]
117 #[unsafe(method_family = new)]
118 pub unsafe fn new() -> Retained<Self>;
119 );
120}
121
122/// AVSampleBufferAudioRendererVolumeControl.
123impl AVSampleBufferAudioRenderer {
124 extern_methods!(
125 #[unsafe(method(volume))]
126 #[unsafe(method_family = none)]
127 pub unsafe fn volume(&self) -> c_float;
128
129 /// Setter for [`volume`][Self::volume].
130 #[unsafe(method(setVolume:))]
131 #[unsafe(method_family = none)]
132 pub unsafe fn setVolume(&self, volume: c_float);
133
134 #[unsafe(method(isMuted))]
135 #[unsafe(method_family = none)]
136 pub unsafe fn isMuted(&self) -> bool;
137
138 /// Setter for [`isMuted`][Self::isMuted].
139 #[unsafe(method(setMuted:))]
140 #[unsafe(method_family = none)]
141 pub unsafe fn setMuted(&self, muted: bool);
142 );
143}
144
145extern "C" {
146 /// A notification that fires whenever the receiver's enqueued media data has been flushed for a reason other than a call to the -flush method.
147 ///
148 /// The renderer may flush enqueued media data when the user routes playback to a new destination. The renderer may also flush enqueued media data when the playback rate of the attached AVSampleBufferRenderSynchronizer is changed (e.g. 1.0 -> 2.0 or 1.0 -> 0.0 -> 2.0), however no flush will occur for normal pauses (non-zero -> 0.0) and resumes (0.0 -> same non-zero rate as before).
149 ///
150 /// When an automatic flush occurs, the attached render synchronizer's timebase will remain running at its current rate. It is typically best to respond to this notification by enqueueing media data with timestamps starting at the timebase's current time. To the listener, this will sound similar to muting the audio for a short period of time. If it is more desirable to ensure that all audio is played than to keep the timeline moving, you may also stop the synchronizer, set the synchronizer's current time to the value of AVSampleBufferAudioRendererFlushTimeKey, start reenqueueing sample buffers with timestamps starting at that time, and restart the synchronizer. To the listener, this will sound similar to pausing the audio for a short period of time.
151 ///
152 /// This notification is delivered on an arbitrary thread. If sample buffers are being enqueued with the renderer concurrently with the receipt of this notification, it is possible that one or more sample buffers will remain enqueued in the renderer. This is generally undesirable, because the sample buffers that remain will likely have timestamps far ahead of the timebase's current time and so won't be rendered for some time. The best practice is to invoke the -flush method, in a manner that is serialized with enqueueing sample buffers, after receiving this notification and before resuming the enqueueing of sample buffers.
153 ///
154 /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendererwasflushedautomaticallynotification?language=objc)
155 pub static AVSampleBufferAudioRendererWasFlushedAutomaticallyNotification:
156 &'static NSNotificationName;
157}
158
159extern "C" {
160 /// A notification that indicates the hardware configuration does not match the enqueued data format.
161 ///
162 /// The output configuration of the playback hardware might change during the playback session if other clients play content with different format. In such cases, if the media content format does not match the hardware configuration it would produce suboptimal rendering of the enqueued media data. When the framework detects such mismatch it will issue this notification, so the client can flush the renderer and re-enqueue the sample buffers from the current media playhead, which will configure the hardware based on the format of newly enqueued sample buffers.
163 ///
164 /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendereroutputconfigurationdidchangenotification?language=objc)
165 pub static AVSampleBufferAudioRendererOutputConfigurationDidChangeNotification:
166 &'static NSNotificationName;
167}
168
169extern "C" {
170 /// The presentation timestamp of the first enqueued sample that was flushed.
171 ///
172 /// The value of this key is an NSValue wrapping a CMTime.
173 ///
174 /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avsamplebufferaudiorendererflushtimekey?language=objc)
175 pub static AVSampleBufferAudioRendererFlushTimeKey: &'static NSString;
176}
177
178/// AVSampleBufferAudioRendererQueueManagement.
179impl AVSampleBufferAudioRenderer {
180 extern_methods!(
181 #[cfg(all(feature = "block2", feature = "objc2-core-media"))]
182 /// Flushes enqueued sample buffers with presentation time stamps later than or equal to the specified time.
183 ///
184 /// Parameter `completionHandler`: A block that is invoked, possibly asynchronously, after the flush operation completes or fails.
185 ///
186 /// This method can be used to replace media data scheduled to be rendered in the future, without interrupting playback. One example of this is when the data that has already been enqueued is from a sequence of two songs and the second song is swapped for a new song. In this case, this method would be called with the time stamp of the first sample buffer from the second song. After the completion handler is executed with a YES parameter, media data may again be enqueued with timestamps at the specified time.
187 ///
188 /// If NO is provided to the completion handler, the flush did not succeed and the set of enqueued sample buffers remains unchanged. A flush can fail becuse the source time was too close to (or earlier than) the current time or because the current configuration of the receiver does not support flushing at a particular time. In these cases, the caller can choose to flush all enqueued media data by invoking the -flush method.
189 ///
190 /// # Safety
191 ///
192 /// `completion_handler` block must be sendable.
193 #[unsafe(method(flushFromSourceTime:completionHandler:))]
194 #[unsafe(method_family = none)]
195 pub unsafe fn flushFromSourceTime_completionHandler(
196 &self,
197 time: CMTime,
198 completion_handler: &block2::DynBlock<dyn Fn(Bool)>,
199 );
200 );
201}