objc2_av_foundation/generated/AVCaptureVideoDataOutput.rs
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
#[cfg(feature = "dispatch2")]
use dispatch2::*;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// AVCaptureVideoDataOutput is a concrete subclass of AVCaptureOutput that can be used to process uncompressed or compressed frames from the video being captured.
    ///
    /// Instances of AVCaptureVideoDataOutput produce video frames suitable for processing using other media APIs. Applications can access the frames with the captureOutput:didOutputSampleBuffer:fromConnection: delegate method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avcapturevideodataoutput?language=objc)
    #[unsafe(super(AVCaptureOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVCaptureOutputBase")]
    pub struct AVCaptureVideoDataOutput;
);

#[cfg(feature = "AVCaptureOutputBase")]
extern_conformance!(
    unsafe impl NSObjectProtocol for AVCaptureVideoDataOutput {}
);

#[cfg(feature = "AVCaptureOutputBase")]
impl AVCaptureVideoDataOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "dispatch2")]
        /// Sets the receiver's delegate, which will accept captured buffers, and the dispatch queue on which the delegate will be called.
        ///
        /// Parameter `sampleBufferDelegate`: An object conforming to the AVCaptureVideoDataOutputSampleBufferDelegate protocol that will receive sample buffers after they are captured.
        ///
        /// Parameter `sampleBufferCallbackQueue`: A dispatch queue on which all sample buffer delegate methods will be called.
        ///
        /// When a new video sample buffer is captured, it will be vended to the sample buffer delegate using the captureOutput:didOutputSampleBuffer:fromConnection: delegate method. All delegate methods will be called on the specified dispatch queue. If the queue is blocked when new frames are captured, those frames will be automatically dropped at a time determined by the value of the alwaysDiscardsLateVideoFrames property. This allows clients to process existing frames on the same queue without having to manage the potential memory usage increases that would otherwise occur when that processing is unable to keep up with the rate of incoming frames. If their frame processing is consistently unable to keep up with the rate of incoming frames, clients should consider using the minFrameDuration property, which will generally yield better performance characteristics and more consistent frame rates than frame dropping alone.
        ///
        /// Clients that need to minimize the chances of frames being dropped should specify a queue on which a sufficiently small amount of processing is being done outside of receiving sample buffers. However, if such clients migrate extra processing to another queue, they are responsible for ensuring that memory usage does not grow without bound from frames that have not been processed.
        ///
        /// A serial dispatch queue must be used to guarantee that video frames will be delivered in order. The sampleBufferCallbackQueue parameter may not be NULL, except when setting the sampleBufferDelegate to nil; otherwise -setSampleBufferDelegate:queue: throws an NSInvalidArgumentException.
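        ///
        /// A minimal sketch of wiring up a delegate on a serial queue (hedged: `output` is an existing `&AVCaptureVideoDataOutput`, `FrameDelegate` is a hypothetical type conforming to the delegate protocol, and the exact `dispatch2` queue constructor is an assumption for illustration):
        ///
        /// ```ignore
        /// // `FrameDelegate` is a hypothetical class conforming to
        /// // AVCaptureVideoDataOutputSampleBufferDelegate (e.g. built with `define_class!`).
        /// let delegate = FrameDelegate::new();
        /// // A *serial* queue guarantees in-order frame delivery.
        /// let queue = DispatchQueue::new("com.example.video-frames", None);
        /// unsafe {
        ///     output.setSampleBufferDelegate_queue(
        ///         Some(ProtocolObject::from_ref(&*delegate)),
        ///         Some(&queue),
        ///     );
        /// }
        /// ```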
        ///
        /// # Safety
        ///
        /// `sample_buffer_callback_queue` possibly has additional threading requirements.
        #[unsafe(method(setSampleBufferDelegate:queue:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setSampleBufferDelegate_queue(
            &self,
            sample_buffer_delegate: Option<
                &ProtocolObject<dyn AVCaptureVideoDataOutputSampleBufferDelegate>,
            >,
            sample_buffer_callback_queue: Option<&DispatchQueue>,
        );

        /// The receiver's delegate.
        ///
        /// The value of this property is an object conforming to the AVCaptureVideoDataOutputSampleBufferDelegate protocol that will receive sample buffers after they are captured. The delegate is set using the setSampleBufferDelegate:queue: method.
        #[unsafe(method(sampleBufferDelegate))]
        #[unsafe(method_family = none)]
        pub unsafe fn sampleBufferDelegate(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVCaptureVideoDataOutputSampleBufferDelegate>>>;

        #[cfg(feature = "dispatch2")]
        /// The dispatch queue on which all sample buffer delegate methods will be called.
        ///
        /// The value of this property is a dispatch_queue_t. The queue is set using the setSampleBufferDelegate:queue: method.
        #[unsafe(method(sampleBufferCallbackQueue))]
        #[unsafe(method_family = none)]
        pub unsafe fn sampleBufferCallbackQueue(&self) -> Option<Retained<DispatchQueue>>;

        /// Specifies the settings used to decode or re-encode video before it is output by the receiver.
        ///
        /// See AVVideoSettings.h for more information on how to construct a video settings dictionary. To receive samples in their device native format, set this property to an empty dictionary (i.e. [NSDictionary dictionary]). To receive samples in a default uncompressed format, set this property to nil. Note that after this property is set to nil, subsequent querying of this property will yield a non-nil dictionary reflecting the settings used by the AVCaptureSession's current sessionPreset.
        ///
        /// On iOS versions prior to iOS 16.0, the only supported key is kCVPixelBufferPixelFormatTypeKey. Use -availableVideoCVPixelFormatTypes for the list of supported pixel formats. For apps linked on or after iOS 16.0, kCVPixelBufferPixelFormatTypeKey, kCVPixelBufferWidthKey, and kCVPixelBufferHeightKey are supported. The width and height must match the videoOrientation specified on the output's AVCaptureConnection or an NSInvalidArgumentException is thrown. The aspect ratio of width and height must match the aspect ratio of the source's activeFormat (corrected for the connection's videoOrientation) or an NSInvalidArgumentException is thrown. If width or height exceeds the source's activeFormat's width or height, an NSInvalidArgumentException is thrown. Changing width and height when deliversPreviewSizedOutputBuffers is set to YES is not supported and throws an NSInvalidArgumentException.
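        ///
        /// A brief sketch of the nil/empty-dictionary distinction described above (assuming an existing `output: &AVCaptureVideoDataOutput`):
        ///
        /// ```ignore
        /// unsafe {
        ///     // Empty dictionary: deliver samples in the device-native format.
        ///     output.setVideoSettings(Some(&NSDictionary::new()));
        ///     // nil: deliver samples in a default uncompressed format; reading the
        ///     // property afterwards yields the concrete settings that were chosen.
        ///     output.setVideoSettings(None);
        ///     let effective = output.videoSettings();
        /// }
        /// ```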
        #[unsafe(method(videoSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoSettings(&self) -> Retained<NSDictionary<NSString, AnyObject>>;

        /// Setter for [`videoSettings`][Self::videoSettings].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        ///
        /// # Safety
        ///
        /// `video_settings` generic should be of the correct type.
        #[unsafe(method(setVideoSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVideoSettings(
            &self,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        );

        #[cfg(feature = "AVMediaFormat")]
        /// Specifies the recommended settings for use with an AVAssetWriterInput.
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, such as,
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
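        ///
        /// A sketch of the intended flow (hedged: the binding names for `AVAssetWriterInput` and the `AVFileTypeQuickTimeMovie`/`AVMediaTypeVideo` statics are assumptions for illustration, behind their own crate features):
        ///
        /// ```ignore
        /// unsafe {
        ///     // Configure the session first, then query.
        ///     if let Some(settings) = output
        ///         .recommendedVideoSettingsForAssetWriterWithOutputFileType(AVFileTypeQuickTimeMovie)
        ///     {
        ///         // Feed the dictionary to an AVAssetWriterInput as its outputSettings.
        ///         let input = AVAssetWriterInput::assetWriterInputWithMediaType_outputSettings(
        ///             AVMediaTypeVideo,
        ///             Some(&settings),
        ///         );
        ///     }
        /// }
        /// ```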
        #[unsafe(method(recommendedVideoSettingsForAssetWriterWithOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForAssetWriterWithOutputFileType(
            &self,
            output_file_type: &AVFileType,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the available video codecs for use with AVAssetWriter and a given file type.
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: An array of video codecs; see AVVideoSettings.h for a full list.
        ///
        /// This method allows you to query the available video codecs that may be used when specifying an AVVideoCodecKey in -recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:. When specifying an outputFileType of AVFileTypeQuickTimeMovie, video codecs are ordered identically to -[AVCaptureMovieFileOutput availableVideoCodecTypes].
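        ///
        /// For example (a sketch; `AVFileTypeQuickTimeMovie` is the assumed static for the QuickTime movie UTI):
        ///
        /// ```ignore
        /// let codecs = unsafe {
        ///     output.availableVideoCodecTypesForAssetWriterWithOutputFileType(AVFileTypeQuickTimeMovie)
        /// };
        /// for codec in codecs.iter() {
        ///     println!("available codec: {codec:?}");
        /// }
        /// ```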
        #[unsafe(method(availableVideoCodecTypesForAssetWriterWithOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCodecTypesForAssetWriterWithOutputFileType(
            &self,
            output_file_type: &AVFileType,
        ) -> Retained<NSArray<AVVideoCodecType>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the recommended settings for a particular video codec type, to be used with an AVAssetWriterInput.
        ///
        /// Parameter `videoCodecType`: Specifies the desired AVVideoCodecKey to be used for compression (see AVVideoSettings.h).
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, such as,
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// The videoCodecType string provided must be present in the availableVideoCodecTypesForAssetWriterWithOutputFileType: array, or an NSInvalidArgumentException is thrown.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
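        ///
        /// A sketch pairing the availability check with the query (assumed statics: `AVVideoCodecTypeHEVC`, `AVFileTypeQuickTimeMovie`):
        ///
        /// ```ignore
        /// unsafe {
        ///     let codecs = output
        ///         .availableVideoCodecTypesForAssetWriterWithOutputFileType(AVFileTypeQuickTimeMovie);
        ///     // Guard against the NSInvalidArgumentException described above.
        ///     if codecs.containsObject(AVVideoCodecTypeHEVC) {
        ///         let settings = output
        ///             .recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType(
        ///                 AVVideoCodecTypeHEVC,
        ///                 AVFileTypeQuickTimeMovie,
        ///             );
        ///     }
        /// }
        /// ```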
        #[unsafe(method(recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType(
            &self,
            video_codec_type: &AVVideoCodecType,
            output_file_type: &AVFileType,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the recommended settings for a particular video codec type with output file URL, to be used with an AVAssetWriterInput.
        ///
        /// Parameter `videoCodecType`: Specifies the desired AVVideoCodecKey to be used for compression (see AVVideoSettings.h).
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Parameter `outputFileURL`: Specifies the output URL of the file to be written.
191 ///
192 /// If you wish to capture onto an external storage device get an externalStorageDevice of type AVExternalStorageDevice (as defined in AVExternalStorageDevice.h):
193 /// [AVExternalStorageDeviceDiscoverySession sharedSession] externalStorageDevices]
194 ///
195 /// Then use [externalStorageDevice nextAvailableURLsWithPathExtensions:pathExtensions error:
196 /// &error
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, such as,
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// The videoCodecType string provided must be present in the availableVideoCodecTypesForAssetWriterWithOutputFileType: array, or an NSInvalidArgumentException is thrown.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
        #[unsafe(method(recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:outputFileURL:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType_outputFileURL(
            &self,
            video_codec_type: &AVVideoCodecType,
            output_file_type: &AVFileType,
            output_file_url: Option<&NSURL>,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(all(
            feature = "AVMediaFormat",
            feature = "AVMetadataItem",
            feature = "AVVideoSettings"
        ))]
        /// Recommends movie-level metadata for a particular video codec type and output file type, to be used with an asset writer input.
        ///
        /// - Parameter videoCodecType: The desired ``AVVideoCodecKey`` to be used for compression (see <doc://com.apple.documentation/documentation/avfoundation/video-settings>).
        /// - Parameter outputFileType: Specifies the UTI of the file type to be written (see <doc://com.apple.documentation/documentation/avfoundation/avfiletype>).
        /// - Returns: A fully populated array of ``AVMetadataItem`` objects compatible with ``AVAssetWriter``.
        ///
        /// The value of this property is an array of ``AVMetadataItem`` objects representing the collection of top-level metadata to be written in each output file. This array is suitable to use as the ``AVAssetWriter/metadata`` property before you have called ``AVAssetWriter/startWriting``. For more details see <doc://com.apple.documentation/documentation/avfoundation/avassetwriter/startwriting()>.
        ///
        /// The ``videoCodecType`` string you provide must be present in the ``availableVideoCodecTypesForAssetWriterWithOutputFileType:`` array, or an `NSInvalidArgumentException` is thrown.
        ///
        /// For clients writing files using a ProRes Raw codec type, white balance must be locked (call ``AVCaptureDevice/setWhiteBalanceModeLockedWithDeviceWhiteBalanceGains:completionHandler:``) before querying this property, or an `NSInvalidArgumentException` is thrown.
        ///
        /// - Note: The array of metadata is dependent on the current configuration of the receiver's ``AVCaptureSession`` and its inputs. The array may change when the session's configuration changes. As such, you should configure and start your session first, then query this method.
        #[unsafe(method(recommendedMovieMetadataForVideoCodecType:assetWriterOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedMovieMetadataForVideoCodecType_assetWriterOutputFileType(
            &self,
            video_codec_type: &AVVideoCodecType,
            output_file_type: &AVFileType,
        ) -> Option<Retained<NSArray<AVMetadataItem>>>;

        #[cfg(feature = "objc2-core-media")]
        /// Indicates the recommended media timescale for the video track.
        ///
        /// - Returns: The recommended media timescale based on the active capture session's inputs. It is never less than 600. It may or may not be a multiple of 600.
        #[unsafe(method(recommendedMediaTimeScaleForAssetWriter))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedMediaTimeScaleForAssetWriter(&self) -> CMTimeScale;

        /// Indicates the supported video pixel formats that can be specified in videoSettings.
        ///
        /// The value of this property is an NSArray of NSNumbers that can be used as values for the kCVPixelBufferPixelFormatTypeKey in the receiver's videoSettings property. The formats are listed in an unspecified order. This list may change if the activeFormat of the AVCaptureDevice connected to the receiver changes.
        #[unsafe(method(availableVideoCVPixelFormatTypes))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCVPixelFormatTypes(&self) -> Retained<NSArray<NSNumber>>;

        #[cfg(feature = "AVVideoSettings")]
        /// Indicates the supported video codec formats that can be specified in videoSettings.
        ///
        /// The value of this property is an NSArray of AVVideoCodecTypes that can be used as values for the AVVideoCodecKey in the receiver's videoSettings property.
        #[unsafe(method(availableVideoCodecTypes))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCodecTypes(&self) -> Retained<NSArray<AVVideoCodecType>>;

        #[cfg(feature = "objc2-core-media")]
        /// Specifies the minimum time interval between which the receiver should output consecutive video frames.
        ///
        /// The value of this property is a CMTime specifying the minimum duration of each video frame output by the receiver, placing a lower bound on the amount of time that should separate consecutive frames. This is equivalent to the inverse of the maximum frame rate. A value of kCMTimeZero or kCMTimeInvalid indicates an unlimited maximum frame rate. The default value is kCMTimeInvalid. As of iOS 5.0, minFrameDuration is deprecated. Use AVCaptureConnection's videoMinFrameDuration property instead.
        #[deprecated = "Use AVCaptureConnection's videoMinFrameDuration property instead."]
        #[unsafe(method(minFrameDuration))]
        #[unsafe(method_family = none)]
        pub unsafe fn minFrameDuration(&self) -> CMTime;

        #[cfg(feature = "objc2-core-media")]
        /// Setter for [`minFrameDuration`][Self::minFrameDuration].
        #[deprecated = "Use AVCaptureConnection's videoMinFrameDuration property instead."]
        #[unsafe(method(setMinFrameDuration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMinFrameDuration(&self, min_frame_duration: CMTime);

        /// Specifies whether the receiver should always discard any video frame that is not processed before the next frame is captured.
        ///
        /// When the value of this property is YES, the receiver will immediately discard frames that are captured while the dispatch queue handling existing frames is blocked in the captureOutput:didOutputSampleBuffer:fromConnection: delegate method. When the value of this property is NO, delegates will be allowed more time to process old frames before new frames are discarded, but application memory usage may increase significantly as a result. The default value is YES.
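        ///
        /// For example, a recording pipeline that prefers completeness over latency might opt out of early discarding (a sketch; this trades memory for fewer dropped frames):
        ///
        /// ```ignore
        /// unsafe { output.setAlwaysDiscardsLateVideoFrames(false) };
        /// ```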
        #[unsafe(method(alwaysDiscardsLateVideoFrames))]
        #[unsafe(method_family = none)]
        pub unsafe fn alwaysDiscardsLateVideoFrames(&self) -> bool;

        /// Setter for [`alwaysDiscardsLateVideoFrames`][Self::alwaysDiscardsLateVideoFrames].
        #[unsafe(method(setAlwaysDiscardsLateVideoFrames:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAlwaysDiscardsLateVideoFrames(
            &self,
            always_discards_late_video_frames: bool,
        );

        /// Indicates whether the receiver automatically configures the size of output buffers.
        ///
        /// Default value is YES. In most configurations, AVCaptureVideoDataOutput delivers full-resolution buffers, that is, buffers with the same dimensions as the source AVCaptureDevice's activeFormat's videoDimensions. When this property is set to YES, the receiver is free to configure the dimensions of the buffers delivered to -captureOutput:didOutputSampleBuffer:fromConnection:, such that they are a smaller preview size (roughly the size of the screen). For instance, when the AVCaptureSession's sessionPreset is set to AVCaptureSessionPresetPhoto, it is assumed that video data output buffers are being delivered as a preview proxy. Likewise, if an AVCapturePhotoOutput is present in the session with livePhotoCaptureEnabled, it is assumed that video data output is being used for photo preview, and thus preview-sized buffers are a better choice than full-res buffers. You can query deliversPreviewSizedOutputBuffers to find out whether automatic configuration of output buffer dimensions is currently downscaling buffers to a preview size. You can also query the videoSettings property to find out the exact width and height being delivered. If you wish to manually set deliversPreviewSizedOutputBuffers, you must first set automaticallyConfiguresOutputBufferDimensions to NO.
        #[unsafe(method(automaticallyConfiguresOutputBufferDimensions))]
        #[unsafe(method_family = none)]
        pub unsafe fn automaticallyConfiguresOutputBufferDimensions(&self) -> bool;

        /// Setter for [`automaticallyConfiguresOutputBufferDimensions`][Self::automaticallyConfiguresOutputBufferDimensions].
        #[unsafe(method(setAutomaticallyConfiguresOutputBufferDimensions:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAutomaticallyConfiguresOutputBufferDimensions(
            &self,
            automatically_configures_output_buffer_dimensions: bool,
        );

        /// Indicates whether the receiver is currently configured to deliver preview sized buffers.
        ///
        /// If you wish to manually set deliversPreviewSizedOutputBuffers, you must first set automaticallyConfiguresOutputBufferDimensions to NO. When deliversPreviewSizedOutputBuffers is set to YES, auto focus, exposure, and white balance changes are quicker. AVCaptureVideoDataOutput assumes that the buffers are being used for on-screen preview rather than recording.
        ///
        /// When AVCaptureDevice.activeFormat supports ProRes Raw video, setting deliversPreviewSizedOutputBuffers to YES delivers buffers in a 422 format that can be used for proxy video recording.
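        ///
        /// A sketch of the ordering this requires (the automatic-configuration property must be disabled first, per the description above):
        ///
        /// ```ignore
        /// unsafe {
        ///     output.setAutomaticallyConfiguresOutputBufferDimensions(false);
        ///     output.setDeliversPreviewSizedOutputBuffers(true);
        /// }
        /// ```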
        #[unsafe(method(deliversPreviewSizedOutputBuffers))]
        #[unsafe(method_family = none)]
        pub unsafe fn deliversPreviewSizedOutputBuffers(&self) -> bool;

        /// Setter for [`deliversPreviewSizedOutputBuffers`][Self::deliversPreviewSizedOutputBuffers].
        #[unsafe(method(setDeliversPreviewSizedOutputBuffers:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setDeliversPreviewSizedOutputBuffers(
            &self,
            delivers_preview_sized_output_buffers: bool,
        );

        /// Indicates whether the receiver should prepare the cellular radio for imminent network activity.
        ///
        /// Apps that scan video data output buffers for information that will result in network activity (such as detecting a QR code containing a URL) should set this property to `true` to allow the cellular radio to prepare for an imminent network request. Enabling this property requires a lengthy reconfiguration of the capture render pipeline, so you should set this property to `true` before calling ``AVCaptureSession/startRunning``.
        ///
        /// Using this API requires your app to adopt the entitlement `com.apple.developer.avfoundation.video-data-output-prepares-cellular-radio-for-machine-readable-code-scanning`.
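        ///
        /// A sketch of the ordering described above (assuming a configured `session: &AVCaptureSession` that contains this output):
        ///
        /// ```ignore
        /// unsafe {
        ///     // Set before startRunning; enabling this triggers a lengthy
        ///     // reconfiguration of the capture render pipeline.
        ///     output.setPreparesCellularRadioForNetworkConnection(true);
        ///     session.startRunning();
        /// }
        /// ```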
        #[unsafe(method(preparesCellularRadioForNetworkConnection))]
        #[unsafe(method_family = none)]
        pub unsafe fn preparesCellularRadioForNetworkConnection(&self) -> bool;

        /// Setter for [`preparesCellularRadioForNetworkConnection`][Self::preparesCellularRadioForNetworkConnection].
        #[unsafe(method(setPreparesCellularRadioForNetworkConnection:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPreparesCellularRadioForNetworkConnection(
            &self,
            prepares_cellular_radio_for_network_connection: bool,
        );

        /// Indicates whether the receiver should preserve dynamic HDR metadata as an attachment on the output sample buffer's underlying pixel buffer.
        ///
        /// Set this property to `true` if you wish to use ``AVCaptureVideoDataOutput`` with ``AVAssetWriter`` to record HDR movies. You must also set ``kVTCompressionPropertyKey_PreserveDynamicHDRMetadata`` to `true` in the compression settings you pass to your ``AVAssetWriterInput``. These compression settings are represented under the ``AVVideoCompressionPropertiesKey`` sub-dictionary of your top-level AVVideoSettings (see <doc://com.apple.documentation/documentation/avfoundation/video-settings>). When you set this key to `true`, performance improves, as the encoder is able to skip HDR metadata calculation for every frame. The default value is `false`.
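        ///
        /// A sketch of the two halves of that configuration (the VideoToolbox key is referenced by name in a comment; it lives in the VideoToolbox bindings, not this crate):
        ///
        /// ```ignore
        /// unsafe { output.setPreservesDynamicHDRMetadata(true) };
        /// // Also set kVTCompressionPropertyKey_PreserveDynamicHDRMetadata to true inside
        /// // the AVVideoCompressionPropertiesKey sub-dictionary of the outputSettings
        /// // passed to the AVAssetWriterInput.
        /// ```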
        #[unsafe(method(preservesDynamicHDRMetadata))]
        #[unsafe(method_family = none)]
        pub unsafe fn preservesDynamicHDRMetadata(&self) -> bool;

        /// Setter for [`preservesDynamicHDRMetadata`][Self::preservesDynamicHDRMetadata].
        #[unsafe(method(setPreservesDynamicHDRMetadata:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPreservesDynamicHDRMetadata(&self, preserves_dynamic_hdr_metadata: bool);
    );
}

extern_protocol!(
    /// Defines an interface for delegates of AVCaptureVideoDataOutput to receive captured video sample buffers and be notified of late sample buffers that were dropped.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avcapturevideodataoutputsamplebufferdelegate?language=objc)
    pub unsafe trait AVCaptureVideoDataOutputSampleBufferDelegate: NSObjectProtocol {
        #[cfg(all(
            feature = "AVCaptureOutputBase",
            feature = "AVCaptureSession",
            feature = "objc2-core-media"
        ))]
        /// Called whenever an AVCaptureVideoDataOutput instance outputs a new video frame.
        ///
        /// Parameter `output`: The AVCaptureVideoDataOutput instance that output the frame.
        ///
        /// Parameter `sampleBuffer`: A CMSampleBuffer object containing the video frame data and additional information about the frame, such as its format and presentation time.
        ///
        /// Parameter `connection`: The AVCaptureConnection from which the video was received.
        ///
        /// Delegates receive this message whenever the output captures and outputs a new video frame, decoding or re-encoding it as specified by its videoSettings property. Delegates can use the provided video frame in conjunction with other APIs for further processing. This method will be called on the dispatch queue specified by the output's sampleBufferCallbackQueue property. This method is called periodically, so it must be efficient to prevent capture performance problems, including dropped frames.
        ///
        /// Clients that need to reference the CMSampleBuffer object outside of the scope of this method must CFRetain it and then CFRelease it when they are finished with it.
        ///
        /// Note that to maintain optimal performance, some sample buffers directly reference pools of memory that may need to be reused by the device system and other capture inputs. This is frequently the case for uncompressed device native capture where memory blocks are copied as little as possible. If multiple sample buffers reference such pools of memory for too long, inputs will no longer be able to copy new samples into memory and those samples will be dropped. If your application is causing samples to be dropped by retaining the provided CMSampleBuffer objects for too long, but it needs access to the sample data for a long period of time, consider copying the data into a new buffer and then calling CFRelease on the sample buffer if it was previously retained so that the memory it references can be reused.
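        ///
        /// A condensed sketch of a delegate class (hedged: this follows `objc2`'s documented `define_class!` pattern, and `FrameDelegate` is an illustrative name; keep the method body fast and avoid holding on to buffers):
        ///
        /// ```ignore
        /// define_class!(
        ///     #[unsafe(super(NSObject))]
        ///     #[name = "FrameDelegate"]
        ///     struct FrameDelegate;
        ///
        ///     unsafe impl NSObjectProtocol for FrameDelegate {}
        ///
        ///     unsafe impl AVCaptureVideoDataOutputSampleBufferDelegate for FrameDelegate {
        ///         #[unsafe(method(captureOutput:didOutputSampleBuffer:fromConnection:))]
        ///         fn did_output(
        ///             &self,
        ///             _output: &AVCaptureOutput,
        ///             sample_buffer: &CMSampleBuffer,
        ///             _connection: &AVCaptureConnection,
        ///         ) {
        ///             // Runs on the sampleBufferCallbackQueue. Copy out what you need
        ///             // and return quickly; retaining many buffers can starve the
        ///             // capture memory pool and cause drops.
        ///         }
        ///     }
        /// );
        /// ```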
        #[optional]
        #[unsafe(method(captureOutput:didOutputSampleBuffer:fromConnection:))]
        #[unsafe(method_family = none)]
        unsafe fn captureOutput_didOutputSampleBuffer_fromConnection(
            &self,
            output: &AVCaptureOutput,
            sample_buffer: &CMSampleBuffer,
            connection: &AVCaptureConnection,
        );

        #[cfg(all(
            feature = "AVCaptureOutputBase",
            feature = "AVCaptureSession",
            feature = "objc2-core-media"
        ))]
        /// Called once for each frame that is discarded.
        ///
        /// Parameter `output`: The AVCaptureVideoDataOutput instance that dropped the frame.
        ///
        /// Parameter `sampleBuffer`: A CMSampleBuffer object containing information about the dropped frame, such as its format and presentation time. This sample buffer will contain none of the original video data.
        ///
        /// Parameter `connection`: The AVCaptureConnection from which the dropped video frame was received.
        ///
        /// Delegates receive this message whenever a video frame is dropped. This method is called once for each dropped frame. The CMSampleBuffer object passed to this delegate method will contain metadata about the dropped video frame, such as its duration and presentation time stamp, but will contain no actual video data. On iOS, included in the sample buffer attachments is the kCMSampleBufferAttachmentKey_DroppedFrameReason attachment, which indicates why the frame was dropped. This method will be called on the dispatch queue specified by the output's sampleBufferCallbackQueue property. Because this method will be called on the same dispatch queue that is responsible for outputting video frames, it must be efficient to prevent further capture performance problems, such as additional dropped video frames.
        #[optional]
        #[unsafe(method(captureOutput:didDropSampleBuffer:fromConnection:))]
        #[unsafe(method_family = none)]
        unsafe fn captureOutput_didDropSampleBuffer_fromConnection(
            &self,
            output: &AVCaptureOutput,
            sample_buffer: &CMSampleBuffer,
            connection: &AVCaptureConnection,
        );
    }
);