objc2_av_foundation/generated/AVCaptureVideoDataOutput.rs

//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// AVCaptureVideoDataOutput is a concrete subclass of AVCaptureOutput that can be used to process uncompressed or compressed frames from the video being captured.
    ///
    ///
    /// Instances of AVCaptureVideoDataOutput produce video frames suitable for processing using other media APIs. Applications can access the frames with the captureOutput:didOutputSampleBuffer:fromConnection: delegate method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avcapturevideodataoutput?language=objc)
    #[unsafe(super(AVCaptureOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "AVCaptureOutputBase")]
    pub struct AVCaptureVideoDataOutput;
);

#[cfg(feature = "AVCaptureOutputBase")]
unsafe impl NSObjectProtocol for AVCaptureVideoDataOutput {}

#[cfg(feature = "AVCaptureOutputBase")]
impl AVCaptureVideoDataOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// The receiver's delegate.
        ///
        ///
        /// The value of this property is an object conforming to the AVCaptureVideoDataOutputSampleBufferDelegate protocol that will receive sample buffers after they are captured. The delegate is set using the setSampleBufferDelegate:queue: method.
        #[unsafe(method(sampleBufferDelegate))]
        #[unsafe(method_family = none)]
        pub unsafe fn sampleBufferDelegate(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVCaptureVideoDataOutputSampleBufferDelegate>>>;

        /// Specifies the settings used to decode or re-encode video before it is output by the receiver.
        ///
        ///
        /// See AVVideoSettings.h for more information on how to construct a video settings dictionary. To receive samples in their device native format, set this property to an empty dictionary (i.e. [NSDictionary dictionary]). To receive samples in a default uncompressed format, set this property to nil. Note that after this property is set to nil, subsequent querying of this property will yield a non-nil dictionary reflecting the settings used by the AVCaptureSession's current sessionPreset.
        ///
        /// On iOS versions prior to iOS 16.0, the only supported key is kCVPixelBufferPixelFormatTypeKey. Use -availableVideoCVPixelFormatTypes for the list of supported pixel formats. For apps linked on or after iOS 16.0, kCVPixelBufferPixelFormatTypeKey, kCVPixelBufferWidthKey, and kCVPixelBufferHeightKey are supported. The width and height must match the videoOrientation specified on the output's AVCaptureConnection or an NSInvalidArgumentException is thrown. The aspect ratio of width and height must match the aspect ratio of the source's activeFormat (corrected for the connection's videoOrientation) or an NSInvalidArgumentException is thrown. If width or height exceeds the source's activeFormat's width or height, an NSInvalidArgumentException is thrown. Changing width and height when deliversPreviewSizedOutputBuffers is set to YES is not supported and throws an NSInvalidArgumentException.
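        ///
        /// # Example
        ///
        /// A minimal sketch of the nil / empty-dictionary semantics described above; an already-created `AVCaptureVideoDataOutput` named `output` is assumed:
        ///
        /// ```ignore
        /// use objc2::rc::Retained;
        /// use objc2::runtime::AnyObject;
        /// use objc2_foundation::{NSDictionary, NSString};
        ///
        /// // Device-native format: an *empty* settings dictionary.
        /// let empty: Retained<NSDictionary<NSString, AnyObject>> = NSDictionary::new();
        /// unsafe { output.setVideoSettings(Some(&empty)) };
        ///
        /// // Default uncompressed format: nil (i.e. `None`).
        /// unsafe { output.setVideoSettings(None) };
        ///
        /// // After setting nil, the getter reports a non-nil dictionary
        /// // reflecting the session's current sessionPreset.
        /// let current = unsafe { output.videoSettings() };
        /// ```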
        #[unsafe(method(videoSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoSettings(&self) -> Retained<NSDictionary<NSString, AnyObject>>;

        /// Setter for [`videoSettings`][Self::videoSettings].
        #[unsafe(method(setVideoSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVideoSettings(
            &self,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        );

        #[cfg(feature = "AVMediaFormat")]
        /// Specifies the recommended settings for use with an AVAssetWriterInput.
        ///
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, for example:
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in-depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
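        ///
        /// # Example
        ///
        /// A hedged sketch of querying writer-compatible settings for a QuickTime movie; `output` is assumed to belong to an already-configured AVCaptureSession, and `AVFileTypeQuickTimeMovie` is the file-type constant exported by this crate behind the "AVMediaFormat" feature:
        ///
        /// ```ignore
        /// let settings = unsafe {
        ///     output.recommendedVideoSettingsForAssetWriterWithOutputFileType(
        ///         AVFileTypeQuickTimeMovie,
        ///     )
        /// };
        /// // `None` means the session cannot recommend settings yet.
        /// if let Some(settings) = settings {
        ///     // Pass `settings` as the outputSettings of an AVAssetWriterInput.
        /// }
        /// ```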
        #[unsafe(method(recommendedVideoSettingsForAssetWriterWithOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForAssetWriterWithOutputFileType(
            &self,
            output_file_type: &AVFileType,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the available video codecs for use with AVAssetWriter and a given file type.
        ///
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: An array of video codecs; see AVVideoSettings.h for a full list.
        ///
        ///
        /// This method allows you to query the available video codecs that may be used when specifying an AVVideoCodecKey in -recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:. When specifying an outputFileType of AVFileTypeQuickTimeMovie, video codecs are ordered identically to -[AVCaptureMovieFileOutput availableVideoCodecTypes].
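        ///
        /// # Example
        ///
        /// A sketch of the two-step flow this method enables: list the codecs supported for a file type, then request settings for one of them. `output` and the use of objc2-foundation's `NSArray::firstObject` are assumptions of this sketch:
        ///
        /// ```ignore
        /// let codecs = unsafe {
        ///     output.availableVideoCodecTypesForAssetWriterWithOutputFileType(
        ///         AVFileTypeQuickTimeMovie,
        ///     )
        /// };
        /// if let Some(codec) = codecs.firstObject() {
        ///     // Passing a codec that is not in `codecs` would throw
        ///     // NSInvalidArgumentException, as documented below.
        ///     let settings = unsafe {
        ///         output.recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType(
        ///             &codec,
        ///             AVFileTypeQuickTimeMovie,
        ///         )
        ///     };
        /// }
        /// ```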
        #[unsafe(method(availableVideoCodecTypesForAssetWriterWithOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCodecTypesForAssetWriterWithOutputFileType(
            &self,
            output_file_type: &AVFileType,
        ) -> Retained<NSArray<AVVideoCodecType>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the recommended settings for a particular video codec type, to be used with an AVAssetWriterInput.
        ///
        ///
        /// Parameter `videoCodecType`: Specifies the desired AVVideoCodecKey to be used for compression (see AVVideoSettings.h).
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, for example:
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in-depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// The videoCodecType string provided must be present in the availableVideoCodecTypesForAssetWriterWithOutputFileType: array, or an NSInvalidArgumentException is thrown.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
        #[unsafe(method(recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType(
            &self,
            video_codec_type: &AVVideoCodecType,
            output_file_type: &AVFileType,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(all(feature = "AVMediaFormat", feature = "AVVideoSettings"))]
        /// Specifies the recommended settings for a particular video codec type with output file URL, to be used with an AVAssetWriterInput.
        ///
        ///
        /// Parameter `videoCodecType`: Specifies the desired AVVideoCodecKey to be used for compression (see AVVideoSettings.h).
        ///
        /// Parameter `outputFileType`: Specifies the UTI of the file type to be written (see AVMediaFormat.h for a list of file format UTIs).
        ///
        /// Parameter `outputFileURL`: Specifies the output URL of the file to be written.
        ///
        /// If you wish to capture onto an external storage device, get an externalStorageDevice of type AVExternalStorageDevice (as defined in AVExternalStorageDevice.h):
        ///
        /// [[AVExternalStorageDeviceDiscoverySession sharedSession] externalStorageDevices]
        ///
        /// Then use [externalStorageDevice nextAvailableURLsWithPathExtensions:pathExtensions error:&error] to get the output file URL.
        ///
        ///
        /// Returns: A fully populated dictionary of keys and values that are compatible with AVAssetWriter.
        ///
        ///
        /// The value of this property is an NSDictionary containing values for compression settings keys defined in AVVideoSettings.h. This dictionary is suitable for use as the "outputSettings" parameter when creating an AVAssetWriterInput, for example:
        ///
        /// [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings sourceFormatHint:hint];
        ///
        /// The dictionary returned contains all necessary keys and values needed by AVAssetWriter (see AVAssetWriterInput.h, -initWithMediaType:outputSettings: for a more in-depth discussion). For QuickTime movie and ISO file types, the recommended video settings will produce output comparable to that of AVCaptureMovieFileOutput.
        ///
        /// The videoCodecType string provided must be present in the availableVideoCodecTypesForAssetWriterWithOutputFileType: array, or an NSInvalidArgumentException is thrown.
        ///
        /// Note that the dictionary of settings is dependent on the current configuration of the receiver's AVCaptureSession and its inputs. The settings dictionary may change if the session's configuration changes. As such, you should configure your session first, then query the recommended video settings. As of iOS 8.3, movies produced with these settings successfully import into the iOS camera roll and sync to and from like devices via iTunes.
        #[unsafe(method(recommendedVideoSettingsForVideoCodecType:assetWriterOutputFileType:outputFileURL:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recommendedVideoSettingsForVideoCodecType_assetWriterOutputFileType_outputFileURL(
            &self,
            video_codec_type: &AVVideoCodecType,
            output_file_type: &AVFileType,
            output_file_url: Option<&NSURL>,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        /// Indicates the supported video pixel formats that can be specified in videoSettings.
        ///
        ///
        /// The value of this property is an NSArray of NSNumbers that can be used as values for the kCVPixelBufferPixelFormatTypeKey in the receiver's videoSettings property. The formats are listed in an unspecified order. This list may change if the activeFormat of the AVCaptureDevice connected to the receiver changes.
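        ///
        /// # Example
        ///
        /// A sketch of inspecting the supported formats; each NSNumber wraps a 32-bit FourCC code. `output`, `firstObject`, and NSNumber's `as_u32` from objc2-foundation are assumptions of this sketch:
        ///
        /// ```ignore
        /// let formats = unsafe { output.availableVideoCVPixelFormatTypes() };
        /// if let Some(fourcc) = formats.firstObject() {
        ///     // Pixel format types are FourCC codes packed into u32s,
        ///     // e.g. b"BGRA" for kCVPixelFormatType_32BGRA.
        ///     println!("supported pixel format: {:#010x}", fourcc.as_u32());
        /// }
        /// ```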
        #[unsafe(method(availableVideoCVPixelFormatTypes))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCVPixelFormatTypes(&self) -> Retained<NSArray<NSNumber>>;

        #[cfg(feature = "AVVideoSettings")]
        /// Indicates the supported video codec formats that can be specified in videoSettings.
        ///
        ///
        /// The value of this property is an NSArray of AVVideoCodecTypes that can be used as values for the AVVideoCodecKey in the receiver's videoSettings property.
        #[unsafe(method(availableVideoCodecTypes))]
        #[unsafe(method_family = none)]
        pub unsafe fn availableVideoCodecTypes(&self) -> Retained<NSArray<AVVideoCodecType>>;

        #[cfg(feature = "objc2-core-media")]
        /// Specifies the minimum time interval between which the receiver should output consecutive video frames.
        ///
        ///
        /// The value of this property is a CMTime specifying the minimum duration of each video frame output by the receiver, placing a lower bound on the amount of time that should separate consecutive frames. This is equivalent to the inverse of the maximum frame rate. A value of kCMTimeZero or kCMTimeInvalid indicates an unlimited maximum frame rate. The default value is kCMTimeInvalid. As of iOS 5.0, minFrameDuration is deprecated. Use AVCaptureConnection's videoMinFrameDuration property instead.
        #[deprecated = "Use AVCaptureConnection's videoMinFrameDuration property instead."]
        #[unsafe(method(minFrameDuration))]
        #[unsafe(method_family = none)]
        pub unsafe fn minFrameDuration(&self) -> CMTime;

        #[cfg(feature = "objc2-core-media")]
        /// Setter for [`minFrameDuration`][Self::minFrameDuration].
        #[deprecated = "Use AVCaptureConnection's videoMinFrameDuration property instead."]
        #[unsafe(method(setMinFrameDuration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMinFrameDuration(&self, min_frame_duration: CMTime);

        /// Specifies whether the receiver should always discard any video frame that is not processed before the next frame is captured.
        ///
        ///
        /// When the value of this property is YES, the receiver will immediately discard frames that are captured while the dispatch queue handling existing frames is blocked in the captureOutput:didOutputSampleBuffer:fromConnection: delegate method. When the value of this property is NO, delegates will be allowed more time to process old frames before new frames are discarded, but application memory usage may increase significantly as a result. The default value is YES.
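        ///
        /// # Example
        ///
        /// A sketch of the trade-off described above, trading memory for fewer dropped frames; `output` is an assumed AVCaptureVideoDataOutput:
        ///
        /// ```ignore
        /// // Give the delegate more time per frame; memory use may grow,
        /// // since late frames are buffered instead of discarded.
        /// unsafe { output.setAlwaysDiscardsLateVideoFrames(false) };
        /// assert!(!unsafe { output.alwaysDiscardsLateVideoFrames() });
        /// ```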
        #[unsafe(method(alwaysDiscardsLateVideoFrames))]
        #[unsafe(method_family = none)]
        pub unsafe fn alwaysDiscardsLateVideoFrames(&self) -> bool;

        /// Setter for [`alwaysDiscardsLateVideoFrames`][Self::alwaysDiscardsLateVideoFrames].
        #[unsafe(method(setAlwaysDiscardsLateVideoFrames:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAlwaysDiscardsLateVideoFrames(
            &self,
            always_discards_late_video_frames: bool,
        );

        /// Indicates whether the receiver automatically configures the size of output buffers.
        ///
        ///
        /// Default value is YES. In most configurations, AVCaptureVideoDataOutput delivers full-resolution buffers, that is, buffers with the same dimensions as the source AVCaptureDevice's activeFormat's videoDimensions. When this property is set to YES, the receiver is free to configure the dimensions of the buffers delivered to -captureOutput:didOutputSampleBuffer:fromConnection:, such that they are a smaller preview size (roughly the size of the screen). For instance, when the AVCaptureSession's sessionPreset is set to AVCaptureSessionPresetPhoto, it is assumed that video data output buffers are being delivered as a preview proxy. Likewise, if an AVCapturePhotoOutput is present in the session with livePhotoCaptureEnabled, it is assumed that video data output is being used for photo preview, and thus preview-sized buffers are a better choice than full-resolution buffers. You can query deliversPreviewSizedOutputBuffers to find out whether automatic configuration of output buffer dimensions is currently downscaling buffers to a preview size. You can also query the videoSettings property to find out the exact width and height being delivered. If you wish to manually set deliversPreviewSizedOutputBuffers, you must first set automaticallyConfiguresOutputBufferDimensions to NO.
        #[unsafe(method(automaticallyConfiguresOutputBufferDimensions))]
        #[unsafe(method_family = none)]
        pub unsafe fn automaticallyConfiguresOutputBufferDimensions(&self) -> bool;

        /// Setter for [`automaticallyConfiguresOutputBufferDimensions`][Self::automaticallyConfiguresOutputBufferDimensions].
        #[unsafe(method(setAutomaticallyConfiguresOutputBufferDimensions:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAutomaticallyConfiguresOutputBufferDimensions(
            &self,
            automatically_configures_output_buffer_dimensions: bool,
        );

        /// Indicates whether the receiver is currently configured to deliver preview sized buffers.
        ///
        ///
        /// If you wish to manually set deliversPreviewSizedOutputBuffers, you must first set automaticallyConfiguresOutputBufferDimensions to NO. When deliversPreviewSizedOutputBuffers is set to YES, auto focus, exposure, and white balance changes are quicker. AVCaptureVideoDataOutput assumes that the buffers are being used for on-screen preview rather than recording.
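        ///
        /// # Example
        ///
        /// A sketch of the required ordering described above (`output` as in the examples above): opt out of automatic buffer sizing before requesting preview-sized buffers manually.
        ///
        /// ```ignore
        /// unsafe {
        ///     // Must come first, or setting the property below is not allowed.
        ///     output.setAutomaticallyConfiguresOutputBufferDimensions(false);
        ///     output.setDeliversPreviewSizedOutputBuffers(true);
        /// }
        /// ```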
        #[unsafe(method(deliversPreviewSizedOutputBuffers))]
        #[unsafe(method_family = none)]
        pub unsafe fn deliversPreviewSizedOutputBuffers(&self) -> bool;

        /// Setter for [`deliversPreviewSizedOutputBuffers`][Self::deliversPreviewSizedOutputBuffers].
        #[unsafe(method(setDeliversPreviewSizedOutputBuffers:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setDeliversPreviewSizedOutputBuffers(
            &self,
            delivers_preview_sized_output_buffers: bool,
        );
    );
}

extern_protocol!(
    /// Defines an interface for delegates of AVCaptureVideoDataOutput to receive captured video sample buffers and be notified of late sample buffers that were dropped.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avcapturevideodataoutputsamplebufferdelegate?language=objc)
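    ///
    /// # Example
    ///
    /// A minimal, hedged sketch of a conforming delegate defined from Rust. The `FrameDelegate` name, the queue label, and the pre-existing `output` are illustrative; `define_class!` is objc2's class-definition macro, and `setSampleBufferDelegate_queue` is assumed to be generated behind the `dispatch2` feature (it is not part of this excerpt):
    ///
    /// ```ignore
    /// use dispatch2::DispatchQueue;
    /// use objc2::rc::Retained;
    /// use objc2::runtime::{NSObjectProtocol, ProtocolObject};
    /// use objc2::{define_class, msg_send, AllocAnyThread};
    /// use objc2_core_media::CMSampleBuffer;
    /// use objc2_foundation::NSObject;
    ///
    /// define_class!(
    ///     // A plain NSObject subclass with no ivars that conforms to the
    ///     // sample buffer delegate protocol.
    ///     #[unsafe(super(NSObject))]
    ///     #[name = "FrameDelegate"]
    ///     struct FrameDelegate;
    ///
    ///     unsafe impl NSObjectProtocol for FrameDelegate {}
    ///
    ///     unsafe impl AVCaptureVideoDataOutputSampleBufferDelegate for FrameDelegate {
    ///         #[unsafe(method(captureOutput:didOutputSampleBuffer:fromConnection:))]
    ///         fn did_output(
    ///             &self,
    ///             _output: &AVCaptureOutput,
    ///             _sample_buffer: &CMSampleBuffer,
    ///             _connection: &AVCaptureConnection,
    ///         ) {
    ///             // Runs on the sampleBufferCallbackQueue; keep this fast,
    ///             // and avoid retaining the buffer longer than needed, or
    ///             // the capture pipeline may start dropping frames.
    ///         }
    ///     }
    /// );
    ///
    /// let delegate: Retained<FrameDelegate> =
    ///     unsafe { msg_send![FrameDelegate::alloc(), init] };
    /// let queue = DispatchQueue::new("frame-delegate-queue", None);
    /// unsafe {
    ///     // Assumed to be generated behind the `dispatch2` feature.
    ///     output.setSampleBufferDelegate_queue(
    ///         Some(ProtocolObject::from_ref(&*delegate)),
    ///         Some(&queue),
    ///     );
    /// }
    /// ```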
    pub unsafe trait AVCaptureVideoDataOutputSampleBufferDelegate: NSObjectProtocol {
        #[cfg(all(
            feature = "AVCaptureOutputBase",
            feature = "AVCaptureSession",
            feature = "objc2-core-media"
        ))]
        /// Called whenever an AVCaptureVideoDataOutput instance outputs a new video frame.
        ///
        ///
        /// Parameter `output`: The AVCaptureVideoDataOutput instance that output the frame.
        ///
        /// Parameter `sampleBuffer`: A CMSampleBuffer object containing the video frame data and additional information about the frame, such as its format and presentation time.
        ///
        /// Parameter `connection`: The AVCaptureConnection from which the video was received.
        ///
        ///
        /// Delegates receive this message whenever the output captures and outputs a new video frame, decoding or re-encoding it as specified by its videoSettings property. Delegates can use the provided video frame in conjunction with other APIs for further processing. This method will be called on the dispatch queue specified by the output's sampleBufferCallbackQueue property. This method is called periodically, so it must be efficient to prevent capture performance problems, including dropped frames.
        ///
        /// Clients that need to reference the CMSampleBuffer object outside of the scope of this method must CFRetain it and then CFRelease it when they are finished with it.
        ///
        /// Note that to maintain optimal performance, some sample buffers directly reference pools of memory that may need to be reused by the device system and other capture inputs. This is frequently the case for uncompressed device native capture where memory blocks are copied as little as possible. If multiple sample buffers reference such pools of memory for too long, inputs will no longer be able to copy new samples into memory and those samples will be dropped. If your application is causing samples to be dropped by retaining the provided CMSampleBuffer objects for too long, but it needs access to the sample data for a long period of time, consider copying the data into a new buffer and then calling CFRelease on the sample buffer if it was previously retained so that the memory it references can be reused.
        #[optional]
        #[unsafe(method(captureOutput:didOutputSampleBuffer:fromConnection:))]
        #[unsafe(method_family = none)]
        unsafe fn captureOutput_didOutputSampleBuffer_fromConnection(
            &self,
            output: &AVCaptureOutput,
            sample_buffer: &CMSampleBuffer,
            connection: &AVCaptureConnection,
        );

        #[cfg(all(
            feature = "AVCaptureOutputBase",
            feature = "AVCaptureSession",
            feature = "objc2-core-media"
        ))]
        /// Called once for each frame that is discarded.
        ///
        ///
        /// Parameter `output`: The AVCaptureVideoDataOutput instance that dropped the frame.
        ///
        /// Parameter `sampleBuffer`: A CMSampleBuffer object containing information about the dropped frame, such as its format and presentation time. This sample buffer will contain none of the original video data.
        ///
        /// Parameter `connection`: The AVCaptureConnection from which the dropped video frame was received.
        ///
        ///
        /// Delegates receive this message whenever a video frame is dropped. This method is called once for each dropped frame. The CMSampleBuffer object passed to this delegate method will contain metadata about the dropped video frame, such as its duration and presentation time stamp, but will contain no actual video data. On iOS, included in the sample buffer attachments is the kCMSampleBufferAttachmentKey_DroppedFrameReason, which indicates why the frame was dropped. This method will be called on the dispatch queue specified by the output's sampleBufferCallbackQueue property. Because this method will be called on the same dispatch queue that is responsible for outputting video frames, it must be efficient to prevent further capture performance problems, such as additional dropped video frames.
        #[optional]
        #[unsafe(method(captureOutput:didDropSampleBuffer:fromConnection:))]
        #[unsafe(method_family = none)]
        unsafe fn captureOutput_didDropSampleBuffer_fromConnection(
            &self,
            output: &AVCaptureOutput,
            sample_buffer: &CMSampleBuffer,
            connection: &AVCaptureConnection,
        );
    }
);