// objc2_av_foundation/generated/AVAssetWriterInput.rs

1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5#[cfg(feature = "dispatch2")]
6use dispatch2::*;
7use objc2::__framework_prelude::*;
8#[cfg(feature = "objc2-core-foundation")]
9use objc2_core_foundation::*;
10#[cfg(feature = "objc2-core-media")]
11use objc2_core_media::*;
12#[cfg(feature = "objc2-core-video")]
13use objc2_core_video::*;
14use objc2_foundation::*;
15
16use crate::*;
17
extern_class!(
    /// AVAssetWriterInput defines an interface for appending either new media samples or references to existing media samples packaged as CMSampleBuffer objects to a single track of the output file of an AVAssetWriter.
    ///
    /// Clients that need to write multiple concurrent tracks of media data should use one AVAssetWriterInput instance per track. In order to write multiple concurrent tracks with ideal interleaving of media data, clients should observe the value returned by the readyForMoreMediaData property of each AVAssetWriterInput instance.
    ///
    /// AVAssetWriterInput also supports writing per-track metadata collections to the output file.
    ///
    /// As of macOS 10.10 and iOS 8.0 AVAssetWriterInput can also be used to create tracks that are not self-contained. Such tracks reference sample data that is located in another file. This is currently supported only for instances of AVAssetWriterInput attached to an instance of AVAssetWriter that writes files of type AVFileTypeQuickTimeMovie.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinput?language=objc)
    // Declared as a direct subclass of NSObject (no intermediate superclass).
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInput;
);
32
// Marks AVAssetWriterInput as conforming to NSObjectProtocol, so the
// protocol's methods are usable through this binding.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInput {}
);
36
37impl AVAssetWriterInput {
38    extern_methods!(
        /// Invokes the plain Objective-C `-init` initializer on an allocated
        /// instance.
        ///
        /// NOTE(review): this class documents
        /// `initWithMediaType:outputSettings:sourceFormatHint:` as its
        /// designated initializer; confirm that plain `init` is supported
        /// before relying on it.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
42
        /// Maps to the Objective-C `+new` class method.
        ///
        /// NOTE(review): this class documents
        /// `initWithMediaType:outputSettings:sourceFormatHint:` as its
        /// designated initializer; confirm that `+new` is supported before
        /// relying on it.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
46
        #[cfg(feature = "AVMediaFormat")]
        /// Creates a new input of the specified media type to receive sample buffers for writing to the output file.
        ///
        /// Each new input accepts data for a new track of the AVAssetWriter's output file. Inputs are added to an asset writer using -[AVAssetWriter addInput:].
        ///
        /// Passing nil for output settings instructs the input to pass through appended samples, doing no processing before they are written to the output file. This is useful if, for example, you are appending buffers that are already in a desirable compressed format. However, if not writing to a QuickTime Movie file (i.e. the AVAssetWriter was initialized with a file type other than AVFileTypeQuickTimeMovie), AVAssetWriter only supports passing through a restricted set of media types and subtypes. In order to pass through media data to files other than AVFileTypeQuickTimeMovie, a non-NULL format hint must be provided using +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: instead of this method.
        ///
        /// For AVMediaTypeAudio the following keys are not currently supported in the outputSettings dictionary: AVSampleRateConverterAudioQualityKey. When using this method to construct a new instance, an audio settings dictionary must be fully specified, meaning that it must contain AVFormatIDKey, AVSampleRateKey, and AVNumberOfChannelsKey. If no other channel layout information is available, a value of 1 for AVNumberOfChannelsKey will result in mono output and a value of 2 will result in stereo output. If AVNumberOfChannelsKey specifies a channel count greater than 2, the dictionary must also specify a value for AVChannelLayoutKey. For kAudioFormatLinearPCM, all relevant AVLinearPCM*Key keys must be included, and for kAudioFormatAppleLossless, AVEncoderBitDepthHintKey keys must be included. See +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys.
        ///
        /// For AVMediaTypeVideo, any output settings dictionary must request a compressed video format. This means that the value passed in for outputSettings must follow the rules for compressed video output, as laid out in AVVideoSettings.h. When using this method to construct a new instance, a video settings dictionary must be fully specified, meaning that it must contain AVVideoCodecKey, AVVideoWidthKey, and AVVideoHeightKey. See +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys. On iOS, the only values currently supported for AVVideoCodecKey are AVVideoCodecTypeH264 and AVVideoCodecTypeJPEG. AVVideoCodecTypeH264 is not supported on iPhone 3G. For AVVideoScalingModeKey, the value AVVideoScalingModeFit is not supported.
        ///
        /// - Parameter mediaType: The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
        /// - Parameter outputSettings: The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo and for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
        ///
        /// - Returns: An instance of AVAssetWriterInput.
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
        #[unsafe(method(assetWriterInputWithMediaType:outputSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputWithMediaType_outputSettings(
            media_type: &AVMediaType,
            // `None` corresponds to a nil outputSettings dictionary, i.e.
            // pass-through of appended samples (see the discussion above).
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;
72
        #[cfg(all(feature = "AVMediaFormat", feature = "objc2-core-media"))]
        /// Creates a new input of the specified media type to receive sample buffers for writing to the output file.
        ///
        /// A version of +assetWriterInputWithMediaType:outputSettings: that includes the ability to hint at the format of media data that will be appended to the new instance of AVAssetWriterInput. When a source format hint is provided, the outputSettings dictionary is not required to be fully specified. For AVMediaTypeAudio, this means that AVFormatIDKey is the only required key. For AVMediaTypeVideo, this means that AVVideoCodecKey is the only required key. Values for the remaining keys will be chosen by the asset writer input, with consideration given to the attributes of the source format. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended buffers are of the specified format.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - the media type of the format description does not match the media type passed into this method
        /// - the width and height of video format hint are not positive
        /// - the output settings do not match the supplied media type
        /// - for video inputs, the output settings do not contain a required key (AVVideoCodecKey, AVVideoWidthKey, AVVideoHeightKey)
        /// - the output scaling mode is AVVideoScalingModeFit
        /// - the output settings contain AVSampleRateConverterAudioQualityKey or AVVideoDecompressionPropertiesKey
        ///
        /// - Parameter mediaType: The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
        /// - Parameter outputSettings: The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo and for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
        /// - Parameter sourceFormatHint: A hint about the format of media data that will be appended to the new input.
        ///
        /// - Returns: An instance of AVAssetWriterInput.
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
        #[unsafe(method(assetWriterInputWithMediaType:outputSettings:sourceFormatHint:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputWithMediaType_outputSettings_sourceFormatHint(
            media_type: &AVMediaType,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
            // `None` means no format hint; with a hint, outputSettings need not
            // be fully specified (see the discussion above).
            source_format_hint: Option<&CMFormatDescription>,
        ) -> Retained<Self>;
102
        #[cfg(feature = "AVMediaFormat")]
        /// Creates a new input of the specified media type to receive sample buffers for writing to the output file.
        ///
        /// Each new input accepts data for a new track of the AVAssetWriter's output file. Inputs are added to an asset writer using -[AVAssetWriter addInput:].
        ///
        /// Passing nil for output settings instructs the input to pass through appended samples, doing no processing before they are written to the output file. This is useful if, for example, you are appending buffers that are already in a desirable compressed format. However, if not writing to a QuickTime Movie file (i.e. the AVAssetWriter was initialized with a file type other than AVFileTypeQuickTimeMovie), AVAssetWriter only supports passing through a restricted set of media types and subtypes. In order to pass through media data to files other than AVFileTypeQuickTimeMovie, a non-NULL format hint must be provided using -initWithMediaType:outputSettings:sourceFormatHint: instead of this method.
        ///
        /// For AVMediaTypeAudio the following keys are not currently supported in the outputSettings dictionary: AVSampleRateConverterAudioQualityKey. When using this initializer, an audio settings dictionary must be fully specified, meaning that it must contain AVFormatIDKey, AVSampleRateKey, and AVNumberOfChannelsKey. If no other channel layout information is available, a value of 1 for AVNumberOfChannelsKey will result in mono output and a value of 2 will result in stereo output. If AVNumberOfChannelsKey specifies a channel count greater than 2, the dictionary must also specify a value for AVChannelLayoutKey. For kAudioFormatLinearPCM, all relevant AVLinearPCM*Key keys must be included, and for kAudioFormatAppleLossless, AVEncoderBitDepthHintKey keys must be included. See -initWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys.
        ///
        /// For AVMediaTypeVideo, any output settings dictionary must request a compressed video format. This means that the value passed in for outputSettings must follow the rules for compressed video output, as laid out in AVVideoSettings.h. When using this initializer, a video settings dictionary must be fully specified, meaning that it must contain AVVideoCodecKey, AVVideoWidthKey, and AVVideoHeightKey. See -initWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys. On iOS, the only values currently supported for AVVideoCodecKey are AVVideoCodecTypeH264 and AVVideoCodecTypeJPEG. AVVideoCodecTypeH264 is not supported on iPhone 3G. For AVVideoScalingModeKey, the value AVVideoScalingModeFit is not supported.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - the media type of the format description does not match the media type passed into this method
        /// - the output settings do not match the supplied media type
        /// - for video inputs, the output settings do not contain a required key (AVVideoCodecKey, AVVideoWidthKey, AVVideoHeightKey)
        /// - the output scaling mode is AVVideoScalingModeFit
        /// - the output settings contain AVSampleRateConverterAudioQualityKey or AVVideoDecompressionPropertiesKey
        ///
        /// - Parameter mediaType: The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
        /// - Parameter outputSettings: The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo and for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
        ///
        /// - Returns: An instance of AVAssetWriterInput.
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
        #[unsafe(method(initWithMediaType:outputSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithMediaType_outputSettings(
            this: Allocated<Self>,
            media_type: &AVMediaType,
            // `None` corresponds to a nil outputSettings dictionary, i.e.
            // pass-through of appended samples (see the discussion above).
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;
136
        #[cfg(all(feature = "AVMediaFormat", feature = "objc2-core-media"))]
        /// Creates a new input of the specified media type to receive sample buffers for writing to the output file. This is the designated initializer of AVAssetWriterInput.
        ///
        /// A version of -initWithMediaType:outputSettings: that includes the ability to hint at the format of media data that will be appended to the new instance of AVAssetWriterInput. When a source format hint is provided, the outputSettings dictionary is not required to be fully specified. For AVMediaTypeAudio, this means that AVFormatIDKey is the only required key. For AVMediaTypeVideo, this means that AVVideoCodecKey is the only required key. Values for the remaining keys will be chosen by the asset writer input, with consideration given to the attributes of the source format. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended buffers are of the specified format.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - the media type of the format description does not match the media type passed into this method
        /// - the width and height of video format hint are not positive
        /// - the output settings do not match the supplied media type
        /// - for video inputs, the output settings do not contain a required key (AVVideoCodecKey, AVVideoWidthKey, AVVideoHeightKey)
        /// - the output scaling mode is AVVideoScalingModeFit
        /// - the output settings contain AVSampleRateConverterAudioQualityKey or AVVideoDecompressionPropertiesKey
        ///
        /// - Parameter mediaType: The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
        /// - Parameter outputSettings: The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo and for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
        /// - Parameter sourceFormatHint: A hint about the format of media data that will be appended to the new input.
        ///
        /// - Returns: An instance of AVAssetWriterInput.
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
        #[unsafe(method(initWithMediaType:outputSettings:sourceFormatHint:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithMediaType_outputSettings_sourceFormatHint(
            this: Allocated<Self>,
            media_type: &AVMediaType,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
            // `None` means no format hint; with a hint, outputSettings need not
            // be fully specified (see the discussion above).
            source_format_hint: Option<&CMFormatDescription>,
        ) -> Retained<Self>;
167
        #[cfg(feature = "AVMediaFormat")]
        /// The media type of the samples that can be appended to the receiver.
        ///
        /// The value of this property is one of the media types defined in AVMediaFormat.h.
        ///
        /// Read-only in this binding: the media type is supplied when the input
        /// is created and no setter is declared for it.
        #[unsafe(method(mediaType))]
        #[unsafe(method_family = none)]
        pub unsafe fn mediaType(&self) -> Retained<AVMediaType>;
175
        /// The settings used for encoding the media appended to the output.
        ///
        /// The value of this property is an NSDictionary that contains values for keys as specified by either AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo. A value of nil indicates that the receiver will pass through appended samples, doing no processing before they are written to the output file.
        ///
        /// A `None` return corresponds to the nil (pass-through) case described
        /// above.
        #[unsafe(method(outputSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn outputSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;
182
        #[cfg(feature = "objc2-core-media")]
        /// The hint given at initialization time about the format of incoming media data.
        ///
        /// AVAssetWriterInput may be able to use this hint to fill in missing output settings or perform more upfront validation. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended media data are of the specified format.
        ///
        /// Returns `None` when no format hint was supplied at initialization.
        #[unsafe(method(sourceFormatHint))]
        #[unsafe(method_family = none)]
        pub unsafe fn sourceFormatHint(&self) -> Option<Retained<CMFormatDescription>>;
190
        #[cfg(feature = "AVMetadataItem")]
        /// A collection of metadata to be written to the track corresponding to the receiver.
        ///
        /// The value of this property is an array of AVMetadataItem objects representing the collection of track-level metadata to be written in the output file.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(metadata))]
        #[unsafe(method_family = none)]
        pub unsafe fn metadata(&self) -> Retained<NSArray<AVMetadataItem>>;

        #[cfg(feature = "AVMetadataItem")]
        /// Setter for [`metadata`][Self::metadata].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        ///
        /// Per the property documentation above, this cannot be set after
        /// writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(setMetadata:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMetadata(&self, metadata: &NSArray<AVMetadataItem>);
208
        /// Indicates the readiness of the input to accept more media data.
        ///
        /// When there are multiple inputs, AVAssetWriter tries to write media data in an ideal interleaving pattern for efficiency in storage and playback. Each of its inputs signals its readiness to receive media data for writing according to that pattern via the value of readyForMoreMediaData. You can append media data to an input only while its readyForMoreMediaData property is YES.
        ///
        /// Clients writing media data from a non-real-time source, such as an instance of AVAssetReader, should hold off on generating or obtaining more media data to append to an input when the value of readyForMoreMediaData is NO. To help with control of the supply of non-real-time media data, such clients can use -requestMediaDataWhenReadyOnQueue:usingBlock in order to specify a block that the input should invoke whenever it's ready for input to be appended.
        ///
        /// Clients writing media data from a real-time source, such as an instance of AVCaptureOutput, should set the input's expectsMediaDataInRealTime property to YES to ensure that the value of readyForMoreMediaData is calculated appropriately. When expectsMediaDataInRealTime is YES, readyForMoreMediaData will become NO only when the input cannot process media samples as quickly as they are being provided by the client. If readyForMoreMediaData becomes NO for a real-time source, the client may need to drop samples or consider reducing the data rate of appended samples.
        ///
        /// When the value of canPerformMultiplePasses is YES for any input attached to this input's asset writer, the value for this property may start as NO and/or be NO for long periods of time.
        ///
        /// The value of readyForMoreMediaData will often change from NO to YES asynchronously, as previously supplied media data is processed and written to the output. It is possible for all of an AVAssetWriter's AVAssetWriterInputs temporarily to return NO for readyForMoreMediaData.
        ///
        /// This property is key value observable. Observers should not assume that they will be notified of changes on a specific thread.
        ///
        /// (Rust note: this is the Objective-C getter `isReadyForMoreMediaData`
        /// for the `readyForMoreMediaData` property.)
        #[unsafe(method(isReadyForMoreMediaData))]
        #[unsafe(method_family = none)]
        pub unsafe fn isReadyForMoreMediaData(&self) -> bool;
225
        /// Indicates whether the input should tailor its processing of media data for real-time sources.
        ///
        /// Clients appending media data to an input from a real-time source, such as an AVCaptureOutput, should set expectsMediaDataInRealTime to YES. This will ensure that readyForMoreMediaData is calculated appropriately for real-time usage.
        ///
        /// For best results, do not set both this property and performsMultiPassEncodingIfSupported to YES.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(expectsMediaDataInRealTime))]
        #[unsafe(method_family = none)]
        pub unsafe fn expectsMediaDataInRealTime(&self) -> bool;

        /// Setter for [`expectsMediaDataInRealTime`][Self::expectsMediaDataInRealTime].
        ///
        /// Per the property documentation above, this cannot be set after
        /// writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(setExpectsMediaDataInRealTime:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setExpectsMediaDataInRealTime(&self, expects_media_data_in_real_time: bool);
241
        #[cfg(all(feature = "block2", feature = "dispatch2"))]
        /// Instructs the receiver to invoke a client-supplied block repeatedly, at its convenience, in order to gather media data for writing to the output file.
        ///
        /// The block should append media data to the input either until the input's readyForMoreMediaData property becomes NO or until there is no more media data to supply (at which point it may choose to mark the input as finished via -markAsFinished). The block should then exit. After the block exits, if the input has not been marked as finished, once the input has processed the media data it has received and becomes ready for more media data again, it will invoke the block again in order to obtain more.
        ///
        /// A typical use of this method, with a block that supplies media data to an input while respecting the input's readyForMoreMediaData property, might look like this:
        /// ```objc
        /// [myAVAssetWriterInput requestMediaDataWhenReadyOnQueue:myInputSerialQueue usingBlock:^{
        /// while ([myAVAssetWriterInput isReadyForMoreMediaData])
        /// {
        /// CMSampleBufferRef nextSampleBuffer = [self copyNextSampleBufferToWrite];
        /// if (nextSampleBuffer)
        /// {
        /// [myAVAssetWriterInput appendSampleBuffer:nextSampleBuffer];
        /// CFRelease(nextSampleBuffer);
        /// }
        /// else
        /// {
        /// [myAVAssetWriterInput markAsFinished];
        /// break;
        /// }
        /// }
        /// }];
        /// ```
        /// This method is not recommended for use with a push-style buffer source, such as AVCaptureAudioDataOutput or AVCaptureVideoDataOutput, because such a combination will likely require intermediate queueing of buffers. Instead, this method is better suited to a pull-style buffer source such as AVAssetReaderOutput, as illustrated in the above example.
        ///
        /// When using a push-style buffer source, it is generally better to immediately append each buffer to the AVAssetWriterInput, directly via -[AVAssetWriter appendSampleBuffer:], as it is received. Using this strategy, it is often possible to avoid having to queue up buffers in between the buffer source and the AVAssetWriterInput. Note that many of these push-style buffer sources also produce buffers in real-time, in which case the client should set expectsMediaDataInRealTime to YES.
        ///
        /// Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
        ///
        /// This method throws an exception if this method is called more than once.
        ///
        /// - Parameter queue: The queue on which the block should be invoked.
        /// - Parameter block: The block the input should invoke to obtain media data.
        ///
        /// # Safety
        ///
        /// - `queue` possibly has additional threading requirements.
        /// - `block` block must be sendable.
        #[unsafe(method(requestMediaDataWhenReadyOnQueue:usingBlock:))]
        #[unsafe(method_family = none)]
        pub unsafe fn requestMediaDataWhenReadyOnQueue_usingBlock(
            &self,
            queue: &DispatchQueue,
            // Invoked repeatedly on `queue` until the input is marked as
            // finished (see the discussion above).
            block: &block2::DynBlock<dyn Fn()>,
        );
288
        #[cfg(feature = "objc2-core-media")]
        /// Appends samples to the receiver.
        ///
        /// The timing information in the sample buffer, considered relative to the time passed to -[AVAssetWriter startSessionAtSourceTime:], will be used to determine the timing of those samples in the output file.
        ///
        /// For track types other than audio tracks, to determine the duration of all samples in the output file other than the very last sample that's appended, the difference between the sample buffer's output DTS and the following sample buffer's output DTS will be used. The duration of the last sample is determined as follows:
        /// 1. If a marker sample buffer with kCMSampleBufferAttachmentKey_EndsPreviousSampleDuration is appended following the last media-bearing sample, the difference between the output DTS of the marker sample buffer and the output DTS of the last media-bearing sample will be used.
        /// 2. If the marker sample buffer is not provided and if the output duration of the last media-bearing sample is valid, it will be used.
        /// 3. if the output duration of the last media-bearing sample is not valid, the duration of the second-to-last sample will be used.
        ///
        /// For audio tracks, the properties of each appended sample buffer are used to determine corresponding output durations.
        ///
        /// The receiver will retain the CMSampleBuffer until it is done with it, and then release it. Do not modify a CMSampleBuffer or its contents after you have passed it to this method.
        ///
        /// If the sample buffer contains audio data and the AVAssetWriterInput was initialized with an outputSettings dictionary then the format must be linear PCM. If the outputSettings dictionary was nil then audio data can be provided in a compressed format, and it will be passed through to the output without any re-compression. Note that advanced formats like AAC will have encoder delay present in their bitstreams. This data is inserted by the encoder and is necessary for proper decoding, but it is not meant to be played back. Clients who provide compressed audio bitstreams must use kCMSampleBufferAttachmentKey_TrimDurationAtStart to mark the encoder delay (generally restricted to the first sample buffer). Packetization can cause there to be extra audio frames in the last packet which are not meant to be played back. These remainder frames should be marked with kCMSampleBufferAttachmentKey_TrimDurationAtEnd. CMSampleBuffers obtained from AVAssetReader will already have the necessary trim attachments. Please see http://developer.apple.com/mac/library/technotes/tn2009/tn2258.html for more information about encoder delay. When attaching trims make sure that the output PTS of the sample buffer is what you expect. For example if you called -[AVAssetWriter startSessionAtSourceTime:kCMTimeZero] and you want your audio to start at time zero in the output file then make sure that the output PTS of the first non-fully trimmed audio sample buffer is kCMTimeZero.
        ///
        /// If the sample buffer contains a CVPixelBuffer then the choice of pixel format will affect the performance and quality of the encode. For optimal performance the format of the pixel buffer should match one of the native formats supported by the selected video encoder. Below are some recommendations:
        ///
        /// The H.264 and HEVC encoders natively support kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange and kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, which should be used with 8-bit 4:2:0 video and full range input respectively; other related pixel formats in CoreVideo/CVPixelBuffer.h are ideal for 4:2:2 and 4:4:4 (and for HEVC, 10-bit). The JPEG encoder on iOS and Apple Silicon macOS natively supports kCVPixelFormatType_422YpCbCr8FullRange. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended on iOS and macOS.
        ///
        /// Pixel buffers not in a natively supported format will be converted internally prior to encoding when possible. Pixel format conversions within the same range (video or full) are generally faster than conversions between different ranges.
        ///
        /// The ProRes encoders can preserve high bit depth sources, supporting up to 12bits/ch. ProRes 4444 can contain a mathematically lossless alpha channel and it doesn't do any chroma subsampling. This makes ProRes 4444 ideal for quality critical applications. If you are working with 8bit sources ProRes is also a good format to use due to its high image quality. Use either of the recommended pixel formats above. Note that RGB pixel formats by definition have 4:4:4 chroma sampling.
        ///
        /// If you are working with high bit depth sources the following yuv pixel formats are recommended when encoding to ProRes: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, and kCVPixelFormatType_422YpCbCr10. When working in the RGB domain kCVPixelFormatType_64ARGB is recommended. Scaling and color matching are not currently supported when using AVAssetWriter with any of these high bit depth pixel formats. Please make sure that your track's output settings dictionary specifies the same width and height as the buffers you will be appending. Do not include AVVideoScalingModeKey or AVVideoColorPropertiesKey.
        ///
        /// As of macOS 10.10 and iOS 8.0, this method can be used to add sample buffers that reference existing data in a file instead of containing media data to be appended to the file. This can be used to generate tracks that are not self-contained. In order to append such a sample reference to the track create a CMSampleBufferRef with a NULL dataBuffer and dataReady set to true and set the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments on the sample buffer. Further documentation on how to create such a "sample reference" sample buffer can be found in the description of the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachment keys in the CMSampleBuffer documentation.
        ///
        /// Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
        ///
        /// This method throws an exception if the sample buffer's media type does not match the asset writer input's media type.
        ///
        /// - Parameter sampleBuffer: The CMSampleBuffer to be appended. The receiver retains the buffer until it has been written; do not modify it after appending (see discussion above).
        ///
        /// - Returns: A BOOL value indicating success of appending the sample buffer. If a result of NO is returned, clients can check the value of AVAssetWriter.status to determine whether the writing operation completed, failed, or was cancelled.  If the status is AVAssetWriterStatusFailed, AVAsset.error will contain an instance of NSError that describes the failure.
        #[unsafe(method(appendSampleBuffer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendSampleBuffer(&self, sample_buffer: &CMSampleBuffer) -> bool;
327
328        /// Indicates to the AVAssetWriter that no more buffers will be appended to this input.
329        ///
330        /// Clients that are monitoring each input's readyForMoreMediaData value must call markAsFinished on an input when they are done appending buffers to it. This is necessary to prevent other inputs from stalling, as they may otherwise wait forever for that input's media data, attempting to complete the ideal interleaving pattern.
331        ///
332        /// After invoking this method from the serial queue passed to -requestMediaDataWhenReadyOnQueue:usingBlock:, the receiver is guaranteed to issue no more invocations of the block passed to that method. The same is true of -respondToEachPassDescriptionOnQueue:usingBlock:.
333        ///
334        /// Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
335        #[unsafe(method(markAsFinished))]
336        #[unsafe(method_family = none)]
337        pub unsafe fn markAsFinished(&self);
338    );
339}
340
/// AVAssetWriterInputLanguageProperties.
///
/// Bindings for the `AVAssetWriterInputLanguageProperties` Objective-C
/// category on `AVAssetWriterInput`: language/locale tagging for the
/// output track.
impl AVAssetWriterInput {
    extern_methods!(
        /// Indicates the language to associate with the track corresponding to the receiver, as an ISO 639-2/T language code; can be nil.
        ///
        /// Also see extendedLanguageTag below.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        ///
        /// This property throws an exception if a language code is set which does not conform to the ISO 639-2/T language codes.
        #[unsafe(method(languageCode))]
        #[unsafe(method_family = none)]
        pub unsafe fn languageCode(&self) -> Option<Retained<NSString>>;

        /// Setter for [`languageCode`][Self::languageCode].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setLanguageCode:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setLanguageCode(&self, language_code: Option<&NSString>);

        /// Indicates the language tag to associate with the track corresponding to the receiver, as an IETF BCP 47 (RFC 4646) language identifier; can be nil.
        ///
        /// Extended language tags are normally set only when an ISO 639-2/T language code by itself is ambiguous, as in cases in which media data should be distinguished not only by language but also by the regional dialect in use or the writing system employed.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        ///
        /// This property throws an exception if an extended language tag is set which does not conform to the IETF BCP 47 (RFC 4646) language identifiers.
        #[unsafe(method(extendedLanguageTag))]
        #[unsafe(method_family = none)]
        pub unsafe fn extendedLanguageTag(&self) -> Option<Retained<NSString>>;

        /// Setter for [`extendedLanguageTag`][Self::extendedLanguageTag].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setExtendedLanguageTag:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setExtendedLanguageTag(&self, extended_language_tag: Option<&NSString>);
    );
}
381
/// AVAssetWriterInputPropertiesForVisualCharacteristic.
///
/// Bindings for the `AVAssetWriterInputPropertiesForVisualCharacteristic`
/// Objective-C category on `AVAssetWriterInput`: display geometry for
/// visual tracks. These methods are only available when the
/// `objc2-core-foundation` feature is enabled (they use `CGSize` /
/// `CGAffineTransform`).
impl AVAssetWriterInput {
    extern_methods!(
        #[cfg(feature = "objc2-core-foundation")]
        /// The size specified in the output file as the natural dimensions of the visual media data for display purposes.
        ///
        /// If the default value, CGSizeZero, is specified, the naturalSize of the track corresponding to the receiver is set according to dimensions indicated by the format descriptions that are ultimately written to the output track.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(naturalSize))]
        #[unsafe(method_family = none)]
        pub unsafe fn naturalSize(&self) -> CGSize;

        #[cfg(feature = "objc2-core-foundation")]
        /// Setter for [`naturalSize`][Self::naturalSize].
        #[unsafe(method(setNaturalSize:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setNaturalSize(&self, natural_size: CGSize);

        #[cfg(feature = "objc2-core-foundation")]
        /// The transform specified in the output file as the preferred transformation of the visual media data for display purposes.
        ///
        /// If no value is specified, the identity transform is used.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(transform))]
        #[unsafe(method_family = none)]
        pub unsafe fn transform(&self) -> CGAffineTransform;

        #[cfg(feature = "objc2-core-foundation")]
        /// Setter for [`transform`][Self::transform].
        #[unsafe(method(setTransform:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setTransform(&self, transform: CGAffineTransform);
    );
}
418
/// AVAssetWriterInputPropertiesForAudibleCharacteristic.
///
/// Bindings for the `AVAssetWriterInputPropertiesForAudibleCharacteristic`
/// Objective-C category on `AVAssetWriterInput`: audio-related track
/// properties.
impl AVAssetWriterInput {
    extern_methods!(
        /// The preferred volume level to be stored in the output file.
        ///
        /// The value for this property should typically be in the range of 0.0 to 1.0. The default value is 1.0, which is equivalent to a "normal" volume level for audio media type. For all other media types the default value is 0.0.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(preferredVolume))]
        #[unsafe(method_family = none)]
        pub unsafe fn preferredVolume(&self) -> c_float;

        /// Setter for [`preferredVolume`][Self::preferredVolume].
        #[unsafe(method(setPreferredVolume:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPreferredVolume(&self, preferred_volume: c_float);
    );
}
437
/// Constants specifying where the media data of a track will be laid out
/// within the output file.
///
/// This is a string-backed "typed enum" (`NS_TYPED_ENUM` in the header),
/// translated as a type alias for `NSString`; the valid values are the
/// `AVAssetWriterInputMediaDataLocation…` statics declared in this file.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputmediadatalocation?language=objc)
// NS_TYPED_ENUM
pub type AVAssetWriterInputMediaDataLocation = NSString;
441
extern "C" {
    /// Indicates that the media data should be interleaved with the media data of all other inputs that also use this constant.
    ///
    /// This is the default value of the `mediaDataLocation` property.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputmediadatalocationinterleavedwithmainmediadata?language=objc)
    pub static AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData:
        &'static AVAssetWriterInputMediaDataLocation;
}
449
extern "C" {
    /// Indicates that the media data should be laid out before all the media data with AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData and not be interleaved.
    ///
    /// Useful for small, always-needed tracks (e.g. chapter name/image
    /// tracks); see the `mediaDataLocation` property documentation.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputmediadatalocationbeforemainmediadatanotinterleaved?language=objc)
    pub static AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved:
        &'static AVAssetWriterInputMediaDataLocation;
}
457
extern "C" {
    /// Indicates that there may be large segments of time without any media data from this track. When mediaDataLocation is set to this value, AVAssetWriter will interleave the media data, but will not wait for media data from this track to achieve tight interleaving with other tracks.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputmediadatalocationsparselyinterleavedwithmainmediadata?language=objc)
    pub static AVAssetWriterInputMediaDataLocationSparselyInterleavedWithMainMediaData:
        &'static AVAssetWriterInputMediaDataLocation;
}
465
/// AVAssetWriterInputFileTypeSpecificProperties.
///
/// Bindings for the `AVAssetWriterInputFileTypeSpecificProperties`
/// Objective-C category on `AVAssetWriterInput`: properties that only
/// apply to certain container formats (chiefly QuickTime Movie files).
impl AVAssetWriterInput {
    extern_methods!(
        /// For file types that support enabled and disabled tracks, such as QuickTime Movie files, specifies whether the track corresponding to the receiver should be enabled by default for playback and processing. The default value is YES.
        ///
        /// When an input group is added to an AVAssetWriter (see -[AVAssetWriter addInputGroup:]), the value of marksOutputTrackAsEnabled will automatically be set to YES for the default input and set to NO for all of the other inputs in the group. In this case, if a new value is set on this property then an exception will be raised.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        ///
        /// This property throws an exception if a value is set on an asset writer input that is contained in an input group.
        #[unsafe(method(marksOutputTrackAsEnabled))]
        #[unsafe(method_family = none)]
        pub unsafe fn marksOutputTrackAsEnabled(&self) -> bool;

        /// Setter for [`marksOutputTrackAsEnabled`][Self::marksOutputTrackAsEnabled].
        #[unsafe(method(setMarksOutputTrackAsEnabled:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMarksOutputTrackAsEnabled(&self, marks_output_track_as_enabled: bool);

        #[cfg(feature = "objc2-core-media")]
        /// For file types that support media time scales, such as QuickTime Movie files, specifies the media time scale to be used.
        ///
        /// The default value is 0, which indicates that the receiver should choose a convenient value, if applicable. It is an error to set a value other than 0 if the receiver has media type AVMediaTypeAudio.
        ///
        /// This property cannot be set after writing has started.
        ///
        /// This property throws an exception if a value is set on an asset writer input with media type AVMediaTypeAudio.
        #[unsafe(method(mediaTimeScale))]
        #[unsafe(method_family = none)]
        pub unsafe fn mediaTimeScale(&self) -> CMTimeScale;

        #[cfg(feature = "objc2-core-media")]
        /// Setter for [`mediaTimeScale`][Self::mediaTimeScale].
        #[unsafe(method(setMediaTimeScale:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMediaTimeScale(&self, media_time_scale: CMTimeScale);

        #[cfg(feature = "objc2-core-media")]
        /// For file types that support media chunk duration, such as QuickTime Movie files, specifies the duration to be used for each chunk of sample data in the output file.
        ///
        /// Chunk duration can influence the granularity of the I/O performed when reading a media file, e.g. during playback. A larger chunk duration can result in fewer reads from disk, at the potential expense of a higher memory footprint.
        ///
        /// A "chunk" contains one or more samples. The total duration of the samples in a chunk is no greater than this preferred chunk duration, or the duration of a single sample if the sample's duration is greater than this preferred chunk duration.
        ///
        /// The default value is kCMTimeInvalid, which means that the receiver will choose an appropriate default value.
        ///
        /// This property cannot be set after -startWriting has been called on the receiver.
        ///
        /// This property throws an exception if a duration is set which is non-numeric or non-positive (see CMTIME_IS_NUMERIC).
        #[unsafe(method(preferredMediaChunkDuration))]
        #[unsafe(method_family = none)]
        pub unsafe fn preferredMediaChunkDuration(&self) -> CMTime;

        #[cfg(feature = "objc2-core-media")]
        /// Setter for [`preferredMediaChunkDuration`][Self::preferredMediaChunkDuration].
        #[unsafe(method(setPreferredMediaChunkDuration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPreferredMediaChunkDuration(&self, preferred_media_chunk_duration: CMTime);

        /// For file types that support media chunk alignment, such as QuickTime Movie files, specifies the boundary for media chunk alignment in bytes (e.g. 512).
        ///
        /// The default value is 0, which means that the receiver will choose an appropriate default value. A value of 1 implies that no padding should be used to achieve a particular chunk alignment. It is an error to set a negative value for chunk alignment.
        ///
        /// This property cannot be set after -startWriting has been called on the receiver.
        #[unsafe(method(preferredMediaChunkAlignment))]
        #[unsafe(method_family = none)]
        pub unsafe fn preferredMediaChunkAlignment(&self) -> NSInteger;

        /// Setter for [`preferredMediaChunkAlignment`][Self::preferredMediaChunkAlignment].
        #[unsafe(method(setPreferredMediaChunkAlignment:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPreferredMediaChunkAlignment(
            &self,
            preferred_media_chunk_alignment: NSInteger,
        );

        /// For file types that support writing sample references, such as QuickTime Movie files, specifies the base URL sample references are relative to.
        ///
        /// If the value of this property can be resolved as an absolute URL, the sample locations written to the file when appending sample references will be relative to this URL. The URL must point to a location that is in a directory that is a parent of the sample reference location.
        ///
        /// Usage example:
        ///
        /// Setting the sampleReferenceBaseURL property to "file:///User/johnappleseed/Movies/" and appending sample buffers with the kCMSampleBufferAttachmentKey_SampleReferenceURL attachment set to "file:///User/johnappleseed/Movies/data/movie1.mov" will cause the sample reference "data/movie1.mov" to be written to the movie.
        ///
        /// If the value of the property cannot be resolved as an absolute URL or if it points to a location that is not in a parent directory of the sample reference location, the location referenced in the sample buffer will be written unmodified.
        ///
        /// The default value is nil, which means that the location referenced in the sample buffer will be written unmodified.
        ///
        /// This property cannot be set after -startWriting has been called on the receiver.
        #[unsafe(method(sampleReferenceBaseURL))]
        #[unsafe(method_family = none)]
        pub unsafe fn sampleReferenceBaseURL(&self) -> Option<Retained<NSURL>>;

        /// Setter for [`sampleReferenceBaseURL`][Self::sampleReferenceBaseURL].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setSampleReferenceBaseURL:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setSampleReferenceBaseURL(&self, sample_reference_base_url: Option<&NSURL>);

        /// Specifies where the media data will be laid out and whether the media data will be interleaved as the main media data.
        ///
        /// If this value is set to AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved, AVAssetWriter tries to write the media data for this track before all the media data for AVAssetWriterInputs with this property set to AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData.
        ///
        /// Use of this property is recommended for optimizing tracks that contain a small amount of data that is needed all at once, independent of playback time, such as chapter name tracks and chapter image tracks.
        /// Keep it set to AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData for tracks whose media data that's needed only as its presentation time is approaching and, when multiple inputs are present that supply media data that will be played concurrently, should be interleaved for optimal access.
        ///
        /// For file types that support preloading media data such as QuickTime movie file, if this value is set to AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved, AVAssetWriter will write an indication such as 'load' atom that the whole media data should be preloaded.
        ///
        /// The default value is AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData, which means that the receiver will not write the indication and that the media data will be interleaved.
        ///
        /// This property cannot be set after -startWriting has been called on the receiver.
        #[unsafe(method(mediaDataLocation))]
        #[unsafe(method_family = none)]
        pub unsafe fn mediaDataLocation(&self) -> Retained<AVAssetWriterInputMediaDataLocation>;

        /// Setter for [`mediaDataLocation`][Self::mediaDataLocation].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setMediaDataLocation:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setMediaDataLocation(
            &self,
            media_data_location: &AVAssetWriterInputMediaDataLocation,
        );
    );
}
593
/// AVAssetWriterInputTrackAssociations.
///
/// Bindings for the `AVAssetWriterInputTrackAssociations` Objective-C
/// category on `AVAssetWriterInput`: linking the receiver's track to the
/// track of another input (e.g. a timecode track).
impl AVAssetWriterInput {
    extern_methods!(
        /// Tests whether an association between the tracks corresponding to a pair of inputs is valid.
        ///
        /// If the type of association requires tracks of specific media types that don't match the media types of the inputs, or if the output file type does not support track associations, -canAddTrackAssociationWithTrackOfInput:type: will return NO.
        ///
        /// - Parameter input: The instance of AVAssetWriterInput with a corresponding track to associate with track corresponding with the receiver.
        /// - Parameter trackAssociationType: The type of track association to test. Common track association types, such as AVTrackAssociationTypeTimecode, are defined in AVAssetTrack.h.
        #[unsafe(method(canAddTrackAssociationWithTrackOfInput:type:))]
        #[unsafe(method_family = none)]
        pub unsafe fn canAddTrackAssociationWithTrackOfInput_type(
            &self,
            input: &AVAssetWriterInput,
            track_association_type: &NSString,
        ) -> bool;

        /// Associates the track corresponding to the specified input with the track corresponding with the receiver.
        ///
        /// If the type of association requires tracks of specific media types that don't match the media types of the inputs, or if the output file type does not support track associations, an NSInvalidArgumentException is raised.
        ///
        /// Track associations cannot be added after writing on the receiver's AVAssetWriter has started.
        ///
        /// This method throws an exception if the input and track association type cannot be added (see -canAddTrackAssociationWithTrackOfInput:type:).
        ///
        /// - Parameter input: The instance of AVAssetWriterInput with a corresponding track to associate with track corresponding to the receiver.
        /// - Parameter trackAssociationType: The type of track association to add. Common track association types, such as AVTrackAssociationTypeTimecode, are defined in AVAssetTrack.h.
        #[unsafe(method(addTrackAssociationWithTrackOfInput:type:))]
        #[unsafe(method_family = none)]
        pub unsafe fn addTrackAssociationWithTrackOfInput_type(
            &self,
            input: &AVAssetWriterInput,
            track_association_type: &NSString,
        );
    );
}
630
/// AVAssetWriterInputMultiPass.
///
/// Bindings for the `AVAssetWriterInputMultiPass` Objective-C category on
/// `AVAssetWriterInput`: opting into and driving multi-pass encoding.
impl AVAssetWriterInput {
    extern_methods!(
        /// Indicates whether the input should attempt to encode the source media data using multiple passes.
        ///
        /// The input may be able to achieve higher quality and/or lower data rate by performing multiple passes over the source media. It does this by analyzing the media data that has been appended and re-encoding certain segments with different parameters. In order to do this re-encoding, the media data for these segments must be appended again. See -markCurrentPassAsFinished and the property currentPassDescription for the mechanism by which the input nominates segments for re-appending.
        ///
        /// When the value of this property is YES, the value of readyForMoreMediaData for other inputs attached to the same AVAssetWriter may be NO more often and/or for longer periods of time. In particular, the value of readyForMoreMediaData for inputs that do not (or cannot) perform multiple passes may start out as NO after -[AVAssetWriter startWriting] has been called and may not change to YES until after all multi-pass inputs have completed their final pass.
        ///
        /// When the value of this property is YES, the input may store data in one or more temporary files before writing compressed samples to the output file. Use the AVAssetWriter property directoryForTemporaryFiles if you need to control the location of temporary file writing.
        ///
        /// The default value is NO, meaning that no additional analysis will occur and no segments will be re-encoded. Not all asset writer input configurations (for example, inputs configured with certain media types or to use certain encoders) can benefit from performing multiple passes over the source media. To determine whether the selected encoder can perform multiple passes, query the value of canPerformMultiplePasses after calling -startWriting.
        ///
        /// For best results, do not set both this property and expectsMediaDataInRealTime to YES.
        ///
        /// This property cannot be set after writing on the receiver's AVAssetWriter has started.
        #[unsafe(method(performsMultiPassEncodingIfSupported))]
        #[unsafe(method_family = none)]
        pub unsafe fn performsMultiPassEncodingIfSupported(&self) -> bool;

        /// Setter for [`performsMultiPassEncodingIfSupported`][Self::performsMultiPassEncodingIfSupported].
        #[unsafe(method(setPerformsMultiPassEncodingIfSupported:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setPerformsMultiPassEncodingIfSupported(
            &self,
            performs_multi_pass_encoding_if_supported: bool,
        );

        /// Indicates whether the input might perform multiple passes over appended media data.
        ///
        /// When the value for this property is YES, your source for media data should be configured for random access. After appending all of the media data for the current pass (as specified by the currentPassDescription property), call -markCurrentPassAsFinished to start the process of determining whether additional passes are needed. Note that it is still possible in this case for the input to perform only the initial pass, if it determines that there will be no benefit to performing multiple passes.
        ///
        /// When the value for this property is NO, your source for media data only needs to support sequential access. In this case, append all of the source media once and call -markAsFinished.
        ///
        /// In the default configuration of AVAssetWriterInput, the value for this property will be NO. Currently the only way for this property to become YES is when performsMultiPassEncodingIfSupported has been set to YES. The final value will be available after -startWriting is called, when a specific encoder has been chosen.
        ///
        /// This property is key-value observable.
        #[unsafe(method(canPerformMultiplePasses))]
        #[unsafe(method_family = none)]
        pub unsafe fn canPerformMultiplePasses(&self) -> bool;

        /// Provides an object that describes the requirements, such as source time ranges to append or re-append, for the current pass.
        ///
        /// If the value of this property is nil, it means there is no request to be fulfilled and -markAsFinished should be called on the asset writer input.
        ///
        /// During the first pass, the request will contain a single time range from zero to positive infinity, indicating that all media from the source should be appended. This will also be true when canPerformMultiplePasses is NO, in which case only one pass will be performed.
        ///
        /// The value of this property will be nil before -startWriting is called on the attached asset writer. It will transition to an initial non-nil value during the call to -startWriting. After that, the value of this property will change only after a call to -markCurrentPassAsFinished. For an easy way to be notified at the beginning of each pass, see -respondToEachPassDescriptionOnQueue:usingBlock:.
        ///
        /// This property is key-value observable. Observers should not assume that they will be notified of changes on a specific thread.
        #[unsafe(method(currentPassDescription))]
        #[unsafe(method_family = none)]
        pub unsafe fn currentPassDescription(
            &self,
        ) -> Option<Retained<AVAssetWriterInputPassDescription>>;

        #[cfg(feature = "dispatch2")]
        /// Instructs the receiver to invoke a client-supplied block whenever a new pass has begun.
        ///
        /// A typical block passed to this method will perform the following steps:
        ///
        /// 1. Query the value of the receiver's currentPassDescription property and reconfigure the source of media data (e.g. AVAssetReader) accordingly
        /// 2. Call -requestMediaDataWhenReadyOnQueue:usingBlock: to begin appending data for the current pass
        /// 3. Exit
        ///
        /// When all media data has been appended for the current request, call -markCurrentPassAsFinished to begin the process of determining whether an additional pass is warranted. If an additional pass is warranted, the block passed to this method will be invoked to begin the next pass. If no additional passes are needed, the block passed to this method will be invoked one final time so the client can invoke -markAsFinished in response to the value of currentPassDescription becoming nil.
        ///
        /// Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
        ///
        /// This method throws an exception if called more than once.
        ///
        /// - Parameter queue: The queue on which the block should be invoked.
        /// - Parameter block: A block the receiver should invoke whenever a new pass has begun.
        ///
        /// # Safety
        ///
        /// - `queue` possibly has additional threading requirements.
        /// - `block` must be a valid pointer.
        #[unsafe(method(respondToEachPassDescriptionOnQueue:usingBlock:))]
        #[unsafe(method_family = none)]
        pub unsafe fn respondToEachPassDescriptionOnQueue_usingBlock(
            &self,
            queue: &DispatchQueue,
            block: dispatch_block_t,
        );

        /// Instructs the receiver to analyze the media data that has been appended and determine whether the results could be improved by re-encoding certain segments.
        ///
        /// When the value of canPerformMultiplePasses is YES, call this method after you have appended all of your media data. After the receiver analyzes whether an additional pass is warranted, the value of currentPassDescription will change (usually asynchronously) to describe how to set up for the next pass. Although it is possible to use key-value observing to determine when the value of currentPassDescription has changed, it is typically more convenient to invoke -respondToEachPassDescriptionOnQueue:usingBlock: in order to start the work for each pass.
        ///
        /// After re-appending the media data for all of the time ranges of the new pass, call this method again to determine whether additional segments should be re-appended in another pass.
        ///
        /// Calling this method effectively cancels any previous invocation of -requestMediaDataWhenReadyOnQueue:usingBlock:, meaning that -requestMediaDataWhenReadyOnQueue:usingBlock: can be invoked again for each new pass. -respondToEachPassDescriptionOnQueue:usingBlock: provides a convenient way to consolidate these invocations in your code.
        ///
        /// After each pass, you have the option of keeping the most recent results by calling -markAsFinished instead of this method. If the value of currentPassDescription is nil at the beginning of a pass, call -markAsFinished to tell the receiver to not expect any further media data.
        ///
        /// If the value of canPerformMultiplePasses is NO, the value of currentPassDescription will immediately become nil after calling this method.
        ///
        /// Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
        #[unsafe(method(markCurrentPassAsFinished))]
        #[unsafe(method_family = none)]
        pub unsafe fn markCurrentPassAsFinished(&self);
    );
}
735
extern_class!(
    /// Defines an interface for querying information about the requirements of the current pass, such as the time ranges of media data to append.
    ///
    /// Instances are normally obtained from
    /// [`AVAssetWriterInput::currentPassDescription`] rather than created
    /// directly.
    ///
    /// Subclasses of this type that are used from Swift must fulfill the requirements of a Sendable type.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputpassdescription?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInputPassDescription;
);
746
// SAFETY: The upstream header marks this class as Sendable (see the class docs
// above: "must fulfill the requirements of a Sendable type"), so ownership of
// an instance may be transferred across threads.
unsafe impl Send for AVAssetWriterInputPassDescription {}
748
// SAFETY: Derived from the upstream Sendable annotation noted on the class
// docs above. NOTE(review): `sourceTimeRanges` is documented below as
// non-atomic; upstream nevertheless declares the type Sendable — confirm.
unsafe impl Sync for AVAssetWriterInputPassDescription {}
750
// AVAssetWriterInputPassDescription is a plain NSObject subclass.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInputPassDescription {}
);
754
impl AVAssetWriterInputPassDescription {
    extern_methods!(
        /// Invokes the Objective-C `-init` initializer on an allocated instance.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Invokes the Objective-C `+new` convenience constructor.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// An NSArray of NSValue objects wrapping CMTimeRange structures, each representing one source time range.
        ///
        /// The value of this property is suitable for using as a parameter for -[AVAssetReaderOutput resetForReadingTimeRanges:].
        ///
        /// This property is not atomic.
        ///
        /// # Safety
        ///
        /// This might not be thread-safe.
        #[unsafe(method(sourceTimeRanges))]
        #[unsafe(method_family = none)]
        pub unsafe fn sourceTimeRanges(&self) -> Retained<NSArray<NSValue>>;
    );
}
779
extern_class!(
    /// Appends video frames packaged as CVPixelBuffer objects to a single AVAssetWriterInput (see the appending methods below).
    ///
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputpixelbufferadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInputPixelBufferAdaptor;
);
786
// AVAssetWriterInputPixelBufferAdaptor is a plain NSObject subclass.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInputPixelBufferAdaptor {}
);
790
impl AVAssetWriterInputPixelBufferAdaptor {
    extern_methods!(
        /// Invokes the Objective-C `-init` initializer on an allocated instance.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Invokes the Objective-C `+new` convenience constructor.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new pixel buffer adaptor to receive pixel buffers for writing to the output file.
        ///
        /// In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames being appended.
        ///
        /// Pixel buffer attributes keys for the pixel buffer pool are defined in `<CoreVideo/CVPixelBuffer.h>`. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: for advice on choosing a pixel format.
        ///
        /// Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
        ///
        /// This method throws an exception if the input is already attached to another asset writer input pixel buffer adaptor or if the input has already started writing (the asset writer has progressed beyond AVAssetWriterStatusUnknown).
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append pixel buffers. Currently, only asset writer inputs that accept media data of type AVMediaTypeVideo can be used to initialize a pixel buffer adaptor.
        /// - Parameter sourcePixelBufferAttributes: Specifies the attributes of pixel buffers that will be vended by the input's CVPixelBufferPool.
        ///
        /// - Returns: An instance of AVAssetWriterInputPixelBufferAdaptor.
        ///
        /// # Safety
        ///
        /// `source_pixel_buffer_attributes` generic should be of the correct type.
        #[unsafe(method(assetWriterInputPixelBufferAdaptorWithAssetWriterInput:sourcePixelBufferAttributes:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputPixelBufferAdaptorWithAssetWriterInput_sourcePixelBufferAttributes(
            input: &AVAssetWriterInput,
            source_pixel_buffer_attributes: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        /// Creates a new pixel buffer adaptor to receive pixel buffers for writing to the output file.
        ///
        /// In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames being appended.
        ///
        /// Pixel buffer attributes keys for the pixel buffer pool are defined in `<CoreVideo/CVPixelBuffer.h>`. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: for advice on choosing a pixel format.
        ///
        /// Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
        ///
        /// This method throws an exception if the input is already attached to another asset writer input pixel buffer adaptor or if the input has already started writing (the asset writer has progressed beyond AVAssetWriterStatusUnknown).
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append pixel buffers. Currently, only asset writer inputs that accept media data of type AVMediaTypeVideo can be used to initialize a pixel buffer adaptor.
        /// - Parameter sourcePixelBufferAttributes: Specifies the attributes of pixel buffers that will be vended by the input's CVPixelBufferPool.
        ///
        /// - Returns: An instance of AVAssetWriterInputPixelBufferAdaptor.
        ///
        /// # Safety
        ///
        /// `source_pixel_buffer_attributes` generic should be of the correct type.
        #[unsafe(method(initWithAssetWriterInput:sourcePixelBufferAttributes:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetWriterInput_sourcePixelBufferAttributes(
            this: Allocated<Self>,
            input: &AVAssetWriterInput,
            source_pixel_buffer_attributes: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        /// The asset writer input to which the receiver should append pixel buffers.
        #[unsafe(method(assetWriterInput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInput(&self) -> Retained<AVAssetWriterInput>;

        /// The pixel buffer attributes of pixel buffers that will be vended by the receiver's CVPixelBufferPool.
        ///
        /// The value of this property is a dictionary containing pixel buffer attributes keys defined in `<CoreVideo/CVPixelBuffer.h>`.
        #[unsafe(method(sourcePixelBufferAttributes))]
        #[unsafe(method_family = none)]
        pub unsafe fn sourcePixelBufferAttributes(
            &self,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "objc2-core-video")]
        /// A pixel buffer pool that will vend and efficiently recycle CVPixelBuffer objects that can be appended to the receiver.
        ///
        /// For maximum efficiency, clients should create CVPixelBuffer objects for appendPixelBuffer:withPresentationTime: by using this pool with the CVPixelBufferPoolCreatePixelBuffer() function.
        ///
        /// The value of this property will be NULL before -[AVAssetWriter startWriting] is called on the associated AVAssetWriter object.
        ///
        /// This property is key value observable.
        ///
        /// This property throws an exception if a pixel buffer pool cannot be created with this asset writer input pixel buffer adaptor's source pixel buffer attributes (must specify width, height, and either pixel format or pixel format description).
        #[unsafe(method(pixelBufferPool))]
        #[unsafe(method_family = none)]
        pub unsafe fn pixelBufferPool(&self) -> Option<Retained<CVPixelBufferPool>>;

        #[cfg(all(feature = "objc2-core-media", feature = "objc2-core-video"))]
        /// Appends a pixel buffer to the receiver.
        ///
        /// The receiver will retain the CVPixelBuffer until it is done with it, and then release it. Do not modify a CVPixelBuffer or its contents after you have passed it to this method.
        ///
        /// For optimal performance the format of the pixel buffer should match one of the native formats supported by the selected video encoder. Below are some recommendations:
        ///
        /// The H.264 and HEVC encoders natively support kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange and kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, which should be used with 8-bit 4:2:0 video and full range input respectively; other related pixel formats in CoreVideo/CVPixelBuffer.h are ideal for 4:2:2 and 4:4:4 (and for HEVC, 10-bit). The JPEG encoder on iOS and Apple Silicon macOS natively supports kCVPixelFormatType_422YpCbCr8FullRange. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended on iOS and macOS.
        ///
        /// Pixel buffers not in a natively supported format will be converted internally prior to encoding when possible. Pixel format conversions within the same range (video or full) are generally faster than conversions between different ranges.
        ///
        /// The ProRes encoders can preserve high bit depth sources, supporting up to 12bits/ch. ProRes 4444 can contain a mathematically lossless alpha channel and it doesn't do any chroma subsampling. This makes ProRes 4444 ideal for quality critical applications. If you are working with 8bit sources ProRes is also a good format to use due to its high image quality. Use either of the recommended pixel formats above. Note that RGB pixel formats by definition have 4:4:4 chroma sampling.
        ///
        /// If you are working with high bit depth sources the following yuv pixel formats are recommended when encoding to ProRes: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, and kCVPixelFormatType_422YpCbCr10. When working in the RGB domain kCVPixelFormatType_64ARGB is recommended. Scaling and color matching are not currently supported when using AVAssetWriter with any of these high bit depth pixel formats. Please make sure that your track's output settings dictionary specifies the same width and height as the buffers you will be appending. Do not include AVVideoScalingModeKey or AVVideoColorPropertiesKey.
        ///
        /// Before calling this method, you must ensure that the input that underlies the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
        ///
        /// This method throws an exception if the presentation time is non-numeric (see CMTIME_IS_NUMERIC) or if "readyForMoreMediaData" is NO.
        ///
        /// - Parameter pixelBuffer: The CVPixelBuffer to be appended.
        /// - Parameter presentationTime: The presentation time for the pixel buffer to be appended. This time will be considered relative to the time passed to -[AVAssetWriter startSessionAtSourceTime:] to determine the timing of the frame in the output file.
        ///
        /// - Returns: A BOOL value indicating success of appending the pixel buffer. If a result of NO is returned, clients can check the value of AVAssetWriter.status to determine whether the writing operation completed, failed, or was cancelled.  If the status is AVAssetWriterStatusFailed, AVAssetWriter.error will contain an instance of NSError that describes the failure.
        #[unsafe(method(appendPixelBuffer:withPresentationTime:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendPixelBuffer_withPresentationTime(
            &self,
            pixel_buffer: &CVPixelBuffer,
            presentation_time: CMTime,
        ) -> bool;
    );
}
918
extern_class!(
    /// Appends tagged buffer groups (e.g. multi-layer video frames) packaged as CMTaggedBufferGroup objects to a single AVAssetWriterInput (see the appending methods below).
    ///
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputtaggedpixelbuffergroupadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInputTaggedPixelBufferGroupAdaptor;
);
925
// AVAssetWriterInputTaggedPixelBufferGroupAdaptor is a plain NSObject subclass.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInputTaggedPixelBufferGroupAdaptor {}
);
929
impl AVAssetWriterInputTaggedPixelBufferGroupAdaptor {
    extern_methods!(
        /// Invokes the Objective-C `-init` initializer on an allocated instance.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Invokes the Objective-C `+new` convenience constructor.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new tagged buffer adaptor to receive tagged buffer groups for writing to the output file.
        ///
        /// In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames being appended.
        ///
        /// Pixel buffer attributes keys for the pixel buffer pool are defined in `<CoreVideo/CVPixelBuffer.h>`. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: for advice on choosing a pixel format.
        ///
        /// Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
        ///
        /// This method throws an exception if the input is already attached to another asset writer input tagged buffer group adaptor or if the input has already started writing (the asset writer has progressed beyond AVAssetWriterStatusUnknown).
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append tagged buffer groups. Currently, only asset writer inputs that accept media data of type AVMediaTypeVideo can be used to initialize a tagged buffer adaptor.
        /// - Parameter sourcePixelBufferAttributes: Specifies the attributes of pixel buffers of tagged buffer groups that will be vended by the input's CVPixelBufferPool.
        ///
        /// - Returns: An instance of AVAssetWriterInputTaggedPixelBufferGroupAdaptor.
        ///
        /// # Safety
        ///
        /// `source_pixel_buffer_attributes` generic should be of the correct type.
        #[unsafe(method(assetWriterInputTaggedPixelBufferGroupAdaptorWithAssetWriterInput:sourcePixelBufferAttributes:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputTaggedPixelBufferGroupAdaptorWithAssetWriterInput_sourcePixelBufferAttributes(
            input: &AVAssetWriterInput,
            source_pixel_buffer_attributes: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        /// Creates a new tagged buffer group adaptor to receive tagged buffer groups for writing to the output file.
        ///
        /// In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames of tagged buffer groups being appended.
        ///
        /// Pixel buffer attributes keys for the pixel buffer pool are defined in `<CoreVideo/CVPixelBuffer.h>`. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: in AVAssetWriterInputPixelBufferAdaptor for advice on choosing a pixel format.
        ///
        /// Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
        ///
        /// It is an error to initialize an instance of AVAssetWriterInputTaggedPixelBufferGroupAdaptor with an asset writer input that is already attached to another instance of AVAssetWriterInputTaggedPixelBufferGroupAdaptor. It is also an error to initialize an instance of AVAssetWriterInputTaggedPixelBufferGroupAdaptor with an asset writer input whose asset writer has progressed beyond AVAssetWriterStatusUnknown.
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append tagged buffer groups. In addition to the pixel buffer adaptor, asset writer inputs with media data of type AVMediaTypeVideo can be used to initialize a tagged buffer group adaptor.
        /// - Parameter sourcePixelBufferAttributes: Specifies the attributes of pixel buffers of tagged buffer groups that will be vended by the input's CVPixelBufferPool.
        ///
        /// - Returns: An instance of AVAssetWriterInputTaggedPixelBufferGroupAdaptor.
        ///
        /// # Safety
        ///
        /// `source_pixel_buffer_attributes` generic should be of the correct type.
        #[unsafe(method(initWithAssetWriterInput:sourcePixelBufferAttributes:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetWriterInput_sourcePixelBufferAttributes(
            this: Allocated<Self>,
            input: &AVAssetWriterInput,
            source_pixel_buffer_attributes: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        /// The asset writer input to which the receiver should append tagged buffer groups.
        #[unsafe(method(assetWriterInput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInput(&self) -> Retained<AVAssetWriterInput>;

        /// The pixel buffer attributes of pixel buffers that will be vended by the receiver's CVPixelBufferPool.
        ///
        /// The value of this property is a dictionary containing pixel buffer attributes keys defined in `<CoreVideo/CVPixelBuffer.h>`.
        #[unsafe(method(sourcePixelBufferAttributes))]
        #[unsafe(method_family = none)]
        pub unsafe fn sourcePixelBufferAttributes(
            &self,
        ) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "objc2-core-video")]
        /// A pixel buffer pool that will vend and efficiently recycle CVPixelBuffer objects of tagged buffer groups that can be appended to the receiver.
        ///
        /// For maximum efficiency, clients should create CVPixelBuffer objects of tagged buffer groups for appendTaggedPixelBufferGroup:withPresentationTime: by using this pool with the CVPixelBufferPoolCreatePixelBuffer() function.
        ///
        /// The value of this property will be NULL before -[AVAssetWriter startWriting] is called on the associated AVAssetWriter object. Clients should read this property after calling -[AVAssetWriter startWriting] to get a non-NULL value.
        ///
        /// This property is not key value observable.
        #[unsafe(method(pixelBufferPool))]
        #[unsafe(method_family = none)]
        pub unsafe fn pixelBufferPool(&self) -> Option<Retained<CVPixelBufferPool>>;

        #[cfg(feature = "objc2-core-media")]
        /// Appends a tagged buffer group to the receiver.
        ///
        /// The receiver will retain the CMTaggedBufferGroup until it is done with it, and then release it. Do not modify a CMTaggedBufferGroup or its contents after you have passed it to this method.
        ///
        /// Before calling this method, you must ensure that the input that underlies the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
        ///
        /// In an AVAssetWriterInput instance creation with AVMediaTypeVideo, kVTCompressionPropertyKey_MVHEVCVideoLayerIDs key must be specified as part of the dictionary given for AVVideoCompressionPropertiesKey. It sets video layer IDs to a target multi-image video encoder. This method checks the values for kCMTagCategory_VideoLayerID tag in tag collections of taggedPixelBufferGroup over the array values for kVTCompressionPropertyKey_MVHEVCVideoLayerIDs key. An NSInvalidArgumentException will be raised if the video layer IDs mismatch between the value of kVTCompressionPropertyKey_MVHEVCVideoLayerIDs in the AVVideoCompressionPropertiesKey sub-dictionary of the input's outputSettings property and tag collections of taggedPixelBufferGroup.
        ///
        /// Below is a sample code sketch focusing on data flow that illustrates how you might append a taggedPixelBufferGroup instance.
        /// ```objc
        /// // Set up an AVAssetWriterInput and AVAssetWriterInputTaggedPixelBufferGroupAdaptor instance
        /// AVAssetWriterInput *assetWriterInput = [[AVAssetWriterInput alloc] initWithMediaType:AVMediaTypeVideo outputSettings:@{
        ///     ..,
        ///     AVVideoCompressionPropertiesKey: @{ (NSString *)kVTCompressionPropertyKey_MVHEVCVideoLayerIDs : .. }}];
        ///
        /// AVAssetWriterInputTaggedPixelBufferGroupAdaptor *assetWriterInputAdaptor = [[AVAssetWriterInputTaggedPixelBufferGroupAdaptor alloc] initWithAssetWriterInput:assetWriterInput ..];
        /// ```
        /// Later, when the writer input is ready for more media data, create and append a tagged buffer group containing one or more pixel buffers and the exact tag values associated with kCMTagCategory_VideoLayerID being specified via kVTCompressionPropertyKey_MVHEVCVideoLayerIDs.
        /// ```objc
        /// // Set up tag collection buffers
        /// CMTag tags[] = CMTagMakeWithSInt64Value(kCMTagCategory_VideoLayerID, ..);
        /// CMItemCount tagCount = sizeof(tags) / sizeof(tags[0]);
        /// CMTagCollectionCreate(.., tags, tagCount, &tagCollection);
        /// CFArrayAppendValue(tagCollectionArray, tagCollection);
        ///
        /// // Set up pixel buffers
        /// CVPixelBufferPoolCreatePixelBuffer(.., &pixelBuffer);
        /// CFArrayAppendValue(pixelBufferArray, pixelBuffer);
        ///
        /// // Append a CMTaggedBufferGroupRef instance to asset writer input
        /// CMTaggedBufferGroupCreate(.., tagCollectionArray, pixelBufferArray, &taggedBufferGroup);
        /// [assetWriterInputAdaptor appendTaggedPixelBufferGroup:taggedBufferGroup ..];
        /// ```
        /// - Parameter taggedPixelBufferGroup: The CMTaggedBufferGroup to be appended. All of the buffers in taggedPixelBufferGroup should be CVPixelBuffers, and they should correspond to tag collections that contain kCMTagCategory_VideoLayerID values matching the list set using kVTCompressionPropertyKey_MVHEVCVideoLayerIDs. The pixel buffers should be IOSurface-backed.
        /// - Parameter presentationTime: The presentation time for the tagged buffer group to be appended. This time will be considered relative to the time passed to -[AVAssetWriter startSessionAtSourceTime:] to determine the timing of the frame in the output file.
        ///
        /// - Returns: A BOOL value indicating success of appending the tagged buffer group. If a result of NO is returned, clients can check the value of AVAssetWriter.status to determine whether the writing operation completed, failed, or was cancelled. If the status is AVAssetWriterStatusFailed, AVAssetWriter.error will contain an instance of NSError that describes the failure.
        #[unsafe(method(appendTaggedPixelBufferGroup:withPresentationTime:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendTaggedPixelBufferGroup_withPresentationTime(
            &self,
            tagged_pixel_buffer_group: &CMTaggedBufferGroup,
            presentation_time: CMTime,
        ) -> bool;
    );
}
1080
extern_class!(
    /// Appends timed metadata groups (AVTimedMetadataGroup) to a single AVAssetWriterInput (see the appending methods below).
    ///
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputmetadataadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInputMetadataAdaptor;
);
1087
// AVAssetWriterInputMetadataAdaptor is a plain NSObject subclass.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInputMetadataAdaptor {}
);
1091
impl AVAssetWriterInputMetadataAdaptor {
    extern_methods!(
        /// Invokes the Objective-C `-init` initializer on an allocated instance.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Invokes the Objective-C `+new` convenience constructor.
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new timed metadata group adaptor to receive instances of AVTimedMetadataGroup for writing to the output file.
        ///
        /// The instance of AVAssetWriterInput passed in to this method must have been created with a format hint indicating all possible combinations of identifier (or, alternatively, key and keySpace), dataType, and extendedLanguageTag that will be appended to the metadata adaptor. It is an error to append metadata items not represented in the input's format hint.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - input is already attached to another instance of AVAssetWriterInputMetadataAdaptor
        /// - input's asset writer has already started writing (progressed beyond AVAssetWriterStatusUnknown)
        /// - input's asset writer does not carry a source format hint
        /// - input's source format hint media subtype is not kCMMetadataFormatType_Boxed
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append groups of timed metadata. Only asset writer inputs that accept media data of type AVMediaTypeMetadata can be used to initialize a timed metadata group adaptor.
        ///
        /// - Returns: An instance of AVAssetWriterInputMetadataAdaptor.
        #[unsafe(method(assetWriterInputMetadataAdaptorWithAssetWriterInput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputMetadataAdaptorWithAssetWriterInput(
            input: &AVAssetWriterInput,
        ) -> Retained<Self>;

        /// Creates a new timed metadata group adaptor to receive instances of AVTimedMetadataGroup for writing to the output file.
        ///
        /// The instance of AVAssetWriterInput passed in to this method must have been created with a format hint indicating all possible combinations of identifier (or, alternatively, key and keySpace), dataType, and extendedLanguageTag that will be appended to the metadata adaptor. It is an error to append metadata items not represented in the input's format hint. For help creating a suitable format hint, see -[AVTimedMetadataGroup copyFormatDescription].
        ///
        /// This method throws an exception for any of the following reasons:
        /// - input is already attached to another instance of AVAssetWriterInputMetadataAdaptor
        /// - input's asset writer has already started writing (progressed beyond AVAssetWriterStatusUnknown)
        /// - input's asset writer does not carry a source format hint
        /// - input's source format hint media subtype is not kCMMetadataFormatType_Boxed
        ///
        /// - Parameter input: An instance of AVAssetWriterInput to which the receiver should append groups of timed metadata. Only asset writer inputs that accept media data of type AVMediaTypeMetadata can be used to initialize a timed metadata group adaptor.
        ///
        /// - Returns: An instance of AVAssetWriterInputMetadataAdaptor.
        #[unsafe(method(initWithAssetWriterInput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetWriterInput(
            this: Allocated<Self>,
            input: &AVAssetWriterInput,
        ) -> Retained<Self>;

        /// The asset writer input to which the receiver should append timed metadata groups.
        #[unsafe(method(assetWriterInput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInput(&self) -> Retained<AVAssetWriterInput>;

        #[cfg(feature = "AVTimedMetadataGroup")]
        /// Appends a timed metadata group to the receiver.
        ///
        /// The receiver will retain the AVTimedMetadataGroup until it is done with it, and then release it.
        ///
        /// The timing of the metadata items in the output asset will correspond to the timeRange of the AVTimedMetadataGroup, regardless of the values of the time and duration properties of the individual items.
        ///
        /// Before calling this method, you must ensure that the input that underlies the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
        ///
        /// This method throws an exception if the attached asset writer input has not been added to an asset writer or -startWriting has not been called on that asset writer.
        ///
        /// - Parameter timedMetadataGroup: The AVTimedMetadataGroup to be appended.
        ///
        /// - Returns: A BOOL value indicating success of appending the timed metadata group.  If a result of NO is returned, AVAssetWriter.error will contain more information about why appending the timed metadata group failed.
        #[unsafe(method(appendTimedMetadataGroup:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendTimedMetadataGroup(
            &self,
            timed_metadata_group: &AVTimedMetadataGroup,
        ) -> bool;
    );
}
1168
extern_class!(
    /// Appends caption media to a single AVAssetWriterInput (see the initializer docs below for supported media types).
    ///
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetwriterinputcaptionadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetWriterInputCaptionAdaptor;
);
1175
// Declares that AVAssetWriterInputCaptionAdaptor conforms to NSObjectProtocol,
// mirroring the Objective-C class's implicit NSObject protocol conformance.
extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetWriterInputCaptionAdaptor {}
);
1179
impl AVAssetWriterInputCaptionAdaptor {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new caption adaptor for writing to the specified asset writer input.
        #[unsafe(method(assetWriterInputCaptionAdaptorWithAssetWriterInput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInputCaptionAdaptorWithAssetWriterInput(
            input: &AVAssetWriterInput,
        ) -> Retained<Self>;

        /// Creates a new caption adaptor for writing to the specified asset writer input.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - input is nil
        /// - the input's media type is not supported (should use text or closed caption)
        /// - the input is already attached to an asset writer caption adaptor
        /// - the input has already started writing
        #[unsafe(method(initWithAssetWriterInput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetWriterInput(
            this: Allocated<Self>,
            input: &AVAssetWriterInput,
        ) -> Retained<Self>;

        /// The asset writer input that was used to initialize the receiver.
        #[unsafe(method(assetWriterInput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetWriterInput(&self) -> Retained<AVAssetWriterInput>;

        #[cfg(feature = "AVCaption")]
        /// Append a single caption to be written.
        ///
        /// If this method returns NO, check the value of AVAssetWriter.status on the attached asset writer to determine why appending failed.
        ///
        /// The start time of each caption's timeRange property must be numeric (see CMTIME_IS_NUMERIC) and must be at least as large as the start time of any previous caption (including any captions present in a group appended via -appendCaptionGroup:). In other words, the sequence of captions appended using this method must have monotonically increasing start times.
        ///
        /// The duration of each caption's timeRange property must be numeric.
        ///
        /// - Parameter caption: The caption to append.
        ///
        /// - Returns: Returns YES if the operation succeeded, NO if it failed.
        #[unsafe(method(appendCaption:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendCaption(&self, caption: &AVCaption) -> bool;

        #[cfg(feature = "AVCaptionGroup")]
        /// Append a group of captions to be written.
        ///
        /// If this method returns NO, check the value of AVAssetWriter.status on the attached asset writer to determine why appending failed.
        /// When appending a sequence of caption groups, the start time of each group must be equal to or greater than the end time of any previous group. The easiest way to achieve this is to create the group using a caption whose duration is kCMTimeInvalid, in which case the duration will be determined by subtracting the start time of the group from the start time of the next appended group.
        /// When mixing calls to -appendCaptionGroup: and -appendCaption:, the start time of each group must be equal to or greater than the end time of any previous captions.
        /// To mark a time range containing no captions, append a group containing an empty caption array.
        ///
        /// - Parameter captionGroup: The AVCaptionGroup to append.
        ///
        /// - Returns: Returns YES if the operation succeeded, NO if it failed.
        #[unsafe(method(appendCaptionGroup:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendCaptionGroup(&self, caption_group: &AVCaptionGroup) -> bool;
    );
}