objc2_av_foundation/generated/AVAssetReaderOutput.rs

//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// AVAssetReaderOutput is an abstract class that defines an interface for reading a single collection of samples of a common media type from an AVAssetReader.
    ///
    ///
    /// Clients can read the media data of an asset by adding one or more concrete instances of AVAssetReaderOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// IMPORTANT PERFORMANCE NOTE: Make sure to set the alwaysCopiesSampleData property to NO if you do not need to modify the sample data in-place, to avoid unnecessary and inefficient copying.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutput?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutput;
);

unsafe impl NSObjectProtocol for AVAssetReaderOutput {}

impl AVAssetReaderOutput {
    extern_methods!(
        #[cfg(feature = "AVMediaFormat")]
        /// The media type of the samples that can be read from the receiver.
        ///
        ///
        /// The value of this property is one of the media type strings defined in AVMediaFormat.h.
        #[unsafe(method(mediaType))]
        #[unsafe(method_family = none)]
        pub unsafe fn mediaType(&self) -> Retained<AVMediaType>;

        /// Indicates whether or not the data in buffers gets copied before being vended to the client.
        ///
        ///
        /// When the value of this property is YES, the AVAssetReaderOutput will always vend a buffer with copied data to the client.  Data in such buffers can be freely modified by the client. When the value of this property is NO, the buffers vended to the client may not be copied.  Such buffers may still be referenced by other entities. The result of modifying a buffer whose data hasn't been copied is undefined.  Requesting buffers whose data hasn't been copied when possible can lead to performance improvements.
        ///
        /// The default value is YES.
        ///
        /// This property throws an exception if a value is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown).
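        ///
        /// A minimal usage sketch (not from the original header): opting out of the
        /// defensive copy when the client only inspects, and never mutates, the vended
        /// sample data. The `output` value is assumed to be an `AVAssetReaderOutput`
        /// whose attached reader has not yet started reading.
        ///
        /// ```ignore
        /// // Must happen before the attached AVAssetReader starts reading.
        /// unsafe { output.setAlwaysCopiesSampleData(false) };
        /// ```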
        #[unsafe(method(alwaysCopiesSampleData))]
        #[unsafe(method_family = none)]
        pub unsafe fn alwaysCopiesSampleData(&self) -> bool;

        /// Setter for [`alwaysCopiesSampleData`][Self::alwaysCopiesSampleData].
        #[unsafe(method(setAlwaysCopiesSampleData:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAlwaysCopiesSampleData(&self, always_copies_sample_data: bool);

        #[cfg(feature = "objc2-core-media")]
        /// Copies the next sample buffer for the output synchronously.
        ///
        ///
        /// Returns: A CMSampleBuffer object referencing the output sample buffer.
        ///
        ///
        /// The client is responsible for calling CFRelease on the returned CMSampleBuffer object when finished with it. This method will return NULL if there are no more sample buffers available for the receiver within the time range specified by its AVAssetReader's timeRange property, or if there is an error that prevents the AVAssetReader from reading more media data. When this method returns NULL, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
        ///
        /// In certain configurations, such as when outputSettings is nil, copyNextSampleBuffer may return marker-only sample buffers as well as sample buffers containing media data. Marker-only sample buffers can be identified by CMSampleBufferGetNumSamples returning 0. Clients who do not need the information attached to marker-only sample buffers may skip them.
        ///
        /// This method throws an exception if this output is not added to an instance of AVAssetReader (using -addOutput:) and -startReading is not called on that asset reader.
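        ///
        /// A minimal read-loop sketch (not from the original header), assuming `output` has
        /// already been added to an `AVAssetReader` on which `startReading` has been called;
        /// the reader bindings themselves live elsewhere in this crate.
        ///
        /// ```ignore
        /// while let Some(sample_buffer) = unsafe { output.copyNextSampleBuffer() } {
        ///     // Each iteration yields a retained CMSampleBuffer; a `None` return means the
        ///     // configured time range is exhausted or the reader failed, so check the
        ///     // reader's status afterwards to distinguish the two cases.
        ///     handle_sample(&sample_buffer); // `handle_sample` is a placeholder for client code
        /// }
        /// ```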
        #[unsafe(method(copyNextSampleBuffer))]
        #[unsafe(method_family = copy)]
        pub unsafe fn copyNextSampleBuffer(&self) -> Option<Retained<CMSampleBuffer>>;
    );
}

/// Methods declared on superclass `NSObject`.
impl AVAssetReaderOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}

/// AVAssetReaderOutputRandomAccess.
impl AVAssetReaderOutput {
    extern_methods!(
        /// Indicates whether the asset reader output supports reconfiguration of the time ranges to read.
        ///
        ///
        /// When the value of this property is YES, the time ranges read by the asset reader output can be reconfigured during reading using the -resetForReadingTimeRanges: method.  This also prevents the attached AVAssetReader from progressing to AVAssetReaderStatusCompleted until -markConfigurationAsFinal has been invoked.
        ///
        /// The default value is NO, which means that the asset reader output may not be reconfigured once reading has begun.  When the value of this property is NO, AVAssetReader may be able to read media data more efficiently, particularly when multiple asset reader outputs are attached.
        ///
        /// This property throws an exception if a value is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown).
        #[unsafe(method(supportsRandomAccess))]
        #[unsafe(method_family = none)]
        pub unsafe fn supportsRandomAccess(&self) -> bool;

        /// Setter for [`supportsRandomAccess`][Self::supportsRandomAccess].
        #[unsafe(method(setSupportsRandomAccess:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setSupportsRandomAccess(&self, supports_random_access: bool);

        /// Starts reading over with a new set of time ranges.
        ///
        ///
        /// Parameter `timeRanges`: An NSArray of NSValue objects, each representing a single CMTimeRange structure
        ///
        ///
        /// This method may only be used if supportsRandomAccess has been set to YES and may not be called after -markConfigurationAsFinal has been invoked.
        ///
        /// This method is often used in conjunction with AVAssetWriter multi-pass (see AVAssetWriterInput category AVAssetWriterInputMultiPass).  In this usage, the caller will invoke -copyNextSampleBuffer until that method returns NULL and then ask the AVAssetWriterInput for a set of time ranges from which it thinks media data should be re-encoded.  These time ranges are then given to this method to set up the asset reader output for the next pass.
        ///
        /// The time ranges set here override the time range set on AVAssetReader.timeRange.  Just as with that property, for each time range in the array the intersection of that time range and CMTimeRangeMake(kCMTimeZero, asset.duration) will take effect.
        ///
        /// If this method is invoked after the status of the attached AVAssetReader has become AVAssetReaderStatusFailed or AVAssetReaderStatusCancelled, no change in status will occur and the result of the next call to -copyNextSampleBuffer will be NULL.
        ///
        /// This method throws an exception if the following conditions are not honored:
        /// - each item in time ranges must be an NSValue
        /// - the start of each time range must be numeric - see CMTIME_IS_NUMERIC
        /// - the duration of each time range must be nonnegative and numeric, or kCMTimePositiveInfinity
        /// - the start of each time range must be greater than or equal to the end of the previous time range
        /// - start times must be strictly increasing
        /// - time ranges must not overlap
        /// - cannot be called before -startReading has been invoked on the attached asset reader
        /// - cannot be called until all samples of media data have been read (i.e. copyNextSampleBuffer returns NULL and the asset reader has not entered a failure state)
        /// - cannot be called without setting "supportsRandomAccess" to YES
        /// - cannot be called after calling -markConfigurationAsFinal
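        ///
        /// A second-pass sketch (not from the original header), assuming `ranges` is a
        /// `Vec<CMTimeRange>` obtained from an AVAssetWriterInput multi-pass session, that
        /// `CMTimeRange` can be boxed via `NSValue::new`, that `NSArray::from_retained_slice`
        /// from objc2-foundation is available, and that the first pass has already been
        /// drained with `copyNextSampleBuffer`.
        ///
        /// ```ignore
        /// let boxed: Vec<Retained<NSValue>> = ranges.iter().map(|r| NSValue::new(*r)).collect();
        /// let time_ranges = NSArray::from_retained_slice(&boxed);
        /// unsafe { output.resetForReadingTimeRanges(&time_ranges) };
        /// // ...read the new pass with copyNextSampleBuffer until it returns None...
        /// unsafe { output.markConfigurationAsFinal() };
        /// ```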
        #[unsafe(method(resetForReadingTimeRanges:))]
        #[unsafe(method_family = none)]
        pub unsafe fn resetForReadingTimeRanges(&self, time_ranges: &NSArray<NSValue>);

        /// Informs the receiver that no more reconfiguration of time ranges is necessary and allows the attached AVAssetReader to advance to AVAssetReaderStatusCompleted.
        ///
        ///
        /// When the value of supportsRandomAccess is YES, the attached asset reader will not advance to AVAssetReaderStatusCompleted until this method is called.
        ///
        /// When the destination of media data vended by the receiver is an AVAssetWriterInput configured for multi-pass encoding, a convenient time to invoke this method is after the asset writer input indicates that no more passes will be performed.
        ///
        /// Once this method has been called, further invocations of -resetForReadingTimeRanges: are disallowed.
        #[unsafe(method(markConfigurationAsFinal))]
        #[unsafe(method_family = none)]
        pub unsafe fn markConfigurationAsFinal(&self);
    );
}

extern_class!(
    /// AVAssetReaderTrackOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading media data from a single AVAssetTrack of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the media data of an asset track by adding an instance of AVAssetReaderTrackOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. The track's media samples can either be read in the format in which they are stored in the asset, or they can be converted to a different format.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadertrackoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderTrackOutput;
);

unsafe impl NSObjectProtocol for AVAssetReaderTrackOutput {}

impl AVAssetReaderTrackOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
        ///
        /// Parameter `outputSettings`: An NSDictionary of output settings to be used for sample output.  See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderTrackOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        ///
        /// A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track.  Initialization will fail if the output settings cannot be used with the specified track.
        ///
        /// AVAssetReaderTrackOutput can only produce uncompressed output.  For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM.  For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h.  AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
        ///
        /// When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
        ///
        /// For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range.  For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
        ///
        /// For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended.
        ///
        /// ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB.  AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
        ///
        /// ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB. To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
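        ///
        /// A minimal creation sketch (not from the original header), assuming `track` is an
        /// AVAssetTrack belonging to the reader's asset; passing `None` keeps the stored
        /// format, exactly as described above.
        ///
        /// ```ignore
        /// let track_output = unsafe {
        ///     AVAssetReaderTrackOutput::assetReaderTrackOutputWithTrack_outputSettings(&track, None)
        /// };
        /// // The output still has to be added to the AVAssetReader before reading starts.
        /// ```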
        #[unsafe(method(assetReaderTrackOutputWithTrack:outputSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutputWithTrack_outputSettings(
            track: &AVAssetTrack,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
        ///
        /// Parameter `outputSettings`: An NSDictionary of output settings to be used for sample output.  See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderTrackOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        ///
        /// A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track.  Initialization will fail if the output settings cannot be used with the specified track.
        ///
        /// AVAssetReaderTrackOutput can only produce uncompressed output.  For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM.  For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h.  AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
        ///
        /// When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
        ///
        /// For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range.  For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
        ///
        /// For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended.
        ///
        /// ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB.  AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
        ///
        /// ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB.  To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - the output settings dictionary contains an unsupported key mentioned above
        /// - the output settings dictionary does not contain any recognized key
        /// - output settings are not compatible with track's media type
        /// - track output settings would cause the output to yield compressed samples
        #[unsafe(method(initWithTrack:outputSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithTrack_outputSettings(
            this: Allocated<Self>,
            track: &AVAssetTrack,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The track from which the receiver reads sample buffers.
        ///
        ///
        /// The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
        #[unsafe(method(track))]
        #[unsafe(method_family = none)]
        pub unsafe fn track(&self) -> Retained<AVAssetTrack>;

        /// The output settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys as specified by either AVAudioSettings.h for audio tracks or AVVideoSettings.h for video tracks.  A value of nil indicates that the receiver will vend samples in their original format as stored in the target track.
        #[unsafe(method(outputSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn outputSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
        ///
        ///
        /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h.  An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
        ///
        /// The default value is AVAudioTimePitchAlgorithmSpectral.
        ///
        /// This property throws an exception for any of the following reasons:
        /// - a value is set after reading has started
        /// - a value is set other than AVAudioTimePitchAlgorithmSpectral, AVAudioTimePitchAlgorithmTimeDomain, or AVAudioTimePitchAlgorithmVarispeed.
        #[unsafe(method(audioTimePitchAlgorithm))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
        #[unsafe(method(setAudioTimePitchAlgorithm:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioTimePitchAlgorithm(
            &self,
            audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
        );
    );
}

extern_class!(
    /// AVAssetReaderAudioMixOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading audio samples that result from mixing the audio from one or more AVAssetTracks of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the audio data mixed from one or more asset tracks by adding an instance of AVAssetReaderAudioMixOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderaudiomixoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderAudioMixOutput;
);

unsafe impl NSObjectProtocol for AVAssetReaderAudioMixOutput {}

impl AVAssetReaderAudioMixOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
        ///
        /// Parameter `audioSettings`: An NSDictionary of audio settings to be used for audio output.
        ///
        /// Returns: An instance of AVAssetReaderAudioMixOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
        ///
        /// For non-nil values of audioSettings, the audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
        ///
        /// A value of nil for audioSettings configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks as well as other considerations that may vary according to device capabilities, operating system version, and other factors. Therefore if you wish to perform any processing on the output, you must examine the CMAudioFormatDescription of the CMSampleBuffers that are provided in order to ensure that your processing is appropriately configured for the output format.
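        ///
        /// A minimal creation sketch (not from the original header), assuming `audio_tracks`
        /// is an `NSArray<AVAssetTrack>` of audio tracks from the reader's asset; `None`
        /// requests the convenient uncompressed format described above.
        ///
        /// ```ignore
        /// let mix_output = unsafe {
        ///     AVAssetReaderAudioMixOutput::assetReaderAudioMixOutputWithAudioTracks_audioSettings(
        ///         &audio_tracks,
        ///         None,
        ///     )
        /// };
        /// ```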
        #[unsafe(method(assetReaderAudioMixOutputWithAudioTracks:audioSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderAudioMixOutputWithAudioTracks_audioSettings(
            audio_tracks: &NSArray<AVAssetTrack>,
            audio_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
        ///
        /// Parameter `audioSettings`: An NSDictionary of audio settings to be used for audio output.
        ///
        /// Returns: An instance of AVAssetReaderAudioMixOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
        ///
        /// For non-nil values of audioSettings, the audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
        ///
        /// A value of nil for audioSettings configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks as well as other considerations that may vary according to device capabilities, operating system version, and other factors. Therefore if you wish to perform any processing on the output, you must examine the CMAudioFormatDescription of the CMSampleBuffers that are provided in order to ensure that your processing is appropriately configured for the output format.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - an audio track does not have media type AVMediaTypeAudio
        /// - an audio track belongs to a different AVAsset
        /// - the audio settings contains an AVSampleRateConverterAudioQualityKey
        /// - the output would be compressed
        #[unsafe(method(initWithAudioTracks:audioSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAudioTracks_audioSettings(
            this: Allocated<Self>,
            audio_tracks: &NSArray<AVAssetTrack>,
            audio_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The tracks from which the receiver reads mixed audio.
        ///
        ///
        /// The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
        #[unsafe(method(audioTracks))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTracks(&self) -> Retained<NSArray<AVAssetTrack>>;

        /// The audio settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys from AVAudioSettings.h (linear PCM only).  A value of nil indicates that the receiver will return audio samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the receiver's audio tracks.
        #[unsafe(method(audioSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVAudioMix")]
        /// The audio mix used by the receiver.
        ///
        ///
        /// The value of this property is an AVAudioMix that can be used to specify how the volume of audio samples read from each source track will change over the timeline of the source asset.
        ///
        /// This property throws an exception for any of the following reasons:
        /// - an audio mix is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown)
        /// - setting an audio mix containing a track that was not used to create the receiver
        /// - an audio mix is set containing an invalid audio time pitch algorithm
        #[unsafe(method(audioMix))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioMix(&self) -> Option<Retained<AVAudioMix>>;

        #[cfg(feature = "AVAudioMix")]
        /// Setter for [`audioMix`][Self::audioMix].
        #[unsafe(method(setAudioMix:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioMix(&self, audio_mix: Option<&AVAudioMix>);

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
        ///
        ///
        /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h.  An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
        ///
        /// The default value is AVAudioTimePitchAlgorithmSpectral.
        #[unsafe(method(audioTimePitchAlgorithm))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
        #[unsafe(method(setAudioTimePitchAlgorithm:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioTimePitchAlgorithm(
            &self,
            audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
        );
    );
}

extern_class!(
    /// AVAssetReaderVideoCompositionOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading video frames that have been composited together from the frames in one or more AVAssetTracks of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the video frames composited from one or more asset tracks by adding an instance of AVAssetReaderVideoCompositionOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadervideocompositionoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderVideoCompositionOutput;
);

unsafe impl NSObjectProtocol for AVAssetReaderVideoCompositionOutput {}

impl AVAssetReaderVideoCompositionOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
        ///
        /// Parameter `videoSettings`: An NSDictionary of video settings to be used for video output.  See AVVideoSettings.h for more information about how to construct a video settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderVideoCompositionOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
        ///
        /// A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks.  Initialization will fail if the video settings cannot be used with the specified tracks.
        ///
        /// AVAssetReaderVideoCompositionOutput can only produce uncompressed output.  This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h.  In addition, the following keys are not supported:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
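        ///
        /// A minimal creation sketch (not from the original header), assuming `video_tracks`
        /// is an `NSArray<AVAssetTrack>` of video tracks from the reader's asset; `None`
        /// requests the convenient uncompressed format described above.
        ///
        /// ```ignore
        /// let composition_output = unsafe {
        ///     AVAssetReaderVideoCompositionOutput::assetReaderVideoCompositionOutputWithVideoTracks_videoSettings(
        ///         &video_tracks,
        ///         None,
        ///     )
        /// };
        /// // Typically a videoComposition is also set (via setVideoComposition) before the
        /// // attached reader starts reading.
        /// ```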
        #[unsafe(method(assetReaderVideoCompositionOutputWithVideoTracks:videoSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderVideoCompositionOutputWithVideoTracks_videoSettings(
            video_tracks: &NSArray<AVAssetTrack>,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
        ///
        /// Parameter `videoSettings`: An NSDictionary of video settings to be used for video output.  See AVVideoSettings.h for more information about how to construct a video settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderVideoCompositionOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
        ///
        /// A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks.  Initialization will fail if the video settings cannot be used with the specified tracks.
        ///
        /// AVAssetReaderVideoCompositionOutput can only produce uncompressed output.  This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - any video track is not of media type AVMediaTypeVideo
        /// - any video track is not part of this asset reader output's AVAsset
        /// - track output settings would cause the output to yield compressed samples
        /// - video settings does not follow the rules for uncompressed video output (AVVideoSettings.h)
        /// - video settings contains any of the following keys:
        /// - AVVideoCleanApertureKey
        /// - AVVideoPixelAspectRatioKey
        /// - AVVideoScalingModeKey
        /// - AVVideoDecompressionPropertiesKey
        #[unsafe(method(initWithVideoTracks:videoSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithVideoTracks_videoSettings(
            this: Allocated<Self>,
            video_tracks: &NSArray<AVAssetTrack>,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The tracks from which the receiver reads composited video.
        ///
        ///
        /// The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
        #[unsafe(method(videoTracks))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoTracks(&self) -> Retained<NSArray<AVAssetTrack>>;

        /// The video settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys as specified by AVVideoSettings.h.  A value of nil indicates that the receiver will return video frames in a convenient uncompressed format, with properties determined according to the properties of the receiver's video tracks.
        #[unsafe(method(videoSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVVideoComposition")]
        /// The composition of video used by the receiver.
        ///
        ///
        /// The value of this property is an AVVideoComposition that can be used to specify the visual arrangement of video frames read from each source track over the timeline of the source asset.
        ///
        /// This property throws an exception if a value is set after reading has started.
        #[unsafe(method(videoComposition))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoComposition(&self) -> Option<Retained<AVVideoComposition>>;

        #[cfg(feature = "AVVideoComposition")]
        /// Setter for [`videoComposition`][Self::videoComposition].
        #[unsafe(method(setVideoComposition:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVideoComposition(&self, video_composition: Option<&AVVideoComposition>);

        #[cfg(feature = "AVVideoCompositing")]
        /// Indicates the custom video compositor instance used by the receiver.
        ///
        ///
        /// This property is nil if there is no video compositor, or if the internal video compositor is in use.
        #[unsafe(method(customVideoCompositor))]
        #[unsafe(method_family = none)]
        pub unsafe fn customVideoCompositor(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVVideoCompositing>>>;
    );
}

extern_class!(
    /// Defines an interface for reading metadata, packaged as instances of AVTimedMetadataGroup, from a single AVAssetReaderTrackOutput object.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutputmetadataadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutputMetadataAdaptor;
);

unsafe impl NSObjectProtocol for AVAssetReaderOutputMetadataAdaptor {}

impl AVAssetReaderOutputMetadataAdaptor {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
        ///
        ///
        /// Parameter `assetReaderOutput`: An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
        ///
        /// Returns: An instance of AVAssetReaderOutputMetadataAdaptor
        ///
        ///
        /// It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata.  It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
        ///
        /// Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup].  Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
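        ///
        /// A minimal creation sketch (not from the original header), assuming `metadata_output`
        /// is an AVAssetReaderTrackOutput created from a track of media type AVMediaTypeMetadata
        /// with `None` output settings, and that its asset reader has not yet started reading.
        ///
        /// ```ignore
        /// let adaptor = unsafe {
        ///     AVAssetReaderOutputMetadataAdaptor::assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput(
        ///         &metadata_output,
        ///     )
        /// };
        /// ```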
        #[unsafe(method(assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput(
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
        ///
        ///
        /// Parameter `assetReaderOutput`: An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
        ///
        /// Returns: An instance of AVAssetReaderOutputMetadataAdaptor
        ///
        ///
        /// It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata.  It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
        ///
        /// Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup].  Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
        ///
        /// This method throws an exception if the track's output was used to initialize another adaptor or if the track output's asset reader has already started reading.
        #[unsafe(method(initWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetReaderTrackOutput(
            this: Allocated<Self>,
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// The asset reader track output from which the receiver pulls timed metadata groups.
        #[unsafe(method(assetReaderTrackOutput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutput(&self) -> Retained<AVAssetReaderTrackOutput>;

        #[cfg(feature = "AVTimedMetadataGroup")]
        /// Returns the next timed metadata group for the asset reader output, synchronously.
        ///
        ///
        /// Returns: An instance of AVTimedMetadataGroup, representing the next logical segment of metadata coming from the source asset reader output.
        ///
        ///
        /// This method will return nil when all timed metadata groups have been read from the asset reader output, or if there is an error that prevents the timed metadata group adaptor from reading more timed metadata groups.  When this method returns nil, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
        ///
        /// Unlike -[AVAssetReaderTrackOutput copyNextSampleBuffer], this method returns an autoreleased object.
        ///
        /// Before calling this method, you must ensure that the output which underlies the receiver is attached to an AVAssetReader via a prior call to -addOutput: and that -startReading has been called on the asset reader.
        ///
        /// This method throws an exception if track output is not attached to an asset reader and reading has not yet begun.
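        ///
        /// A minimal drain-loop sketch (not from the original header), assuming the adaptor's
        /// underlying output has been added to an AVAssetReader on which `startReading` has
        /// already been called.
        ///
        /// ```ignore
        /// while let Some(group) = unsafe { adaptor.nextTimedMetadataGroup() } {
        ///     // Each AVTimedMetadataGroup covers one logical segment of timed metadata;
        ///     // `None` means the track is exhausted or the reader failed.
        /// }
        /// ```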
        #[unsafe(method(nextTimedMetadataGroup))]
        #[unsafe(method_family = none)]
        pub unsafe fn nextTimedMetadataGroup(&self) -> Option<Retained<AVTimedMetadataGroup>>;
    );
}

extern_class!(
    /// An adaptor class for reading instances of AVCaptionGroup from a track containing timed text (i.e. subtitles or closed captions).
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutputcaptionadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutputCaptionAdaptor;
);

unsafe impl NSObjectProtocol for AVAssetReaderOutputCaptionAdaptor {}

impl AVAssetReaderOutputCaptionAdaptor {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new caption adaptor for reading from the given track output.
        ///
        /// Parameter `trackOutput`: The track output from which to read captions.
        ///
        /// Returns: A new instance of AVAssetReaderOutputCaptionAdaptor, configured to read captions from the given AVAssetReaderTrackOutput.
        ///
        /// It is an error to pass nil to this method.
        #[unsafe(method(assetReaderOutputCaptionAdaptorWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderOutputCaptionAdaptorWithAssetReaderTrackOutput(
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// Creates a new caption adaptor for reading from the given track output.
        ///
        /// Parameter `trackOutput`: The track output from which to read captions.
        ///
        /// Returns: A new instance of AVAssetReaderOutputCaptionAdaptor, configured to read captions from the given AVAssetReaderTrackOutput.
        ///
        /// It is an error to pass nil to this method.
        #[unsafe(method(initWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetReaderTrackOutput(
            this: Allocated<Self>,
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// The track output used to create the receiver.
        #[unsafe(method(assetReaderTrackOutput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutput(&self) -> Retained<AVAssetReaderTrackOutput>;

        #[cfg(feature = "AVCaptionGroup")]
        /// Returns the next caption group.
        ///
        /// Returns: An instance of AVCaptionGroup representing the next group of captions.
        ///
        /// The method returns the next caption group.
        ///
        /// This method throws an exception if the track output is not attached to an asset reader and reading has not yet begun.
        #[unsafe(method(nextCaptionGroup))]
        #[unsafe(method_family = none)]
        pub unsafe fn nextCaptionGroup(&self) -> Option<Retained<AVCaptionGroup>>;

        #[cfg(all(feature = "AVCaption", feature = "AVCaptionGroup"))]
        /// Returns the set of captions that are present in the given group but were not present in any group previously vended by calls to -nextCaptionGroup: on the receiver.
        ///
        /// Parameter `captionGroup`: The group containing the captions of interest.
        ///
        /// Returns: An array of AVCaption objects.
        ///
        /// The returned array contains the set of captions in the given group whose time ranges have the same start time as the group.  This method is provided as a convenience for clients who want to process captions one-by-one and do not need a complete view of the set of captions active at a given time.
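        ///
        /// A minimal drain-loop sketch (not from the original header), assuming `adaptor` wraps
        /// a caption track output whose asset reader has already started reading.
        ///
        /// ```ignore
        /// while let Some(group) = unsafe { adaptor.nextCaptionGroup() } {
        ///     // Only look at captions that first appear with this group, one by one.
        ///     let new_captions =
        ///         unsafe { adaptor.captionsNotPresentInPreviousGroupsInCaptionGroup(&group) };
        ///     for caption in new_captions.iter() {
        ///         // `caption` is an AVCaption; hand it off to client code here.
        ///     }
        /// }
        /// ```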
        #[unsafe(method(captionsNotPresentInPreviousGroupsInCaptionGroup:))]
        #[unsafe(method_family = none)]
        pub unsafe fn captionsNotPresentInPreviousGroupsInCaptionGroup(
            &self,
            caption_group: &AVCaptionGroup,
        ) -> Retained<NSArray<AVCaption>>;
    );
}

/// AVAssetReaderCaptionValidation.
/// Category of AVAssetReaderOutputCaptionAdaptor for caption validation handling
impl AVAssetReaderOutputCaptionAdaptor {
    extern_methods!(
        /// Register caption validation handling callback protocol to the caption adaptor.
        #[unsafe(method(validationDelegate))]
        #[unsafe(method_family = none)]
        pub unsafe fn validationDelegate(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVAssetReaderCaptionValidationHandling>>>;

        /// This is a [weak property][objc2::topics::weak_property].
        /// Setter for [`validationDelegate`][Self::validationDelegate].
        #[unsafe(method(setValidationDelegate:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setValidationDelegate(
            &self,
            validation_delegate: Option<
                &ProtocolObject<dyn AVAssetReaderCaptionValidationHandling>,
            >,
        );
    );
}

extern_protocol!(
    /// A protocol to receive caption validation notifications
    ///
    /// A client can implement the protocol on its own class which processes the caption validation calls.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadercaptionvalidationhandling?language=objc)
    pub unsafe trait AVAssetReaderCaptionValidationHandling: NSObjectProtocol {
        #[cfg(feature = "AVCaption")]
        /// Called when one or more syntax elements were ignored in the process of creating the caption object.
        ///
        ///
        /// Parameter `adaptor`: The caption adaptor object
        ///
        /// Parameter `caption`: The caption object. The parser skipped unsupported syntax elements when creating this object.
        ///
        /// Parameter `syntaxElements`: Array of NSString to represent the skipped syntax.
        ///
        ///
        /// While the reported string content is human readable, it is highly technical and probably meaningful only to clients who are familiar with the source caption format. It is primarily designed for logging purpose and would not be suitable for UI purpose.
        #[optional]
        #[unsafe(method(captionAdaptor:didVendCaption:skippingUnsupportedSourceSyntaxElements:))]
        #[unsafe(method_family = none)]
        unsafe fn captionAdaptor_didVendCaption_skippingUnsupportedSourceSyntaxElements(
            &self,
            adaptor: &AVAssetReaderOutputCaptionAdaptor,
            caption: &AVCaption,
            syntax_elements: &NSArray<NSString>,
        );
    }
);

extern_class!(
    /// AVAssetReaderSampleReferenceOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading sample references from a single AVAssetTrack of an AVAssetReader's AVAsset.
    ///
    /// Clients can extract information about the location (file URL and offset) of samples in a track by adding an instance of AVAssetReaderSampleReferenceOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. No actual sample data can be extracted using this class. The location of the sample data is described by the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments on the extracted sample buffers. More information about sample buffers describing sample references can be found in the CMSampleBuffer documentation.
    ///
    /// Sample buffers extracted using this class can also be appended to an AVAssetWriterInput to create movie tracks that are not self-contained and reference data in the original file instead.  Currently, only instances of AVAssetWriter configured to write files of type AVFileTypeQuickTimeMovie can be used to write tracks that are not self-contained.
    ///
    /// Since no sample data is ever returned by instances of AVAssetReaderSampleReferenceOutput, the value of the alwaysCopiesSampleData property is ignored.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadersamplereferenceoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderSampleReferenceOutput;
);

unsafe impl NSObjectProtocol for AVAssetReaderSampleReferenceOutput {}

impl AVAssetReaderSampleReferenceOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
        ///
        /// Returns: An instance of AVAssetReaderSampleReferenceOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
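        ///
        /// A minimal creation sketch (not from the original header), assuming `track` is a track
        /// of the reader's asset; sample buffers read from this output carry only sample
        /// reference attachments, never the media data itself.
        ///
        /// ```ignore
        /// let reference_output = unsafe {
        ///     AVAssetReaderSampleReferenceOutput::assetReaderSampleReferenceOutputWithTrack(&track)
        /// };
        /// ```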
        #[unsafe(method(assetReaderSampleReferenceOutputWithTrack:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderSampleReferenceOutputWithTrack(
            track: &AVAssetTrack,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
        ///
        /// Returns: An instance of AVAssetReaderSampleReferenceOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        #[unsafe(method(initWithTrack:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithTrack(this: Allocated<Self>, track: &AVAssetTrack) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The track from which the receiver extracts sample references.
        ///
        ///
        /// The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
        #[unsafe(method(track))]
        #[unsafe(method_family = none)]
        pub unsafe fn track(&self) -> Retained<AVAssetTrack>;
    );
}