objc2_av_foundation/generated/AVAssetReaderOutput.rs
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// AVAssetReaderOutput is an abstract class that defines an interface for reading a single collection of samples of a common media type from an AVAssetReader.
    ///
    ///
    /// Clients can read the media data of an asset by adding one or more concrete instances of AVAssetReaderOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// IMPORTANT PERFORMANCE NOTE: Make sure to set the alwaysCopiesSampleData property to NO if you do not need to modify the sample data in-place, to avoid unnecessary and inefficient copying.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutput?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutput;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderOutput {}
);

impl AVAssetReaderOutput {
    extern_methods!(
        #[cfg(feature = "AVMediaFormat")]
        /// The media type of the samples that can be read from the receiver.
        ///
        ///
        /// The value of this property is one of the media type strings defined in AVMediaFormat.h.
        #[unsafe(method(mediaType))]
        #[unsafe(method_family = none)]
        pub unsafe fn mediaType(&self) -> Retained<AVMediaType>;

        /// Indicates whether or not the data in buffers gets copied before being vended to the client.
        ///
        ///
        /// When the value of this property is YES, the AVAssetReaderOutput will always vend a buffer with copied data to the client. Data in such buffers can be freely modified by the client. When the value of this property is NO, the buffers vended to the client may not be copied. Such buffers may still be referenced by other entities. The result of modifying a buffer whose data hasn't been copied is undefined. Requesting buffers whose data hasn't been copied when possible can lead to performance improvements.
        ///
        /// The default value is YES.
        ///
        /// This property throws an exception if a value is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown).
        #[unsafe(method(alwaysCopiesSampleData))]
        #[unsafe(method_family = none)]
        pub unsafe fn alwaysCopiesSampleData(&self) -> bool;

        /// Setter for [`alwaysCopiesSampleData`][Self::alwaysCopiesSampleData].
        #[unsafe(method(setAlwaysCopiesSampleData:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAlwaysCopiesSampleData(&self, always_copies_sample_data: bool);

        #[cfg(feature = "objc2-core-media")]
        /// Copies the next sample buffer for the output synchronously.
        ///
        ///
        /// Returns: A CMSampleBuffer object referencing the output sample buffer.
        ///
        ///
        /// The client is responsible for calling CFRelease on the returned CMSampleBuffer object when finished with it. This method will return NULL if there are no more sample buffers available for the receiver within the time range specified by its AVAssetReader's timeRange property, or if there is an error that prevents the AVAssetReader from reading more media data. When this method returns NULL, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
        ///
        /// In certain configurations, such as when outputSettings is nil, copyNextSampleBuffer may return marker-only sample buffers as well as sample buffers containing media data. Marker-only sample buffers can be identified by CMSampleBufferGetNumSamples returning 0. Clients who do not need the information attached to marker-only sample buffers may skip them.
        ///
        /// This method throws an exception if this output is not added to an instance of AVAssetReader (using -addOutput:) and -startReading is not called on that asset reader.
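        ///
        /// A minimal usage sketch (not part of the generated header documentation), assuming
        /// `output` has already been added to an AVAssetReader on which -startReading succeeded:
        ///
        /// ```ignore
        /// unsafe {
        ///     while let Some(sample_buffer) = output.copyNextSampleBuffer() {
        ///         // Marker-only buffers (zero samples, see CMSampleBufferGetNumSamples)
        ///         // may appear when outputSettings is nil; skip them if their
        ///         // attachments are not needed.
        ///         // ... process `sample_buffer` ...
        ///     }
        ///     // A `None` return means the time range is exhausted or an error occurred;
        ///     // inspect the attached AVAssetReader's status to tell which.
        /// }
        /// ```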
        #[unsafe(method(copyNextSampleBuffer))]
        #[unsafe(method_family = copy)]
        pub unsafe fn copyNextSampleBuffer(&self) -> Option<Retained<CMSampleBuffer>>;
    );
}

/// Methods declared on superclass `NSObject`.
impl AVAssetReaderOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}

/// AVAssetReaderOutputRandomAccess.
impl AVAssetReaderOutput {
    extern_methods!(
        /// Indicates whether the asset reader output supports reconfiguration of the time ranges to read.
        ///
        ///
        /// When the value of this property is YES, the time ranges read by the asset reader output can be reconfigured during reading using the -resetForReadingTimeRanges: method. This also prevents the attached AVAssetReader from progressing to AVAssetReaderStatusCompleted until -markConfigurationAsFinal has been invoked.
        ///
        /// The default value is NO, which means that the asset reader output may not be reconfigured once reading has begun. When the value of this property is NO, AVAssetReader may be able to read media data more efficiently, particularly when multiple asset reader outputs are attached.
        ///
        /// This property throws an exception if a value is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown) or after an AVAssetReaderOutput.Provider is attached.
        #[unsafe(method(supportsRandomAccess))]
        #[unsafe(method_family = none)]
        pub unsafe fn supportsRandomAccess(&self) -> bool;

        /// Setter for [`supportsRandomAccess`][Self::supportsRandomAccess].
        #[unsafe(method(setSupportsRandomAccess:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setSupportsRandomAccess(&self, supports_random_access: bool);

        /// Starts reading over with a new set of time ranges.
        ///
        ///
        /// Parameter `timeRanges`: An NSArray of NSValue objects, each representing a single CMTimeRange structure
        ///
        ///
        /// This method may only be used if supportsRandomAccess has been set to YES and may not be called after -markConfigurationAsFinal has been invoked.
        ///
        /// This method is often used in conjunction with AVAssetWriter multi-pass (see AVAssetWriterInput category AVAssetWriterInputMultiPass). In this usage, the caller will invoke -copyNextSampleBuffer until that method returns NULL and then ask the AVAssetWriterInput for a set of time ranges from which it thinks media data should be re-encoded. These time ranges are then given to this method to set up the asset reader output for the next pass.
        ///
        /// The time ranges set here override the time range set on AVAssetReader.timeRange. Just as with that property, for each time range in the array the intersection of that time range and CMTimeRangeMake(kCMTimeZero, asset.duration) will take effect.
        ///
        /// If this method is invoked after the status of the attached AVAssetReader has become AVAssetReaderStatusFailed or AVAssetReaderStatusCancelled, no change in status will occur and the result of the next call to -copyNextSampleBuffer will be NULL.
        ///
        /// This method throws an exception if the following conditions are not honored:
        /// - each item in time ranges must be an NSValue
        /// - the start of each time range must be numeric - see CMTIME_IS_NUMERIC
        /// - the duration of each time range must be nonnegative and numeric, or kCMTimePositiveInfinity
        /// - the start of each time range must be greater than or equal to the end of the previous time range
        /// - start times must be strictly increasing
        /// - time ranges must not overlap
        /// - cannot be called before -startReading has been invoked on the attached asset reader
        /// - cannot be called until all samples of media data have been read (i.e. copyNextSampleBuffer returns NULL and the asset reader has not entered a failure state)
        /// - cannot be called without setting "supportsRandomAccess" to YES
        /// - cannot be called after calling -markConfigurationAsFinal
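        ///
        /// A rough multi-pass sketch (an assumption-laden illustration, not generated
        /// documentation); `time_ranges: &NSArray<NSValue>` is assumed to come from an
        /// AVAssetWriterInput's multi-pass machinery:
        ///
        /// ```ignore
        /// unsafe {
        ///     output.setSupportsRandomAccess(true); // must be set before -startReading
        ///     // ... start the reader and drain the output with copyNextSampleBuffer()
        ///     // until it returns None ...
        ///     output.resetForReadingTimeRanges(time_ranges); // configure the next pass
        ///     // ... drain again for the new time ranges ...
        ///     output.markConfigurationAsFinal(); // let the reader reach Completed
        /// }
        /// ```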
        #[unsafe(method(resetForReadingTimeRanges:))]
        #[unsafe(method_family = none)]
        pub unsafe fn resetForReadingTimeRanges(&self, time_ranges: &NSArray<NSValue>);

        /// Informs the receiver that no more reconfiguration of time ranges is necessary and allows the attached AVAssetReader to advance to AVAssetReaderStatusCompleted.
        ///
        ///
        /// When the value of supportsRandomAccess is YES, the attached asset reader will not advance to AVAssetReaderStatusCompleted until this method is called.
        ///
        /// When the destination of media data vended by the receiver is an AVAssetWriterInput configured for multi-pass encoding, a convenient time to invoke this method is after the asset writer input indicates that no more passes will be performed.
        ///
        /// Once this method has been called, further invocations of -resetForReadingTimeRanges: are disallowed.
        #[unsafe(method(markConfigurationAsFinal))]
        #[unsafe(method_family = none)]
        pub unsafe fn markConfigurationAsFinal(&self);
    );
}

extern_class!(
    /// AVAssetReaderTrackOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading media data from a single AVAssetTrack of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the media data of an asset track by adding an instance of AVAssetReaderTrackOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. The track's media samples can either be read in the format in which they are stored in the asset, or they can be converted to a different format.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadertrackoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderTrackOutput;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderTrackOutput {}
);

impl AVAssetReaderTrackOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
        ///
        /// Parameter `outputSettings`: An NSDictionary of output settings to be used for sample output. See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderTrackOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        ///
        /// A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track. Initialization will fail if the output settings cannot be used with the specified track.
        ///
        /// AVAssetReaderTrackOutput can only produce uncompressed output. For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM. For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
        ///
        /// When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
        ///
        /// For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range. For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
        ///
        /// For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended.
        ///
        /// ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB. AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
        ///
        /// ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB. To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
        #[unsafe(method(assetReaderTrackOutputWithTrack:outputSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutputWithTrack_outputSettings(
            track: &AVAssetTrack,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
        ///
        /// Parameter `outputSettings`: An NSDictionary of output settings to be used for sample output. See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderTrackOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        ///
        /// A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track. Initialization will fail if the output settings cannot be used with the specified track.
        ///
        /// AVAssetReaderTrackOutput can only produce uncompressed output. For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM. For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
        ///
        /// When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
        ///
        /// For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range. For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
        ///
        /// For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended.
        ///
        /// ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB. AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
        ///
        /// ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB. To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - the output settings dictionary contains an unsupported key mentioned above
        /// - the output settings dictionary does not contain any recognized key
        /// - output settings are not compatible with track's media type
        /// - track output settings would cause the output to yield compressed samples
        ///
        /// # Safety
        ///
        /// `output_settings` generic should be of the correct type.
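        ///
        /// A minimal usage sketch (an illustration, not generated documentation); `track` is
        /// assumed to be an AVAssetTrack obtained from the same asset the reader was created
        /// with:
        ///
        /// ```ignore
        /// let output = unsafe {
        ///     AVAssetReaderTrackOutput::initWithTrack_outputSettings(
        ///         AVAssetReaderTrackOutput::alloc(),
        ///         &track,
        ///         None, // nil settings: vend samples in their stored format
        ///     )
        /// };
        /// // Avoid per-buffer copies when the sample data will not be modified in place.
        /// unsafe { output.setAlwaysCopiesSampleData(false) };
        /// ```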
        #[unsafe(method(initWithTrack:outputSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithTrack_outputSettings(
            this: Allocated<Self>,
            track: &AVAssetTrack,
            output_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The track from which the receiver reads sample buffers.
        ///
        ///
        /// The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
        #[unsafe(method(track))]
        #[unsafe(method_family = none)]
        pub unsafe fn track(&self) -> Retained<AVAssetTrack>;

        /// The output settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys as specified by either AVAudioSettings.h for audio tracks or AVVideoSettings.h for video tracks. A value of nil indicates that the receiver will vend samples in their original format as stored in the target track.
        #[unsafe(method(outputSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn outputSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
        ///
        ///
        /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h. An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
        ///
        /// The default value is AVAudioTimePitchAlgorithmSpectral.
        ///
        /// This property throws an exception for any of the following reasons:
        /// - a value is set after reading has started
        /// - a value is set other than AVAudioTimePitchAlgorithmSpectral, AVAudioTimePitchAlgorithmTimeDomain, or AVAudioTimePitchAlgorithmVarispeed.
        #[unsafe(method(audioTimePitchAlgorithm))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setAudioTimePitchAlgorithm:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioTimePitchAlgorithm(
            &self,
            audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
        );
    );
}

extern_class!(
    /// AVAssetReaderAudioMixOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading audio samples that result from mixing the audio from one or more AVAssetTracks of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the audio data mixed from one or more asset tracks by adding an instance of AVAssetReaderAudioMixOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderaudiomixoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderAudioMixOutput;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderAudioMixOutput {}
);

impl AVAssetReaderAudioMixOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
        ///
        /// Parameter `audioSettings`: An NSDictionary of audio settings to be used for audio output.
        ///
        /// Returns: An instance of AVAssetReaderAudioMixOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
        ///
        /// For non-nil values of audioSettings, the audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
        ///
        /// A value of nil for audioSettings configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks as well as other considerations that may vary according to device capabilities, operating system version, and other factors. Therefore if you wish to perform any processing on the output, you must examine the CMAudioFormatDescription of the CMSampleBuffers that are provided in order to ensure that your processing is appropriately configured for the output format.
        ///
        /// # Safety
        ///
        /// `audio_settings` generic should be of the correct type.
        #[unsafe(method(assetReaderAudioMixOutputWithAudioTracks:audioSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderAudioMixOutputWithAudioTracks_audioSettings(
            audio_tracks: &NSArray<AVAssetTrack>,
            audio_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
        ///
        /// Parameter `audioSettings`: An NSDictionary of audio settings to be used for audio output.
        ///
        /// Returns: An instance of AVAssetReaderAudioMixOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
        ///
        /// For non-nil values of audioSettings, the audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
        ///
        /// A value of nil for audioSettings configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks as well as other considerations that may vary according to device capabilities, operating system version, and other factors. Therefore if you wish to perform any processing on the output, you must examine the CMAudioFormatDescription of the CMSampleBuffers that are provided in order to ensure that your processing is appropriately configured for the output format.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - an audio track does not have media type AVMediaTypeAudio
        /// - an audio track belongs to a different AVAsset
        /// - the audio settings dictionary contains AVSampleRateConverterAudioQualityKey
        /// - the output would be compressed
        ///
        /// # Safety
        ///
        /// `audio_settings` generic should be of the correct type.
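        ///
        /// A minimal usage sketch (an illustration, not generated documentation);
        /// `audio_tracks` is assumed to be an NSArray of the asset's AVMediaTypeAudio tracks:
        ///
        /// ```ignore
        /// let output = unsafe {
        ///     AVAssetReaderAudioMixOutput::initWithAudioTracks_audioSettings(
        ///         AVAssetReaderAudioMixOutput::alloc(),
        ///         &audio_tracks,
        ///         None, // nil settings: a convenient uncompressed PCM format is chosen
        ///     )
        /// };
        /// // Inspect the CMAudioFormatDescription of vended buffers before processing,
        /// // since the exact output format is chosen by the framework.
        /// ```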
        #[unsafe(method(initWithAudioTracks:audioSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAudioTracks_audioSettings(
            this: Allocated<Self>,
            audio_tracks: &NSArray<AVAssetTrack>,
            audio_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The tracks from which the receiver reads mixed audio.
        ///
        ///
        /// The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
        #[unsafe(method(audioTracks))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTracks(&self) -> Retained<NSArray<AVAssetTrack>>;

        /// The audio settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys from AVAudioSettings.h (linear PCM only). A value of nil indicates that the receiver will return audio samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the receiver's audio tracks.
        #[unsafe(method(audioSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVAudioMix")]
        /// The audio mix used by the receiver.
        ///
        ///
        /// The value of this property is an AVAudioMix that can be used to specify how the volume of audio samples read from each source track will change over the timeline of the source asset.
        ///
        /// This property throws an exception for any of the following reasons:
        /// - an audio mix is set after reading has started (the asset reader has progressed beyond AVAssetReaderStatusUnknown)
        /// - setting an audio mix containing a track that was not used to create the receiver
        /// - an audio mix is set containing an invalid audio time pitch algorithm
        #[unsafe(method(audioMix))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioMix(&self) -> Option<Retained<AVAudioMix>>;

        #[cfg(feature = "AVAudioMix")]
        /// Setter for [`audioMix`][Self::audioMix].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setAudioMix:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioMix(&self, audio_mix: Option<&AVAudioMix>);

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
        ///
        ///
        /// Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h. An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
        ///
        /// The default value is AVAudioTimePitchAlgorithmSpectral.
        #[unsafe(method(audioTimePitchAlgorithm))]
        #[unsafe(method_family = none)]
        pub unsafe fn audioTimePitchAlgorithm(&self) -> Retained<AVAudioTimePitchAlgorithm>;

        #[cfg(feature = "AVAudioProcessingSettings")]
        /// Setter for [`audioTimePitchAlgorithm`][Self::audioTimePitchAlgorithm].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setAudioTimePitchAlgorithm:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAudioTimePitchAlgorithm(
            &self,
            audio_time_pitch_algorithm: &AVAudioTimePitchAlgorithm,
        );
    );
}

extern_class!(
    /// AVAssetReaderVideoCompositionOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading video frames that have been composited together from the frames in one or more AVAssetTracks of an AVAssetReader's AVAsset.
    ///
    ///
    /// Clients can read the video frames composited from one or more asset tracks by adding an instance of AVAssetReaderVideoCompositionOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadervideocompositionoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderVideoCompositionOutput;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderVideoCompositionOutput {}
);

impl AVAssetReaderVideoCompositionOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
        ///
        /// Parameter `videoSettings`: An NSDictionary of video settings to be used for video output. See AVVideoSettings.h for more information about how to construct a video settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderVideoCompositionOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
        ///
        /// A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks. Initialization will fail if the video settings cannot be used with the specified tracks.
        ///
        /// AVAssetReaderVideoCompositionOutput can only produce uncompressed output. This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. In addition, the following keys are not supported:
        ///
        /// AVVideoCleanApertureKey
        /// AVVideoPixelAspectRatioKey
        /// AVVideoScalingModeKey
        ///
        /// # Safety
        ///
        /// `video_settings` generic should be of the correct type.
        #[unsafe(method(assetReaderVideoCompositionOutputWithVideoTracks:videoSettings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderVideoCompositionOutputWithVideoTracks_videoSettings(
            video_tracks: &NSArray<AVAssetTrack>,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
        ///
        ///
        /// Parameter `tracks`: An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
        ///
        /// Parameter `videoSettings`: An NSDictionary of video settings to be used for video output. See AVVideoSettings.h for more information about how to construct a video settings dictionary.
        ///
        /// Returns: An instance of AVAssetReaderVideoCompositionOutput.
        ///
        ///
        /// Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
        ///
        /// A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks. Initialization will fail if the video settings cannot be used with the specified tracks.
        ///
        /// AVAssetReaderVideoCompositionOutput can only produce uncompressed output. This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h.
        ///
        /// This method throws an exception for any of the following reasons:
        /// - any video track is not of media type AVMediaTypeVideo
        /// - any video track is not part of this asset reader output's AVAsset
        /// - track output settings would cause the output to yield compressed samples
        /// - video settings does not follow the rules for uncompressed video output (AVVideoSettings.h)
        /// - video settings contains any of the following keys:
        /// - AVVideoCleanApertureKey
        /// - AVVideoPixelAspectRatioKey
        /// - AVVideoScalingModeKey
        /// - AVVideoDecompressionPropertiesKey
        ///
        /// # Safety
        ///
        /// `video_settings` generic should be of the correct type.
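        ///
        /// A minimal usage sketch (an illustration, not generated documentation);
        /// `video_tracks` and `composition` (an AVVideoComposition for the same asset) are
        /// assumed inputs:
        ///
        /// ```ignore
        /// let output = unsafe {
        ///     AVAssetReaderVideoCompositionOutput::initWithVideoTracks_videoSettings(
        ///         AVAssetReaderVideoCompositionOutput::alloc(),
        ///         &video_tracks,
        ///         None, // nil settings: a convenient uncompressed format is chosen
        ///     )
        /// };
        /// // The composition must be set before reading starts.
        /// unsafe { output.setVideoComposition(Some(&composition)) };
        /// ```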
        #[unsafe(method(initWithVideoTracks:videoSettings:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithVideoTracks_videoSettings(
            this: Allocated<Self>,
            video_tracks: &NSArray<AVAssetTrack>,
            video_settings: Option<&NSDictionary<NSString, AnyObject>>,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The tracks from which the receiver reads composited video.
        ///
        ///
        /// The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
        #[unsafe(method(videoTracks))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoTracks(&self) -> Retained<NSArray<AVAssetTrack>>;

        /// The video settings used by the receiver.
        ///
        ///
        /// The value of this property is an NSDictionary that contains values for keys as specified by AVVideoSettings.h. A value of nil indicates that the receiver will return video frames in a convenient uncompressed format, with properties determined according to the properties of the receiver's video tracks.
        #[unsafe(method(videoSettings))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoSettings(&self) -> Option<Retained<NSDictionary<NSString, AnyObject>>>;

        #[cfg(feature = "AVVideoComposition")]
        /// The composition of video used by the receiver.
        ///
        ///
        /// The value of this property is an AVVideoComposition that can be used to specify the visual arrangement of video frames read from each source track over the timeline of the source asset.
        ///
        /// This property throws an exception if a value is set after reading has started.
        #[unsafe(method(videoComposition))]
        #[unsafe(method_family = none)]
        pub unsafe fn videoComposition(&self) -> Option<Retained<AVVideoComposition>>;

        #[cfg(feature = "AVVideoComposition")]
        /// Setter for [`videoComposition`][Self::videoComposition].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setVideoComposition:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setVideoComposition(&self, video_composition: Option<&AVVideoComposition>);

        #[cfg(feature = "AVVideoCompositing")]
        /// Indicates the custom video compositor instance used by the receiver.
        ///
        ///
        /// This property is nil if there is no video compositor, or if the internal video compositor is in use.
        #[unsafe(method(customVideoCompositor))]
        #[unsafe(method_family = none)]
        pub unsafe fn customVideoCompositor(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVVideoCompositing>>>;
    );
}

extern_class!(
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutputmetadataadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutputMetadataAdaptor;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderOutputMetadataAdaptor {}
);

impl AVAssetReaderOutputMetadataAdaptor {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
        ///
        ///
        /// Parameter `assetReaderOutput`: An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
        ///
        /// Returns: An instance of AVAssetReaderOutputMetadataAdaptor
        ///
        ///
        /// It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata. It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
        ///
        /// Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup]. Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
        #[unsafe(method(assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput(
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
        ///
        ///
        /// Parameter `assetReaderOutput`: An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
        ///
        /// Returns: An instance of AVAssetReaderOutputMetadataAdaptor
        ///
        ///
        /// It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata. It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
        ///
        /// Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup]. Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
        ///
        /// This method throws an exception if the track's output was used to initialize another adaptor or if the track output's asset reader has already started reading.
        #[unsafe(method(initWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetReaderTrackOutput(
            this: Allocated<Self>,
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// The asset reader track output from which the receiver pulls timed metadata groups.
        #[unsafe(method(assetReaderTrackOutput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutput(&self) -> Retained<AVAssetReaderTrackOutput>;

        #[cfg(feature = "AVTimedMetadataGroup")]
        /// Returns the next timed metadata group for the asset reader output, synchronously.
        ///
        ///
        /// Returns: An instance of AVTimedMetadataGroup, representing the next logical segment of metadata coming from the source asset reader output.
        ///
        ///
        /// This method will return nil when all timed metadata groups have been read from the asset reader output, or if there is an error that prevents the timed metadata group adaptor from reading more timed metadata groups. When this method returns nil, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
        ///
        /// Unlike -[AVAssetReaderTrackOutput copyNextSampleBuffer], this method returns an autoreleased object.
        ///
        /// Before calling this method, you must ensure that the output which underlies the receiver is attached to an AVAssetReader via a prior call to -addOutput: and that -startReading has been called on the asset reader.
        ///
        /// This method throws an exception if track output is not attached to an asset reader and reading has not yet begun.
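        ///
        /// A minimal usage sketch (an illustration, not generated documentation);
        /// `metadata_output` is assumed to be an AVAssetReaderTrackOutput for an
        /// AVMediaTypeMetadata track with nil outputSettings, added to a reader that has
        /// not yet started reading:
        ///
        /// ```ignore
        /// let adaptor = unsafe {
        ///     AVAssetReaderOutputMetadataAdaptor::initWithAssetReaderTrackOutput(
        ///         AVAssetReaderOutputMetadataAdaptor::alloc(),
        ///         &metadata_output,
        ///     )
        /// };
        /// // After -startReading succeeds on the attached reader:
        /// while let Some(group) = unsafe { adaptor.nextTimedMetadataGroup() } {
        ///     // ... inspect the AVTimedMetadataGroup ...
        /// }
        /// ```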
        #[unsafe(method(nextTimedMetadataGroup))]
        #[unsafe(method_family = none)]
        pub unsafe fn nextTimedMetadataGroup(&self) -> Option<Retained<AVTimedMetadataGroup>>;
    );
}

extern_class!(
    /// [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreaderoutputcaptionadaptor?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderOutputCaptionAdaptor;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderOutputCaptionAdaptor {}
);

impl AVAssetReaderOutputCaptionAdaptor {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        /// Creates a new caption adaptor for reading from the given track output.
        ///
        /// Parameter `trackOutput`: The track output from which to read captions.
        ///
        /// Returns: A new instance of AVAssetReaderOutputCaptionAdaptor, configured to read captions from the given AVAssetReaderTrackOutput.
        ///
        /// It is an error to pass nil to this method.
        #[unsafe(method(assetReaderOutputCaptionAdaptorWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderOutputCaptionAdaptorWithAssetReaderTrackOutput(
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// Creates a new caption adaptor for reading from the given track output.
        ///
        /// Parameter `trackOutput`: The track output from which to read captions.
        ///
        /// Returns: A new instance of AVAssetReaderOutputCaptionAdaptor, configured to read captions from the given AVAssetReaderTrackOutput.
        ///
        /// It is an error to pass nil to this method.
        #[unsafe(method(initWithAssetReaderTrackOutput:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithAssetReaderTrackOutput(
            this: Allocated<Self>,
            track_output: &AVAssetReaderTrackOutput,
        ) -> Retained<Self>;

        /// The track output used to create the receiver.
        #[unsafe(method(assetReaderTrackOutput))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderTrackOutput(&self) -> Retained<AVAssetReaderTrackOutput>;

        #[cfg(feature = "AVCaptionGroup")]
        /// Returns the next caption group.
        ///
        /// Returns: An instance of AVCaptionGroup representing the next group of captions.
        ///
        /// This method throws an exception if the track output is not attached to an asset reader and reading has not yet begun.
        #[unsafe(method(nextCaptionGroup))]
        #[unsafe(method_family = none)]
        pub unsafe fn nextCaptionGroup(&self) -> Option<Retained<AVCaptionGroup>>;

        #[cfg(all(feature = "AVCaption", feature = "AVCaptionGroup"))]
        /// Returns the set of captions that are present in the given group but were not present in any group previously vended by calls to -nextCaptionGroup: on the receiver.
        ///
        /// Parameter `captionGroup`: The group containing the captions of interest.
        ///
        /// Returns: An array of AVCaption objects.
        ///
        /// The returned array contains the set of captions in the given group whose time ranges have the same start time as the group. This method is provided as a convenience for clients who want to process captions one-by-one and do not need a complete view of the set of captions active at a given time.
        #[unsafe(method(captionsNotPresentInPreviousGroupsInCaptionGroup:))]
        #[unsafe(method_family = none)]
        pub unsafe fn captionsNotPresentInPreviousGroupsInCaptionGroup(
            &self,
            caption_group: &AVCaptionGroup,
        ) -> Retained<NSArray<AVCaption>>;
    );
}

/// AVAssetReaderCaptionValidation.
///
/// Category of AVAssetReaderOutputCaptionAdaptor for caption validation handling
impl AVAssetReaderOutputCaptionAdaptor {
    extern_methods!(
        /// Register caption validation handling callback protocol to the caption adaptor.
        #[unsafe(method(validationDelegate))]
        #[unsafe(method_family = none)]
        pub unsafe fn validationDelegate(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn AVAssetReaderCaptionValidationHandling>>>;

        /// Setter for [`validationDelegate`][Self::validationDelegate].
        ///
        /// This is a [weak property][objc2::topics::weak_property].
        #[unsafe(method(setValidationDelegate:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setValidationDelegate(
            &self,
            validation_delegate: Option<
                &ProtocolObject<dyn AVAssetReaderCaptionValidationHandling>,
            >,
        );
    );
}

extern_protocol!(
    /// A protocol to receive caption validation notifications
    ///
    /// A client can implement the protocol on its own class which processes the caption validation calls.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadercaptionvalidationhandling?language=objc)
    pub unsafe trait AVAssetReaderCaptionValidationHandling: NSObjectProtocol {
        #[cfg(feature = "AVCaption")]
        /// Called when one or more syntax elements were ignored in the process of creating the caption object.
        ///
        ///
        /// Parameter `adaptor`: The caption adaptor object
        ///
        /// Parameter `caption`: The caption object. The parser skipped unsupported syntax elements when creating this object.
        ///
        /// Parameter `syntaxElements`: Array of NSString to represent the skipped syntax.
        ///
        ///
        /// While the reported string content is human readable, it is highly technical and probably meaningful only to clients who are familiar with the source caption format. It is primarily designed for logging purposes and would not be suitable for UI purposes.
        #[optional]
        #[unsafe(method(captionAdaptor:didVendCaption:skippingUnsupportedSourceSyntaxElements:))]
        #[unsafe(method_family = none)]
        unsafe fn captionAdaptor_didVendCaption_skippingUnsupportedSourceSyntaxElements(
            &self,
            adaptor: &AVAssetReaderOutputCaptionAdaptor,
            caption: &AVCaption,
            syntax_elements: &NSArray<NSString>,
        );
    }
);

extern_class!(
    /// AVAssetReaderSampleReferenceOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading sample references from a single AVAssetTrack of an AVAssetReader's AVAsset.
    ///
    /// Clients can extract information about the location (file URL and offset) of samples in a track by adding an instance of AVAssetReaderSampleReferenceOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. No actual sample data can be extracted using this class. The location of the sample data is described by the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments on the extracted sample buffers. More information about sample buffers describing sample references can be found in the CMSampleBuffer documentation.
    ///
    /// Sample buffers extracted using this class can also be appended to an AVAssetWriterInput to create movie tracks that are not self-contained and reference data in the original file instead. Currently, only instances of AVAssetWriter configured to write files of type AVFileTypeQuickTimeMovie can be used to write tracks that are not self-contained.
    ///
    /// Since no sample data is ever returned by instances of AVAssetReaderSampleReferenceOutput, the value of the alwaysCopiesSampleData property is ignored.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avassetreadersamplereferenceoutput?language=objc)
    #[unsafe(super(AVAssetReaderOutput, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVAssetReaderSampleReferenceOutput;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for AVAssetReaderSampleReferenceOutput {}
);

impl AVAssetReaderSampleReferenceOutput {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
        ///
        /// Returns: An instance of AVAssetReaderSampleReferenceOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
        #[unsafe(method(assetReaderSampleReferenceOutputWithTrack:))]
        #[unsafe(method_family = none)]
        pub unsafe fn assetReaderSampleReferenceOutputWithTrack(
            track: &AVAssetTrack,
        ) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
        ///
        ///
        /// Parameter `track`: The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
        ///
        /// Returns: An instance of AVAssetReaderSampleReferenceOutput.
        ///
        ///
        /// The track must be one of the tracks contained by the target AVAssetReader's asset.
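        ///
        /// A minimal usage sketch (an illustration, not generated documentation); `track` is
        /// assumed to belong to the reader's asset:
        ///
        /// ```ignore
        /// let output = unsafe {
        ///     AVAssetReaderSampleReferenceOutput::initWithTrack(
        ///         AVAssetReaderSampleReferenceOutput::alloc(),
        ///         &track,
        ///     )
        /// };
        /// // Vended sample buffers carry no media data; sample locations are described by
        /// // the kCMSampleBufferAttachmentKey_SampleReferenceURL and
        /// // kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments.
        /// ```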
        #[unsafe(method(initWithTrack:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithTrack(this: Allocated<Self>, track: &AVAssetTrack) -> Retained<Self>;

        #[cfg(feature = "AVAssetTrack")]
        /// The track from which the receiver extracts sample references.
        ///
        ///
        /// The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
        #[unsafe(method(track))]
        #[unsafe(method_family = none)]
        pub unsafe fn track(&self) -> Retained<AVAssetTrack>;
    );
}