objc2_av_foundation/generated/AVRenderedCaptionImage.rs
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-foundation")]
use objc2_core_foundation::*;
#[cfg(feature = "objc2-core-video")]
use objc2_core_video::*;

use crate::*;

extern_class!(
    /// AVRenderedCaptionImage is a wrapper class vended out to the client for reading a rendered caption image (CVPixelBuffer) and its associated position (in pixels). The position is relative to the videoDisplaySize (in pixels) provided by the client during the initialization of AVPlayerItemRenderedLegibleOutput, and is expressed in the upper-left-origin (ULO) coordinate system. The CVPixelBuffer will be backed by an IOSurface, enabling it to be converted to an MTLTexture using CVMetalTextureCache.
    ///
    ///
    /// Display scale is a property of the screen on which the client UI elements are displayed. This value defines the mapping between the logical coordinate space (measured in points) and the physical coordinate space (measured in pixels). Higher scale factors indicate that each point is represented by more than one pixel at render time. For example, if the display scale factor is 2.0 and the bounds of the caption rectangle are 50 x 50 points, the size of the CVPixelBufferRef for the caption is 100 x 100 pixels. The client shall provide the videoDisplaySize value in pixels only, and the position value of the caption image shall also be returned in pixels only.
    ///
    /// Subclasses of this type that are used from Swift must fulfill the requirements of a Sendable type.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/avfoundation/avrenderedcaptionimage?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct AVRenderedCaptionImage;
);
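
// Not part of the generated bindings: a minimal sketch of the point-to-pixel
// mapping described in the documentation above. The function and parameter
// names (`caption_size_in_pixels`, `bounds_in_points`, `display_scale`) are
// hypothetical and chosen for illustration only.
#[allow(dead_code)]
fn caption_size_in_pixels(bounds_in_points: (f64, f64), display_scale: f64) -> (f64, f64) {
    // With a display scale of 2.0, a 50 x 50 point caption rectangle maps to a
    // 100 x 100 pixel CVPixelBuffer, matching the example in the docs above.
    (
        bounds_in_points.0 * display_scale,
        bounds_in_points.1 * display_scale,
    )
}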

extern_conformance!(
    unsafe impl NSObjectProtocol for AVRenderedCaptionImage {}
);

impl AVRenderedCaptionImage {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[cfg(feature = "objc2-core-video")]
        /// A CVPixelBufferRef that contains pixel data for the rendered caption.
        ///
        /// If the client reads a pixelBuffer and wants to use it beyond the lifetime of the AVRenderedCaptionImage, it must retain the pixelBuffer. The pixel buffer can be converted to an MTLTexture using CVMetalTextureCache. The pixel format is fixed to kCVPixelFormatType_32BGRA, defined in `<CoreVideo/CVPixelBuffer.h>`.
        #[unsafe(method(pixelBuffer))]
        #[unsafe(method_family = none)]
        pub unsafe fn pixelBuffer(&self) -> Retained<CVPixelBuffer>;

        #[cfg(feature = "objc2-core-foundation")]
        /// A CGPoint that defines the position (in pixels) of the rendered caption image relative to the video frame.
        ///
        /// To place the caption image correctly, the size of the pixel buffer can be extracted with CVPixelBufferGetWidth and CVPixelBufferGetHeight. The origin is assumed to be at the upper left, so a caption image is rendered to the right of and below the origin point.
        #[unsafe(method(position))]
        #[unsafe(method_family = none)]
        pub unsafe fn position(&self) -> CGPoint;
    );
}
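
// Not part of the generated bindings: a hedged usage sketch showing how a
// client might read the rendered caption image and its upper-left-origin
// position. Assumes both the "objc2-core-video" and "objc2-core-foundation"
// features are enabled; `place_caption` is a hypothetical helper name.
#[cfg(all(feature = "objc2-core-video", feature = "objc2-core-foundation"))]
#[allow(dead_code)]
unsafe fn place_caption(image: &AVRenderedCaptionImage) -> (Retained<CVPixelBuffer>, CGPoint) {
    // Holding the `Retained<CVPixelBuffer>` keeps the pixel buffer alive beyond
    // the lifetime of the AVRenderedCaptionImage, as the docs require.
    let pixel_buffer = unsafe { image.pixelBuffer() };

    // `position` is in pixels, relative to the videoDisplaySize supplied to
    // AVPlayerItemRenderedLegibleOutput, with an upper-left origin: the caption
    // extends to the right of and below this point.
    let origin = unsafe { image.position() };

    (pixel_buffer, origin)
}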