use super::metal_atlas::MetalAtlas;
use crate::{
AtlasTextureId, BackdropBlur, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite,
PaintSurface, Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow,
Size, TransformationMatrix, Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
base::{NO, YES},
foundation::{NSSize, NSUInteger},
quartzcore::AutoresizingMask,
};
use core_foundation::base::TCFType;
use core_video::{
metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
CAMetalLayer, CommandQueue, MTLOrigin, MTLPixelFormat, MTLResourceOptions, MTLSize, NSRange,
RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
pub(crate) type PointF = crate::Point<f32>;
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
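/// MSAA sample count used when rasterizing paths into the intermediate texture.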
const PATH_SAMPLE_COUNT: u32 = 4;
pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;
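/// Creates the macOS renderer. The native window, view, and bounds parameters
/// are unused by the Metal renderer.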
pub unsafe fn new_renderer(
context: self::Context,
_native_window: *mut c_void,
_native_view: *mut c_void,
_bounds: crate::Size<f32>,
transparent: bool,
) -> Renderer {
MetalRenderer::new(context, transparent)
}
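/// A pool of reusable Metal buffers for streaming per-frame instance data to
/// the GPU.
///
/// Buffers are handed out by [`acquire`](Self::acquire) and returned by
/// [`release`](Self::release) once the GPU has finished with them; a returned
/// buffer is only kept if its size still matches the pool's current
/// `buffer_size`, so growing the pool via [`reset`](Self::reset) naturally
/// retires undersized buffers.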
pub(crate) struct InstanceBufferPool {
buffer_size: usize,
buffers: Vec<metal::Buffer>,
}
impl Default for InstanceBufferPool {
fn default() -> Self {
Self {
buffer_size: 2 * 1024 * 1024,
buffers: Vec::new(),
}
}
}
pub(crate) struct InstanceBuffer {
metal_buffer: metal::Buffer,
size: usize,
}
impl InstanceBufferPool {
pub(crate) fn reset(&mut self, buffer_size: usize) {
self.buffer_size = buffer_size;
self.buffers.clear();
}
pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
let buffer = self.buffers.pop().unwrap_or_else(|| {
device.new_buffer(
self.buffer_size as u64,
MTLResourceOptions::StorageModeManaged,
)
});
InstanceBuffer {
metal_buffer: buffer,
size: self.buffer_size,
}
}
pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
if buffer.size == self.buffer_size {
self.buffers.push(buffer.metal_buffer)
}
}
}
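/// The macOS renderer: encodes a [`Scene`] into Metal draw calls against a
/// `CAMetalLayer` drawable, using offscreen textures for path rasterization
/// and backdrop blurs.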
pub(crate) struct MetalRenderer {
device: metal::Device,
layer: metal::MetalLayer,
presents_with_transaction: bool,
command_queue: CommandQueue,
paths_rasterization_pipeline_state: metal::RenderPipelineState,
path_sprites_pipeline_state: metal::RenderPipelineState,
shadows_pipeline_state: metal::RenderPipelineState,
quads_pipeline_state: metal::RenderPipelineState,
backdrop_blurs_pipeline_state: metal::RenderPipelineState,
underlines_pipeline_state: metal::RenderPipelineState,
monochrome_sprites_pipeline_state: metal::RenderPipelineState,
polychrome_sprites_pipeline_state: metal::RenderPipelineState,
surfaces_pipeline_state: metal::RenderPipelineState,
unit_vertices: metal::Buffer,
#[allow(clippy::arc_with_non_send_sync)]
instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
sprite_atlas: Arc<MetalAtlas>,
core_video_texture_cache: CVMetalTextureCache,
path_intermediate_texture: Option<metal::Texture>,
path_intermediate_msaa_texture: Option<metal::Texture>,
backdrop_texture: Option<metal::Texture>,
path_sample_count: u32,
}
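/// Vertex format consumed by the path rasterization shader. The layout must
/// stay in sync with the corresponding struct in the Metal shader source.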
#[repr(C)]
pub struct PathRasterizationVertex {
pub xy_position: Point<ScaledPixels>,
pub st_position: Point<f32>,
pub color: Background,
pub bounds: Bounds<ScaledPixels>,
}
impl MetalRenderer {
pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
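// Prefer a built-in (non-removable) GPU, and among those the low-power
// integrated one; fall back to the system default device if enumeration
// yields nothing.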
let device = if let Some(d) = metal::Device::all()
.into_iter()
.min_by_key(|d| (d.is_removable(), !d.is_low_power()))
{
d
} else {
log::error!(
"Unable to enumerate Metal devices; attempting to use system default device"
);
metal::Device::system_default().unwrap_or_else(|| {
log::error!("unable to access a compatible graphics device");
std::process::exit(1);
})
};
let layer = metal::MetalLayer::new();
layer.set_device(&device);
layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
layer.set_opaque(!transparent);
layer.set_maximum_drawable_count(3);
unsafe {
let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
let _: () = msg_send![
&*layer,
setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
| AutoresizingMask::HEIGHT_SIZABLE
];
}
#[cfg(feature = "runtime_shaders")]
let library = device
.new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
.expect("error building metal library");
#[cfg(not(feature = "runtime_shaders"))]
let library = device
.new_library_with_data(SHADERS_METALLIB)
.expect("error building metal library");
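// Packs a point into the bit layout of a Metal `float2` (x in the low
// 32 bits, y in the high 32 bits) so the vertex buffer can be built from
// plain u64s.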
fn to_float2_bits(point: PointF) -> u64 {
let mut output = point.y.to_bits() as u64;
output <<= 32;
output |= point.x.to_bits() as u64;
output
}
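// Two triangles covering the unit square; the instanced draws below stretch
// these into per-primitive quads in the vertex shaders.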
let unit_vertices = [
to_float2_bits(point(0., 0.)),
to_float2_bits(point(1., 0.)),
to_float2_bits(point(0., 1.)),
to_float2_bits(point(0., 1.)),
to_float2_bits(point(1., 0.)),
to_float2_bits(point(1., 1.)),
];
let unit_vertices = device.new_buffer_with_data(
unit_vertices.as_ptr() as *const c_void,
mem::size_of_val(&unit_vertices) as u64,
MTLResourceOptions::StorageModeManaged,
);
let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
&device,
&library,
"paths_rasterization",
"path_rasterization_vertex",
"path_rasterization_fragment",
MTLPixelFormat::BGRA8Unorm,
PATH_SAMPLE_COUNT,
);
let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
&device,
&library,
"path_sprites",
"path_sprite_vertex",
"path_sprite_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let shadows_pipeline_state = build_pipeline_state(
&device,
&library,
"shadows",
"shadow_vertex",
"shadow_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let quads_pipeline_state = build_pipeline_state(
&device,
&library,
"quads",
"quad_vertex",
"quad_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let backdrop_blurs_pipeline_state = build_pipeline_state(
&device,
&library,
"backdrop_blurs",
"backdrop_blur_vertex",
"backdrop_blur_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let underlines_pipeline_state = build_pipeline_state(
&device,
&library,
"underlines",
"underline_vertex",
"underline_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let monochrome_sprites_pipeline_state = build_pipeline_state(
&device,
&library,
"monochrome_sprites",
"monochrome_sprite_vertex",
"monochrome_sprite_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let polychrome_sprites_pipeline_state = build_pipeline_state(
&device,
&library,
"polychrome_sprites",
"polychrome_sprite_vertex",
"polychrome_sprite_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let surfaces_pipeline_state = build_pipeline_state(
&device,
&library,
"surfaces",
"surface_vertex",
"surface_fragment",
MTLPixelFormat::BGRA8Unorm,
);
let command_queue = device.new_command_queue();
let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
let core_video_texture_cache =
CVMetalTextureCache::new(None, device.clone(), None).unwrap();
Self {
device,
layer,
presents_with_transaction: false,
command_queue,
paths_rasterization_pipeline_state,
path_sprites_pipeline_state,
shadows_pipeline_state,
quads_pipeline_state,
backdrop_blurs_pipeline_state,
underlines_pipeline_state,
monochrome_sprites_pipeline_state,
polychrome_sprites_pipeline_state,
surfaces_pipeline_state,
unit_vertices,
instance_buffer_pool,
sprite_atlas,
core_video_texture_cache,
path_intermediate_texture: None,
path_intermediate_msaa_texture: None,
backdrop_texture: None,
path_sample_count: PATH_SAMPLE_COUNT,
}
}
pub fn layer(&self) -> &metal::MetalLayerRef {
&self.layer
}
pub fn layer_ptr(&self) -> *mut CAMetalLayer {
self.layer.as_ptr()
}
pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
&self.sprite_atlas
}
pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
self.presents_with_transaction = presents_with_transaction;
self.layer
.set_presents_with_transaction(presents_with_transaction);
}
pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
let size = NSSize {
width: size.width.0 as f64,
height: size.height.0 as f64,
};
unsafe {
let _: () = msg_send![
self.layer(),
setDrawableSize: size
];
}
let device_pixels_size = Size {
width: DevicePixels(size.width as i32),
height: DevicePixels(size.height as i32),
};
self.update_path_intermediate_textures(device_pixels_size);
self.update_backdrop_texture(device_pixels_size);
}
fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
if size.width.0 <= 0 || size.height.0 <= 0 {
self.path_intermediate_texture = None;
self.path_intermediate_msaa_texture = None;
return;
}
let texture_descriptor = metal::TextureDescriptor::new();
texture_descriptor.set_width(size.width.0 as u64);
texture_descriptor.set_height(size.height.0 as u64);
texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
texture_descriptor
.set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));
if self.path_sample_count > 1 {
let msaa_descriptor = texture_descriptor;
msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
msaa_descriptor.set_sample_count(self.path_sample_count as _);
self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
} else {
self.path_intermediate_msaa_texture = None;
}
}
fn update_backdrop_texture(&mut self, size: Size<DevicePixels>) {
if size.width.0 <= 0 || size.height.0 <= 0 {
self.backdrop_texture = None;
return;
}
let texture_descriptor = metal::TextureDescriptor::new();
texture_descriptor.set_width(size.width.0 as u64);
texture_descriptor.set_height(size.height.0 as u64);
texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
texture_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
texture_descriptor
.set_usage(metal::MTLTextureUsage::ShaderRead | metal::MTLTextureUsage::RenderTarget);
self.backdrop_texture = Some(self.device.new_texture(&texture_descriptor));
}
pub fn update_transparency(&self, transparent: bool) {
self.layer.set_opaque(!transparent);
}
pub fn destroy(&self) {
// No-op: Metal resources are released when the renderer is dropped.
}
pub fn draw(&mut self, scene: &Scene) {
let layer = self.layer.clone();
let viewport_size = layer.drawable_size();
let viewport_size: Size<DevicePixels> = size(
(viewport_size.width.ceil() as i32).into(),
(viewport_size.height.ceil() as i32).into(),
);
let needs_backdrop_texture = match &self.backdrop_texture {
Some(t) => {
t.width() != viewport_size.width.0 as u64
|| t.height() != viewport_size.height.0 as u64
}
None => true,
};
if needs_backdrop_texture {
self.update_backdrop_texture(viewport_size);
}
let Some(drawable) = layer.next_drawable() else {
log::error!(
"failed to retrieve next drawable, drawable size: {:?}",
viewport_size
);
return;
};
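// Encode the scene, retrying with a doubled instance buffer whenever the
// current one is too small to hold all of the primitive data.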
loop {
let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);
let command_buffer =
self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);
match command_buffer {
Ok(command_buffer) => {
let instance_buffer_pool = self.instance_buffer_pool.clone();
let instance_buffer = Cell::new(Some(instance_buffer));
let block = ConcreteBlock::new(move |_| {
if let Some(instance_buffer) = instance_buffer.take() {
instance_buffer_pool.lock().release(instance_buffer);
}
});
let block = block.copy();
command_buffer.add_completed_handler(&block);
if self.presents_with_transaction {
command_buffer.commit();
command_buffer.wait_until_scheduled();
drawable.present();
} else {
command_buffer.present_drawable(drawable);
command_buffer.commit();
}
return;
}
Err(err) => {
log::error!(
"failed to render: {err}; retrying with a larger instance buffer"
);
let mut instance_buffer_pool = self.instance_buffer_pool.lock();
let buffer_size = instance_buffer_pool.buffer_size;
if buffer_size >= 256 * 1024 * 1024 {
log::error!("instance buffer size grew too large: {}", buffer_size);
break;
}
instance_buffer_pool.reset(buffer_size * 2);
log::info!(
"increased instance buffer size to {}",
instance_buffer_pool.buffer_size
);
}
}
}
}
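/// Blit-copies the drawable's current contents into `backdrop_texture` so
/// that the backdrop blur fragment shader can sample what has been rendered
/// so far. Returns `false` if no backdrop texture is available or the
/// viewport is empty.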
fn copy_drawable_to_backdrop(
&self,
drawable: &metal::MetalDrawableRef,
viewport_size: Size<DevicePixels>,
command_buffer: &metal::CommandBufferRef,
) -> bool {
let Some(backdrop_texture) = &self.backdrop_texture else {
return false;
};
if viewport_size.width.0 <= 0 || viewport_size.height.0 <= 0 {
return false;
}
let blit = command_buffer.new_blit_command_encoder();
let origin = MTLOrigin { x: 0, y: 0, z: 0 };
let size = MTLSize {
width: viewport_size.width.0 as u64,
height: viewport_size.height.0 as u64,
depth: 1,
};
blit.copy_from_texture(
drawable.texture(),
0,
0,
origin,
size,
backdrop_texture,
0,
0,
origin,
);
blit.end_encoding();
true
}
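/// Encodes all batches of `scene` into a single command buffer. Most batches
/// share one render command encoder; backdrop blurs first snapshot the
/// drawable into the backdrop texture, and paths are rasterized into an
/// intermediate texture, so both end the current encoder and resume with a
/// new one that loads the drawable's existing contents.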
fn draw_primitives(
&mut self,
scene: &Scene,
instance_buffer: &mut InstanceBuffer,
drawable: &metal::MetalDrawableRef,
viewport_size: Size<DevicePixels>,
) -> Result<metal::CommandBuffer> {
let command_queue = self.command_queue.clone();
let command_buffer = command_queue.new_command_buffer();
let alpha = if self.layer.is_opaque() { 1. } else { 0. };
let mut instance_offset = 0;
let mut command_encoder = new_command_encoder(
command_buffer,
drawable,
viewport_size,
|color_attachment| {
color_attachment.set_load_action(metal::MTLLoadAction::Clear);
color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
},
);
for batch in scene.batches() {
let ok = match batch {
PrimitiveBatch::Shadows(shadows, transforms) => self.draw_shadows(
shadows,
transforms,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
PrimitiveBatch::Quads(quads, transforms) => self.draw_quads(
quads,
transforms,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
PrimitiveBatch::BackdropBlurs(blurs, transforms) => {
command_encoder.end_encoding();
let did_copy =
self.copy_drawable_to_backdrop(drawable, viewport_size, command_buffer);
command_encoder = new_command_encoder(
command_buffer,
drawable,
viewport_size,
|color_attachment| {
color_attachment.set_load_action(metal::MTLLoadAction::Load);
},
);
if did_copy {
self.draw_backdrop_blurs(
blurs,
transforms,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
)
} else {
false
}
}
PrimitiveBatch::Paths(paths) => {
command_encoder.end_encoding();
let did_draw = self.draw_paths_to_intermediate(
paths,
instance_buffer,
&mut instance_offset,
viewport_size,
command_buffer,
);
command_encoder = new_command_encoder(
command_buffer,
drawable,
viewport_size,
|color_attachment| {
color_attachment.set_load_action(metal::MTLLoadAction::Load);
},
);
if did_draw {
self.draw_paths_from_intermediate(
paths,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
)
} else {
false
}
}
PrimitiveBatch::Underlines(underlines, transforms) => self.draw_underlines(
underlines,
transforms,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
PrimitiveBatch::MonochromeSprites {
texture_id,
sprites,
} => self.draw_monochrome_sprites(
texture_id,
sprites,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
PrimitiveBatch::PolychromeSprites {
texture_id,
sprites,
transforms,
} => self.draw_polychrome_sprites(
texture_id,
sprites,
transforms,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
surfaces,
instance_buffer,
&mut instance_offset,
viewport_size,
command_encoder,
),
};
if !ok {
command_encoder.end_encoding();
anyhow::bail!(
"scene too large: {} paths, {} shadows, {} quads, {} blurs, {} underlines, {} mono, {} poly, {} surfaces",
scene.paths.len(),
scene.shadows.len(),
scene.quads.len(),
scene.backdrop_blurs.len(),
scene.underlines.len(),
scene.monochrome_sprites.len(),
scene.polychrome_sprites.len(),
scene.surfaces.len(),
);
}
}
command_encoder.end_encoding();
instance_buffer.metal_buffer.did_modify_range(NSRange {
location: 0,
length: instance_offset as NSUInteger,
});
Ok(command_buffer.to_owned())
}
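/// Rasterizes `paths` into the intermediate offscreen texture, resolving
/// through the MSAA texture when multisampling is enabled. Returns `false`
/// if the intermediate texture is missing or the vertices do not fit in the
/// instance buffer.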
fn draw_paths_to_intermediate(
&self,
paths: &[Path<ScaledPixels>],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_buffer: &metal::CommandBufferRef,
) -> bool {
if paths.is_empty() {
return true;
}
let Some(intermediate_texture) = &self.path_intermediate_texture else {
return false;
};
let render_pass_descriptor = metal::RenderPassDescriptor::new();
let color_attachment = render_pass_descriptor
.color_attachments()
.object_at(0)
.unwrap();
color_attachment.set_load_action(metal::MTLLoadAction::Clear);
color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));
if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
color_attachment.set_texture(Some(msaa_texture));
color_attachment.set_resolve_texture(Some(intermediate_texture));
color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
} else {
color_attachment.set_texture(Some(intermediate_texture));
color_attachment.set_store_action(metal::MTLStoreAction::Store);
}
let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
align_offset(instance_offset);
let mut vertices = Vec::new();
for path in paths {
vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
xy_position: v.xy_position,
st_position: v.st_position,
color: path.color,
bounds: path.bounds.intersect(&path.content_mask.bounds),
}));
}
let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
let next_offset = *instance_offset + vertices_bytes_len;
if next_offset > instance_buffer.size {
command_encoder.end_encoding();
return false;
}
command_encoder.set_vertex_buffer(
PathRasterizationInputIndex::Vertices as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_vertex_bytes(
PathRasterizationInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_buffer(
PathRasterizationInputIndex::Vertices as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
let buffer_contents =
unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
unsafe {
ptr::copy_nonoverlapping(
vertices.as_ptr() as *const u8,
buffer_contents,
vertices_bytes_len,
);
}
command_encoder.draw_primitives(
metal::MTLPrimitiveType::Triangle,
0,
vertices.len() as u64,
);
*instance_offset = next_offset;
command_encoder.end_encoding();
true
}
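// Each draw_* method below follows the same pattern: reserve an aligned
// region of the instance buffer, bind it to the shader's argument table,
// memcpy the instance data (and transforms, where present) into it, and
// issue one instanced draw of the unit quad. They return false when the
// reservation would overflow the buffer, triggering the retry in draw().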
fn draw_shadows(
&self,
shadows: &[Shadow],
shadow_transforms: &[TransformationMatrix],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if shadows.is_empty() {
return true;
}
debug_assert_eq!(shadows.len(), shadow_transforms.len());
align_offset(instance_offset);
command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
command_encoder.set_vertex_buffer(
ShadowInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
let shadows_offset = *instance_offset;
command_encoder.set_vertex_bytes(
ShadowInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
let shadow_bytes_len = mem::size_of_val(shadows);
let mut transforms_offset = shadows_offset + shadow_bytes_len;
align_offset(&mut transforms_offset);
let transform_bytes_len = mem::size_of_val(shadow_transforms);
let next_offset = transforms_offset + transform_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
ShadowInputIndex::Shadows as u64,
Some(&instance_buffer.metal_buffer),
shadows_offset as u64,
);
command_encoder.set_fragment_buffer(
ShadowInputIndex::Shadows as u64,
Some(&instance_buffer.metal_buffer),
shadows_offset as u64,
);
command_encoder.set_vertex_buffer(
ShadowInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
command_encoder.set_fragment_buffer(
ShadowInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
let shadow_contents = unsafe {
(instance_buffer.metal_buffer.contents() as *mut u8).add(shadows_offset)
};
let transform_contents = unsafe {
(instance_buffer.metal_buffer.contents() as *mut u8).add(transforms_offset)
};
unsafe {
ptr::copy_nonoverlapping(
shadows.as_ptr() as *const u8,
shadow_contents,
shadow_bytes_len,
);
ptr::copy_nonoverlapping(
shadow_transforms.as_ptr() as *const u8,
transform_contents,
transform_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
shadows.len() as u64,
);
*instance_offset = next_offset;
true
}
fn draw_quads(
&self,
quads: &[Quad],
quad_transforms: &[TransformationMatrix],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if quads.is_empty() {
return true;
}
debug_assert_eq!(quads.len(), quad_transforms.len());
align_offset(instance_offset);
command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
command_encoder.set_vertex_buffer(
QuadInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
let quads_offset = *instance_offset;
command_encoder.set_vertex_bytes(
QuadInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
let quad_bytes_len = mem::size_of_val(quads);
let mut transforms_offset = quads_offset + quad_bytes_len;
align_offset(&mut transforms_offset);
let transform_bytes_len = mem::size_of_val(quad_transforms);
let next_offset = transforms_offset + transform_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
QuadInputIndex::Quads as u64,
Some(&instance_buffer.metal_buffer),
quads_offset as u64,
);
command_encoder.set_fragment_buffer(
QuadInputIndex::Quads as u64,
Some(&instance_buffer.metal_buffer),
quads_offset as u64,
);
command_encoder.set_vertex_buffer(
QuadInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
command_encoder.set_fragment_buffer(
QuadInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
unsafe {
let quad_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(quads_offset);
ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, quad_contents, quad_bytes_len);
let transform_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(transforms_offset);
ptr::copy_nonoverlapping(
quad_transforms.as_ptr() as *const u8,
transform_contents,
transform_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
quads.len() as u64,
);
*instance_offset = next_offset;
true
}
fn draw_backdrop_blurs(
&self,
blurs: &[BackdropBlur],
blur_transforms: &[TransformationMatrix],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if blurs.is_empty() {
return true;
}
debug_assert_eq!(blurs.len(), blur_transforms.len());
align_offset(instance_offset);
let Some(backdrop_texture) = &self.backdrop_texture else {
return false;
};
command_encoder.set_render_pipeline_state(&self.backdrop_blurs_pipeline_state);
command_encoder.set_vertex_buffer(
BackdropBlurInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
let blurs_offset = *instance_offset;
command_encoder.set_vertex_bytes(
BackdropBlurInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_bytes(
BackdropBlurInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_texture(
BackdropBlurInputIndex::BackdropTexture as u64,
Some(backdrop_texture),
);
let blur_bytes_len = mem::size_of_val(blurs);
let mut transforms_offset = blurs_offset + blur_bytes_len;
align_offset(&mut transforms_offset);
let transform_bytes_len = mem::size_of_val(blur_transforms);
let next_offset = transforms_offset + transform_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
BackdropBlurInputIndex::BackdropBlurs as u64,
Some(&instance_buffer.metal_buffer),
blurs_offset as u64,
);
command_encoder.set_fragment_buffer(
BackdropBlurInputIndex::BackdropBlurs as u64,
Some(&instance_buffer.metal_buffer),
blurs_offset as u64,
);
command_encoder.set_vertex_buffer(
BackdropBlurInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
command_encoder.set_fragment_buffer(
BackdropBlurInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
unsafe {
let blur_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(blurs_offset);
ptr::copy_nonoverlapping(blurs.as_ptr() as *const u8, blur_contents, blur_bytes_len);
let transform_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(transforms_offset);
ptr::copy_nonoverlapping(
blur_transforms.as_ptr() as *const u8,
transform_contents,
transform_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
blurs.len() as u64,
);
*instance_offset = next_offset;
true
}
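/// Composites previously rasterized paths from the intermediate texture onto
/// the drawable by drawing textured sprites over their clipped bounds.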
fn draw_paths_from_intermediate(
&self,
paths: &[Path<ScaledPixels>],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
let Some(first_path) = paths.first() else {
return true;
};
let Some(ref intermediate_texture) = self.path_intermediate_texture else {
return false;
};
command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
command_encoder.set_vertex_buffer(
SpriteInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
command_encoder.set_vertex_bytes(
SpriteInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_texture(
SpriteInputIndex::AtlasTexture as u64,
Some(intermediate_texture),
);
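// If every path in this batch shares the same draw order, emit one sprite
// per path; otherwise composite the whole batch at once with a single
// sprite covering the union of the clipped bounds.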
let sprites: Vec<PathSprite> = if paths.last().unwrap().order == first_path.order {
paths
.iter()
.map(|path| PathSprite {
bounds: path.clipped_bounds(),
})
.collect()
} else {
let mut bounds = first_path.clipped_bounds();
for path in paths.iter().skip(1) {
bounds = bounds.union(&path.clipped_bounds());
}
vec![PathSprite { bounds }]
};
align_offset(instance_offset);
let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
let next_offset = *instance_offset + sprite_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
SpriteInputIndex::Sprites as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
let buffer_contents =
unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
unsafe {
ptr::copy_nonoverlapping(
sprites.as_ptr() as *const u8,
buffer_contents,
sprite_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
sprites.len() as u64,
);
*instance_offset = next_offset;
true
}
fn draw_underlines(
&self,
underlines: &[Underline],
underline_transforms: &[TransformationMatrix],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if underlines.is_empty() {
return true;
}
debug_assert_eq!(underlines.len(), underline_transforms.len());
align_offset(instance_offset);
command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
command_encoder.set_vertex_buffer(
UnderlineInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
let underlines_offset = *instance_offset;
command_encoder.set_vertex_bytes(
UnderlineInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
let underline_bytes_len = mem::size_of_val(underlines);
let mut transforms_offset = underlines_offset + underline_bytes_len;
align_offset(&mut transforms_offset);
let transform_bytes_len = mem::size_of_val(underline_transforms);
let next_offset = transforms_offset + transform_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
UnderlineInputIndex::Underlines as u64,
Some(&instance_buffer.metal_buffer),
underlines_offset as u64,
);
command_encoder.set_fragment_buffer(
UnderlineInputIndex::Underlines as u64,
Some(&instance_buffer.metal_buffer),
underlines_offset as u64,
);
command_encoder.set_vertex_buffer(
UnderlineInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
command_encoder.set_fragment_buffer(
UnderlineInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
unsafe {
let underline_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(underlines_offset);
ptr::copy_nonoverlapping(
underlines.as_ptr() as *const u8,
underline_contents,
underline_bytes_len,
);
let transform_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(transforms_offset);
ptr::copy_nonoverlapping(
underline_transforms.as_ptr() as *const u8,
transform_contents,
transform_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
underlines.len() as u64,
);
*instance_offset = next_offset;
true
}
fn draw_monochrome_sprites(
&self,
texture_id: AtlasTextureId,
sprites: &[MonochromeSprite],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if sprites.is_empty() {
return true;
}
align_offset(instance_offset);
let sprite_bytes_len = mem::size_of_val(sprites);
let next_offset = *instance_offset + sprite_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
// Only derive a pointer into the buffer once the reservation is known to fit.
let buffer_contents =
unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
let texture = self.sprite_atlas.metal_texture(texture_id);
let texture_size = size(
DevicePixels(texture.width() as i32),
DevicePixels(texture.height() as i32),
);
command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
command_encoder.set_vertex_buffer(
SpriteInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
command_encoder.set_vertex_buffer(
SpriteInputIndex::Sprites as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_vertex_bytes(
SpriteInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_vertex_bytes(
SpriteInputIndex::AtlasTextureSize as u64,
mem::size_of_val(&texture_size) as u64,
&texture_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_buffer(
SpriteInputIndex::Sprites as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
unsafe {
ptr::copy_nonoverlapping(
sprites.as_ptr() as *const u8,
buffer_contents,
sprite_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
sprites.len() as u64,
);
*instance_offset = next_offset;
true
}
fn draw_polychrome_sprites(
&self,
texture_id: AtlasTextureId,
sprites: &[PolychromeSprite],
sprite_transforms: &[TransformationMatrix],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
if sprites.is_empty() {
return true;
}
debug_assert_eq!(sprites.len(), sprite_transforms.len());
align_offset(instance_offset);
let texture = self.sprite_atlas.metal_texture(texture_id);
let texture_size = size(
DevicePixels(texture.width() as i32),
DevicePixels(texture.height() as i32),
);
command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
command_encoder.set_vertex_buffer(
SpriteInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
command_encoder.set_vertex_buffer(
SpriteInputIndex::Sprites as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_vertex_bytes(
SpriteInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_vertex_bytes(
SpriteInputIndex::AtlasTextureSize as u64,
mem::size_of_val(&texture_size) as u64,
&texture_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_buffer(
SpriteInputIndex::Sprites as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
let sprite_bytes_len = mem::size_of_val(sprites);
let sprites_offset = *instance_offset;
let mut transforms_offset = sprites_offset + sprite_bytes_len;
align_offset(&mut transforms_offset);
let transform_bytes_len = mem::size_of_val(sprite_transforms);
let next_offset = transforms_offset + transform_bytes_len;
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
SpriteInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
command_encoder.set_fragment_buffer(
SpriteInputIndex::Transforms as u64,
Some(&instance_buffer.metal_buffer),
transforms_offset as u64,
);
unsafe {
let sprite_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(sprites_offset);
ptr::copy_nonoverlapping(
sprites.as_ptr() as *const u8,
sprite_contents,
sprite_bytes_len,
);
let transform_contents =
(instance_buffer.metal_buffer.contents() as *mut u8).add(transforms_offset);
ptr::copy_nonoverlapping(
sprite_transforms.as_ptr() as *const u8,
transform_contents,
transform_bytes_len,
);
}
command_encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::Triangle,
0,
6,
sprites.len() as u64,
);
*instance_offset = next_offset;
true
}
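/// Draws video surfaces by wrapping each pixel buffer's planes in Metal
/// textures via the Core Video texture cache: the luma (Y) plane as R8Unorm
/// and the interleaved chroma (CbCr) plane as RG8Unorm.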
fn draw_surfaces(
&mut self,
surfaces: &[PaintSurface],
instance_buffer: &mut InstanceBuffer,
instance_offset: &mut usize,
viewport_size: Size<DevicePixels>,
command_encoder: &metal::RenderCommandEncoderRef,
) -> bool {
command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
command_encoder.set_vertex_buffer(
SurfaceInputIndex::Vertices as u64,
Some(&self.unit_vertices),
0,
);
command_encoder.set_vertex_bytes(
SurfaceInputIndex::ViewportSize as u64,
mem::size_of_val(&viewport_size) as u64,
&viewport_size as *const Size<DevicePixels> as *const _,
);
for surface in surfaces {
let texture_size = size(
DevicePixels::from(surface.image_buffer.get_width() as i32),
DevicePixels::from(surface.image_buffer.get_height() as i32),
);
assert_eq!(
surface.image_buffer.get_pixel_format(),
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
);
let y_texture = self
.core_video_texture_cache
.create_texture_from_image(
surface.image_buffer.as_concrete_TypeRef(),
None,
MTLPixelFormat::R8Unorm,
surface.image_buffer.get_width_of_plane(0),
surface.image_buffer.get_height_of_plane(0),
0,
)
.unwrap();
let cb_cr_texture = self
.core_video_texture_cache
.create_texture_from_image(
surface.image_buffer.as_concrete_TypeRef(),
None,
MTLPixelFormat::RG8Unorm,
surface.image_buffer.get_width_of_plane(1),
surface.image_buffer.get_height_of_plane(1),
1,
)
.unwrap();
align_offset(instance_offset);
let next_offset = *instance_offset + mem::size_of::<SurfaceBounds>();
if next_offset > instance_buffer.size {
return false;
}
command_encoder.set_vertex_buffer(
SurfaceInputIndex::Surfaces as u64,
Some(&instance_buffer.metal_buffer),
*instance_offset as u64,
);
command_encoder.set_vertex_bytes(
SurfaceInputIndex::TextureSize as u64,
mem::size_of_val(&texture_size) as u64,
&texture_size as *const Size<DevicePixels> as *const _,
);
command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
Some(metal::TextureRef::from_ptr(texture as *mut _))
});
command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
Some(metal::TextureRef::from_ptr(texture as *mut _))
});
unsafe {
let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
.add(*instance_offset)
as *mut SurfaceBounds;
ptr::write(
buffer_contents,
SurfaceBounds {
bounds: surface.bounds,
content_mask: surface.content_mask.clone(),
},
);
}
command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
*instance_offset = next_offset;
}
true
}
}
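/// Creates a render command encoder targeting the drawable's texture, with a
/// full-size viewport. The caller configures the color attachment's load
/// action: clear for the first pass, load for passes that resume drawing.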
fn new_command_encoder<'a>(
command_buffer: &'a metal::CommandBufferRef,
drawable: &'a metal::MetalDrawableRef,
viewport_size: Size<DevicePixels>,
configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
let render_pass_descriptor = metal::RenderPassDescriptor::new();
let color_attachment = render_pass_descriptor
.color_attachments()
.object_at(0)
.unwrap();
color_attachment.set_texture(Some(drawable.texture()));
color_attachment.set_store_action(metal::MTLStoreAction::Store);
configure_color_attachment(color_attachment);
let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
command_encoder.set_viewport(metal::MTLViewport {
originX: 0.0,
originY: 0.0,
width: i32::from(viewport_size.width) as f64,
height: i32::from(viewport_size.height) as f64,
znear: 0.0,
zfar: 1.0,
});
command_encoder
}
fn build_pipeline_state(
device: &metal::DeviceRef,
library: &metal::LibraryRef,
label: &str,
vertex_fn_name: &str,
fragment_fn_name: &str,
pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
let vertex_fn = library
.get_function(vertex_fn_name, None)
.expect("error locating vertex function");
let fragment_fn = library
.get_function(fragment_fn_name, None)
.expect("error locating fragment function");
let descriptor = metal::RenderPipelineDescriptor::new();
descriptor.set_label(label);
descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
color_attachment.set_pixel_format(pixel_format);
color_attachment.set_blending_enabled(true);
color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
device
.new_render_pipeline_state(&descriptor)
.expect("could not create render pipeline state")
}
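/// Builds the path-compositing pipeline. The intermediate path texture is
/// effectively premultiplied (paths are rasterized with premultiplied "over"
/// blending), so the source RGB blend factor is `One` rather than
/// `SourceAlpha`.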
fn build_path_sprite_pipeline_state(
device: &metal::DeviceRef,
library: &metal::LibraryRef,
label: &str,
vertex_fn_name: &str,
fragment_fn_name: &str,
pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
let vertex_fn = library
.get_function(vertex_fn_name, None)
.expect("error locating vertex function");
let fragment_fn = library
.get_function(fragment_fn_name, None)
.expect("error locating fragment function");
let descriptor = metal::RenderPipelineDescriptor::new();
descriptor.set_label(label);
descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
color_attachment.set_pixel_format(pixel_format);
color_attachment.set_blending_enabled(true);
color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
device
.new_render_pipeline_state(&descriptor)
.expect("could not create render pipeline state")
}
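/// Builds the pipeline that accumulates path coverage into the transparent
/// intermediate texture, optionally with MSAA. Both color and alpha use
/// premultiplied "over" blending (destination factors are
/// `OneMinusSourceAlpha`), so overlapping geometry composites correctly.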
fn build_path_rasterization_pipeline_state(
device: &metal::DeviceRef,
library: &metal::LibraryRef,
label: &str,
vertex_fn_name: &str,
fragment_fn_name: &str,
pixel_format: metal::MTLPixelFormat,
path_sample_count: u32,
) -> metal::RenderPipelineState {
let vertex_fn = library
.get_function(vertex_fn_name, None)
.expect("error locating vertex function");
let fragment_fn = library
.get_function(fragment_fn_name, None)
.expect("error locating fragment function");
let descriptor = metal::RenderPipelineDescriptor::new();
descriptor.set_label(label);
descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
if path_sample_count > 1 {
descriptor.set_raster_sample_count(path_sample_count as _);
descriptor.set_alpha_to_coverage_enabled(false);
}
let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
color_attachment.set_pixel_format(pixel_format);
color_attachment.set_blending_enabled(true);
color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
device
.new_render_pipeline_state(&descriptor)
.expect("could not create render pipeline state")
}
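/// Rounds `offset` up to the next multiple of 256 bytes, matching Metal's
/// buffer-offset alignment requirement on macOS (e.g. 1 becomes 256, while
/// 512 is unchanged).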
fn align_offset(offset: &mut usize) {
*offset = (*offset).div_ceil(256) * 256;
}
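// The enums below mirror the argument-table indices used by the shaders;
// their discriminants must stay in sync with the Metal shader source.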
#[repr(C)]
enum ShadowInputIndex {
Vertices = 0,
Shadows = 1,
ViewportSize = 2,
Transforms = 3,
}
#[repr(C)]
enum QuadInputIndex {
Vertices = 0,
Quads = 1,
ViewportSize = 2,
Transforms = 3,
}
#[repr(C)]
enum BackdropBlurInputIndex {
Vertices = 0,
BackdropBlurs = 1,
ViewportSize = 2,
BackdropTexture = 3,
Transforms = 4,
}
#[repr(C)]
enum UnderlineInputIndex {
Vertices = 0,
Underlines = 1,
ViewportSize = 2,
Transforms = 3,
}
#[repr(C)]
enum SpriteInputIndex {
Vertices = 0,
Sprites = 1,
ViewportSize = 2,
AtlasTextureSize = 3,
AtlasTexture = 4,
Transforms = 5,
}
#[repr(C)]
enum SurfaceInputIndex {
Vertices = 0,
Surfaces = 1,
ViewportSize = 2,
TextureSize = 3,
YTexture = 4,
CbCrTexture = 5,
}
#[repr(C)]
enum PathRasterizationInputIndex {
Vertices = 0,
ViewportSize = 1,
}
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
pub bounds: Bounds<ScaledPixels>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
pub bounds: Bounds<ScaledPixels>,
pub content_mask: ContentMask<ScaledPixels>,
}