use std::collections::BTreeMap;
use std::sync::Arc;
use crossbeam::sync::{Parker, Unparker};
use ordered_float::OrderedFloat;
use parking_lot::Mutex;
use vulkano::buffer::subbuffer::Subbuffer;
use vulkano::buffer::{Buffer, BufferCreateInfo, BufferUsage};
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
use vulkano::command_buffer::{
AutoCommandBufferBuilder, CommandBufferUsage, CopyBufferInfo, PrimaryCommandBufferAbstract,
};
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
use vulkano::device::{Device, Queue};
use vulkano::format::Format;
use vulkano::memory::allocator::{AllocationCreateInfo, MemoryUsage, StandardMemoryAllocator};
use vulkano::pipeline::ComputePipeline;
use vulkano::shader::ShaderModule;
use vulkano::sync::GpuFuture;
use crate::shaders::glyph_cs;
use crate::{ImtError, ImtGlyphBitmap, ImtParser, ImtShapedGlyph};
/// Controls how many rays are cast per sample point when deciding whether a
/// point lies inside the glyph outline. More rays raise quality and cost.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ImtFillQuality {
    Fast,
    Normal,
    Best,
}

impl ImtFillQuality {
    /// Number of rays cast for this quality level.
    pub fn ray_count(&self) -> usize {
        match *self {
            ImtFillQuality::Fast => 3,
            ImtFillQuality::Normal => 5,
            ImtFillQuality::Best => 13,
        }
    }
}
/// Controls the number of anti-aliasing sample points taken per pixel.
/// More samples raise quality and cost.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ImtSampleQuality {
    Fastest,
    Faster,
    Fast,
    Normal,
    Best,
}

impl ImtSampleQuality {
    /// Number of sample points for this quality level.
    ///
    /// Every count is a perfect square (1, 4, 9, 16, 25), so the samples
    /// form an N x N grid within the pixel.
    pub fn sample_count(&self) -> usize {
        let grid_dim: usize = match *self {
            ImtSampleQuality::Fastest => 1,
            ImtSampleQuality::Faster => 2,
            ImtSampleQuality::Fast => 3,
            ImtSampleQuality::Normal => 4,
            ImtSampleQuality::Best => 5,
        };
        grid_dim * grid_dim
    }
}
/// Options controlling how glyphs are rasterized.
#[derive(Clone, Debug, PartialEq)]
pub struct ImtRasterOpts {
    /// Quality of the fill (inside/outside) determination; maps to a ray count.
    pub fill_quality: ImtFillQuality,
    /// Quality of per-pixel anti-aliasing; maps to a sample count.
    pub sample_quality: ImtSampleQuality,
    /// Presumably aligns glyph placement to whole pixels — consumed by
    /// `ImtGlyphBitmap`, which is outside this file; verify there.
    pub align_whole_pixels: bool,
    /// When true, rasterize on the CPU; otherwise on the GPU. Forced by the
    /// `ImtRaster::new_cpu` / `new_gpu` constructors.
    pub cpu_rasterization: bool,
    /// Forwarded into `GpuRasterContext`; presumably controls whether the
    /// result is written to a Vulkan image — confirm in `ImtGlyphBitmap`.
    pub raster_to_image: bool,
    /// Image format forwarded into `GpuRasterContext` for GPU rasterization.
    pub raster_image_format: Format,
}
impl ImtRasterOpts {
    /// Rays cast per sample point, as determined by `fill_quality`.
    pub fn ray_count(&self) -> usize {
        self.fill_quality.ray_count()
    }

    /// Anti-aliasing sample points per pixel, as determined by
    /// `sample_quality`.
    pub fn sample_count(&self) -> usize {
        self.sample_quality.sample_count()
    }
}
impl Default for ImtRasterOpts {
    /// Balanced defaults: normal fill and sample quality, GPU rasterization,
    /// whole-pixel alignment, and an RGBA8 UNORM output image.
    fn default() -> Self {
        Self {
            fill_quality: ImtFillQuality::Normal,
            sample_quality: ImtSampleQuality::Normal,
            align_whole_pixels: true,
            cpu_rasterization: false,
            raster_to_image: true,
            raster_image_format: Format::R8G8B8A8_UNORM,
        }
    }
}
/// A shaped glyph paired with its rasterized bitmap.
pub struct ImtRasteredGlyph {
    /// The shaping information this bitmap was produced from.
    pub shaped: ImtShapedGlyph,
    /// The rasterized bitmap; `Arc`-shared because it is cached by
    /// `ImtRaster` and may be handed out to many callers.
    pub bitmap: Arc<ImtGlyphBitmap>,
}
/// State of a single `(text_height, glyph_index)` entry in the raster cache.
#[derive(Clone)]
enum RasterCacheState {
    /// Rasterization finished; the bitmap is ready for use.
    Completed(Arc<ImtGlyphBitmap>),
    /// Rasterization is in progress on another thread; the `Unparker`s wake
    /// threads that parked while waiting on this entry.
    Incomplete(Vec<Unparker>),
    /// Rasterization failed with this error; a later request retries.
    Errored(ImtError),
}
/// Rasterizes shaped glyphs into bitmaps, caching results keyed by
/// `(text_height, glyph_index)`. Exactly one of the GPU/CPU contexts is
/// populated, depending on which constructor (`new_gpu` / `new_cpu`) built it.
#[allow(dead_code)]
pub struct ImtRaster {
    opts: ImtRasterOpts,
    // Keyed by (text height, glyph index). OrderedFloat gives the f32 height
    // the total ordering required by BTreeMap keys.
    cache: Mutex<BTreeMap<(OrderedFloat<f32>, u16), RasterCacheState>>,
    // Some when constructed via `new_gpu`.
    gpu_raster_context: Option<GpuRasterContext>,
    // Some when constructed via `new_cpu`.
    cpu_raster_context: Option<CpuRasterContext>,
}
/// Vulkan resources used for GPU glyph rasterization, built once in
/// `ImtRaster::new_gpu` and reused for every glyph.
#[allow(dead_code)]
pub(crate) struct GpuRasterContext {
    pub device: Arc<Device>,
    pub queue: Arc<Queue>,
    pub mem_alloc: StandardMemoryAllocator,
    pub cmd_alloc: StandardCommandBufferAllocator,
    pub glyph_cs: Arc<ShaderModule>,
    // Device-local uniform buffer holding the shared sample/ray table.
    pub common_buf: Subbuffer<glyph_cs::Common>,
    pub pipeline: Arc<ComputePipeline>,
    pub set_alloc: StandardDescriptorSetAllocator,
    // Copied from `ImtRasterOpts` at construction time.
    pub raster_to_image: bool,
    pub raster_image_format: Format,
}
/// Precomputed sample/ray tables used for CPU glyph rasterization,
/// built once in `ImtRaster::new_cpu`.
pub(crate) struct CpuRasterContext {
    // Per-pixel sample offsets, an evenly spaced grid within (-1, 1) x (-1, 1).
    pub samples: Vec<[f32; 2]>,
    // Unit direction vectors evenly spaced around the full circle.
    pub rays: Vec<[f32; 2]>,
}
impl ImtRaster {
    /// Creates an `ImtRaster` that rasterizes glyphs on the GPU.
    ///
    /// Precomputes the per-pixel sample grid and ray directions, uploads them
    /// into a device-local uniform buffer, and builds the compute pipeline
    /// used by `raster_shaped_glyphs`. Forces `opts.cpu_rasterization` to
    /// `false`.
    pub fn new_gpu(
        device: Arc<Device>,
        queue: Arc<Queue>,
        mut opts: ImtRasterOpts,
    ) -> Result<Self, ImtError> {
        opts.cpu_rasterization = false;
        let glyph_cs = glyph_cs::load(device.clone()).unwrap();
        let mem_alloc = StandardMemoryAllocator::new_default(device.clone());
        let cmd_alloc = StandardCommandBufferAllocator::new(device.clone(), Default::default());

        // Each vec4 packs a sample offset in .xy and a ray direction in .zw.
        // 25 rows covers the largest sample count (ImtSampleQuality::Best).
        let mut samples_and_rays = [[0.0; 4]; 25];
        let sample_count = opts.sample_count();
        // Sample counts are perfect squares, so `w` is the exact grid dimension.
        let w = (sample_count as f32).sqrt() as usize;
        let mut sar_i = 0;

        // Evenly spaced w x w grid of sample offsets within (-1, 1).
        for x in 1..=w {
            for y in 1..=w {
                samples_and_rays[sar_i][0] = ((x as f32 / (w as f32 + 1.0)) * 2.0) - 1.0;
                samples_and_rays[sar_i][1] = ((y as f32 / (w as f32 + 1.0)) * 2.0) - 1.0;
                sar_i += 1;
            }
        }

        // Unit ray directions evenly spaced around the full circle.
        let ray_count = opts.ray_count();

        for i in 0..ray_count {
            let rad = (i as f32 * (360.0 / ray_count as f32)).to_radians();
            samples_and_rays[i][2] = rad.cos();
            samples_and_rays[i][3] = rad.sin();
        }

        // Stage the common table host-side, then copy it into a device-local
        // buffer that the compute shader binds as a uniform buffer.
        let common_cpu_buf = Buffer::from_data(
            &mem_alloc,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
            AllocationCreateInfo {
                usage: MemoryUsage::Upload,
                ..Default::default()
            },
            glyph_cs::Common {
                samples_and_rays,
                sample_count: sample_count as u32,
                ray_count: ray_count as u32,
            },
        )
        .unwrap();

        let common_dev_buf = Buffer::new_sized(
            &mem_alloc,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_DST | BufferUsage::UNIFORM_BUFFER,
                ..Default::default()
            },
            AllocationCreateInfo {
                usage: MemoryUsage::DeviceOnly,
                ..Default::default()
            },
        )
        .unwrap();

        let mut cmd_buf = AutoCommandBufferBuilder::primary(
            &cmd_alloc,
            queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )
        .unwrap();

        cmd_buf
            .copy_buffer(CopyBufferInfo::buffers(
                common_cpu_buf,
                common_dev_buf.clone(),
            ))
            .unwrap();

        // Block until the copy completes so the staging buffer can be freed
        // and the device-local buffer is valid before first use.
        cmd_buf
            .build()
            .unwrap()
            .execute(queue.clone())
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap()
            .wait(None)
            .unwrap();

        let pipeline = ComputePipeline::new(
            device.clone(),
            glyph_cs.entry_point("main").unwrap(),
            &(),
            None,
            |_| {},
        )
        .unwrap();

        let set_alloc = StandardDescriptorSetAllocator::new(device.clone());
        let raster_to_image = opts.raster_to_image;
        let raster_image_format = opts.raster_image_format;

        Ok(ImtRaster {
            opts,
            cache: Mutex::new(BTreeMap::new()),
            gpu_raster_context: Some(GpuRasterContext {
                device,
                queue,
                mem_alloc,
                cmd_alloc,
                glyph_cs,
                common_buf: common_dev_buf,
                pipeline,
                set_alloc,
                raster_to_image,
                raster_image_format,
            }),
            cpu_raster_context: None,
        })
    }

    /// Creates an `ImtRaster` that rasterizes glyphs on the CPU.
    ///
    /// Precomputes the same sample grid and ray directions as `new_gpu`, but
    /// keeps them host-side. Forces `opts.cpu_rasterization` to `true`.
    pub fn new_cpu(mut opts: ImtRasterOpts) -> Result<Self, ImtError> {
        opts.cpu_rasterization = true;
        let sample_count = opts.sample_count();
        let ray_count = opts.ray_count();
        let mut samples = Vec::with_capacity(sample_count);
        let mut rays = Vec::with_capacity(ray_count);
        // Sample counts are perfect squares, so `w` is the exact grid dimension.
        let w = (sample_count as f32).sqrt() as usize;

        // Evenly spaced w x w grid of sample offsets within (-1, 1).
        for x in 1..=w {
            for y in 1..=w {
                samples.push([
                    ((x as f32 / (w as f32 + 1.0)) * 2.0) - 1.0,
                    ((y as f32 / (w as f32 + 1.0)) * 2.0) - 1.0,
                ]);
            }
        }

        // Unit ray directions evenly spaced around the full circle.
        for i in 0..ray_count {
            let rad = (i as f32 * (360.0 / ray_count as f32)).to_radians();
            rays.push([rad.cos(), rad.sin()]);
        }

        Ok(ImtRaster {
            opts,
            cache: Mutex::new(BTreeMap::new()),
            gpu_raster_context: None,
            cpu_raster_context: Some(CpuRasterContext {
                samples,
                rays,
            }),
        })
    }

    /// Anti-aliasing sample points per pixel.
    pub fn sample_count(&self) -> usize {
        self.opts.sample_count()
    }

    /// Rays cast per sample point.
    pub fn ray_count(&self) -> usize {
        self.opts.ray_count()
    }

    /// Rasterizes `shaped_glyphs` at `text_height`, returning one
    /// `ImtRasteredGlyph` per input glyph.
    ///
    /// Bitmaps are cached per `(text_height, glyph_index)`. If another thread
    /// is already rasterizing a needed glyph, this thread parks until that
    /// attempt finishes; if the attempt errored, this thread retries it.
    ///
    /// # Errors
    /// Returns the first rasterization error encountered. The error is also
    /// recorded in the cache (and all parked waiters are woken) first.
    #[allow(unused_assignments)]
    pub fn raster_shaped_glyphs(
        &self,
        parser: &ImtParser,
        text_height: f32,
        shaped_glyphs: Vec<ImtShapedGlyph>,
    ) -> Result<Vec<ImtRasteredGlyph>, ImtError> {
        let mut rastered_glyphs_out = Vec::new();
        // The cache lock is held opportunistically across iterations;
        // `None` means "not currently held".
        let mut cache_lk_op = None;
        let height_key = OrderedFloat::from(text_height);

        'glyphs: for shaped in shaped_glyphs {
            let index = shaped.parsed.inner.glyph_index;

            if cache_lk_op.is_none() {
                cache_lk_op = Some(self.cache.lock());
            }

            let mut parker_op = None;

            if let Some(cache_state) = cache_lk_op.as_mut().unwrap().get_mut(&(height_key, index)) {
                match cache_state {
                    &mut RasterCacheState::Completed(ref bitmap) => {
                        rastered_glyphs_out.push(ImtRasteredGlyph {
                            shaped,
                            bitmap: bitmap.clone(),
                        });
                        continue;
                    },
                    &mut RasterCacheState::Incomplete(ref mut unparkers) => {
                        // Another thread is rasterizing this glyph; register
                        // to be woken when it finishes.
                        let parker = Parker::new();
                        unparkers.push(parker.unparker().clone());
                        parker_op = Some(parker);
                    },
                    // A previously failed raster: fall through and retry.
                    &mut RasterCacheState::Errored(_) => (),
                }
            }

            if let Some(parker) = parker_op {
                loop {
                    // Drop the lock while parked so the rasterizing thread can
                    // finish. `Parker` keeps a token if `unpark` races ahead of
                    // `park`, so the wakeup cannot be lost in this gap.
                    cache_lk_op = None;
                    parker.park();
                    cache_lk_op = Some(self.cache.lock());

                    let cache_state = cache_lk_op
                        .as_mut()
                        .unwrap()
                        .get_mut(&(height_key, index))
                        .unwrap();

                    match cache_state {
                        &mut RasterCacheState::Completed(ref bitmap) => {
                            rastered_glyphs_out.push(ImtRasteredGlyph {
                                shaped,
                                bitmap: bitmap.clone(),
                            });
                            continue 'glyphs;
                        },
                        &mut RasterCacheState::Incomplete(ref mut unparkers) => {
                            // FIX: observing `Incomplete` here means another
                            // thread saw the previous attempt error and started
                            // a retry, replacing the entry. Our unparker lived
                            // only in the *replaced* entry's list (and our park
                            // token is already consumed), so re-register before
                            // parking again — otherwise we would never be woken.
                            unparkers.push(parker.unparker().clone());
                            continue;
                        },
                        // The attempt we waited on failed: retry it ourselves.
                        &mut RasterCacheState::Errored(_) => break,
                    }
                }
            }

            if cache_lk_op.is_none() {
                cache_lk_op = Some(self.cache.lock());
            }

            // Claim this glyph: mark it in-progress so other threads park
            // instead of rasterizing it concurrently.
            cache_lk_op.as_mut().unwrap().insert(
                (height_key, index),
                RasterCacheState::Incomplete(Vec::new()),
            );

            // Release the lock for the (potentially slow) raster work.
            cache_lk_op = None;

            let mut bitmap =
                ImtGlyphBitmap::new(parser, shaped.parsed.clone(), text_height, &self.opts);
            bitmap.create_outline();

            let raster_result = if self.opts.cpu_rasterization {
                bitmap.raster_cpu(self.cpu_raster_context.as_ref().unwrap())
            } else {
                bitmap.raster_gpu(self.gpu_raster_context.as_ref().unwrap())
            };

            if let Err(e) = raster_result {
                // Record the error and wake every thread parked on this entry.
                cache_lk_op = Some(self.cache.lock());

                let old_state = cache_lk_op
                    .as_mut()
                    .unwrap()
                    .insert((height_key, index), RasterCacheState::Errored(e.clone()));

                if let Some(RasterCacheState::Incomplete(unparkers)) = old_state {
                    for unparker in unparkers {
                        unparker.unpark();
                    }
                }

                return Err(e);
            }

            let bitmap = Arc::new(bitmap);

            // Publish the finished bitmap and wake any parked waiters.
            cache_lk_op = Some(self.cache.lock());

            let old_state = cache_lk_op.as_mut().unwrap().insert(
                (height_key, index),
                RasterCacheState::Completed(bitmap.clone()),
            );

            if let Some(RasterCacheState::Incomplete(unparkers)) = old_state {
                for unparker in unparkers {
                    unparker.unpark();
                }
            }

            rastered_glyphs_out.push(ImtRasteredGlyph {
                shaped,
                bitmap: bitmap.clone(),
            });
        }

        Ok(rastered_glyphs_out)
    }
}