use std::collections::HashMap;
use std::sync::{Arc, RwLock};
/// Errors produced by texture-manager operations.
#[derive(Debug, thiserror::Error)]
pub enum TextureManagerError {
    /// No texture is currently stored under the given id.
    #[error("Texture {0} not found")]
    TextureNotFound(u64),
}
/// Stores GPU textures keyed by a caller-assigned id and caches the
/// bind groups built from them.
///
/// Cloning is cheap: every field is behind an `Arc`, so clones share the
/// same device, queue, sampler, storage, and cache.
#[derive(Clone)]
pub struct TextureManager {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    // Single sampler reused for every shape bind group (see `create_sampler`).
    sampler: Arc<wgpu::Sampler>,
    // Textures keyed by caller-assigned texture id.
    texture_storage: Arc<RwLock<HashMap<u64, wgpu::Texture>>>,
    // Bind groups keyed by `(texture_id, layout_epoch)`.
    shape_bind_group_cache: Arc<RwLock<BindGroupCache>>,
}
type BindGroupCache = HashMap<(u64, u64), Arc<wgpu::BindGroup>>;
impl TextureManager {
    /// Creates a manager sharing the given device and queue.
    ///
    /// One clamp-to-edge, linear-filtering sampler is created up front and
    /// reused for every bind group this manager builds.
    pub(crate) fn new(device: Arc<wgpu::Device>, queue: Arc<wgpu::Queue>) -> Self {
        let sampler = Self::create_sampler(&device);
        Self {
            device,
            queue,
            sampler: Arc::new(sampler),
            texture_storage: Arc::new(RwLock::new(HashMap::new())),
            shape_bind_group_cache: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Drops every stored texture and every cached bind group.
    pub fn clear(&self) {
        self.texture_storage.write().unwrap().clear();
        self.shape_bind_group_cache.write().unwrap().clear();
    }

    /// Returns `(stored_texture_count, cached_bind_group_count)`.
    pub fn size(&self) -> (usize, usize) {
        (
            self.texture_storage.read().unwrap().len(),
            self.shape_bind_group_cache.read().unwrap().len(),
        )
    }

    /// Builds the shared sampler: linear min/mag/mip filtering with all
    /// three addressing modes clamped to the edge.
    fn create_sampler(device: &wgpu::Device) -> wgpu::Sampler {
        device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        })
    }

    /// Removes every cached bind group that references `texture_id`.
    ///
    /// The cache write lock is confined to this helper so that no method
    /// ever holds the cache lock and the texture-storage lock at the same
    /// time. Keeping every path single-lock-at-a-time rules out the
    /// lock-order inversion the previous code had (eviction paths took
    /// cache -> storage while `get_or_create_shape_bind_group` took
    /// storage -> cache, which could deadlock under contention).
    fn evict_bind_groups_for(&self, texture_id: u64) {
        self.shape_bind_group_cache
            .write()
            .unwrap()
            .retain(|(cached_texture_id, _shape_id), _bind_group| {
                *cached_texture_id != texture_id
            });
    }

    /// Allocates (or replaces) an RGBA8-sRGB 2D texture under `texture_id`.
    ///
    /// Bind groups cached for any previous texture with the same id are
    /// evicted first, since they would still reference the old GPU resource.
    pub fn allocate_texture(&self, texture_id: u64, texture_dimensions: (u32, u32)) {
        // Eviction releases the cache lock before the storage lock below is
        // taken (see `evict_bind_groups_for`).
        self.evict_bind_groups_for(texture_id);
        let texture_extent = wgpu::Extent3d {
            width: texture_dimensions.0,
            height: texture_dimensions.1,
            depth_or_array_layers: 1,
        };
        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
            label: None,
            size: texture_extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        self.texture_storage
            .write()
            .unwrap()
            .insert(texture_id, texture);
    }

    /// Allocates a texture and immediately uploads `texture_data` into it.
    ///
    /// `texture_data` is expected to be tightly packed RGBA8 (4 bytes per
    /// pixel, `width * height` pixels) — wgpu validates the length on write.
    pub fn allocate_texture_with_data(
        &self,
        texture_id: u64,
        texture_dimensions: (u32, u32),
        texture_data: &[u8],
    ) {
        self.allocate_texture(texture_id, texture_dimensions);
        self.load_data_into_texture(texture_id, texture_dimensions, texture_data)
            .expect("texture was allocated just above and cannot be missing");
    }

    /// Uploads tightly packed RGBA8 `texture_data` into an existing texture.
    ///
    /// # Errors
    ///
    /// Returns [`TextureManagerError::TextureNotFound`] when no texture has
    /// been allocated under `texture_id`.
    pub fn load_data_into_texture(
        &self,
        texture_id: u64,
        texture_dimensions: (u32, u32),
        texture_data: &[u8],
    ) -> Result<(), TextureManagerError> {
        let texture_storage = self.texture_storage.read().unwrap();
        let texture = texture_storage
            .get(&texture_id)
            .ok_or(TextureManagerError::TextureNotFound(texture_id))?;
        let texture_extent = wgpu::Extent3d {
            width: texture_dimensions.0,
            height: texture_dimensions.1,
            depth_or_array_layers: 1,
        };
        self.write_image_bytes_to_texture(
            texture,
            texture_dimensions,
            texture_extent,
            texture_data,
        );
        Ok(())
    }

    /// Removes the texture stored under `texture_id` together with every
    /// bind group referencing it. Unknown ids are a no-op (beyond the
    /// cache sweep).
    pub fn remove_texture(&self, texture_id: u64) {
        // Single-lock-at-a-time: the cache lock is released inside the
        // helper before the storage lock below is taken.
        self.evict_bind_groups_for(texture_id);
        self.texture_storage.write().unwrap().remove(&texture_id);
    }

    /// Schedules a full-texture upload of `texture_data_bytes` on the queue.
    ///
    /// Rows are assumed tightly packed at 4 bytes per texel (RGBA8).
    fn write_image_bytes_to_texture(
        &self,
        texture: &wgpu::Texture,
        texture_dimensions: (u32, u32),
        texture_extent: wgpu::Extent3d,
        texture_data_bytes: &[u8],
    ) {
        self.queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            texture_data_bytes,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(4 * texture_dimensions.0),
                rows_per_image: Some(texture_dimensions.1),
            },
            texture_extent,
        );
    }

    /// Returns the bind group cached for `(texture_id, layout_epoch)`,
    /// creating and caching one on a miss.
    ///
    /// `layout_epoch` must change whenever `layout` is recreated, so stale
    /// bind groups built against an old layout are never served.
    ///
    /// # Errors
    ///
    /// Returns [`TextureManagerError::TextureNotFound`] when no texture has
    /// been allocated under `texture_id`.
    pub(crate) fn get_or_create_shape_bind_group(
        &self,
        layout: &wgpu::BindGroupLayout,
        layout_epoch: u64,
        texture_id: u64,
    ) -> Result<Arc<wgpu::BindGroup>, TextureManagerError> {
        let cache_key = (texture_id, layout_epoch);
        if let Some(bind_group) = self
            .shape_bind_group_cache
            .read()
            .unwrap()
            .get(&cache_key)
            .cloned()
        {
            return Ok(bind_group);
        }
        // Build the bind group while holding only the storage lock, and drop
        // that lock (end of this scope) before taking the cache lock below.
        // Holding both at once would invert the acquisition order used by
        // the eviction paths and could deadlock against a concurrent
        // `allocate_texture` / `remove_texture`.
        let bind_group = {
            let storage = self.texture_storage.read().unwrap();
            let texture = storage
                .get(&texture_id)
                .ok_or(TextureManagerError::TextureNotFound(texture_id))?;
            let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
            Arc::new(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&self.sampler),
                    },
                ],
                label: Some("shape_texture_bind_group_cached"),
            }))
        };
        // Two threads may race past the miss and both build a bind group;
        // the last insert wins, which is harmless (both are valid).
        self.shape_bind_group_cache
            .write()
            .unwrap()
            .insert(cache_key, Arc::clone(&bind_group));
        Ok(bind_group)
    }

    /// Returns `true` if a texture is currently stored under `texture_id`.
    pub fn is_texture_loaded(&self, texture_id: u64) -> bool {
        self.texture_storage
            .read()
            .unwrap()
            .contains_key(&texture_id)
    }
}
/// Decodes one 8-bit sRGB channel value into linear light in `[0, 1]`,
/// applying the piecewise sRGB EOTF (linear segment below 0.04045,
/// power-2.4 curve above).
fn srgb_to_linear_u8(c: u8) -> f32 {
    let encoded = f32::from(c) / 255.0;
    match encoded <= 0.04045 {
        true => encoded / 12.92,
        false => ((encoded + 0.055) / 1.055).powf(2.4),
    }
}
/// Encodes a linear-light value as an 8-bit sRGB channel, rounding to the
/// nearest integer. The input is clamped to `[0, 1]` before encoding.
fn linear_to_srgb_u8(x: f32) -> u8 {
    let lin = x.clamp(0.0, 1.0);
    let srgb = match lin <= 0.0031308 {
        true => lin * 12.92,
        false => 1.055 * lin.powf(1.0 / 2.4) - 0.055,
    };
    (srgb.clamp(0.0, 1.0) * 255.0 + 0.5).floor() as u8
}
/// Premultiplies RGBA8 pixel data by its alpha channel, in place.
///
/// Each pixel's color channels are decoded from sRGB to linear light,
/// scaled by the pixel's alpha (`a / 255`), and re-encoded to sRGB.
/// The alpha byte itself is left untouched.
///
/// # Panics
///
/// Panics if `pixels.len()` is not a multiple of 4.
pub fn premultiply_rgba8_srgb_inplace(pixels: &mut [u8]) {
    assert!(
        pixels.len().is_multiple_of(4),
        "RGBA8 data length must be multiple of 4"
    );
    // `chunks_exact_mut` (rather than `chunks_mut`) guarantees every chunk
    // is exactly 4 bytes, letting the compiler hoist the per-chunk length
    // checks; the assert above ensures there is no remainder.
    for px in pixels.chunks_exact_mut(4) {
        let alpha = f32::from(px[3]) / 255.0;
        // Scale R, G, B in linear light; same math for each channel.
        for channel in &mut px[..3] {
            *channel = linear_to_srgb_u8(srgb_to_linear_u8(*channel) * alpha);
        }
    }
}