use std::{marker::PhantomData, ops::Index};
use bytemuck::NoUninit;
use wgpu::util::DeviceExt as _;
/// GPU-side binding state for a single uniform value of type `T`.
///
/// Created by [`UniformState::new`], which uploads `T`'s bytes into a
/// uniform buffer and binds it at binding 0. Only the layout and bind
/// group are retained; the buffer handle is not stored here.
/// NOTE(review): the buffer is created with `COPY_DST` usage but no
/// handle is kept, so the value cannot be rewritten after creation —
/// confirm this is intentional.
pub(crate) struct UniformState<T: NoUninit> {
pub(crate) bind_group_layout: wgpu::BindGroupLayout,
pub(crate) bind_group: wgpu::BindGroup,
// Ties this state to the uniform's Rust type without storing a value.
_phantom: PhantomData<T>,
}
impl<T: NoUninit> UniformState<T> {
    /// Uploads `initial_value` into a freshly created uniform buffer and
    /// builds the matching bind group layout and bind group (vertex-stage
    /// uniform at binding 0).
    ///
    /// # Panics
    ///
    /// Panics when the byte size of `T` is not a multiple of 16.
    pub(crate) fn new(device: &wgpu::Device, initial_value: &T) -> Self {
        let bytes = bytemuck::bytes_of(initial_value);
        assert!(
            bytes.len().is_multiple_of(16),
            "Uniform of type '{}' is not aligned to 16 bytes",
            std::any::type_name::<T>(),
        );

        // Single uniform-buffer entry at binding 0, visible to the vertex stage.
        let layout_entry = wgpu::BindGroupLayoutEntry {
            binding: 0,
            visibility: wgpu::ShaderStages::VERTEX,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Uniform,
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Uniform Bind Group Layout"),
            entries: &[layout_entry],
        });

        // Buffer is initialized with the value's bytes up front.
        let buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Uniform Buffer"),
            contents: bytes,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Uniform Bind Group"),
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(buffer.as_entire_buffer_binding()),
            }],
        });

        Self {
            bind_group_layout,
            bind_group,
            _phantom: PhantomData,
        }
    }
}
/// GPU-side binding state for a uniform array of `T`, mirrored by a
/// CPU-side `Vec<T>` so elements can be appended, overwritten, and read
/// back via `Index`.
pub(crate) struct UniformArrayState<T: NoUninit + Default> {
pub(crate) bind_group_layout: wgpu::BindGroupLayout,
pub(crate) bind_group: wgpu::BindGroup,
// GPU buffer handle is retained so `push`/`set` can write updates later.
buffer: wgpu::Buffer,
// CPU mirror of the buffer contents; the source of truth for `Index`.
local_buffer: Vec<T>,
}
impl<T: NoUninit + Default> UniformArrayState<T> {
    /// Fixed size of the GPU allocation backing the array (16 KiB).
    const MAX_BYTES: u64 = 0x4000;
    /// Maximum number of `T` elements that fit in `MAX_BYTES`.
    const MAX_ITEMS: u64 = Self::MAX_BYTES / std::mem::size_of::<T>() as u64;

    /// Creates the array buffer, uploads `preallocated_items` default
    /// values, and builds the bind group layout and bind group
    /// (vertex-stage uniform at binding 0).
    ///
    /// # Panics
    ///
    /// Panics when the size of `T` is not a multiple of 16, or when
    /// `preallocated_items` exceeds [`Self::MAX_ITEMS`].
    pub(crate) fn new(
        preallocated_items: usize,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> Self {
        assert!(
            std::mem::size_of::<T>().is_multiple_of(16),
            "Uniform of type '{}' is not aligned to 16 bytes",
            std::any::type_name::<T>(),
        );
        // Filling exactly MAX_ITEMS slots still fits in MAX_BYTES.
        assert!(
            preallocated_items <= Self::MAX_ITEMS as usize,
            "Preallocation exceeds uniform array capacity",
        );
        let local_buffer = vec![T::default(); preallocated_items];
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Uniform Array Buffer"),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
            size: Self::MAX_BYTES,
        });
        let data = bytemuck::cast_slice(&local_buffer);
        // Upload the preallocated defaults at the *start* of the buffer
        // (offset 0). Passing a non-zero offset here would leave the
        // first `preallocated_items` slots uninitialized on the GPU.
        queue.write_buffer(&buffer, 0, data);
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Uniform Bind Group Layout"),
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::VERTEX,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Uniform Bind Group"),
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(buffer.as_entire_buffer_binding()),
            }],
        });
        Self {
            bind_group_layout,
            bind_group,
            buffer,
            local_buffer,
        }
    }

    /// Appends `value` to the array, uploads it to the GPU, and returns
    /// the index of the new element.
    ///
    /// # Panics
    ///
    /// Panics when the array is already at [`Self::MAX_ITEMS`] capacity.
    pub(crate) fn push(&mut self, value: &T, queue: &wgpu::Queue) -> u64 {
        let index = self.local_buffer.len() as u64;
        // `index` is the slot about to be written, so it must be strictly
        // below capacity: index == MAX_ITEMS would start the write at
        // offset MAX_BYTES, past the end of the GPU buffer.
        assert!(index < Self::MAX_ITEMS, "Uniform value out of range");
        self.local_buffer.push(*value);
        let data = bytemuck::bytes_of(value);
        queue.write_buffer(&self.buffer, index * std::mem::size_of::<T>() as u64, data);
        index
    }

    /// Overwrites the element at `index` both locally and on the GPU.
    ///
    /// # Panics
    ///
    /// Panics when `index` is out of bounds of the local buffer.
    #[cfg(feature = "embed-assets")]
    pub(crate) fn set(&mut self, index: usize, value: &T, queue: &wgpu::Queue) {
        self.local_buffer[index] = *value;
        let data = bytemuck::bytes_of(value);
        queue.write_buffer(
            &self.buffer,
            (index * std::mem::size_of::<T>()) as u64,
            data,
        );
    }
}
impl<T: NoUninit + Default> Index<usize> for UniformArrayState<T> {
    type Output = T;

    /// Reads back the CPU-side copy of the element at `index`.
    ///
    /// # Panics
    ///
    /// Panics when no value has been stored at `index`.
    fn index(&self, index: usize) -> &Self::Output {
        match self.local_buffer.get(index) {
            Some(value) => value,
            None => panic!("Uniform array value not set"),
        }
    }
}