use re_log::ResultExt as _;
use crate::wgpu_resources::BindGroupEntry;
use crate::{Label, RenderContext};
/// Compile-time guard that `T` is a valid uniform-buffer element type.
///
/// Uniform-buffer binding offsets have to be aligned (here: 256 bytes), so every
/// element packed into a batched uniform buffer must have a size that is a
/// non-zero multiple of 256 bytes. Evaluating [`Self::CHECK`] for an invalid `T`
/// fails compilation.
struct UniformBufferSizeCheck<T> {
    pub _marker: std::marker::PhantomData<T>,
}

impl<T> UniformBufferSizeCheck<T> {
    /// Referencing this associated constant forces the assertion to be
    /// evaluated at compile time for the concrete `T`.
    const CHECK: () = assert!(
        std::mem::size_of::<T>() > 0 && std::mem::size_of::<T>() % 256 == 0,
        "Uniform buffers need to have a size that is a multiple of 256 bytes.
Use types like `F32RowPadded` or `PaddingRow` to pad out as needed."
    );
}
/// Allocates a single uniform buffer holding all elements of `content` and
/// returns one [`BindGroupEntry`] per element.
///
/// Each element sits at an offset that is a multiple of its own size; the
/// compile-time [`UniformBufferSizeCheck`] guarantees that size is a non-zero
/// multiple of 256 bytes, satisfying uniform-buffer offset alignment.
///
/// Returns an empty vector when `content` is empty, or when staging-buffer
/// allocation fails (the error is logged rather than propagated).
pub fn create_and_fill_uniform_buffer_batch<T: bytemuck::Pod + Send + Sync>(
    ctx: &RenderContext,
    label: Label,
    content: impl ExactSizeIterator<Item = T>,
) -> Vec<BindGroupEntry> {
    re_tracing::profile_function!(label.get());

    // Force evaluation of the compile-time size check for this concrete `T`.
    #[expect(clippy::let_unit_value)]
    let _ = UniformBufferSizeCheck::<T>::CHECK;

    if content.len() == 0 {
        return Vec::new();
    }

    let element_count = content.len() as u64;
    let bytes_per_element = std::mem::size_of::<T>() as u64;

    // One GPU-side buffer holding all elements back to back.
    let gpu_buffer = ctx.gpu_resources.buffers.alloc(
        &ctx.device,
        &crate::wgpu_resources::BufferDesc {
            label,
            size: element_count * bytes_per_element,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        },
    );

    // Stage the data CPU-side; on allocation failure log the error and bail.
    let Some(mut staging) = ctx
        .cpu_write_gpu_read_belt
        .lock()
        .allocate::<T>(&ctx.device, &ctx.gpu_resources.buffers, element_count as _)
        .ok_or_log_error()
    else {
        return Vec::new();
    };
    staging.extend(content).ok_or_log_error();
    staging
        .copy_to_buffer(
            ctx.active_frame.before_view_builder_encoder.lock().get(),
            &gpu_buffer,
            0,
        )
        .ok_or_log_error();

    // One bind group entry per element, each referencing its own slice.
    (0..element_count)
        .map(|element_index| BindGroupEntry::Buffer {
            handle: gpu_buffer.handle,
            offset: element_index * bytes_per_element,
            // Unwrap can't fail: the compile-time check guarantees size > 0.
            size: Some(std::num::NonZeroU64::new(bytes_per_element).unwrap()),
        })
        .collect()
}
/// Allocates a uniform buffer for a single element and returns a
/// [`BindGroupEntry`] pointing at it.
///
/// Convenience wrapper around [`create_and_fill_uniform_buffer_batch`]
/// for the one-element case.
///
/// # Panics
///
/// Panics if the batched call returns no entry, which happens when its
/// internal staging-buffer allocation fails (the underlying error is logged
/// by the batch function).
pub fn create_and_fill_uniform_buffer<T: bytemuck::Pod + Send + Sync>(
    ctx: &RenderContext,
    label: Label,
    content: T,
) -> BindGroupEntry {
    create_and_fill_uniform_buffer_batch(ctx, label, std::iter::once(content))
        .into_iter()
        .next()
        // `unwrap()` here used to produce an uninformative panic in the one
        // failure mode that actually occurs (staging allocation failure).
        .expect("uniform buffer allocation failed for a single-element batch")
}