use alloc::vec::Vec;
use bevy_mesh::Indices;
use core::{
fmt::{self, Display, Formatter},
ops::Range,
};
use nonmax::NonMaxU32;
use bevy_app::{App, Plugin};
use bevy_asset::AssetId;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{Res, ResMut},
world::{FromWorld, World},
};
use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet};
use bevy_utils::default;
use offset_allocator::{Allocation, Allocator};
use tracing::error;
use wgpu::{
BufferDescriptor, BufferSize, BufferUsages, CommandEncoderDescriptor, DownlevelFlags,
COPY_BUFFER_ALIGNMENT,
};
use crate::{
mesh::{Mesh, MeshVertexBufferLayouts, RenderMesh},
render_asset::{prepare_assets, ExtractedAssets},
render_resource::Buffer,
renderer::{RenderAdapter, RenderDevice, RenderQueue},
Render, RenderApp, RenderSystems,
};
/// Plugin that registers the [`MeshAllocator`] resource and its
/// allocation/free system with the render app.
pub struct MeshAllocatorPlugin;
/// Manages the slabs (shared or per-mesh GPU buffers) in which mesh vertex
/// and index data is allocated.
#[derive(Resource)]
pub struct MeshAllocator {
    // Every live slab, keyed by its id.
    slabs: HashMap<SlabId, Slab>,
    // For each element layout, the ids of slabs that store elements of that
    // layout; consulted as candidates when allocating into a general slab.
    slab_layouts: HashMap<ElementLayout, Vec<SlabId>>,
    // Maps a mesh asset id to the slab holding its vertex data.
    mesh_id_to_vertex_slab: HashMap<AssetId<Mesh>, SlabId>,
    // Maps a mesh asset id to the slab holding its index data.
    mesh_id_to_index_slab: HashMap<AssetId<Mesh>, SlabId>,
    // The id assigned to the next slab that gets created.
    next_slab_id: SlabId,
    // Whether vertex data may share "general" slabs; set from the adapter's
    // `DownlevelFlags::BASE_VERTEX` support in the `FromWorld` impl.
    general_vertex_slabs_supported: bool,
    /// Additional [`BufferUsages`] OR'd into every buffer this allocator
    /// creates (both general slabs and large-object buffers).
    pub extra_buffer_usages: BufferUsages,
}
/// Tunable parameters controlling how mesh slabs are sized and grown.
#[derive(Resource)]
pub struct MeshAllocatorSettings {
    /// The initial size of a newly-created general slab, in bytes.
    pub min_slab_size: u64,
    /// The size, in bytes, that a general slab will never grow beyond.
    pub max_slab_size: u64,
    /// Meshes whose slot-aligned size reaches this many bytes (capped by
    /// `max_slab_size`) get a dedicated large-object buffer instead of a
    /// shared slab.
    pub large_threshold: u64,
    /// Multiplier applied to a slab's slot capacity each time it must grow.
    pub growth_factor: f64,
}
impl Default for MeshAllocatorSettings {
    /// Default tuning: 1 MiB starting slabs, 512 MiB maximum slab size,
    /// a 256 MiB large-object threshold, and 1.5x growth per resize.
    fn default() -> Self {
        const MIB: u64 = 1024 * 1024;
        Self {
            min_slab_size: MIB,
            max_slab_size: 512 * MIB,
            large_threshold: 256 * MIB,
            growth_factor: 1.5,
        }
    }
}
/// A borrowed view of the region of a slab buffer that one mesh occupies.
pub struct MeshBufferSlice<'a> {
    /// The GPU buffer that contains the mesh data.
    pub buffer: &'a Buffer,
    /// The range within `buffer` belonging to the mesh, measured in
    /// *elements* (vertices or indices), not bytes.
    pub range: Range<u32>,
}
/// Uniquely identifies a slab.
///
/// Backed by [`NonMaxU32`] so `Option<SlabId>` carries no extra size cost.
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct SlabId(pub NonMaxU32);
/// Backing storage for mesh data: either a shared suballocated buffer or a
/// dedicated buffer for a single large mesh.
#[expect(
    clippy::large_enum_variant,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
enum Slab {
    /// A shared slab in which multiple meshes are suballocated.
    General(GeneralSlab),
    /// A buffer owned entirely by one large mesh.
    LargeObject(LargeObjectSlab),
}
impl Slab {
    /// Returns the size in bytes of the GPU buffer backing this slab, or
    /// zero when no buffer has been created for it yet.
    pub fn buffer_size(&self) -> u64 {
        let maybe_buffer = match self {
            Self::General(general) => general.buffer.as_ref(),
            Self::LargeObject(large) => large.buffer.as_ref(),
        };
        maybe_buffer.map_or(0, |buffer| buffer.size())
    }
}
/// A shared slab into which multiple meshes of the same element layout are
/// suballocated.
struct GeneralSlab {
    // Suballocator handing out slot ranges within the slab.
    allocator: Allocator,
    // The backing GPU buffer; `None` until (re)created by `reallocate_slab`.
    buffer: Option<Buffer>,
    // Allocations whose data has already been copied into `buffer`.
    resident_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,
    // Allocations reserved this frame whose data has not been copied yet.
    pending_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,
    // The element layout shared by every mesh in this slab.
    element_layout: ElementLayout,
    // The slab's current capacity, in slots.
    current_slot_capacity: u32,
}
/// A slab whose buffer is dedicated to a single large mesh.
struct LargeObjectSlab {
    // The backing GPU buffer; `None` until the mesh data is copied in.
    buffer: Option<Buffer>,
    // The layout of the elements stored in the buffer.
    element_layout: ElementLayout,
}
/// Whether a slab stores vertex data or index data.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum ElementClass {
    Vertex,
    Index,
}
/// Outcome of asking a general slab to grow to fit a new allocation.
enum SlabGrowthResult {
    /// The slab already has enough capacity.
    NoGrowthNeeded,
    /// The slab grew; its buffer must be reallocated with the recorded
    /// pre-growth capacity copied over.
    NeededGrowth(SlabToReallocate),
    /// The slab is at its maximum size and cannot fit the allocation.
    CantGrow,
}
/// Describes the size and class of the elements a slab stores; slabs only
/// share meshes with identical layouts.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct ElementLayout {
    // Vertex or index data.
    class: ElementClass,
    // Size in bytes of a single element.
    size: u64,
    // Number of elements grouped into one allocation slot, chosen so that a
    // slot's byte size is a multiple of `COPY_BUFFER_ALIGNMENT`.
    elements_per_slot: u32,
}
/// The location of one mesh's data: which slab it lives in, and where
/// within that slab.
struct MeshAllocation {
    slab_id: SlabId,
    slab_allocation: SlabAllocation,
}
/// A suballocation within a general slab, measured in slots.
#[derive(Clone)]
struct SlabAllocation {
    // The underlying `offset_allocator` allocation (offset is in slots).
    allocation: Allocation,
    // Number of slots the allocation spans.
    slot_count: u32,
}
/// Set of slabs whose buffers must be (re)created this frame, with the
/// information needed to copy their old contents forward.
#[derive(Default, Deref, DerefMut)]
struct SlabsToReallocate(HashMap<SlabId, SlabToReallocate>);
/// Per-slab reallocation info.
#[derive(Default)]
struct SlabToReallocate {
    // The slab's slot capacity before growth; only this many slots' worth of
    // bytes are copied from the old buffer to the new one.
    old_slot_capacity: u32,
}
impl Display for SlabId {
    /// Renders the slab id as its underlying integer value.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.0, f)
    }
}
impl Plugin for MeshAllocatorPlugin {
    /// Registers the allocator settings and schedules mesh
    /// allocation/freeing to run before render-mesh asset preparation.
    fn build(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<MeshAllocatorSettings>()
            .add_systems(
                Render,
                allocate_and_free_meshes
                    .in_set(RenderSystems::PrepareAssets)
                    // Meshes must have slab slices before `RenderMesh`
                    // preparation consumes them.
                    .before(prepare_assets::<RenderMesh>),
            );
    }
    /// Creates the [`MeshAllocator`] resource.
    ///
    /// Deferred to `finish` because `FromWorld` for [`MeshAllocator`] needs
    /// the `RenderAdapter`, which is only available after renderer init.
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app.init_resource::<MeshAllocator>();
    }
}
impl FromWorld for MeshAllocator {
    /// Builds an empty allocator, probing the render adapter for
    /// `BASE_VERTEX` support to decide whether vertex data may be packed
    /// into shared general slabs.
    fn from_world(world: &mut World) -> Self {
        let supports_base_vertex = world
            .resource::<RenderAdapter>()
            .get_downlevel_capabilities()
            .flags
            .contains(DownlevelFlags::BASE_VERTEX);
        Self {
            slabs: HashMap::default(),
            slab_layouts: HashMap::default(),
            mesh_id_to_vertex_slab: HashMap::default(),
            mesh_id_to_index_slab: HashMap::default(),
            next_slab_id: default(),
            general_vertex_slabs_supported: supports_base_vertex,
            extra_buffer_usages: BufferUsages::empty(),
        }
    }
}
/// System that releases the allocations of removed/modified meshes and then
/// allocates and uploads the data of newly-extracted meshes.
///
/// Freeing runs first so that modified meshes give up their old slots before
/// being re-allocated.
pub fn allocate_and_free_meshes(
    mut mesh_allocator: ResMut<MeshAllocator>,
    mesh_allocator_settings: Res<MeshAllocatorSettings>,
    extracted_meshes: Res<ExtractedAssets<RenderMesh>>,
    mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    mesh_allocator.free_meshes(&extracted_meshes);
    mesh_allocator.allocate_meshes(
        &mesh_allocator_settings,
        &extracted_meshes,
        &mut mesh_vertex_buffer_layouts,
        &render_device,
        &render_queue,
    );
}
impl MeshAllocator {
    /// Returns the buffer and element range holding the vertex data of the
    /// given mesh, if it has a resident vertex allocation.
    pub fn mesh_vertex_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_vertex_slab.get(mesh_id)?)
    }

    /// Returns the buffer and element range holding the index data of the
    /// given mesh, if it has a resident index allocation.
    pub fn mesh_index_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_index_slab.get(mesh_id)?)
    }

    /// Returns the ids of the (vertex, index) slabs recorded for a mesh;
    /// either may be `None`.
    pub fn mesh_slabs(&self, mesh_id: &AssetId<Mesh>) -> (Option<SlabId>, Option<SlabId>) {
        (
            self.mesh_id_to_vertex_slab.get(mesh_id).cloned(),
            self.mesh_id_to_index_slab.get(mesh_id).cloned(),
        )
    }

    /// The number of slabs currently managed by this allocator.
    pub fn slab_count(&self) -> usize {
        self.slabs.len()
    }

    /// The combined size, in bytes, of the GPU buffers backing all slabs.
    pub fn slabs_size(&self) -> u64 {
        self.slabs.iter().map(|slab| slab.1.buffer_size()).sum()
    }

    /// The number of recorded allocations.
    ///
    /// NOTE(review): this counts only entries in the *index* slab map, so
    /// vertex-only meshes (no indices) are excluded — confirm this is the
    /// intended metric rather than `mesh_id_to_vertex_slab.len()`.
    pub fn allocations(&self) -> usize {
        self.mesh_id_to_index_slab.len()
    }

    /// Looks up the slice of `slab_id` occupied by `mesh_id`.
    ///
    /// The returned range is in elements (vertices/indices), not bytes.
    /// Returns `None` if the slab or buffer doesn't exist, or (for general
    /// slabs) if the mesh's data hasn't been made resident yet.
    fn mesh_slice_in_slab(
        &self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
    ) -> Option<MeshBufferSlice<'_>> {
        match self.slabs.get(&slab_id)? {
            Slab::General(general_slab) => {
                let slab_allocation = general_slab.resident_allocations.get(mesh_id)?;
                Some(MeshBufferSlice {
                    buffer: general_slab.buffer.as_ref()?,
                    // Allocation offsets/counts are in slots; scale by
                    // `elements_per_slot` to get element indices.
                    range: (slab_allocation.allocation.offset
                        * general_slab.element_layout.elements_per_slot)
                        ..((slab_allocation.allocation.offset + slab_allocation.slot_count)
                            * general_slab.element_layout.elements_per_slot),
                })
            }
            Slab::LargeObject(large_object_slab) => {
                // A large-object slab holds exactly one mesh, so the slice
                // spans the entire buffer.
                let buffer = large_object_slab.buffer.as_ref()?;
                Some(MeshBufferSlice {
                    buffer,
                    range: 0..((buffer.size() / large_object_slab.element_layout.size) as u32),
                })
            }
        }
    }

    /// Reserves slab space for every newly-extracted mesh, reallocates any
    /// slabs that grew, then uploads the mesh data to the GPU.
    fn allocate_meshes(
        &mut self,
        mesh_allocator_settings: &MeshAllocatorSettings,
        extracted_meshes: &ExtractedAssets<RenderMesh>,
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let mut slabs_to_grow = SlabsToReallocate::default();
        // Phase 1: reserve space for vertex and index data.
        for (mesh_id, mesh) in &extracted_meshes.extracted {
            let vertex_buffer_size = mesh.get_vertex_buffer_size() as u64;
            if vertex_buffer_size == 0 {
                // Nothing to allocate for an empty mesh.
                continue;
            }
            let vertex_element_layout = ElementLayout::vertex(mesh_vertex_buffer_layouts, mesh);
            if self.general_vertex_slabs_supported {
                self.allocate(
                    mesh_id,
                    vertex_buffer_size,
                    vertex_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            } else {
                // Without base-vertex draw support, each mesh's vertices get
                // a dedicated buffer.
                self.allocate_large(mesh_id, vertex_element_layout);
            }
            if let (Some(index_buffer_data), Some(index_element_layout)) =
                (mesh.get_index_buffer_bytes(), ElementLayout::index(mesh))
            {
                self.allocate(
                    mesh_id,
                    index_buffer_data.len() as u64,
                    index_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            }
        }
        // Phase 2: (re)create buffers for slabs that grew, copying the old
        // contents over.
        for (slab_id, slab_to_grow) in slabs_to_grow.0 {
            self.reallocate_slab(render_device, render_queue, slab_id, slab_to_grow);
        }
        // Phase 3: upload the actual vertex and index bytes.
        for (mesh_id, mesh) in &extracted_meshes.extracted {
            self.copy_mesh_vertex_data(mesh_id, mesh, render_device, render_queue);
            self.copy_mesh_index_data(mesh_id, mesh, render_device, render_queue);
        }
    }

    /// Uploads a mesh's packed vertex data into its recorded vertex slab.
    fn copy_mesh_vertex_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_vertex_slab.get(mesh_id) else {
            return;
        };
        self.copy_element_data(
            mesh_id,
            mesh.get_vertex_buffer_size(),
            |slice| mesh.write_packed_vertex_buffer_data(slice),
            BufferUsages::VERTEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

    /// Uploads a mesh's index data into its recorded index slab.
    fn copy_mesh_index_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_index_slab.get(mesh_id) else {
            return;
        };
        let Some(index_data) = mesh.get_index_buffer_bytes() else {
            return;
        };
        self.copy_element_data(
            mesh_id,
            index_data.len(),
            |slice| slice.copy_from_slice(index_data),
            BufferUsages::INDEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

    /// Writes `len` bytes of mesh data (produced by `fill_data`) into the
    /// given slab.
    ///
    /// For general slabs, the mesh's pending allocation is consumed, the
    /// data is queued via `write_buffer_with`, and the allocation becomes
    /// resident. For large-object slabs, a dedicated mapped-at-creation
    /// buffer is created and filled.
    fn copy_element_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        len: usize,
        fill_data: impl Fn(&mut [u8]),
        buffer_usages: BufferUsages,
        slab_id: SlabId,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };
        match *slab {
            Slab::General(ref mut general_slab) => {
                // Needs both a backing buffer and a pending allocation for
                // this mesh; otherwise there is nothing to upload.
                let (Some(buffer), Some(allocated_range)) = (
                    &general_slab.buffer,
                    general_slab.pending_allocations.remove(mesh_id),
                ) else {
                    return;
                };
                let slot_size = general_slab.element_layout.slot_size();
                // The write size is rounded up to a whole number of slots so
                // it satisfies copy alignment; only the first `len` bytes
                // are actually filled.
                if let Some(size) = BufferSize::new((len as u64).next_multiple_of(slot_size)) {
                    if let Some(mut buffer) = render_queue.write_buffer_with(
                        buffer,
                        allocated_range.allocation.offset as u64 * slot_size,
                        size,
                    ) {
                        let slice = &mut buffer.as_mut()[..len];
                        fill_data(slice);
                    }
                }
                // The data is on its way to the GPU: promote the allocation
                // from pending to resident.
                general_slab
                    .resident_allocations
                    .insert(*mesh_id, allocated_range);
            }
            Slab::LargeObject(ref mut large_object_slab) => {
                // A large-object slab is filled exactly once after creation.
                debug_assert!(large_object_slab.buffer.is_none());
                let buffer = render_device.create_buffer(&BufferDescriptor {
                    label: Some(&format!(
                        "large mesh slab {} ({}buffer)",
                        slab_id,
                        buffer_usages_to_str(buffer_usages)
                    )),
                    size: len as u64,
                    usage: buffer_usages | BufferUsages::COPY_DST | self.extra_buffer_usages,
                    mapped_at_creation: true,
                });
                {
                    // Scope the mapped range so it is dropped before `unmap`.
                    let slice = &mut buffer.slice(..).get_mapped_range_mut()[..len];
                    fill_data(slice);
                }
                buffer.unmap();
                large_object_slab.buffer = Some(buffer);
            }
        }
    }

    /// Releases the allocations of all removed and modified meshes and drops
    /// any slabs that become empty as a result.
    ///
    /// Modified meshes are freed here and re-allocated by `allocate_meshes`.
    fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets<RenderMesh>) {
        let mut empty_slabs = <HashSet<_>>::default();
        let meshes_to_free = extracted_meshes
            .removed
            .iter()
            .chain(extracted_meshes.modified.iter());
        for mesh_id in meshes_to_free {
            if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
            if let Some(slab_id) = self.mesh_id_to_index_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
        }
        for empty_slab in empty_slabs {
            // Scrub the dead slab from every layout's candidate list so
            // future allocations don't try to use it.
            self.slab_layouts.values_mut().for_each(|slab_ids| {
                let idx = slab_ids.iter().position(|&slab_id| slab_id == empty_slab);
                if let Some(idx) = idx {
                    slab_ids.remove(idx);
                }
            });
            self.slabs.remove(&empty_slab);
        }
    }

    /// Frees one mesh's allocation within `slab_id`, recording the slab in
    /// `empty_slabs` if it no longer holds any mesh.
    fn free_allocation_in_slab(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        empty_slabs: &mut HashSet<SlabId>,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };
        match *slab {
            Slab::General(ref mut general_slab) => {
                // The allocation may be resident or still pending upload.
                let Some(slab_allocation) = general_slab
                    .resident_allocations
                    .remove(mesh_id)
                    .or_else(|| general_slab.pending_allocations.remove(mesh_id))
                else {
                    return;
                };
                general_slab.allocator.free(slab_allocation.allocation);
                if general_slab.is_empty() {
                    empty_slabs.insert(slab_id);
                }
            }
            Slab::LargeObject(_) => {
                // A large-object slab only ever holds one mesh, so freeing
                // it empties the slab.
                empty_slabs.insert(slab_id);
            }
        }
    }

    /// Reserves space for a mesh's data, routing it either into a shared
    /// general slab or a dedicated large-object slab based on its size.
    fn allocate(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_byte_len: u64,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        // Round the byte length up to whole elements, then whole slots.
        let data_element_count = data_byte_len.div_ceil(layout.size) as u32;
        let data_slot_count = data_element_count.div_ceil(layout.elements_per_slot);
        // Meshes at or above the large threshold (capped by the maximum
        // slab size) get their own buffer.
        if data_slot_count as u64 * layout.slot_size()
            >= settings.large_threshold.min(settings.max_slab_size)
        {
            self.allocate_large(mesh_id, layout);
        } else {
            self.allocate_general(mesh_id, data_slot_count, layout, slabs_to_grow, settings);
        }
    }

    /// Places a mesh into the first candidate general slab that can hold it
    /// (growing the slab if needed), or creates a new slab.
    fn allocate_general(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_slot_count: u32,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        let candidate_slabs = self.slab_layouts.entry(layout).or_default();
        // Try each existing slab with this layout; take the first that fits.
        let mut mesh_allocation = None;
        for &slab_id in &*candidate_slabs {
            let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
                unreachable!("Slab not found")
            };
            let Some(allocation) = slab.allocator.allocate(data_slot_count) else {
                continue;
            };
            // The allocator works against the slab's *maximum* capacity, so
            // a successful allocation may still require the buffer to grow.
            match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) {
                SlabGrowthResult::NoGrowthNeeded => {}
                SlabGrowthResult::NeededGrowth(slab_to_reallocate) => {
                    // Keep the earliest recorded pre-growth capacity if this
                    // slab already grew this frame.
                    if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) {
                        vacant_entry.insert(slab_to_reallocate);
                    }
                }
                SlabGrowthResult::CantGrow => continue,
            }
            mesh_allocation = Some(MeshAllocation {
                slab_id,
                slab_allocation: SlabAllocation {
                    allocation,
                    slot_count: data_slot_count,
                },
            });
            break;
        }
        // No existing slab could take the mesh: create a fresh one.
        if mesh_allocation.is_none() {
            let new_slab_id = self.next_slab_id;
            self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();
            let new_slab = GeneralSlab::new(
                new_slab_id,
                &mut mesh_allocation,
                settings,
                layout,
                data_slot_count,
            );
            self.slabs.insert(new_slab_id, Slab::General(new_slab));
            candidate_slabs.push(new_slab_id);
            // A new slab has no buffer yet; schedule its (first) allocation.
            slabs_to_grow.insert(new_slab_id, SlabToReallocate::default());
        }
        let mesh_allocation = mesh_allocation.expect("Should have been able to allocate");
        // The data isn't uploaded yet, so the allocation starts out pending.
        if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) {
            general_slab
                .pending_allocations
                .insert(*mesh_id, mesh_allocation.slab_allocation);
        };
        self.record_allocation(mesh_id, mesh_allocation.slab_id, layout.class);
    }

    /// Creates a new large-object slab for a single mesh. The buffer itself
    /// is created later, when the mesh data is copied in.
    fn allocate_large(&mut self, mesh_id: &AssetId<Mesh>, layout: ElementLayout) {
        let new_slab_id = self.next_slab_id;
        self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();
        self.record_allocation(mesh_id, new_slab_id, layout.class);
        self.slabs.insert(
            new_slab_id,
            Slab::LargeObject(LargeObjectSlab {
                buffer: None,
                element_layout: layout,
            }),
        );
    }

    /// Creates (or recreates at a larger size) the buffer backing a general
    /// slab, copying the old buffer's contents into the new one on the GPU.
    fn reallocate_slab(
        &mut self,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
        slab_id: SlabId,
        slab_to_grow: SlabToReallocate,
    ) {
        let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
            error!("Couldn't find slab {} to grow", slab_id);
            return;
        };
        let old_buffer = slab.buffer.take();
        let mut buffer_usages = BufferUsages::COPY_SRC | BufferUsages::COPY_DST;
        match slab.element_layout.class {
            ElementClass::Vertex => buffer_usages |= BufferUsages::VERTEX,
            ElementClass::Index => buffer_usages |= BufferUsages::INDEX,
        };
        let new_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&format!(
                "general mesh slab {} ({}buffer)",
                slab_id,
                buffer_usages_to_str(buffer_usages)
            )),
            size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(),
            usage: buffer_usages | self.extra_buffer_usages,
            mapped_at_creation: false,
        });
        slab.buffer = Some(new_buffer.clone());
        // A brand-new slab has no old contents to preserve.
        let Some(old_buffer) = old_buffer else { return };
        let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("slab resize encoder"),
        });
        // Only the pre-growth capacity's worth of bytes is valid data.
        encoder.copy_buffer_to_buffer(
            &old_buffer,
            0,
            &new_buffer,
            0,
            slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(),
        );
        let command_buffer = encoder.finish();
        render_queue.submit([command_buffer]);
    }

    /// Records which slab holds a mesh's data, in the map matching the
    /// element class.
    fn record_allocation(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        element_class: ElementClass,
    ) {
        match element_class {
            ElementClass::Vertex => {
                self.mesh_id_to_vertex_slab.insert(*mesh_id, slab_id);
            }
            ElementClass::Index => {
                self.mesh_id_to_index_slab.insert(*mesh_id, slab_id);
            }
        }
    }
}
impl GeneralSlab {
    /// Creates a new general slab sized for the given settings and layout,
    /// and immediately tries to place the first allocation
    /// (`data_slot_count` slots) into it, writing the result into
    /// `mesh_allocation` on success.
    fn new(
        new_slab_id: SlabId,
        mesh_allocation: &mut Option<MeshAllocation>,
        settings: &MeshAllocatorSettings,
        layout: ElementLayout,
        data_slot_count: u32,
    ) -> GeneralSlab {
        // The suballocator needs at least this many slots to service the
        // first allocation, regardless of the configured slab sizes.
        let min_slots = offset_allocator::ext::min_allocator_size(data_slot_count);
        let initial_slab_slot_capacity =
            (settings.min_slab_size.div_ceil(layout.slot_size()) as u32).max(min_slots);
        let max_slab_slot_capacity =
            (settings.max_slab_size.div_ceil(layout.slot_size()) as u32).max(min_slots);
        // The allocator is created at maximum capacity; the buffer itself
        // starts at the initial capacity and grows on demand.
        let mut new_slab = GeneralSlab {
            allocator: Allocator::new(max_slab_slot_capacity),
            buffer: None,
            resident_allocations: HashMap::default(),
            pending_allocations: HashMap::default(),
            element_layout: layout,
            current_slot_capacity: initial_slab_slot_capacity,
        };
        if let Some(allocation) = new_slab.allocator.allocate(data_slot_count) {
            *mesh_allocation = Some(MeshAllocation {
                slab_id: new_slab_id,
                slab_allocation: SlabAllocation {
                    slot_count: data_slot_count,
                    allocation,
                },
            });
        }
        new_slab
    }

    /// Grows the slab's slot capacity (by repeated application of the growth
    /// factor, capped at the maximum slab size) until it can hold
    /// `new_size_in_slots` slots, reporting whether and from what capacity
    /// the backing buffer must be reallocated.
    fn grow_if_necessary(
        &mut self,
        new_size_in_slots: u32,
        settings: &MeshAllocatorSettings,
    ) -> SlabGrowthResult {
        if new_size_in_slots <= self.current_slot_capacity {
            return SlabGrowthResult::NoGrowthNeeded;
        }
        let old_slot_capacity = self.current_slot_capacity;
        let capacity_limit = (settings.max_slab_size / self.element_layout.slot_size()) as u32;
        while self.current_slot_capacity < new_size_in_slots {
            let grown_capacity = ((self.current_slot_capacity as f64 * settings.growth_factor)
                .ceil() as u32)
                .min(capacity_limit);
            // Hitting the cap without reaching the requested size means the
            // allocation cannot fit in this slab.
            if grown_capacity == self.current_slot_capacity {
                return SlabGrowthResult::CantGrow;
            }
            self.current_slot_capacity = grown_capacity;
        }
        SlabGrowthResult::NeededGrowth(SlabToReallocate { old_slot_capacity })
    }
}
impl ElementLayout {
    /// Builds a layout for elements of `size` bytes, grouping elements into
    /// slots so that every slot's byte size is a multiple of
    /// `COPY_BUFFER_ALIGNMENT` (4).
    fn new(class: ElementClass, size: u64) -> ElementLayout {
        const {
            assert!(4 == COPY_BUFFER_ALIGNMENT);
        }
        // Smallest element count whose combined size is divisible by 4:
        // multiples of 4 need 1 element, even sizes need 2, odd sizes need 4.
        let elements_per_slot = match size % 4 {
            0 => 1,
            2 => 2,
            _ => 4,
        };
        ElementLayout {
            class,
            size,
            elements_per_slot,
        }
    }

    /// The size of one allocation slot, in bytes.
    fn slot_size(&self) -> u64 {
        self.size * u64::from(self.elements_per_slot)
    }

    /// Derives the vertex-element layout of a mesh from its vertex buffer
    /// layout's array stride.
    fn vertex(
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        mesh: &Mesh,
    ) -> ElementLayout {
        let vertex_layout = mesh.get_mesh_vertex_buffer_layout(mesh_vertex_buffer_layouts);
        ElementLayout::new(ElementClass::Vertex, vertex_layout.0.layout().array_stride)
    }

    /// Derives the index-element layout of a mesh, or `None` for meshes
    /// without indices.
    fn index(mesh: &Mesh) -> Option<ElementLayout> {
        let index_size = match mesh.indices()? {
            Indices::U16(_) => 2,
            Indices::U32(_) => 4,
        };
        Some(ElementLayout::new(ElementClass::Index, index_size))
    }
}
impl GeneralSlab {
    /// True when no mesh occupies this slab, neither resident nor pending.
    fn is_empty(&self) -> bool {
        self.pending_allocations.is_empty() && self.resident_allocations.is_empty()
    }
}
/// Returns a short label fragment ("vertex ", "index ", or "") describing
/// the dominant usage of a slab buffer, for debug buffer names.
fn buffer_usages_to_str(buffer_usages: BufferUsages) -> &'static str {
    let is_vertex = buffer_usages.contains(BufferUsages::VERTEX);
    let is_index = buffer_usages.contains(BufferUsages::INDEX);
    match (is_vertex, is_index) {
        (true, _) => "vertex ",
        (false, true) => "index ",
        (false, false) => "",
    }
}