use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use crate::raw::bindings::*;
use super::buffer::{Buffer, BufferCreateInfo, MemoryRequirements};
use super::device::DeviceInner;
use super::image::{Image, Image2dCreateInfo};
use super::physical::PhysicalDevice;
use super::{Device, Error, Result, check};
mod linear;
mod tlsf;
use self::linear::Linear;
use self::tlsf::{Tlsf, TlsfAllocation};
/// Default block size for memory types on heaps below `LARGE_HEAP_THRESHOLD`.
const SMALL_HEAP_BLOCK_SIZE: u64 = 64 * 1024 * 1024;
/// Default block size for memory types on large heaps.
const LARGE_HEAP_BLOCK_SIZE: u64 = 256 * 1024 * 1024;
/// Heap size at or above which `LARGE_HEAP_BLOCK_SIZE` is used.
const LARGE_HEAP_THRESHOLD: u64 = 4 * 1024 * 1024 * 1024;
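/// Requested memory domain for an allocation. `Auto` currently behaves like
/// `DeviceLocal`; the host-visible variants fall back to less strict flag
/// combinations when the preferred one is unavailable (see `pick_memory_type`).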
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum AllocationUsage {
#[default]
Auto,
DeviceLocal,
HostVisible,
HostVisibleDeviceLocal,
}
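/// How a pool's blocks are sub-allocated: `FreeList` uses a TLSF allocator
/// with per-allocation free; `Linear` bump-allocates and reclaims space only
/// on [`Allocator::reset_pool`].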
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum AllocationStrategy {
#[default]
FreeList,
Linear,
}
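/// Opaque identifier for a custom pool created by [`Allocator::create_pool`].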
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PoolHandle(u64);
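/// Parameters for a custom pool. A `block_size` of 0 selects the heap-based
/// default; a `max_block_count` of 0 means unbounded (linear pools are always
/// clamped to a single block).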
#[derive(Debug, Clone, Copy)]
pub struct PoolCreateInfo {
pub memory_type_index: u32,
pub strategy: AllocationStrategy,
pub block_size: u64,
pub max_block_count: u32,
}
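/// Per-allocation options. `dedicated` forces a standalone `VkDeviceMemory`;
/// `mapped` requests a persistent mapping (honored only on host-visible
/// types); `pool` routes the request to a custom pool and is mutually
/// exclusive with `device_mask`, which always produces a dedicated allocation.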
#[derive(Debug, Clone, Copy, Default)]
pub struct AllocationCreateInfo {
pub usage: AllocationUsage,
pub dedicated: bool,
pub mapped: bool,
pub pool: Option<PoolHandle>,
pub user_data: u64,
pub device_mask: Option<u32>,
}
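/// Aggregate allocator counters. `block_bytes` is memory obtained from the
/// driver, `allocation_bytes` is memory handed out to callers; the difference
/// is unused block space.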
#[derive(Debug, Clone, Copy, Default)]
pub struct AllocationStatistics {
pub block_bytes: u64,
pub allocation_bytes: u64,
pub block_count: u32,
pub allocation_count: u32,
pub free_region_count: u32,
pub peak_allocation_bytes: u64,
pub dedicated_allocation_count: u32,
}
enum BlockStrategy {
Tlsf(Tlsf),
Linear(Linear),
}
impl BlockStrategy {
fn allocation_count(&self) -> u32 {
match self {
Self::Tlsf(t) => t.allocation_count(),
Self::Linear(l) => l.allocation_count(),
}
}
fn free_region_count(&self) -> u32 {
match self {
Self::Tlsf(t) => t.free_region_count(),
Self::Linear(l) => {
if l.free_bytes() == 0 {
0
} else {
1
}
}
}
}
}
#[allow(dead_code)]
struct Block {
memory: VkDeviceMemory,
capacity: u64,
memory_type_index: u32,
strategy: BlockStrategy,
mapped_ptr: *mut std::ffi::c_void,
}
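// SAFETY: the only field preventing the auto impls is `mapped_ptr`, a plain
// address into a persistent driver mapping; blocks themselves are only
// reached through the allocator's `Mutex<PoolState>`.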
unsafe impl Send for Block {}
unsafe impl Sync for Block {}
#[allow(dead_code)]
struct Pool {
memory_type_index: u32,
blocks: Vec<Block>,
block_size: u64,
max_block_count: u32,
strategy: AllocationStrategy,
mapped: bool,
live_allocations: Vec<std::sync::Weak<AllocationInner>>,
}
impl Pool {
fn new_default(memory_type_index: u32, block_size: u64) -> Self {
Self {
memory_type_index,
blocks: Vec::new(),
block_size,
max_block_count: 0,
strategy: AllocationStrategy::FreeList,
mapped: false,
live_allocations: Vec::new(),
}
}
fn new_custom(info: PoolCreateInfo, default_block_size: u64, mapped: bool) -> Self {
let block_size = if info.block_size == 0 {
default_block_size
} else {
info.block_size
};
let max = if info.strategy == AllocationStrategy::Linear {
1
} else {
info.max_block_count
};
Self {
memory_type_index: info.memory_type_index,
blocks: Vec::new(),
block_size,
max_block_count: max,
strategy: info.strategy,
mapped,
live_allocations: Vec::new(),
}
}
}
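/// Sub-allocating GPU memory manager built on raw `VkDeviceMemory` blocks.
///
/// Per-memory-type default pools use a TLSF free list; custom pools created
/// with [`Allocator::create_pool`] may instead use a linear (bump) strategy.
/// A minimal usage sketch, assuming `device`, `physical`, and a
/// `BufferCreateInfo` value `buffer_info` already exist in the surrounding
/// code:
///
/// ```ignore
/// let allocator = Allocator::new(&device, &physical)?;
/// let alloc_info = AllocationCreateInfo {
///     usage: AllocationUsage::HostVisible,
///     mapped: true,
///     ..Default::default()
/// };
/// let (buffer, allocation) = allocator.create_buffer(buffer_info, alloc_info)?;
/// if let Some(ptr) = allocation.mapped_ptr() {
///     // write through `ptr`; the mapping is persistent
/// }
/// allocator.free(allocation);
/// ```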
pub struct Allocator {
inner: Arc<AllocatorInner>,
}
struct AllocatorInner {
device: Arc<DeviceInner>,
physical: PhysicalDevice,
memory_properties: VkPhysicalDeviceMemoryProperties,
pools: Mutex<PoolState>,
}
struct PoolState {
pools: Vec<Option<Pool>>,
custom_pools: HashMap<u64, Pool>,
next_pool_id: u64,
next_alloc_id: u64,
statistics: AllocationStatistics,
dedicated_blocks: Vec<DedicatedBlock>,
}
#[allow(clippy::too_many_arguments)]
fn make_allocation(
state: &mut PoolState,
memory: VkDeviceMemory,
offset: u64,
size: u64,
memory_type_index: u32,
mapped_ptr: *mut std::ffi::c_void,
kind: AllocationKind,
user_data: u64,
) -> Allocation {
let id = state.next_alloc_id;
state.next_alloc_id += 1;
let alloc = Allocation {
inner: Arc::new(AllocationInner {
location: Mutex::new(AllocationLocation {
memory,
offset,
mapped_ptr,
kind: kind.clone(),
}),
size,
memory_type_index,
user_data,
id,
}),
};
let weak = Arc::downgrade(&alloc.inner);
match &kind {
AllocationKind::DefaultPool {
memory_type_index, ..
} => {
if let Some(Some(pool)) = state.pools.get_mut(*memory_type_index as usize) {
pool.live_allocations.push(weak);
}
}
AllocationKind::CustomPool { pool_id, .. } => {
if let Some(pool) = state.custom_pools.get_mut(pool_id) {
pool.live_allocations.push(weak);
}
}
AllocationKind::Dedicated { .. } => {}
}
alloc
}
#[allow(dead_code)]
struct DedicatedBlock {
memory: VkDeviceMemory,
size: u64,
memory_type_index: u32,
mapped_ptr: *mut std::ffi::c_void,
id: u64,
}
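// SAFETY: as with `Block`, only the raw mapping pointer blocks the auto
// impls; dedicated blocks are reached solely under the pool mutex.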
unsafe impl Send for DedicatedBlock {}
unsafe impl Sync for DedicatedBlock {}
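/// Cheaply clonable handle to a sub-range of `VkDeviceMemory`. The location
/// lives behind a shared `Mutex`, so every clone observes relocations
/// performed by defragmentation.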
#[derive(Debug, Clone)]
pub struct Allocation {
pub(crate) inner: Arc<AllocationInner>,
}
#[derive(Debug)]
pub(crate) struct AllocationInner {
pub(crate) location: Mutex<AllocationLocation>,
pub(crate) size: u64,
pub(crate) memory_type_index: u32,
pub(crate) user_data: u64,
pub(crate) id: u64,
}
#[derive(Debug, Clone)]
pub(crate) struct AllocationLocation {
pub(crate) memory: VkDeviceMemory,
pub(crate) offset: u64,
pub(crate) mapped_ptr: *mut std::ffi::c_void,
pub(crate) kind: AllocationKind,
}
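// SAFETY: the raw `mapped_ptr` is just an address into a persistently mapped
// range; all access to the location goes through the `Mutex`.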
unsafe impl Send for AllocationInner {}
unsafe impl Sync for AllocationInner {}
#[derive(Debug, Clone)]
pub(crate) enum AllocationKind {
DefaultPool {
memory_type_index: u32,
block_index: u32,
tlsf_block_id: u32,
},
CustomPool {
pool_id: u64,
block_index: u32,
tlsf_block_id: u32,
},
Dedicated { id: u64 },
}
impl Allocation {
pub fn memory(&self) -> VkDeviceMemory {
self.inner.location.lock().unwrap().memory
}
pub fn offset(&self) -> u64 {
self.inner.location.lock().unwrap().offset
}
pub fn size(&self) -> u64 {
self.inner.size
}
pub fn memory_type_index(&self) -> u32 {
self.inner.memory_type_index
}
pub fn user_data(&self) -> u64 {
self.inner.user_data
}
pub fn id(&self) -> u64 {
self.inner.id
}
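/// Pointer to this allocation's first byte if its backing block is
/// persistently mapped (created with `mapped: true` on a host-visible
/// memory type); `None` otherwise.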
pub fn mapped_ptr(&self) -> Option<*mut std::ffi::c_void> {
let loc = self.inner.location.lock().unwrap();
if loc.mapped_ptr.is_null() {
None
} else {
unsafe { Some(loc.mapped_ptr.add(loc.offset as usize)) }
}
}
}
impl Allocator {
pub fn new(device: &Device, physical: &PhysicalDevice) -> Result<Self> {
let get = physical
.instance()
.vkGetPhysicalDeviceMemoryProperties
.ok_or(Error::MissingFunction(
"vkGetPhysicalDeviceMemoryProperties",
))?;
let mut props: VkPhysicalDeviceMemoryProperties = unsafe { std::mem::zeroed() };
unsafe { get(physical.raw(), &mut props) };
let pools: Vec<Option<Pool>> = (0..props.memoryTypeCount).map(|_| None).collect();
Ok(Self {
inner: Arc::new(AllocatorInner {
device: Arc::clone(&device.inner),
physical: physical.clone(),
memory_properties: props,
pools: Mutex::new(PoolState {
pools,
custom_pools: HashMap::new(),
next_pool_id: 1,
next_alloc_id: 1,
statistics: AllocationStatistics::default(),
dedicated_blocks: Vec::new(),
}),
}),
})
}
pub fn clone_inner(&self) -> Self {
Self {
inner: Arc::clone(&self.inner),
}
}
pub fn statistics(&self) -> AllocationStatistics {
self.inner.pools.lock().unwrap().statistics
}
pub fn query_budget(&self) -> Option<crate::safe::MemoryBudget> {
self.inner.physical.memory_budget()
}
pub fn physical_device(&self) -> &PhysicalDevice {
&self.inner.physical
}
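/// Creates a custom pool. A `block_size` of 0 selects the heap-size-based
/// default (64 MiB, or 256 MiB on heaps of 4 GiB and up); linear pools are
/// always capped at a single block. A sketch of a per-frame staging pool,
/// assuming `memory_type_index` names a host-visible memory type:
///
/// ```ignore
/// let pool = allocator.create_pool(PoolCreateInfo {
///     memory_type_index,
///     strategy: AllocationStrategy::Linear,
///     block_size: 0,      // 0 = use the default block size
///     max_block_count: 0, // ignored: linear pools hold one block
/// })?;
/// // ... allocate with `AllocationCreateInfo { pool: Some(pool), .. }` ...
/// allocator.reset_pool(pool); // bulk-free everything at end of frame
/// allocator.destroy_pool(pool);
/// ```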
pub fn create_pool(&self, info: PoolCreateInfo) -> Result<PoolHandle> {
let mut state = self.inner.pools.lock().unwrap();
let id = state.next_pool_id;
state.next_pool_id += 1;
let block_size = self.heap_block_size_for_type(info.memory_type_index);
let pool = Pool::new_custom(info, block_size, false);
state.custom_pools.insert(id, pool);
Ok(PoolHandle(id))
}
pub fn destroy_pool(&self, handle: PoolHandle) {
let mut state = self.inner.pools.lock().unwrap();
if let Some(mut pool) = state.custom_pools.remove(&handle.0) {
for block in pool.blocks.drain(..) {
self.free_block_memory(&block);
state.statistics.block_bytes =
state.statistics.block_bytes.saturating_sub(block.capacity);
state.statistics.block_count = state.statistics.block_count.saturating_sub(1);
let alloc_count = block.strategy.allocation_count();
state.statistics.allocation_count =
state.statistics.allocation_count.saturating_sub(alloc_count);
// Mirror `reset_pool`: also retire bytes still held by this block's allocations.
let used_bytes = match &block.strategy {
BlockStrategy::Tlsf(t) => t.used_bytes(),
BlockStrategy::Linear(l) => l.used_bytes(),
};
state.statistics.allocation_bytes =
state.statistics.allocation_bytes.saturating_sub(used_bytes);
}
self.refresh_free_region_count(&mut state);
}
}
pub fn reset_pool(&self, handle: PoolHandle) {
let mut state = self.inner.pools.lock().unwrap();
if let Some(pool) = state.custom_pools.get_mut(&handle.0)
&& pool.strategy == AllocationStrategy::Linear
{
let mut total_count = 0u32;
let mut total_bytes = 0u64;
for block in pool.blocks.iter_mut() {
total_count += block.strategy.allocation_count();
if let BlockStrategy::Linear(ref mut l) = block.strategy {
total_bytes += l.used_bytes();
l.reset();
}
}
state.statistics.allocation_count = state
.statistics
.allocation_count
.saturating_sub(total_count);
state.statistics.allocation_bytes = state
.statistics
.allocation_bytes
.saturating_sub(total_bytes);
self.refresh_free_region_count(&mut state);
}
}
pub fn pool_statistics(&self, handle: PoolHandle) -> Option<AllocationStatistics> {
let state = self.inner.pools.lock().unwrap();
let pool = state.custom_pools.get(&handle.0)?;
let mut stats = AllocationStatistics::default();
for block in &pool.blocks {
stats.block_count += 1;
stats.block_bytes += block.capacity;
stats.allocation_count += block.strategy.allocation_count();
stats.free_region_count += block.strategy.free_region_count();
match &block.strategy {
BlockStrategy::Tlsf(t) => stats.allocation_bytes += t.used_bytes(),
BlockStrategy::Linear(l) => stats.allocation_bytes += l.used_bytes(),
}
}
if stats.allocation_bytes > stats.peak_allocation_bytes {
stats.peak_allocation_bytes = stats.allocation_bytes;
}
Some(stats)
}
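/// Releases `allocation`. Pool allocations return their range to the block's
/// TLSF allocator (linear blocks reclaim space only via
/// [`Allocator::reset_pool`]); dedicated allocations unmap and free their
/// `VkDeviceMemory`. The caller must ensure the GPU is done with the memory;
/// any remaining clones of the handle become stale.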
pub fn free(&self, allocation: Allocation) {
let location = allocation.inner.location.lock().unwrap().clone();
let alloc_size = allocation.inner.size;
drop(allocation);
let mut state = self.inner.pools.lock().unwrap();
match location.kind {
AllocationKind::DefaultPool {
memory_type_index,
block_index,
tlsf_block_id,
} => {
if let Some(Some(pool)) = state.pools.get_mut(memory_type_index as usize)
&& let Some(block) = pool.blocks.get_mut(block_index as usize)
&& let BlockStrategy::Tlsf(ref mut t) = block.strategy
{
t.free(TlsfAllocation {
offset: location.offset,
size: alloc_size,
block_id: tlsf_block_id,
});
}
state.statistics.allocation_count =
state.statistics.allocation_count.saturating_sub(1);
state.statistics.allocation_bytes =
state.statistics.allocation_bytes.saturating_sub(alloc_size);
self.refresh_free_region_count(&mut state);
}
AllocationKind::CustomPool {
pool_id,
block_index,
tlsf_block_id,
} => {
if let Some(pool) = state.custom_pools.get_mut(&pool_id)
&& let Some(block) = pool.blocks.get_mut(block_index as usize)
&& let BlockStrategy::Tlsf(ref mut t) = block.strategy
{
t.free(TlsfAllocation {
offset: location.offset,
size: alloc_size,
block_id: tlsf_block_id,
});
}
state.statistics.allocation_count =
state.statistics.allocation_count.saturating_sub(1);
state.statistics.allocation_bytes =
state.statistics.allocation_bytes.saturating_sub(alloc_size);
self.refresh_free_region_count(&mut state);
}
AllocationKind::Dedicated { id } => {
if let Some(pos) = state.dedicated_blocks.iter().position(|d| d.id == id) {
let dedicated = state.dedicated_blocks.swap_remove(pos);
if !dedicated.mapped_ptr.is_null()
&& let Some(unmap) = self.inner.device.dispatch.vkUnmapMemory
{
unsafe { unmap(self.inner.device.handle, dedicated.memory) };
}
if let Some(free) = self.inner.device.dispatch.vkFreeMemory {
unsafe {
free(self.inner.device.handle, dedicated.memory, std::ptr::null())
};
}
state.statistics.block_count = state.statistics.block_count.saturating_sub(1);
state.statistics.block_bytes =
state.statistics.block_bytes.saturating_sub(dedicated.size);
state.statistics.dedicated_allocation_count = state
.statistics
.dedicated_allocation_count
.saturating_sub(1);
state.statistics.allocation_count =
state.statistics.allocation_count.saturating_sub(1);
state.statistics.allocation_bytes =
state.statistics.allocation_bytes.saturating_sub(alloc_size);
}
}
}
}
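/// Allocates memory satisfying `requirements`. A custom `pool` handle takes
/// precedence and cannot be combined with `device_mask`. Otherwise the
/// allocation is dedicated when `info.dedicated` is set, when a `device_mask`
/// is given, or when the request exceeds half the default block size;
/// everything else is sub-allocated from the memory type's default TLSF pool,
/// growing it by one block on demand.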
pub fn allocate(
&self,
requirements: MemoryRequirements,
info: AllocationCreateInfo,
) -> Result<Allocation> {
if let Some(pool_handle) = info.pool {
if info.device_mask.is_some() {
return Err(Error::InvalidArgument(
"AllocationCreateInfo.device_mask cannot be combined with a custom pool; \
omit `pool` to get a dedicated allocation with the requested device mask",
));
}
return self.allocate_in_custom_pool(pool_handle, &requirements, info);
}
let memory_type_index = self
.pick_memory_type(requirements.memory_type_bits, info.usage)
.ok_or(Error::Vk(VkResult::ERROR_FEATURE_NOT_PRESENT))?;
let mut state = self.inner.pools.lock().unwrap();
let block_size = self.heap_block_size_for_type(memory_type_index);
let force_dedicated =
info.dedicated || info.device_mask.is_some() || requirements.size > block_size / 2;
if force_dedicated {
return self.allocate_dedicated(&mut state, memory_type_index, &requirements, info);
}
if state.pools[memory_type_index as usize].is_none() {
state.pools[memory_type_index as usize] =
Some(Pool::new_default(memory_type_index, block_size));
}
let pool = state.pools[memory_type_index as usize].as_mut().unwrap();
for (block_index, block) in pool.blocks.iter_mut().enumerate() {
let BlockStrategy::Tlsf(ref mut t) = block.strategy else {
continue;
};
if let Some(ta) = t.allocate(requirements.size, requirements.alignment) {
let memory = block.memory;
let mapped_ptr = block.mapped_ptr;
let kind = AllocationKind::DefaultPool {
memory_type_index,
block_index: block_index as u32,
tlsf_block_id: ta.block_id,
};
state.statistics.allocation_count += 1;
state.statistics.allocation_bytes += ta.size;
if state.statistics.allocation_bytes > state.statistics.peak_allocation_bytes {
state.statistics.peak_allocation_bytes = state.statistics.allocation_bytes;
}
self.refresh_free_region_count(&mut state);
let alloc = make_allocation(
&mut state,
memory,
ta.offset,
ta.size,
memory_type_index,
mapped_ptr,
kind,
info.user_data,
);
return Ok(alloc);
}
}
let new_block_size = block_size.max(
requirements
.size
.next_power_of_two()
.max(SMALL_HEAP_BLOCK_SIZE),
);
let memory = self.raw_allocate(new_block_size, memory_type_index)?;
let mapped_ptr = if info.mapped && self.is_host_visible(memory_type_index) {
self.raw_map_persistent(memory)?
} else {
std::ptr::null_mut()
};
let mut tlsf = Tlsf::new(new_block_size);
let ta = tlsf
.allocate(requirements.size, requirements.alignment)
.ok_or(Error::Vk(VkResult::ERROR_OUT_OF_DEVICE_MEMORY))?;
let block = Block {
memory,
capacity: new_block_size,
memory_type_index,
strategy: BlockStrategy::Tlsf(tlsf),
mapped_ptr,
};
let pool = state.pools[memory_type_index as usize].as_mut().unwrap();
pool.blocks.push(block);
let block_index = pool.blocks.len() as u32 - 1;
state.statistics.block_bytes += new_block_size;
state.statistics.block_count += 1;
state.statistics.allocation_count += 1;
state.statistics.allocation_bytes += ta.size;
if state.statistics.allocation_bytes > state.statistics.peak_allocation_bytes {
state.statistics.peak_allocation_bytes = state.statistics.allocation_bytes;
}
self.refresh_free_region_count(&mut state);
Ok(make_allocation(
&mut state,
memory,
ta.offset,
ta.size,
memory_type_index,
mapped_ptr,
AllocationKind::DefaultPool {
memory_type_index,
block_index,
tlsf_block_id: ta.block_id,
},
info.user_data,
))
}
fn allocate_in_custom_pool(
&self,
handle: PoolHandle,
requirements: &MemoryRequirements,
info: AllocationCreateInfo,
) -> Result<Allocation> {
let mut state = self.inner.pools.lock().unwrap();
let (memory_type_index, strategy, block_size, max_blocks) = {
let pool = state
.custom_pools
.get(&handle.0)
.ok_or(Error::Vk(VkResult::ERROR_OUT_OF_POOL_MEMORY))?;
(
pool.memory_type_index,
pool.strategy,
pool.block_size,
pool.max_block_count,
)
};
let block_count = state
.custom_pools
.get(&handle.0)
.map(|p| p.blocks.len())
.unwrap_or(0);
for block_index in 0..block_count {
let attempt: Option<(VkDeviceMemory, *mut std::ffi::c_void, u64, u64, u32)> = {
let pool = state.custom_pools.get_mut(&handle.0).unwrap();
let block = &mut pool.blocks[block_index];
match &mut block.strategy {
BlockStrategy::Tlsf(t) => {
if let Some(ta) = t.allocate(requirements.size, requirements.alignment) {
Some((
block.memory,
block.mapped_ptr,
ta.offset,
ta.size,
ta.block_id,
))
} else {
None
}
}
BlockStrategy::Linear(l) => {
if let Some(la) = l.allocate(requirements.size, requirements.alignment) {
Some((block.memory, block.mapped_ptr, la.offset, la.size, 0))
} else {
None
}
}
}
};
if let Some((memory, mapped_ptr, off, sz, tlsf_id)) = attempt {
state.statistics.allocation_count += 1;
state.statistics.allocation_bytes += sz;
if state.statistics.allocation_bytes > state.statistics.peak_allocation_bytes {
state.statistics.peak_allocation_bytes = state.statistics.allocation_bytes;
}
self.refresh_free_region_count(&mut state);
return Ok(make_allocation(
&mut state,
memory,
off,
sz,
memory_type_index,
mapped_ptr,
AllocationKind::CustomPool {
pool_id: handle.0,
block_index: block_index as u32,
tlsf_block_id: tlsf_id,
},
info.user_data,
));
}
}
let pool = state.custom_pools.get_mut(&handle.0).unwrap();
if max_blocks > 0 && pool.blocks.len() as u32 >= max_blocks {
return Err(Error::Vk(VkResult::ERROR_OUT_OF_DEVICE_MEMORY));
}
let new_block_size = block_size.max(requirements.size.next_power_of_two().max(64));
let memory = self.raw_allocate(new_block_size, memory_type_index)?;
let mapped_ptr = if info.mapped && self.is_host_visible(memory_type_index) {
self.raw_map_persistent(memory)?
} else {
std::ptr::null_mut()
};
let (block_strategy, alloc_offset, alloc_size, tlsf_block_id) = match strategy {
AllocationStrategy::FreeList => {
let mut t = Tlsf::new(new_block_size);
let ta = t
.allocate(requirements.size, requirements.alignment)
.ok_or(Error::Vk(VkResult::ERROR_OUT_OF_DEVICE_MEMORY))?;
(BlockStrategy::Tlsf(t), ta.offset, ta.size, ta.block_id)
}
AllocationStrategy::Linear => {
let mut l = Linear::new(new_block_size);
let la = l
.allocate(requirements.size, requirements.alignment)
.ok_or(Error::Vk(VkResult::ERROR_OUT_OF_DEVICE_MEMORY))?;
(BlockStrategy::Linear(l), la.offset, la.size, 0)
}
};
let block = Block {
memory,
capacity: new_block_size,
memory_type_index,
strategy: block_strategy,
mapped_ptr,
};
let pool = state.custom_pools.get_mut(&handle.0).unwrap();
pool.blocks.push(block);
let block_index = pool.blocks.len() as u32 - 1;
state.statistics.block_bytes += new_block_size;
state.statistics.block_count += 1;
state.statistics.allocation_count += 1;
state.statistics.allocation_bytes += alloc_size;
if state.statistics.allocation_bytes > state.statistics.peak_allocation_bytes {
state.statistics.peak_allocation_bytes = state.statistics.allocation_bytes;
}
self.refresh_free_region_count(&mut state);
Ok(make_allocation(
&mut state,
memory,
alloc_offset,
alloc_size,
memory_type_index,
mapped_ptr,
AllocationKind::CustomPool {
pool_id: handle.0,
block_index,
tlsf_block_id,
},
info.user_data,
))
}
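/// Creates a buffer, allocates memory matching its requirements, and binds
/// the two with `vkBindBufferMemory`. Free the returned [`Allocation`] once
/// the buffer is no longer in use.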
pub fn create_buffer(
&self,
info: BufferCreateInfo,
alloc_info: AllocationCreateInfo,
) -> Result<(Buffer, Allocation)> {
let device_for_buffer = Device {
inner: Arc::clone(&self.inner.device),
};
let buffer = Buffer::new(&device_for_buffer, info)?;
let req = buffer.memory_requirements();
let allocation = self.allocate(req, alloc_info)?;
let bind = self
.inner
.device
.dispatch
.vkBindBufferMemory
.ok_or(Error::MissingFunction("vkBindBufferMemory"))?;
check(unsafe {
bind(
self.inner.device.handle,
buffer.raw(),
allocation.memory(),
allocation.offset(),
)
})?;
Ok((buffer, allocation))
}
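/// Image counterpart of [`Allocator::create_buffer`]: creates a 2D image,
/// allocates matching memory, and binds it with `vkBindImageMemory`.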
pub fn create_image_2d(
&self,
info: Image2dCreateInfo,
alloc_info: AllocationCreateInfo,
) -> Result<(Image, Allocation)> {
let device_for_image = Device {
inner: Arc::clone(&self.inner.device),
};
let image = Image::new_2d(&device_for_image, info)?;
let req = image.memory_requirements();
let allocation = self.allocate(req, alloc_info)?;
let bind = self
.inner
.device
.dispatch
.vkBindImageMemory
.ok_or(Error::MissingFunction("vkBindImageMemory"))?;
check(unsafe {
bind(
self.inner.device.handle,
image.raw(),
allocation.memory(),
allocation.offset(),
)
})?;
Ok((image, allocation))
}
fn allocate_dedicated(
&self,
state: &mut PoolState,
memory_type_index: u32,
requirements: &MemoryRequirements,
info: AllocationCreateInfo,
) -> Result<Allocation> {
let memory = self.raw_allocate_with_mask(
requirements.size,
memory_type_index,
info.device_mask,
)?;
let mapped_ptr = if info.mapped && self.is_host_visible(memory_type_index) {
self.raw_map_persistent(memory)?
} else {
std::ptr::null_mut()
};
let id = state
.dedicated_blocks
.iter()
.map(|d| d.id)
.max()
.map_or(0, |m| m + 1);
state.dedicated_blocks.push(DedicatedBlock {
memory,
size: requirements.size,
memory_type_index,
mapped_ptr,
id,
});
state.statistics.block_bytes += requirements.size;
state.statistics.block_count += 1;
state.statistics.dedicated_allocation_count += 1;
state.statistics.allocation_count += 1;
state.statistics.allocation_bytes += requirements.size;
if state.statistics.allocation_bytes > state.statistics.peak_allocation_bytes {
state.statistics.peak_allocation_bytes = state.statistics.allocation_bytes;
}
Ok(make_allocation(
state,
memory,
0,
requirements.size,
memory_type_index,
mapped_ptr,
AllocationKind::Dedicated { id },
info.user_data,
))
}
fn raw_allocate(&self, size: u64, memory_type_index: u32) -> Result<VkDeviceMemory> {
self.raw_allocate_with_mask(size, memory_type_index, None)
}
fn raw_allocate_with_mask(
&self,
size: u64,
memory_type_index: u32,
device_mask: Option<u32>,
) -> Result<VkDeviceMemory> {
let allocate = self
.inner
.device
.dispatch
.vkAllocateMemory
.ok_or(Error::MissingFunction("vkAllocateMemory"))?;
let flags_info = device_mask.map(|mask| VkMemoryAllocateFlagsInfo {
sType: VkStructureType::STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,
pNext: std::ptr::null(),
flags: MEMORY_ALLOCATE_DEVICE_MASK_BIT,
deviceMask: mask,
});
let p_next: *const std::ffi::c_void = match &flags_info {
Some(f) => f as *const _ as *const std::ffi::c_void,
None => std::ptr::null(),
};
let info = VkMemoryAllocateInfo {
sType: VkStructureType::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
pNext: p_next,
allocationSize: size,
memoryTypeIndex: memory_type_index,
};
let mut handle: VkDeviceMemory = 0;
check(unsafe {
allocate(
self.inner.device.handle,
&info,
std::ptr::null(),
&mut handle,
)
})?;
Ok(handle)
}
fn raw_map_persistent(&self, memory: VkDeviceMemory) -> Result<*mut std::ffi::c_void> {
let map = self
.inner
.device
.dispatch
.vkMapMemory
.ok_or(Error::MissingFunction("vkMapMemory"))?;
let mut ptr: *mut std::ffi::c_void = std::ptr::null_mut();
check(unsafe {
map(
self.inner.device.handle,
memory,
0,
u64::MAX, // VK_WHOLE_SIZE: map the entire VkDeviceMemory
0,        // no VkMemoryMapFlags
&mut ptr,
)
})?;
Ok(ptr)
}
fn pick_memory_type(&self, type_bits: u32, usage: AllocationUsage) -> Option<u32> {
// Raw VkMemoryPropertyFlags bits: 0x0001 = DEVICE_LOCAL,
// 0x0002 = HOST_VISIBLE, 0x0004 = HOST_COHERENT.
let (required_a, preferred_a, fallback_required, fallback_preferred) = match usage {
// Prefer device-local memory; fall back to any compatible type.
AllocationUsage::Auto | AllocationUsage::DeviceLocal => (0x0001u32, 0x0001u32, 0u32, 0u32),
// Prefer coherent host-visible memory; fall back to host-visible alone.
AllocationUsage::HostVisible => (0x0002 | 0x0004, 0x0002 | 0x0004, 0x0002, 0x0002),
// Prefer device-local host-visible (BAR-style) memory; fall back to
// coherent host-visible.
AllocationUsage::HostVisibleDeviceLocal => (
0x0001 | 0x0002 | 0x0004,
0x0001 | 0x0002 | 0x0004,
0x0002 | 0x0004,
0x0002 | 0x0004,
),
};
if let Some(i) = self.find_type(type_bits, required_a, preferred_a) {
return Some(i);
}
self.find_type(type_bits, fallback_required, fallback_preferred)
}
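/// Returns the lowest-index memory type permitted by `type_bits` whose flags
/// contain both `required` and `preferred`; failing that, `required` alone.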
fn find_type(&self, type_bits: u32, required: u32, preferred: u32) -> Option<u32> {
let mp = &self.inner.memory_properties;
for i in 0..mp.memoryTypeCount {
if (type_bits & (1 << i)) == 0 {
continue;
}
let flags = mp.memoryTypes[i as usize].propertyFlags;
if (flags & required) == required && (flags & preferred) == preferred {
return Some(i);
}
}
for i in 0..mp.memoryTypeCount {
if (type_bits & (1 << i)) == 0 {
continue;
}
let flags = mp.memoryTypes[i as usize].propertyFlags;
if (flags & required) == required {
return Some(i);
}
}
None
}
fn is_host_visible(&self, memory_type_index: u32) -> bool {
let mp = &self.inner.memory_properties;
let flags = mp.memoryTypes[memory_type_index as usize].propertyFlags;
// 0x0002 = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
(flags & 0x0002) != 0
}
fn heap_block_size_for_type(&self, memory_type_index: u32) -> u64 {
let mp = &self.inner.memory_properties;
let heap_index = mp.memoryTypes[memory_type_index as usize].heapIndex;
let heap_size = mp.memoryHeaps[heap_index as usize].size;
if heap_size >= LARGE_HEAP_THRESHOLD {
LARGE_HEAP_BLOCK_SIZE
} else {
SMALL_HEAP_BLOCK_SIZE
}
}
fn refresh_free_region_count(&self, state: &mut PoolState) {
let mut total = 0u32;
for pool in state.pools.iter().flatten() {
for block in &pool.blocks {
total += block.strategy.free_region_count();
}
}
for pool in state.custom_pools.values() {
for block in &pool.blocks {
total += block.strategy.free_region_count();
}
}
state.statistics.free_region_count = total;
}
fn free_block_memory(&self, block: &Block) {
if !block.mapped_ptr.is_null()
&& let Some(unmap) = self.inner.device.dispatch.vkUnmapMemory
{
unsafe { unmap(self.inner.device.handle, block.memory) };
}
if let Some(free) = self.inner.device.dispatch.vkFreeMemory {
unsafe { free(self.inner.device.handle, block.memory, std::ptr::null()) };
}
}
}
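/// One relocation in a defragmentation plan: copy `size` bytes from
/// `src_memory` + `src_offset` to `dst_memory` + `dst_offset`.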
#[derive(Debug, Clone)]
pub struct DefragmentationMove {
pub allocation_id: u64,
pub user_data: u64,
pub size: u64,
pub src_memory: VkDeviceMemory,
pub src_offset: u64,
pub dst_memory: VkDeviceMemory,
pub dst_offset: u64,
}
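/// Output of plan building. `moves` lists only allocations that actually
/// relocate; `total_layout` records the full packed layout (moved and
/// unmoved alike) and is what [`Allocator::apply_defragmentation_plan`]
/// replays.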
#[derive(Debug, Clone, Default)]
pub struct DefragmentationPlan {
pub moves: Vec<DefragmentationMove>,
pub(crate) total_layout: Vec<DefragmentationMove>,
pub(crate) pool_id: u64,
pub bytes_freed: u64,
}
impl DefragmentationPlan {
pub fn total_layout(&self) -> &[DefragmentationMove] {
&self.total_layout
}
}
impl Allocator {
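/// Builds a compaction plan for a `FreeList` custom pool: live allocations
/// are packed front-to-back, 256-byte aligned, into the pool's first block.
/// Returns an empty plan for linear pools, unknown handles, or empty pools.
/// Note the plan assumes the packed layout fits in that first block. The
/// caller must copy the bytes for each move before applying the plan:
///
/// ```ignore
/// let plan = allocator.build_defragmentation_plan(pool);
/// for mv in &plan.moves {
///     // copy `mv.size` bytes from (mv.src_memory, mv.src_offset)
///     // to (mv.dst_memory, mv.dst_offset), e.g. via a GPU copy
/// }
/// allocator.apply_defragmentation_plan(plan);
/// ```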
pub fn build_defragmentation_plan(&self, pool: PoolHandle) -> DefragmentationPlan {
let mut state = self.inner.pools.lock().unwrap();
let p = match state.custom_pools.get_mut(&pool.0) {
Some(p) if p.strategy == AllocationStrategy::FreeList => p,
_ => return DefragmentationPlan::default(),
};
p.live_allocations.retain(|w| w.upgrade().is_some());
struct LiveAlloc {
arc: Arc<AllocationInner>,
current_memory: VkDeviceMemory,
current_offset: u64,
block_index: u32,
size: u64,
}
let mut lives: Vec<LiveAlloc> = Vec::new();
for w in &p.live_allocations {
let Some(arc) = w.upgrade() else {
continue;
};
let loc = arc.location.lock().unwrap();
let block_index = match &loc.kind {
AllocationKind::CustomPool { block_index, .. } => *block_index,
_ => continue,
};
lives.push(LiveAlloc {
current_memory: loc.memory,
current_offset: loc.offset,
block_index,
size: arc.size,
arc: Arc::clone(&arc),
});
}
lives.sort_by_key(|l| (l.block_index, l.current_offset));
if p.blocks.is_empty() {
return DefragmentationPlan::default();
}
let target_memory = p.blocks[0].memory;
let mut total_layout = Vec::new();
let mut moves = Vec::new();
let mut next_offset: u64 = 0;
let mut bytes_freed: u64 = 0;
const ALIGN: u64 = 256;
for live in &lives {
let aligned_next = (next_offset + ALIGN - 1) & !(ALIGN - 1);
let entry = DefragmentationMove {
allocation_id: live.arc.id,
user_data: live.arc.user_data,
size: live.size,
src_memory: live.current_memory,
src_offset: live.current_offset,
dst_memory: target_memory,
dst_offset: aligned_next,
};
let unchanged =
aligned_next == live.current_offset && target_memory == live.current_memory;
if !unchanged {
moves.push(entry.clone());
bytes_freed += live.size;
}
total_layout.push(entry);
next_offset = aligned_next + live.size;
}
DefragmentationPlan {
moves,
total_layout,
pool_id: pool.0,
bytes_freed,
}
}
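/// Replays `plan` against the pool's metadata: every block's TLSF state is
/// rebuilt and each live allocation's recorded location is updated. Only
/// bookkeeping changes here; the caller must already have copied the bytes
/// for every move in the plan.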
pub fn apply_defragmentation_plan(&self, plan: DefragmentationPlan) {
if plan.total_layout.is_empty() {
return;
}
let pool_id = plan.pool_id;
let mut state = self.inner.pools.lock().unwrap();
let mut by_id: HashMap<u64, Arc<AllocationInner>> = HashMap::new();
if let Some(pool) = state.custom_pools.get(&pool_id) {
for w in &pool.live_allocations {
if let Some(arc) = w.upgrade() {
by_id.insert(arc.id, arc);
}
}
} else {
return;
}
if let Some(pool) = state.custom_pools.get_mut(&pool_id) {
for block in pool.blocks.iter_mut() {
if let BlockStrategy::Tlsf(ref mut t) = block.strategy {
let cap = t.capacity();
*t = Tlsf::new(cap);
}
}
}
// Must match the alignment used in `build_defragmentation_plan`.
const ALIGN: u64 = 256;
for entry in &plan.total_layout {
let target_block_index = if let Some(pool) = state.custom_pools.get(&pool_id) {
pool.blocks
.iter()
.position(|b| b.memory == entry.dst_memory)
.map(|i| i as u32)
} else {
None
};
let Some(block_index) = target_block_index else {
continue;
};
if let Some(pool) = state.custom_pools.get_mut(&pool_id)
&& let BlockStrategy::Tlsf(ref mut t) = pool.blocks[block_index as usize].strategy
&& let Some(ta) = t.allocate(entry.size, ALIGN)
&& let Some(arc) = by_id.get(&entry.allocation_id)
{
let mut loc = arc.location.lock().unwrap();
loc.memory = entry.dst_memory;
loc.offset = ta.offset;
loc.kind = AllocationKind::CustomPool {
pool_id,
block_index,
tlsf_block_id: ta.block_id,
};
}
}
self.refresh_free_region_count(&mut state);
}
}
impl Drop for AllocatorInner {
fn drop(&mut self) {
let mut state = self.pools.lock().unwrap();
let unmap = self.device.dispatch.vkUnmapMemory;
let free = self.device.dispatch.vkFreeMemory;
let dh = self.device.handle;
let free_block = |block: &Block| {
if !block.mapped_ptr.is_null()
&& let Some(u) = unmap
{
unsafe { u(dh, block.memory) };
}
if let Some(f) = free {
unsafe { f(dh, block.memory, std::ptr::null()) };
}
};
for pool in state.pools.iter_mut().flatten() {
for block in pool.blocks.drain(..) {
free_block(&block);
}
}
let custom_keys: Vec<u64> = state.custom_pools.keys().copied().collect();
for k in custom_keys {
if let Some(mut pool) = state.custom_pools.remove(&k) {
for block in pool.blocks.drain(..) {
free_block(&block);
}
}
}
for dedicated in state.dedicated_blocks.drain(..) {
if !dedicated.mapped_ptr.is_null()
&& let Some(u) = unmap
{
unsafe { u(dh, dedicated.memory) };
}
if let Some(f) = free {
unsafe { f(dh, dedicated.memory, std::ptr::null()) };
}
}
}
}