#[cfg(feature = "std")]
use alloc::sync::Arc;
use alloc::{borrow::ToOwned, boxed::Box, string::ToString, vec::Vec};
use core::{fmt, marker::PhantomData};
#[cfg(feature = "std")]
use std::backtrace::Backtrace;

use ash::vk;
use log::{debug, Level};

#[cfg(feature = "visualizer")]
mod visualizer;
#[cfg(feature = "visualizer")]
pub use visualizer::AllocatorVisualizer;

use crate::{
    allocator::{
        AllocationType, AllocatorReport, DedicatedBlockAllocator, FreeListAllocator,
        MemoryBlockReport, SubAllocator,
    },
    AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocationScheme {
    /// Perform a dedicated, driver-managed allocation for the given buffer,
    /// allowing it to benefit from driver-specific optimizations.
    DedicatedBuffer(vk::Buffer),
    /// Perform a dedicated, driver-managed allocation for the given image,
    /// allowing it to benefit from driver-specific optimizations.
    DedicatedImage(vk::Image),
    /// The memory for this resource will be allocated and managed by gpu-allocator.
    GpuAllocatorManaged,
}

#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
    /// Name of the allocation, for tracking and debugging purposes.
    pub name: &'a str,
    /// Vulkan memory requirements for the allocation.
    pub requirements: vk::MemoryRequirements,
    /// Location where the memory allocation should be stored.
    pub location: MemoryLocation,
    /// Whether the resource is linear (buffer / linear texture) or a regular (tiled) texture.
    pub linear: bool,
    /// Determines how this allocation should be managed.
    pub allocation_scheme: AllocationScheme,
}

#[derive(Clone, Copy, Debug)]
pub(crate) struct SendSyncPtr(core::ptr::NonNull<core::ffi::c_void>);
// Sending is fine because the mapped pointer does not change based on the thread
// it is accessed from.
unsafe impl Send for SendSyncPtr {}
// Sync is also okay: sharing `&SendSyncPtr` never exposes a mutable reference to
// the pointed-to memory.
unsafe impl Sync for SendSyncPtr {}

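/// Arguments for creating an [`Allocator`].
///
/// A minimal construction sketch (illustrative only; it assumes an already
/// initialized `ash` `Instance`/`Device` and relies on the `Default`
/// implementations of [`AllocatorDebugSettings`] and [`AllocationSizes`]):
///
/// ```ignore
/// use gpu_allocator::vulkan::{Allocator, AllocatorCreateDesc};
///
/// let mut allocator = Allocator::new(&AllocatorCreateDesc {
///     instance: instance.clone(),
///     device: device.clone(),
///     physical_device,
///     debug_settings: Default::default(),
///     // Set to true only if the bufferDeviceAddress device feature is enabled.
///     buffer_device_address: false,
///     allocation_sizes: Default::default(),
/// })?;
/// ```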
pub struct AllocatorCreateDesc {
    pub instance: ash::Instance,
    pub device: ash::Device,
    pub physical_device: vk::PhysicalDevice,
    pub debug_settings: AllocatorDebugSettings,
    pub buffer_device_address: bool,
    pub allocation_sizes: AllocationSizes,
}

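/// A piece of allocated memory, sub-allocated from a larger [`vk::DeviceMemory`]
/// block unless a dedicated allocation was requested.
///
/// A hedged sketch of writing to a host-visible allocation through its mapped
/// slice (`bytes` is a hypothetical `&[u8]` that fits within the allocation):
///
/// ```ignore
/// if let Some(slice) = allocation.mapped_slice_mut() {
///     slice[..bytes.len()].copy_from_slice(bytes);
/// }
/// ```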
#[derive(Debug)]
pub struct Allocation {
    chunk_id: Option<core::num::NonZeroU64>,
    offset: u64,
    size: u64,
    memory_block_index: usize,
    memory_type_index: usize,
    device_memory: vk::DeviceMemory,
    mapped_ptr: Option<SendSyncPtr>,
    dedicated_allocation: bool,
    memory_properties: vk::MemoryPropertyFlags,
    name: Option<Box<str>>,
}

impl Allocation {
    /// Tries to borrow the CPU-visible memory region that this allocation represents
    /// as a [`MappedAllocationSlab`], which implements [`presser::Slab`] and through
    /// which data can safely be copied into the allocation.
    ///
    /// Returns [`None`] if the allocation is not mapped, or if its size is greater
    /// than `isize::MAX`.
    #[allow(clippy::needless_lifetimes)]
    pub fn try_as_mapped_slab<'a>(&'a mut self) -> Option<MappedAllocationSlab<'a>> {
        let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();

        if self.size > isize::MAX as _ {
            return None;
        }

        // This cannot overflow, since size was just checked to be <= isize::MAX.
        let size = self.size as usize;

        Some(MappedAllocationSlab {
            _borrowed_alloc: PhantomData,
            mapped_ptr,
            size,
        })
    }

    /// Returns the chunk id of this allocation, or [`None`] for the null allocation.
    pub fn chunk_id(&self) -> Option<core::num::NonZeroU64> {
        self.chunk_id
    }

    /// Returns the [`vk::MemoryPropertyFlags`] of this allocation.
    pub fn memory_properties(&self) -> vk::MemoryPropertyFlags {
        self.memory_properties
    }

    /// Returns the [`vk::DeviceMemory`] object that is backing this allocation.
    ///
    /// # Safety
    ///
    /// The returned memory object can be shared with multiple other allocations.
    /// It must not be freed, mapped, or allocated from outside of this library;
    /// doing so leads to undefined behavior. It is safe to pass to calls such as
    /// `vkBindBufferMemory` together with [`Allocation::offset`].
    pub unsafe fn memory(&self) -> vk::DeviceMemory {
        self.device_memory
    }

    /// Returns `true` if this allocation has its own dedicated [`vk::DeviceMemory`] object.
    pub fn is_dedicated(&self) -> bool {
        self.dedicated_allocation
    }

    /// Returns the offset of the allocation within the [`vk::DeviceMemory`] object.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    /// Returns the size of the allocation in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Returns a valid mapped pointer if the memory is host-visible, otherwise [`None`].
    /// The pointer already points to the exact memory region of the allocation, so no
    /// offset needs to be applied.
    pub fn mapped_ptr(&self) -> Option<core::ptr::NonNull<core::ffi::c_void>> {
        self.mapped_ptr.map(|SendSyncPtr(p)| p)
    }

    /// Returns a valid mapped slice if the memory is host-visible, otherwise [`None`].
    pub fn mapped_slice(&self) -> Option<&[u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            core::slice::from_raw_parts(ptr.cast().as_ptr(), self.size as usize)
        })
    }

    /// Returns a valid mapped mutable slice if the memory is host-visible, otherwise [`None`].
    pub fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            core::slice::from_raw_parts_mut(ptr.cast().as_ptr(), self.size as usize)
        })
    }

    /// Returns `true` if this is the default, null allocation that is not backed by memory.
    pub fn is_null(&self) -> bool {
        self.chunk_id.is_none()
    }
}

impl Default for Allocation {
    fn default() -> Self {
        Self {
            chunk_id: None,
            offset: 0,
            size: 0,
            memory_block_index: !0,
            memory_type_index: !0,
            device_memory: vk::DeviceMemory::null(),
            mapped_ptr: None,
            memory_properties: vk::MemoryPropertyFlags::empty(),
            name: None,
            dedicated_allocation: false,
        }
    }
}

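/// A wrapper around a mutably borrowed [`Allocation`] that implements
/// [`presser::Slab`], created with [`Allocation::try_as_mapped_slab`].
///
/// A hedged usage sketch for copying a slice of `Copy` data into the allocation
/// via `presser` (the `vertices` slice and the starting offset of `0` are
/// illustrative assumptions):
///
/// ```ignore
/// let mut slab = allocation
///     .try_as_mapped_slab()
///     .expect("allocation must be host-visible and mapped");
/// // Copies the bytes of `vertices` into the slab, respecting alignment, and
/// // reports where the copy actually landed.
/// let copy_record = presser::copy_from_slice_to_offset(&vertices, &mut slab, 0)?;
/// ```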
pub struct MappedAllocationSlab<'a> {
    _borrowed_alloc: PhantomData<&'a mut Allocation>,
    mapped_ptr: *mut u8,
    size: usize,
}

// SAFETY: See the checks in [`Allocation::try_as_mapped_slab`]: the pointer is
// non-null and mapped, and `size` is no larger than `isize::MAX`.
unsafe impl presser::Slab for MappedAllocationSlab<'_> {
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
    }

    fn size(&self) -> usize {
        self.size
    }
}

// SAFETY: The panics in `base_ptr` and `size` uphold the `Slab` contract: a base
// pointer is only handed out for a mapped allocation, and the reported size never
// exceeds `isize::MAX`.
unsafe impl presser::Slab for Allocation {
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn size(&self) -> usize {
        if self.size > isize::MAX as _ {
            panic!("tried to use an Allocation with size > isize::MAX as a Slab")
        }
        self.size as usize
    }
}

#[derive(Debug)]
pub(crate) struct MemoryBlock {
    pub(crate) device_memory: vk::DeviceMemory,
    pub(crate) size: u64,
    pub(crate) mapped_ptr: Option<SendSyncPtr>,
    pub(crate) sub_allocator: Box<dyn SubAllocator>,
    #[cfg(feature = "visualizer")]
    pub(crate) dedicated_allocation: bool,
}

impl MemoryBlock {
    fn new(
        device: &ash::Device,
        size: u64,
        mem_type_index: usize,
        mapped: bool,
        buffer_device_address: bool,
        allocation_scheme: AllocationScheme,
        requires_personal_block: bool,
    ) -> Result<Self> {
        let device_memory = {
            let alloc_info = vk::MemoryAllocateInfo::default()
                .allocation_size(size)
                .memory_type_index(mem_type_index as u32);

            // Request a device address for this memory when the feature is enabled.
            let allocation_flags = vk::MemoryAllocateFlags::DEVICE_ADDRESS;
            let mut flags_info = vk::MemoryAllocateFlagsInfo::default().flags(allocation_flags);
            let alloc_info = if buffer_device_address {
                alloc_info.push_next(&mut flags_info)
            } else {
                alloc_info
            };

            // Chain a dedicated-allocation struct when the resource asked for one.
            let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::default();
            let alloc_info = match allocation_scheme {
                AllocationScheme::DedicatedBuffer(buffer) => {
                    dedicated_memory_info = dedicated_memory_info.buffer(buffer);
                    alloc_info.push_next(&mut dedicated_memory_info)
                }
                AllocationScheme::DedicatedImage(image) => {
                    dedicated_memory_info = dedicated_memory_info.image(image);
                    alloc_info.push_next(&mut dedicated_memory_info)
                }
                AllocationScheme::GpuAllocatorManaged => alloc_info,
            };

            unsafe { device.allocate_memory(&alloc_info, None) }.map_err(|e| match e {
                vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => AllocationError::OutOfMemory,
                e => AllocationError::Internal(format!(
                    "Unexpected error in vkAllocateMemory: {e:?}"
                )),
            })?
        };

        let mapped_ptr = mapped
            .then(|| {
                unsafe {
                    device.map_memory(
                        device_memory,
                        0,
                        vk::WHOLE_SIZE,
                        vk::MemoryMapFlags::empty(),
                    )
                }
                .map_err(|e| {
                    unsafe { device.free_memory(device_memory, None) };
                    AllocationError::FailedToMap(e.to_string())
                })
                .and_then(|p| {
                    core::ptr::NonNull::new(p).map(SendSyncPtr).ok_or_else(|| {
                        AllocationError::FailedToMap("Returned mapped pointer is null".to_owned())
                    })
                })
            })
            .transpose()?;

        // Dedicated and oversized ("personal") blocks hold exactly one allocation,
        // so they don't need a free-list; everything else is sub-allocated.
        let sub_allocator: Box<dyn SubAllocator> = if allocation_scheme
            != AllocationScheme::GpuAllocatorManaged
            || requires_personal_block
        {
            Box::new(DedicatedBlockAllocator::new(size))
        } else {
            Box::new(FreeListAllocator::new(size))
        };

        Ok(Self {
            device_memory,
            size,
            mapped_ptr,
            sub_allocator,
            #[cfg(feature = "visualizer")]
            dedicated_allocation: allocation_scheme != AllocationScheme::GpuAllocatorManaged,
        })
    }

    fn destroy(self, device: &ash::Device) {
        if self.mapped_ptr.is_some() {
            unsafe { device.unmap_memory(self.device_memory) };
        }

        unsafe { device.free_memory(self.device_memory, None) };
    }
}

#[derive(Debug)]
pub(crate) struct MemoryType {
    pub(crate) memory_blocks: Vec<Option<MemoryBlock>>,
    pub(crate) memory_properties: vk::MemoryPropertyFlags,
    pub(crate) memory_type_index: usize,
    pub(crate) heap_index: usize,
    pub(crate) mappable: bool,
    pub(crate) active_general_blocks: usize,
    pub(crate) buffer_device_address: bool,
}

impl MemoryType {
    fn allocate(
        &mut self,
        device: &ash::Device,
        desc: &AllocationCreateDesc<'_>,
        granularity: u64,
        #[cfg(feature = "std")] backtrace: Arc<Backtrace>,
        allocation_sizes: &AllocationSizes,
    ) -> Result<Allocation> {
        let allocation_type = if desc.linear {
            AllocationType::Linear
        } else {
            AllocationType::NonLinear
        };

        let is_host = self
            .memory_properties
            .contains(vk::MemoryPropertyFlags::HOST_VISIBLE);

        let memblock_size =
            allocation_sizes.get_memblock_size(is_host, self.active_general_blocks);

        let size = desc.requirements.size;
        let alignment = desc.requirements.alignment;

        let dedicated_allocation = desc.allocation_scheme != AllocationScheme::GpuAllocatorManaged;
        let requires_personal_block = size > memblock_size;

        // Create a dedicated block for resources that require a dedicated allocation,
        // or for allocations larger than the default block size.
        if dedicated_allocation || requires_personal_block {
            let mem_block = MemoryBlock::new(
                device,
                size,
                self.memory_type_index,
                self.mappable,
                self.buffer_device_address,
                desc.allocation_scheme,
                requires_personal_block,
            )?;

            let mut block_index = None;
            for (i, block) in self.memory_blocks.iter().enumerate() {
                if block.is_none() {
                    block_index = Some(i);
                    break;
                }
            }

            let block_index = match block_index {
                Some(i) => {
                    self.memory_blocks[i].replace(mem_block);
                    i
                }
                None => {
                    self.memory_blocks.push(Some(mem_block));
                    self.memory_blocks.len() - 1
                }
            };

            let mem_block = self.memory_blocks[block_index]
                .as_mut()
                .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;

            let (offset, chunk_id) = mem_block.sub_allocator.allocate(
                size,
                alignment,
                allocation_type,
                granularity,
                desc.name,
                #[cfg(feature = "std")]
                backtrace,
            )?;

            return Ok(Allocation {
                chunk_id: Some(chunk_id),
                offset,
                size,
                memory_block_index: block_index,
                memory_type_index: self.memory_type_index,
                device_memory: mem_block.device_memory,
                mapped_ptr: mem_block.mapped_ptr,
                memory_properties: self.memory_properties,
                name: Some(desc.name.into()),
                dedicated_allocation,
            });
        }

        // Try to sub-allocate from the existing memory blocks first.
        let mut empty_block_index = None;
        for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
            if let Some(mem_block) = mem_block {
                let allocation = mem_block.sub_allocator.allocate(
                    size,
                    alignment,
                    allocation_type,
                    granularity,
                    desc.name,
                    #[cfg(feature = "std")]
                    backtrace.clone(),
                );

                match allocation {
                    Ok((offset, chunk_id)) => {
                        let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr
                        {
                            let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
                            core::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
                        } else {
                            None
                        };
                        return Ok(Allocation {
                            chunk_id: Some(chunk_id),
                            offset,
                            size,
                            memory_block_index: mem_block_i,
                            memory_type_index: self.memory_type_index,
                            device_memory: mem_block.device_memory,
                            memory_properties: self.memory_properties,
                            mapped_ptr,
                            dedicated_allocation: false,
                            name: Some(desc.name.into()),
                        });
                    }
                    Err(err) => match err {
                        // This block is full; try the next one.
                        AllocationError::OutOfMemory => {}
                        // Unhandled error, bail out.
                        _ => return Err(err),
                    },
                }
            } else if empty_block_index.is_none() {
                empty_block_index = Some(mem_block_i);
            }
        }

        // No existing block had room; allocate a new default-sized block.
        let new_memory_block = MemoryBlock::new(
            device,
            memblock_size,
            self.memory_type_index,
            self.mappable,
            self.buffer_device_address,
            desc.allocation_scheme,
            false,
        )?;

        let new_block_index = if let Some(block_index) = empty_block_index {
            self.memory_blocks[block_index] = Some(new_memory_block);
            block_index
        } else {
            self.memory_blocks.push(Some(new_memory_block));
            self.memory_blocks.len() - 1
        };

        self.active_general_blocks += 1;

        let mem_block = self.memory_blocks[new_block_index]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
        let allocation = mem_block.sub_allocator.allocate(
            size,
            alignment,
            allocation_type,
            granularity,
            desc.name,
            #[cfg(feature = "std")]
            backtrace,
        );
        let (offset, chunk_id) = match allocation {
            Ok(value) => value,
            Err(err) => match err {
                AllocationError::OutOfMemory => {
                    return Err(AllocationError::Internal(
                        "Allocation that must succeed failed. This is a bug in the allocator."
                            .into(),
                    ))
                }
                _ => return Err(err),
            },
        };

        let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr {
            let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
            core::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
        } else {
            None
        };

        Ok(Allocation {
            chunk_id: Some(chunk_id),
            offset,
            size,
            memory_block_index: new_block_index,
            memory_type_index: self.memory_type_index,
            device_memory: mem_block.device_memory,
            mapped_ptr,
            memory_properties: self.memory_properties,
            name: Some(desc.name.into()),
            dedicated_allocation: false,
        })
    }

    #[allow(clippy::needless_pass_by_value)]
    fn free(&mut self, allocation: Allocation, device: &ash::Device) -> Result<()> {
        let block_idx = allocation.memory_block_index;

        let mem_block = self.memory_blocks[block_idx]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;

        mem_block.sub_allocator.free(allocation.chunk_id)?;

        // Destroy the block if it is now empty, unless it is the last remaining
        // general-purpose block, which is kept alive to avoid churn.
        let is_dedicated_or_not_last_general_block =
            !mem_block.sub_allocator.supports_general_allocations()
                || self.active_general_blocks > 1;
        if mem_block.sub_allocator.is_empty() && is_dedicated_or_not_last_general_block {
            let block = self.memory_blocks[block_idx]
                .take()
                .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;

            if block.sub_allocator.supports_general_allocations() {
                self.active_general_blocks -= 1;
            }

            block.destroy(device);
        }

        Ok(())
    }
}

/// A Vulkan memory allocator that manages [`vk::DeviceMemory`] blocks per memory
/// type and sub-allocates resources out of them.
pub struct Allocator {
    pub(crate) memory_types: Vec<MemoryType>,
    pub(crate) memory_heaps: Vec<vk::MemoryHeap>,
    device: ash::Device,
    pub(crate) buffer_image_granularity: u64,
    pub(crate) debug_settings: AllocatorDebugSettings,
    allocation_sizes: AllocationSizes,
}

impl fmt::Debug for Allocator {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.generate_report().fmt(f)
    }
}

impl Allocator {
    pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
        if desc.physical_device == vk::PhysicalDevice::null() {
            return Err(AllocationError::InvalidAllocatorCreateDesc(
                "AllocatorCreateDesc field `physical_device` is null.".into(),
            ));
        }

        let mem_props = unsafe {
            desc.instance
                .get_physical_device_memory_properties(desc.physical_device)
        };

        let memory_types = mem_props.memory_types_as_slice();
        let memory_heaps = mem_props.memory_heaps_as_slice().to_vec();

        if desc.debug_settings.log_memory_information {
            debug!("memory type count: {}", mem_props.memory_type_count);
            debug!("memory heap count: {}", mem_props.memory_heap_count);

            for (i, mem_type) in memory_types.iter().enumerate() {
                let flags = mem_type.property_flags;
                debug!(
                    "memory type[{}]: prop flags: 0x{:x}, heap[{}]",
                    i,
                    flags.as_raw(),
                    mem_type.heap_index,
                );
            }
            for (i, heap) in memory_heaps.iter().enumerate() {
                debug!(
                    "heap[{}] flags: 0x{:x}, size: {} MiB",
                    i,
                    heap.flags.as_raw(),
                    heap.size / (1024 * 1024)
                );
            }
        }

        let memory_types = memory_types
            .iter()
            .enumerate()
            .map(|(i, mem_type)| MemoryType {
                memory_blocks: Vec::default(),
                memory_properties: mem_type.property_flags,
                memory_type_index: i,
                heap_index: mem_type.heap_index as usize,
                mappable: mem_type
                    .property_flags
                    .contains(vk::MemoryPropertyFlags::HOST_VISIBLE),
                active_general_blocks: 0,
                buffer_device_address: desc.buffer_device_address,
            })
            .collect::<Vec<_>>();

        let physical_device_properties = unsafe {
            desc.instance
                .get_physical_device_properties(desc.physical_device)
        };

        let granularity = physical_device_properties.limits.buffer_image_granularity;

        Ok(Self {
            memory_types,
            memory_heaps,
            device: desc.device.clone(),
            buffer_image_granularity: granularity,
            debug_settings: desc.debug_settings,
            allocation_sizes: desc.allocation_sizes,
        })
    }

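    /// Allocates memory according to `desc`, sub-allocating from an existing block
    /// when possible.
    ///
    /// A hedged end-to-end sketch (illustrative; `device`, `buffer`, and `allocator`
    /// are assumed to exist, and the requirements come from `ash`):
    ///
    /// ```ignore
    /// use gpu_allocator::vulkan::{AllocationCreateDesc, AllocationScheme};
    /// use gpu_allocator::MemoryLocation;
    ///
    /// let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
    /// let allocation = allocator.allocate(&AllocationCreateDesc {
    ///     name: "example allocation",
    ///     requirements,
    ///     location: MemoryLocation::CpuToGpu,
    ///     linear: true, // buffers are always linear
    ///     allocation_scheme: AllocationScheme::GpuAllocatorManaged,
    /// })?;
    ///
    /// // Bind the buffer to the allocated memory region.
    /// unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset())? };
    ///
    /// // ... use the buffer, then later:
    /// allocator.free(allocation)?;
    /// ```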
    pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
        let size = desc.requirements.size;
        let alignment = desc.requirements.alignment;

        #[cfg(feature = "std")]
        let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
            Backtrace::force_capture()
        } else {
            Backtrace::disabled()
        });

        if self.debug_settings.log_allocations {
            debug!(
                "Allocating `{}` of {} bytes with an alignment of {}.",
                &desc.name, size, alignment
            );
            #[cfg(feature = "std")]
            if self.debug_settings.log_stack_traces {
                let backtrace = Backtrace::force_capture();
                debug!("Allocation stack trace: {backtrace}");
            }
        }

        if size == 0 || !alignment.is_power_of_two() {
            return Err(AllocationError::InvalidAllocationCreateDesc);
        }

        // First look for a memory type with the full set of preferred flags.
        let mem_loc_preferred_bits = match desc.location {
            MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
            MemoryLocation::CpuToGpu => {
                vk::MemoryPropertyFlags::HOST_VISIBLE
                    | vk::MemoryPropertyFlags::HOST_COHERENT
                    | vk::MemoryPropertyFlags::DEVICE_LOCAL
            }
            MemoryLocation::GpuToCpu => {
                vk::MemoryPropertyFlags::HOST_VISIBLE
                    | vk::MemoryPropertyFlags::HOST_COHERENT
                    | vk::MemoryPropertyFlags::HOST_CACHED
            }
            MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
        };
        let mut memory_type_index_opt =
            self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);

        // Fall back to the minimal required flags when no preferred type exists.
        if memory_type_index_opt.is_none() {
            let mem_loc_required_bits = match desc.location {
                MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
                MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu => {
                    vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT
                }
                MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
            };

            memory_type_index_opt =
                self.find_memorytype_index(&desc.requirements, mem_loc_required_bits);
        }

        let memory_type_index = match memory_type_index_opt {
            Some(x) => x as usize,
            None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
        };

        let memory_type = &mut self.memory_types[memory_type_index];
        let allocation = if size > self.memory_heaps[memory_type.heap_index].size {
            // An allocation larger than the whole heap can never succeed.
            Err(AllocationError::OutOfMemory)
        } else {
            memory_type.allocate(
                &self.device,
                desc,
                self.buffer_image_granularity,
                #[cfg(feature = "std")]
                backtrace.clone(),
                &self.allocation_sizes,
            )
        };

        // For CpuToGpu, retry in plain host-visible memory if the device-local,
        // host-visible memory is exhausted.
        if desc.location == MemoryLocation::CpuToGpu {
            if allocation.is_err() {
                let mem_loc_preferred_bits =
                    vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT;

                let memory_type_index_opt =
                    self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);

                let memory_type_index = match memory_type_index_opt {
                    Some(x) => x as usize,
                    None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
                };

                self.memory_types[memory_type_index].allocate(
                    &self.device,
                    desc,
                    self.buffer_image_granularity,
                    #[cfg(feature = "std")]
                    backtrace,
                    &self.allocation_sizes,
                )
            } else {
                allocation
            }
        } else {
            allocation
        }
    }

    /// Frees the given allocation, destroying its memory block if it becomes empty
    /// (unless it is the last general-purpose block). Freeing a null allocation is a no-op.
    pub fn free(&mut self, allocation: Allocation) -> Result<()> {
        if self.debug_settings.log_frees {
            let name = allocation.name.as_deref().unwrap_or("<null>");
            debug!("Freeing `{name}`.");
            #[cfg(feature = "std")]
            if self.debug_settings.log_stack_traces {
                let backtrace = Backtrace::force_capture();
                debug!("Free stack trace: {backtrace}");
            }
        }

        if allocation.is_null() {
            return Ok(());
        }

        self.memory_types[allocation.memory_type_index].free(allocation, &self.device)?;

        Ok(())
    }

    /// Renames an existing allocation, both on the [`Allocation`] itself and in the
    /// sub-allocator's tracking data.
    pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
        allocation.name = Some(name.into());

        if allocation.is_null() {
            return Ok(());
        }

        let mem_type = &mut self.memory_types[allocation.memory_type_index];
        let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;

        mem_block
            .sub_allocator
            .rename_allocation(allocation.chunk_id, name)?;

        Ok(())
    }

    /// Logs every allocation that is still alive, at the given log level.
    pub fn report_memory_leaks(&self, log_level: Level) {
        for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
            for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
                if let Some(mem_block) = mem_block {
                    mem_block
                        .sub_allocator
                        .report_memory_leaks(log_level, mem_type_i, block_i);
                }
            }
        }
    }

    fn find_memorytype_index(
        &self,
        memory_req: &vk::MemoryRequirements,
        flags: vk::MemoryPropertyFlags,
    ) -> Option<u32> {
        // Bit `i` of `memory_type_bits` is set if memory type `i` is supported by
        // the resource; the type must additionally contain all requested flags.
        self.memory_types
            .iter()
            .find(|memory_type| {
                (1 << memory_type.memory_type_index) & memory_req.memory_type_bits != 0
                    && memory_type.memory_properties.contains(flags)
            })
            .map(|memory_type| memory_type.memory_type_index as _)
    }

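    /// Generates an [`AllocatorReport`] describing every block and live allocation,
    /// e.g. for debugging or for the `visualizer` feature.
    ///
    /// A small usage sketch (the field names come from [`AllocatorReport`]; the
    /// printed format is illustrative):
    ///
    /// ```ignore
    /// let report = allocator.generate_report();
    /// println!(
    ///     "allocated {} of {} bytes across {} blocks",
    ///     report.total_allocated_bytes,
    ///     report.total_capacity_bytes,
    ///     report.blocks.len(),
    /// );
    /// ```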
    pub fn generate_report(&self) -> AllocatorReport {
        let mut allocations = vec![];
        let mut blocks = vec![];
        let mut total_capacity_bytes = 0;

        for memory_type in &self.memory_types {
            for block in memory_type.memory_blocks.iter().flatten() {
                total_capacity_bytes += block.size;
                let first_allocation = allocations.len();
                allocations.extend(block.sub_allocator.report_allocations());
                blocks.push(MemoryBlockReport {
                    size: block.size,
                    allocations: first_allocation..allocations.len(),
                });
            }
        }

        let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();

        AllocatorReport {
            allocations,
            blocks,
            total_allocated_bytes,
            total_capacity_bytes,
        }
    }

    /// Returns the total capacity in bytes of all memory blocks currently allocated
    /// on the device.
    pub fn capacity(&self) -> u64 {
        let mut total_capacity_bytes = 0;

        for memory_type in &self.memory_types {
            for block in memory_type.memory_blocks.iter().flatten() {
                total_capacity_bytes += block.size;
            }
        }

        total_capacity_bytes
    }
}

impl Drop for Allocator {
    fn drop(&mut self) {
        if self.debug_settings.log_leaks_on_shutdown {
            self.report_memory_leaks(Level::Warn);
        }

        // Free all remaining memory blocks.
        for mem_type in self.memory_types.iter_mut() {
            for mem_block in mem_type.memory_blocks.iter_mut() {
                let block = mem_block.take();
                if let Some(block) = block {
                    block.destroy(&self.device);
                }
            }
        }
    }
}