#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]

#[cfg(feature = "visualizer")]
mod visualizer;
use std::{backtrace::Backtrace, fmt, marker::PhantomData, sync::Arc};

use ash::vk;
use log::{debug, Level};
#[cfg(feature = "visualizer")]
pub use visualizer::AllocatorVisualizer;

use super::allocator;
use crate::{
    allocator::{AllocatorReport, MemoryBlockReport},
    AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocationScheme {
    DedicatedBuffer(vk::Buffer),
    DedicatedImage(vk::Image),
    GpuAllocatorManaged,
}

#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
    pub name: &'a str,
    pub requirements: vk::MemoryRequirements,
    pub location: MemoryLocation,
    pub linear: bool,
    pub allocation_scheme: AllocationScheme,
}

#[derive(Clone, Copy, Debug)]
pub(crate) struct SendSyncPtr(std::ptr::NonNull<std::ffi::c_void>);
unsafe impl Send for SendSyncPtr {}
unsafe impl Sync for SendSyncPtr {}

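/// Arguments used to construct an [`Allocator`].
///
/// A minimal creation sketch (illustrative only; the `instance`, `device`, and
/// `physical_device` handles are assumed to have been created with `ash`
/// elsewhere):
///
/// ```ignore
/// let mut allocator = Allocator::new(&AllocatorCreateDesc {
///     instance: instance.clone(),
///     device: device.clone(),
///     physical_device,
///     debug_settings: Default::default(),
///     buffer_device_address: false,
///     allocation_sizes: Default::default(),
/// })?;
/// ```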
pub struct AllocatorCreateDesc {
    pub instance: ash::Instance,
    pub device: ash::Device,
    pub physical_device: vk::PhysicalDevice,
    pub debug_settings: AllocatorDebugSettings,
    pub buffer_device_address: bool,
    pub allocation_sizes: AllocationSizes,
}

#[derive(Debug)]
pub struct Allocation {
    chunk_id: Option<std::num::NonZeroU64>,
    offset: u64,
    size: u64,
    memory_block_index: usize,
    memory_type_index: usize,
    device_memory: vk::DeviceMemory,
    mapped_ptr: Option<SendSyncPtr>,
    dedicated_allocation: bool,
    memory_properties: vk::MemoryPropertyFlags,
    name: Option<Box<str>>,
}

impl Allocation {
    #[allow(clippy::needless_lifetimes)]
    pub fn try_as_mapped_slab<'a>(&'a mut self) -> Option<MappedAllocationSlab<'a>> {
        let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();

        if self.size > isize::MAX as _ {
            return None;
        }

        let size = self.size as usize;

        Some(MappedAllocationSlab {
            _borrowed_alloc: PhantomData,
            mapped_ptr,
            size,
        })
    }

    pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
        self.chunk_id
    }

    pub fn memory_properties(&self) -> vk::MemoryPropertyFlags {
        self.memory_properties
    }

    pub unsafe fn memory(&self) -> vk::DeviceMemory {
        self.device_memory
    }

    pub fn is_dedicated(&self) -> bool {
        self.dedicated_allocation
    }

    pub fn offset(&self) -> u64 {
        self.offset
    }

    pub fn size(&self) -> u64 {
        self.size
    }

    pub fn mapped_ptr(&self) -> Option<std::ptr::NonNull<std::ffi::c_void>> {
        self.mapped_ptr.map(|SendSyncPtr(p)| p)
    }

    pub fn mapped_slice(&self) -> Option<&[u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            std::slice::from_raw_parts(ptr.cast().as_ptr(), self.size as usize)
        })
    }

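    /// A minimal sketch of writing bytes through the mapped slice (illustrative
    /// only; it assumes this allocation was made in a host-visible memory
    /// location, otherwise there is no mapped pointer and `None` is returned):
    ///
    /// ```ignore
    /// let bytes = [0u8; 64];
    /// if let Some(slice) = allocation.mapped_slice_mut() {
    ///     slice[..bytes.len()].copy_from_slice(&bytes);
    /// }
    /// ```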
    pub fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
        self.mapped_ptr().map(|ptr| unsafe {
            std::slice::from_raw_parts_mut(ptr.cast().as_ptr(), self.size as usize)
        })
    }

    pub fn is_null(&self) -> bool {
        self.chunk_id.is_none()
    }
}

impl Default for Allocation {
    fn default() -> Self {
        Self {
            chunk_id: None,
            offset: 0,
            size: 0,
            memory_block_index: !0,
            memory_type_index: !0,
            device_memory: vk::DeviceMemory::null(),
            mapped_ptr: None,
            memory_properties: vk::MemoryPropertyFlags::empty(),
            name: None,
            dedicated_allocation: false,
        }
    }
}

pub struct MappedAllocationSlab<'a> {
    _borrowed_alloc: PhantomData<&'a mut Allocation>,
    mapped_ptr: *mut u8,
    size: usize,
}

unsafe impl<'a> presser::Slab for MappedAllocationSlab<'a> {
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
    }

    fn size(&self) -> usize {
        self.size
    }
}

unsafe impl presser::Slab for Allocation {
    fn base_ptr(&self) -> *const u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.mapped_ptr
            .expect("tried to use a non-mapped Allocation as a Slab")
            .0
            .as_ptr()
            .cast()
    }

    fn size(&self) -> usize {
        if self.size > isize::MAX as _ {
            panic!("tried to use an Allocation with size > isize::MAX as a Slab")
        }
        self.size as usize
    }
}

#[derive(Debug)]
pub(crate) struct MemoryBlock {
    pub(crate) device_memory: vk::DeviceMemory,
    pub(crate) size: u64,
    pub(crate) mapped_ptr: Option<SendSyncPtr>,
    pub(crate) sub_allocator: Box<dyn allocator::SubAllocator>,
    #[cfg(feature = "visualizer")]
    pub(crate) dedicated_allocation: bool,
}

impl MemoryBlock {
    fn new(
        device: &ash::Device,
        size: u64,
        mem_type_index: usize,
        mapped: bool,
        buffer_device_address: bool,
        allocation_scheme: AllocationScheme,
        requires_personal_block: bool,
    ) -> Result<Self> {
        let device_memory = {
            let alloc_info = vk::MemoryAllocateInfo::default()
                .allocation_size(size)
                .memory_type_index(mem_type_index as u32);

            let allocation_flags = vk::MemoryAllocateFlags::DEVICE_ADDRESS;
            let mut flags_info = vk::MemoryAllocateFlagsInfo::default().flags(allocation_flags);
            let alloc_info = if buffer_device_address {
                alloc_info.push_next(&mut flags_info)
            } else {
                alloc_info
            };

            let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::default();
            let alloc_info = match allocation_scheme {
                AllocationScheme::DedicatedBuffer(buffer) => {
                    dedicated_memory_info = dedicated_memory_info.buffer(buffer);
                    alloc_info.push_next(&mut dedicated_memory_info)
                }
                AllocationScheme::DedicatedImage(image) => {
                    dedicated_memory_info = dedicated_memory_info.image(image);
                    alloc_info.push_next(&mut dedicated_memory_info)
                }
                AllocationScheme::GpuAllocatorManaged => alloc_info,
            };

            unsafe { device.allocate_memory(&alloc_info, None) }.map_err(|e| match e {
                vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => AllocationError::OutOfMemory,
                e => AllocationError::Internal(format!(
                    "Unexpected error in vkAllocateMemory: {:?}",
                    e
                )),
            })?
        };

        let mapped_ptr = mapped
            .then(|| {
                unsafe {
                    device.map_memory(
                        device_memory,
                        0,
                        vk::WHOLE_SIZE,
                        vk::MemoryMapFlags::empty(),
                    )
                }
                .map_err(|e| {
                    unsafe { device.free_memory(device_memory, None) };
                    AllocationError::FailedToMap(e.to_string())
                })
                .and_then(|p| {
                    std::ptr::NonNull::new(p).map(SendSyncPtr).ok_or_else(|| {
                        AllocationError::FailedToMap("Returned mapped pointer is null".to_owned())
                    })
                })
            })
            .transpose()?;

        let sub_allocator: Box<dyn allocator::SubAllocator> = if allocation_scheme
            != AllocationScheme::GpuAllocatorManaged
            || requires_personal_block
        {
            Box::new(allocator::DedicatedBlockAllocator::new(size))
        } else {
            Box::new(allocator::FreeListAllocator::new(size))
        };

        Ok(Self {
            device_memory,
            size,
            mapped_ptr,
            sub_allocator,
            #[cfg(feature = "visualizer")]
            dedicated_allocation: allocation_scheme != AllocationScheme::GpuAllocatorManaged,
        })
    }

    fn destroy(self, device: &ash::Device) {
        if self.mapped_ptr.is_some() {
            unsafe { device.unmap_memory(self.device_memory) };
        }

        unsafe { device.free_memory(self.device_memory, None) };
    }
}

#[derive(Debug)]
pub(crate) struct MemoryType {
    pub(crate) memory_blocks: Vec<Option<MemoryBlock>>,
    pub(crate) memory_properties: vk::MemoryPropertyFlags,
    pub(crate) memory_type_index: usize,
    pub(crate) heap_index: usize,
    pub(crate) mappable: bool,
    pub(crate) active_general_blocks: usize,
    pub(crate) buffer_device_address: bool,
}

impl MemoryType {
    fn allocate(
        &mut self,
        device: &ash::Device,
        desc: &AllocationCreateDesc<'_>,
        granularity: u64,
        backtrace: Arc<Backtrace>,
        allocation_sizes: &AllocationSizes,
    ) -> Result<Allocation> {
        let allocation_type = if desc.linear {
            allocator::AllocationType::Linear
        } else {
            allocator::AllocationType::NonLinear
        };

        let memblock_size = if self
            .memory_properties
            .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
        {
            allocation_sizes.host_memblock_size
        } else {
            allocation_sizes.device_memblock_size
        };

        let size = desc.requirements.size;
        let alignment = desc.requirements.alignment;

        let dedicated_allocation = desc.allocation_scheme != AllocationScheme::GpuAllocatorManaged;
        let requires_personal_block = size > memblock_size;

        if dedicated_allocation || requires_personal_block {
            let mem_block = MemoryBlock::new(
                device,
                size,
                self.memory_type_index,
                self.mappable,
                self.buffer_device_address,
                desc.allocation_scheme,
                requires_personal_block,
            )?;

            let mut block_index = None;
            for (i, block) in self.memory_blocks.iter().enumerate() {
                if block.is_none() {
                    block_index = Some(i);
                    break;
                }
            }

            let block_index = match block_index {
                Some(i) => {
                    self.memory_blocks[i].replace(mem_block);
                    i
                }
                None => {
                    self.memory_blocks.push(Some(mem_block));
                    self.memory_blocks.len() - 1
                }
            };

            let mem_block = self.memory_blocks[block_index]
                .as_mut()
                .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;

            let (offset, chunk_id) = mem_block.sub_allocator.allocate(
                size,
                alignment,
                allocation_type,
                granularity,
                desc.name,
                backtrace,
            )?;

            return Ok(Allocation {
                chunk_id: Some(chunk_id),
                offset,
                size,
                memory_block_index: block_index,
                memory_type_index: self.memory_type_index,
                device_memory: mem_block.device_memory,
                mapped_ptr: mem_block.mapped_ptr,
                memory_properties: self.memory_properties,
                name: Some(desc.name.into()),
                dedicated_allocation,
            });
        }

        let mut empty_block_index = None;
        for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
            if let Some(mem_block) = mem_block {
                let allocation = mem_block.sub_allocator.allocate(
                    size,
                    alignment,
                    allocation_type,
                    granularity,
                    desc.name,
                    backtrace.clone(),
                );

                match allocation {
                    Ok((offset, chunk_id)) => {
                        let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr
                        {
                            let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
                            std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
                        } else {
                            None
                        };
                        return Ok(Allocation {
                            chunk_id: Some(chunk_id),
                            offset,
                            size,
                            memory_block_index: mem_block_i,
                            memory_type_index: self.memory_type_index,
                            device_memory: mem_block.device_memory,
                            memory_properties: self.memory_properties,
                            mapped_ptr,
                            dedicated_allocation: false,
                            name: Some(desc.name.into()),
                        });
                    }
                    Err(err) => match err {
                        AllocationError::OutOfMemory => {}
                        _ => return Err(err),
                    },
                }
            } else if empty_block_index.is_none() {
                empty_block_index = Some(mem_block_i);
            }
        }

        let new_memory_block = MemoryBlock::new(
            device,
            memblock_size,
            self.memory_type_index,
            self.mappable,
            self.buffer_device_address,
            desc.allocation_scheme,
            false,
        )?;

        let new_block_index = if let Some(block_index) = empty_block_index {
            self.memory_blocks[block_index] = Some(new_memory_block);
            block_index
        } else {
            self.memory_blocks.push(Some(new_memory_block));
            self.memory_blocks.len() - 1
        };

        self.active_general_blocks += 1;

        let mem_block = self.memory_blocks[new_block_index]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
        let allocation = mem_block.sub_allocator.allocate(
            size,
            alignment,
            allocation_type,
            granularity,
            desc.name,
            backtrace,
        );
        let (offset, chunk_id) = match allocation {
            Ok(value) => value,
            Err(err) => match err {
                AllocationError::OutOfMemory => {
                    return Err(AllocationError::Internal(
                        "Allocation that must succeed failed. This is a bug in the allocator."
                            .into(),
                    ))
                }
                _ => return Err(err),
            },
        };

        let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr {
            let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
            std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
        } else {
            None
        };

        Ok(Allocation {
            chunk_id: Some(chunk_id),
            offset,
            size,
            memory_block_index: new_block_index,
            memory_type_index: self.memory_type_index,
            device_memory: mem_block.device_memory,
            mapped_ptr,
            memory_properties: self.memory_properties,
            name: Some(desc.name.into()),
            dedicated_allocation: false,
        })
    }

    #[allow(clippy::needless_pass_by_value)]
    fn free(&mut self, allocation: Allocation, device: &ash::Device) -> Result<()> {
        let block_idx = allocation.memory_block_index;

        let mem_block = self.memory_blocks[block_idx]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;

        mem_block.sub_allocator.free(allocation.chunk_id)?;

        if mem_block.sub_allocator.is_empty() {
            if mem_block.sub_allocator.supports_general_allocations() {
                if self.active_general_blocks > 1 {
                    let block = self.memory_blocks[block_idx].take();
                    let block = block.ok_or_else(|| {
                        AllocationError::Internal("Memory block must be Some.".into())
                    })?;
                    block.destroy(device);

                    self.active_general_blocks -= 1;
                }
            } else {
                let block = self.memory_blocks[block_idx].take();
                let block = block.ok_or_else(|| {
                    AllocationError::Internal("Memory block must be Some.".into())
                })?;
                block.destroy(device);
            }
        }

        Ok(())
    }
}

pub struct Allocator {
    pub(crate) memory_types: Vec<MemoryType>,
    pub(crate) memory_heaps: Vec<vk::MemoryHeap>,
    device: ash::Device,
    pub(crate) buffer_image_granularity: u64,
    pub(crate) debug_settings: AllocatorDebugSettings,
    allocation_sizes: AllocationSizes,
}

impl fmt::Debug for Allocator {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.generate_report().fmt(f)
    }
}

impl Allocator {
    pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
        if desc.physical_device == vk::PhysicalDevice::null() {
            return Err(AllocationError::InvalidAllocatorCreateDesc(
                "AllocatorCreateDesc field `physical_device` is null.".into(),
            ));
        }

        let mem_props = unsafe {
            desc.instance
                .get_physical_device_memory_properties(desc.physical_device)
        };

        let memory_types = &mem_props.memory_types_as_slice();
        let memory_heaps = mem_props.memory_heaps_as_slice().to_vec();

        if desc.debug_settings.log_memory_information {
            debug!("memory type count: {}", mem_props.memory_type_count);
            debug!("memory heap count: {}", mem_props.memory_heap_count);

            for (i, mem_type) in memory_types.iter().enumerate() {
                let flags = mem_type.property_flags;
                debug!(
                    "memory type[{}]: prop flags: 0x{:x}, heap[{}]",
                    i,
                    flags.as_raw(),
                    mem_type.heap_index,
                );
            }
            for (i, heap) in memory_heaps.iter().enumerate() {
                debug!(
                    "heap[{}] flags: 0x{:x}, size: {} MiB",
                    i,
                    heap.flags.as_raw(),
                    heap.size / (1024 * 1024)
                );
            }
        }

        let memory_types = memory_types
            .iter()
            .enumerate()
            .map(|(i, mem_type)| MemoryType {
                memory_blocks: Vec::default(),
                memory_properties: mem_type.property_flags,
                memory_type_index: i,
                heap_index: mem_type.heap_index as usize,
                mappable: mem_type
                    .property_flags
                    .contains(vk::MemoryPropertyFlags::HOST_VISIBLE),
                active_general_blocks: 0,
                buffer_device_address: desc.buffer_device_address,
            })
            .collect::<Vec<_>>();

        let physical_device_properties = unsafe {
            desc.instance
                .get_physical_device_properties(desc.physical_device)
        };

        let granularity = physical_device_properties.limits.buffer_image_granularity;

        Ok(Self {
            memory_types,
            memory_heaps,
            device: desc.device.clone(),
            buffer_image_granularity: granularity,
            debug_settings: desc.debug_settings,
            allocation_sizes: desc.allocation_sizes,
        })
    }

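    /// A minimal end-to-end sketch (illustrative only; `device` is the same
    /// `ash::Device` the allocator was created with, and buffer creation and
    /// error handling are elided):
    ///
    /// ```ignore
    /// // Create a buffer and query what memory it needs.
    /// let buffer_info = vk::BufferCreateInfo::default()
    ///     .size(512)
    ///     .usage(vk::BufferUsageFlags::TRANSFER_DST);
    /// let buffer = unsafe { device.create_buffer(&buffer_info, None)? };
    /// let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
    ///
    /// // Allocate memory for it and bind the two together.
    /// let allocation = allocator.allocate(&AllocationCreateDesc {
    ///     name: "example buffer",
    ///     requirements,
    ///     location: MemoryLocation::CpuToGpu,
    ///     linear: true,
    ///     allocation_scheme: AllocationScheme::GpuAllocatorManaged,
    /// })?;
    /// unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset())? };
    ///
    /// // When done, free the allocation before destroying the buffer.
    /// allocator.free(allocation)?;
    /// unsafe { device.destroy_buffer(buffer, None) };
    /// ```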
    pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
        let size = desc.requirements.size;
        let alignment = desc.requirements.alignment;

        let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
            Backtrace::force_capture()
        } else {
            Backtrace::disabled()
        });

        if self.debug_settings.log_allocations {
            debug!(
                "Allocating `{}` of {} bytes with an alignment of {}.",
                &desc.name, size, alignment
            );
            if self.debug_settings.log_stack_traces {
                let backtrace = Backtrace::force_capture();
                debug!("Allocation stack trace: {}", backtrace);
            }
        }

        if size == 0 || !alignment.is_power_of_two() {
            return Err(AllocationError::InvalidAllocationCreateDesc);
        }

        let mem_loc_preferred_bits = match desc.location {
            MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
            MemoryLocation::CpuToGpu => {
                vk::MemoryPropertyFlags::HOST_VISIBLE
                    | vk::MemoryPropertyFlags::HOST_COHERENT
                    | vk::MemoryPropertyFlags::DEVICE_LOCAL
            }
            MemoryLocation::GpuToCpu => {
                vk::MemoryPropertyFlags::HOST_VISIBLE
                    | vk::MemoryPropertyFlags::HOST_COHERENT
                    | vk::MemoryPropertyFlags::HOST_CACHED
            }
            MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
        };
        let mut memory_type_index_opt =
            self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);

        if memory_type_index_opt.is_none() {
            let mem_loc_required_bits = match desc.location {
                MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
                MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu => {
                    vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT
                }
                MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
            };

            memory_type_index_opt =
                self.find_memorytype_index(&desc.requirements, mem_loc_required_bits);
        }

        let memory_type_index = match memory_type_index_opt {
            Some(x) => x as usize,
            None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
        };

        let memory_type = &mut self.memory_types[memory_type_index];
        let allocation = if size > self.memory_heaps[memory_type.heap_index].size {
            Err(AllocationError::OutOfMemory)
        } else {
            memory_type.allocate(
                &self.device,
                desc,
                self.buffer_image_granularity,
                backtrace.clone(),
                &self.allocation_sizes,
            )
        };

        if desc.location == MemoryLocation::CpuToGpu {
            if allocation.is_err() {
                let mem_loc_preferred_bits =
                    vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT;

                let memory_type_index_opt =
                    self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);

                let memory_type_index = match memory_type_index_opt {
                    Some(x) => x as usize,
                    None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
                };

                self.memory_types[memory_type_index].allocate(
                    &self.device,
                    desc,
                    self.buffer_image_granularity,
                    backtrace,
                    &self.allocation_sizes,
                )
            } else {
                allocation
            }
        } else {
            allocation
        }
    }

    pub fn free(&mut self, allocation: Allocation) -> Result<()> {
        if self.debug_settings.log_frees {
            let name = allocation.name.as_deref().unwrap_or("<null>");
            debug!("Freeing `{}`.", name);
            if self.debug_settings.log_stack_traces {
                let backtrace = Backtrace::force_capture();
                debug!("Free stack trace: {}", backtrace);
            }
        }

        if allocation.is_null() {
            return Ok(());
        }

        self.memory_types[allocation.memory_type_index].free(allocation, &self.device)?;

        Ok(())
    }

    pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
        allocation.name = Some(name.into());

        if allocation.is_null() {
            return Ok(());
        }

        let mem_type = &mut self.memory_types[allocation.memory_type_index];
        let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
            .as_mut()
            .ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;

        mem_block
            .sub_allocator
            .rename_allocation(allocation.chunk_id, name)?;

        Ok(())
    }

    pub fn report_memory_leaks(&self, log_level: Level) {
        for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
            for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
                if let Some(mem_block) = mem_block {
                    mem_block
                        .sub_allocator
                        .report_memory_leaks(log_level, mem_type_i, block_i);
                }
            }
        }
    }

    fn find_memorytype_index(
        &self,
        memory_req: &vk::MemoryRequirements,
        flags: vk::MemoryPropertyFlags,
    ) -> Option<u32> {
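        // `memory_type_bits` is a bitmask with one bit per memory type index.
        // For example (illustrative), a mask of 0b0110 only permits memory types
        // 1 and 2, since (1 << 1) and (1 << 2) intersect the mask while
        // (1 << 0) and (1 << 3) do not.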
        self.memory_types
            .iter()
            .find(|memory_type| {
                (1 << memory_type.memory_type_index) & memory_req.memory_type_bits != 0
                    && memory_type.memory_properties.contains(flags)
            })
            .map(|memory_type| memory_type.memory_type_index as _)
    }

    pub fn generate_report(&self) -> AllocatorReport {
        let mut allocations = vec![];
        let mut blocks = vec![];
        let mut total_reserved_bytes = 0;

        for memory_type in &self.memory_types {
            for block in memory_type.memory_blocks.iter().flatten() {
                total_reserved_bytes += block.size;
                let first_allocation = allocations.len();
                allocations.extend(block.sub_allocator.report_allocations());
                blocks.push(MemoryBlockReport {
                    size: block.size,
                    allocations: first_allocation..allocations.len(),
                });
            }
        }

        let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();

        AllocatorReport {
            allocations,
            blocks,
            total_allocated_bytes,
            total_reserved_bytes,
        }
    }
}

impl Drop for Allocator {
    fn drop(&mut self) {
        if self.debug_settings.log_leaks_on_shutdown {
            self.report_memory_leaks(Level::Warn);
        }

        for mem_type in self.memory_types.iter_mut() {
            for mem_block in mem_type.memory_blocks.iter_mut() {
                let block = mem_block.take();
                if let Some(block) = block {
                    block.destroy(&self.device);
                }
            }
        }
    }
}