vk_mem/defragmentation.rs

use crate::ffi;
use crate::Allocator;
use ash::prelude::VkResult;
use ash::vk;

pub use ffi::VmaDefragmentationMove as DefragmentationMove;
pub use ffi::VmaDefragmentationStats as DefragmentationStats;
/// An in-progress defragmentation of an [`Allocator`]'s memory.
///
/// Created by [`Allocator::begin_defragmentation`]. Dropping the context ends
/// the defragmentation without collecting statistics; call [`end`](Self::end)
/// to finish it and retrieve [`DefragmentationStats`] instead.
pub struct DefragmentationContext<'a> {
    allocator: &'a Allocator,
    raw: ffi::VmaDefragmentationContext,
}

impl<'a> Drop for DefragmentationContext<'a> {
    fn drop(&mut self) {
        unsafe {
            ffi::vmaEndDefragmentation(self.allocator.internal, self.raw, std::ptr::null_mut());
        }
    }
}

impl<'a> DefragmentationContext<'a> {
    /// Ends the defragmentation process and returns statistics about the
    /// completed work.
    ///
    /// The context is consumed; `Drop` is skipped via `std::mem::forget` so
    /// that `vmaEndDefragmentation` is not called a second time.
    pub fn end(self) -> DefragmentationStats {
        let mut stats = DefragmentationStats {
            bytesMoved: 0,
            bytesFreed: 0,
            allocationsMoved: 0,
            deviceMemoryBlocksFreed: 0,
        };
        unsafe {
            ffi::vmaEndDefragmentation(self.allocator.internal, self.raw, &mut stats);
        }
        std::mem::forget(self);
        stats
    }

    /// Runs a single defragmentation pass.
    ///
    /// If no more moves are possible, this returns `false` without invoking
    /// `mover`. Otherwise the moves proposed for this pass are handed to
    /// `mover`, which may perform the required data copies and adjust each
    /// move's `operation` before the pass is ended. Returns `true` if further
    /// passes are needed.
    pub fn begin_pass(&self, mover: impl FnOnce(&mut [DefragmentationMove])) -> bool {
        let mut pass_info = ffi::VmaDefragmentationPassMoveInfo {
            moveCount: 0,
            pMoves: std::ptr::null_mut(),
        };
        let result = unsafe {
            ffi::vmaBeginDefragmentationPass(self.allocator.internal, self.raw, &mut pass_info)
        };
        // VK_SUCCESS means defragmentation is finished; VK_INCOMPLETE means
        // there are moves to process in this pass.
        if result == vk::Result::SUCCESS {
            return false;
        }
        debug_assert_eq!(result, vk::Result::INCOMPLETE);
        let moves = unsafe {
            std::slice::from_raw_parts_mut(pass_info.pMoves, pass_info.moveCount as usize)
        };
        mover(moves);

        let result = unsafe {
            ffi::vmaEndDefragmentationPass(self.allocator.internal, self.raw, &mut pass_info)
        };

        // VK_INCOMPLETE from vmaEndDefragmentationPass means more passes remain.
        result == vk::Result::INCOMPLETE
    }
}

impl Allocator {
    /// Begins the defragmentation process, returning a context that drives it.
    ///
    /// # Safety
    ///
    /// `info` must describe a valid defragmentation request for this
    /// allocator, and the allocations affected by the defragmentation must not
    /// be in use by the device while passes are running.
    pub unsafe fn begin_defragmentation(
        &self,
        info: &ffi::VmaDefragmentationInfo,
    ) -> VkResult<DefragmentationContext> {
        let mut context: ffi::VmaDefragmentationContext = std::ptr::null_mut();

        ffi::vmaBeginDefragmentation(self.internal, info, &mut context).result()?;

        Ok(DefragmentationContext {
            allocator: self,
            raw: context,
        })
    }
}
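
// Usage sketch (not part of the original module): drives one complete
// defragmentation cycle. `defragment_all` is a hypothetical helper; it
// assumes a zero-initialized `VmaDefragmentationInfo` (default algorithm,
// default pools, no per-pass limits) and that the device is not using the
// affected allocations while passes run. The empty mover closure accepts
// every proposed move as-is; a real application would copy resource contents
// to the destination allocations, or adjust each move's `operation`, before
// the pass ends.
#[allow(dead_code)]
unsafe fn defragment_all(allocator: &Allocator) -> VkResult<DefragmentationStats> {
    let info: ffi::VmaDefragmentationInfo = std::mem::zeroed();
    let context = allocator.begin_defragmentation(&info)?;

    // Run passes until no further moves are proposed.
    while context.begin_pass(|_moves| {}) {}

    Ok(context.end())
}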