// kronos_compute/implementation/pool_allocator.rs

use std::collections::HashMap;
use std::sync::Mutex;

use crate::sys::*;
use crate::core::*;
use crate::ffi::*;

use super::error::IcdError;

/// Each slab is carved out of a single 256 KiB device allocation.
const SLAB_SIZE: VkDeviceSize = 256 * 1024;

/// Smallest sub-allocation the pool is meant to hand out (64 KiB); not
/// enforced anywhere yet, hence the dead-code allow.
#[allow(dead_code)]
const MIN_ALLOCATION_SIZE: VkDeviceSize = 64 * 1024;

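/// Pool categories, distinguished by the Vulkan memory-property flags the
/// backing memory type must provide.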
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PoolType {
    DeviceLocal,
    HostVisibleCoherent,
    HostVisibleCached,
}

impl PoolType {
    pub fn required_flags(&self) -> VkMemoryPropertyFlags {
        match self {
            PoolType::DeviceLocal => VkMemoryPropertyFlags::DEVICE_LOCAL,
            PoolType::HostVisibleCoherent => {
                VkMemoryPropertyFlags::HOST_VISIBLE | VkMemoryPropertyFlags::HOST_COHERENT
            }
            PoolType::HostVisibleCached => {
                VkMemoryPropertyFlags::HOST_VISIBLE | VkMemoryPropertyFlags::HOST_CACHED
            }
        }
    }

    pub fn should_map(&self) -> bool {
        matches!(self, PoolType::HostVisibleCoherent | PoolType::HostVisibleCached)
    }
}

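/// One region handed out of a slab. Freed regions stay in the list with
/// `in_use == false`; the gap they leave can be reused by later allocations.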
#[derive(Debug)]
struct SubAllocation {
    offset: VkDeviceSize,
    size: VkDeviceSize,
    in_use: bool,
}

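/// A single `VkDeviceMemory` allocation that is sub-allocated with a simple
/// first-fit strategy. Host-visible slabs are persistently mapped once, and
/// sub-allocations get CPU pointers by offsetting into that mapping.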
struct MemorySlab {
    memory: VkDeviceMemory,
    size: VkDeviceSize,
    mapped_ptr: Option<*mut std::ffi::c_void>,
    allocations: Vec<SubAllocation>,
    free_space: VkDeviceSize,
}

// SAFETY: `mapped_ptr` is a raw pointer into a persistently mapped Vulkan
// allocation owned by the driver; slabs are only ever accessed behind the
// global POOL_ALLOCATOR mutex, which provides the needed synchronization.
unsafe impl Send for MemorySlab {}
unsafe impl Sync for MemorySlab {}

impl MemorySlab {
    /// First-fit sub-allocation: scan the live allocations in offset order
    /// and take the first gap (or the slab tail) that fits `size` at
    /// `alignment`.
    fn allocate(&mut self, size: VkDeviceSize, alignment: VkDeviceSize) -> Option<VkDeviceSize> {
        // Cheap reject; fragmentation can still make the scan below fail
        // even when enough total space is free.
        if self.free_space < size {
            return None;
        }

        // Reused holes are pushed at the end of the list, so restore offset
        // order before scanning; otherwise the gap search could hand out
        // overlapping regions.
        self.allocations.sort_by_key(|a| a.offset);

        let mut current_offset = 0;
        let mut found = None;

        for alloc in &self.allocations {
            if !alloc.in_use {
                continue;
            }

            // Round up to `alignment` (Vulkan guarantees the alignment from
            // vkGet*MemoryRequirements is a power of two).
            let aligned_offset = (current_offset + alignment - 1) & !(alignment - 1);
            if aligned_offset + size <= alloc.offset {
                found = Some(aligned_offset);
                break;
            }

            current_offset = alloc.offset + alloc.size;
        }

        // No gap between live allocations: fall back to the slab tail. The
        // record is pushed after the scan so the list is not mutated while
        // it is being iterated.
        let aligned_offset = match found {
            Some(offset) => offset,
            None => {
                let offset = (current_offset + alignment - 1) & !(alignment - 1);
                if offset + size > self.size {
                    return None;
                }
                offset
            }
        };

        self.allocations.push(SubAllocation {
            offset: aligned_offset,
            size,
            in_use: true,
        });
        self.free_space -= size;
        Some(aligned_offset)
    }

    fn free(&mut self, offset: VkDeviceSize) -> bool {
        // Only match the live entry at this offset; stale freed entries that
        // share the offset (after hole reuse) must be skipped.
        if let Some(alloc) = self
            .allocations
            .iter_mut()
            .find(|a| a.offset == offset && a.in_use)
        {
            alloc.in_use = false;
            self.free_space += alloc.size;
            return true;
        }
        false
    }
}

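/// All slabs of one pool type for one device, bound to a fixed memory type
/// index chosen at initialization.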
struct MemoryPool {
    device: VkDevice,
    pool_type: PoolType,
    memory_type_index: u32,
    slabs: Vec<MemorySlab>,
    total_allocated: VkDeviceSize,
}

impl MemoryPool {
    fn new(device: VkDevice, pool_type: PoolType, memory_type_index: u32) -> Self {
        Self {
            device,
            pool_type,
            memory_type_index,
            slabs: Vec::new(),
            total_allocated: 0,
        }
    }

    unsafe fn allocate(
        &mut self,
        size: VkDeviceSize,
        alignment: VkDeviceSize,
    ) -> Result<(VkDeviceMemory, VkDeviceSize, Option<*mut std::ffi::c_void>), IcdError> {
        // Try to sub-allocate from an existing slab first.
        for slab in &mut self.slabs {
            if let Some(offset) = slab.allocate(size, alignment) {
                let mapped_ptr = slab.mapped_ptr.map(|ptr| {
                    (ptr as *mut u8).add(offset as usize) as *mut std::ffi::c_void
                });
                return Ok((slab.memory, offset, mapped_ptr));
            }
        }

        // No slab has room: create a new one, at least SLAB_SIZE bytes so
        // oversized requests still fit in a single slab.
        let slab_size = SLAB_SIZE.max(size);

        let alloc_info = VkMemoryAllocateInfo {
            sType: VkStructureType::MemoryAllocateInfo,
            pNext: std::ptr::null(),
            allocationSize: slab_size,
            memoryTypeIndex: self.memory_type_index,
        };

        let mut memory = VkDeviceMemory::NULL;

        if let Some(icd) = super::icd_loader::get_icd() {
            if let Some(alloc_fn) = icd.allocate_memory {
                let result = alloc_fn(self.device, &alloc_info, std::ptr::null(), &mut memory);
                if result != VkResult::Success {
                    return Err(IcdError::VulkanError(result));
                }
            } else {
                return Err(IcdError::MissingFunction("vkAllocateMemory"));
            }
        } else {
            return Err(IcdError::NoIcdLoaded);
        }

        // Persistently map host-visible slabs once; sub-allocations derive
        // their CPU pointers by offsetting into this mapping. A mapping
        // failure is tolerated and simply leaves the slab unmapped.
        let mapped_ptr = if self.pool_type.should_map() {
            let mut ptr = std::ptr::null_mut();
            let mut mapped = None;
            if let Some(icd) = super::icd_loader::get_icd() {
                if let Some(map_fn) = icd.map_memory {
                    if map_fn(self.device, memory, 0, VK_WHOLE_SIZE, 0, &mut ptr)
                        == VkResult::Success
                    {
                        mapped = Some(ptr);
                    }
                }
            }
            mapped
        } else {
            None
        };

        let mut slab = MemorySlab {
            memory,
            size: slab_size,
            mapped_ptr,
            allocations: Vec::new(),
            free_space: slab_size,
        };

        // A fresh slab sized as SLAB_SIZE.max(size) always satisfies the
        // request at offset 0.
        let offset = slab
            .allocate(size, alignment)
            .expect("new slab should have space");

        let result_ptr = mapped_ptr.map(|ptr| {
            (ptr as *mut u8).add(offset as usize) as *mut std::ffi::c_void
        });

        self.slabs.push(slab);
        self.total_allocated += slab_size;

        Ok((memory, offset, result_ptr))
    }

    unsafe fn free(&mut self, memory: VkDeviceMemory, offset: VkDeviceSize) -> bool {
        for slab in &mut self.slabs {
            if slab.memory == memory {
                return slab.free(offset);
            }
        }
        false
    }
}

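/// An opaque, copyable description of one sub-allocation: which
/// `VkDeviceMemory` it lives in, at what offset and size, and (for
/// host-visible pools) the CPU pointer to its mapped bytes.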
#[derive(Debug, Clone, Copy)]
pub struct AllocationHandle {
    memory: VkDeviceMemory,
    offset: VkDeviceSize,
    size: VkDeviceSize,
    pool_type: PoolType,
    mapped_ptr: Option<*mut std::ffi::c_void>,
}

// SAFETY: the handle is a plain-data view of a sub-allocation; the raw
// `mapped_ptr` it may carry points into a slab mapping that is assumed to
// outlive the handle.
unsafe impl Send for AllocationHandle {}
unsafe impl Sync for AllocationHandle {}

impl AllocationHandle {
    pub fn memory(&self) -> VkDeviceMemory {
        self.memory
    }

    pub fn offset(&self) -> VkDeviceSize {
        self.offset
    }

    pub fn size(&self) -> VkDeviceSize {
        self.size
    }

    pub fn mapped_ptr(&self) -> Option<*mut std::ffi::c_void> {
        self.mapped_ptr
    }
}

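/// Process-wide allocator state: one pool per (device, pool type) pair plus
/// a registry of live sub-allocations keyed by monotonically increasing IDs.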
pub struct PoolAllocator {
    pools: HashMap<(u64, PoolType), MemoryPool>,
    allocations: HashMap<u64, AllocationHandle>,
    next_id: u64,
}

lazy_static::lazy_static! {
    static ref POOL_ALLOCATOR: Mutex<PoolAllocator> = Mutex::new(PoolAllocator {
        pools: HashMap::new(),
        allocations: HashMap::new(),
        next_id: 1,
    });
}

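/// Creates one pool per `PoolType` for `device`, picking the first memory
/// type whose property flags satisfy the pool. Must be called before any
/// `allocate_from_pool` for that device.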
pub unsafe fn initialize_pools(
    device: VkDevice,
    physical_device: VkPhysicalDevice,
) -> Result<(), IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;

    // Without the memory properties we cannot choose a memory type index for
    // any pool, so a missing ICD or entry point is an error rather than a
    // silent no-op.
    let mut mem_props = VkPhysicalDeviceMemoryProperties::default();
    if let Some(icd) = super::icd_loader::get_icd() {
        if let Some(get_props_fn) = icd.get_physical_device_memory_properties {
            get_props_fn(physical_device, &mut mem_props);
        } else {
            return Err(IcdError::MissingFunction(
                "vkGetPhysicalDeviceMemoryProperties",
            ));
        }
    } else {
        return Err(IcdError::NoIcdLoaded);
    }

    // For each pool type, use the first memory type whose property flags
    // contain everything the pool requires.
    for pool_type in &[
        PoolType::DeviceLocal,
        PoolType::HostVisibleCoherent,
        PoolType::HostVisibleCached,
    ] {
        let required_flags = pool_type.required_flags();

        for i in 0..mem_props.memoryTypeCount {
            let mem_type = &mem_props.memoryTypes[i as usize];
            if mem_type.propertyFlags.contains(required_flags) {
                let key = (device.as_raw(), *pool_type);
                allocator
                    .pools
                    .insert(key, MemoryPool::new(device, *pool_type, i));
                break;
            }
        }
    }

    Ok(())
}

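/// Sub-allocates `requirements.size` bytes at `requirements.alignment` from
/// the pool of the given type, growing the pool by a new slab if necessary.
/// Returns an ID that can later be resolved with `get_allocation`.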
pub unsafe fn allocate_from_pool(
    device: VkDevice,
    requirements: &VkMemoryRequirements,
    pool_type: PoolType,
) -> Result<u64, IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;

    let key = (device.as_raw(), pool_type);
    let pool = allocator
        .pools
        .get_mut(&key)
        .ok_or(IcdError::InvalidOperation("Pool not initialized"))?;

    let (memory, offset, mapped_ptr) = pool.allocate(requirements.size, requirements.alignment)?;

    let handle = AllocationHandle {
        memory,
        offset,
        size: requirements.size,
        pool_type,
        mapped_ptr,
    };

    let id = allocator.next_id;
    allocator.next_id += 1;
    allocator.allocations.insert(id, handle);

    Ok(id)
}

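/// Looks up a previously returned allocation ID and returns a copy of its
/// handle.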
pub fn get_allocation(id: u64) -> Result<AllocationHandle, IcdError> {
    let allocator = POOL_ALLOCATOR.lock()?;
    allocator
        .allocations
        .get(&id)
        .copied()
        .ok_or(IcdError::InvalidOperation("Invalid allocation ID"))
}

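/// Removes the allocation from the registry and returns its region to the
/// owning slab. The slab's device memory itself is kept for reuse.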
pub unsafe fn free_allocation(device: VkDevice, id: u64) -> Result<(), IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;

    let handle = allocator
        .allocations
        .remove(&id)
        .ok_or(IcdError::InvalidOperation("Invalid allocation ID"))?;

    let key = (device.as_raw(), handle.pool_type);
    if let Some(pool) = allocator.pools.get_mut(&key) {
        pool.free(handle.memory, handle.offset);
    }

    Ok(())
}

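/// A point-in-time snapshot of one pool's usage, as reported by
/// `get_pool_stats`.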
#[derive(Debug, Default)]
pub struct PoolStats {
    pub total_allocated: VkDeviceSize,
    pub total_slabs: usize,
    pub allocations_in_flight: usize,
}

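/// Reports usage statistics for the pool of the given type; returns default
/// (all-zero) stats if the pool was never initialized.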
pub fn get_pool_stats(device: VkDevice, pool_type: PoolType) -> Result<PoolStats, IcdError> {
    let allocator = POOL_ALLOCATOR.lock()?;

    let key = (device.as_raw(), pool_type);
    if let Some(pool) = allocator.pools.get(&key) {
        Ok(PoolStats {
            total_allocated: pool.total_allocated,
            total_slabs: pool.slabs.len(),
            allocations_in_flight: allocator
                .allocations
                .values()
                .filter(|a| a.pool_type == pool_type)
                .count(),
        })
    } else {
        Ok(PoolStats::default())
    }
}

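/// Convenience wrapper: queries `buffer`'s memory requirements, sub-allocates
/// from the requested pool, and binds the buffer to the resulting memory and
/// offset. On bind failure the sub-allocation is released before returning.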
pub unsafe fn allocate_buffer_memory(
    device: VkDevice,
    buffer: VkBuffer,
    pool_type: PoolType,
) -> Result<u64, IcdError> {
    // Query the buffer's size/alignment requirements. Defaulted (zero-sized)
    // requirements would silently produce a useless allocation, so a missing
    // entry point is an error.
    let mut requirements = VkMemoryRequirements::default();

    if let Some(icd) = super::icd_loader::get_icd() {
        if let Some(get_reqs_fn) = icd.get_buffer_memory_requirements {
            get_reqs_fn(device, buffer, &mut requirements);
        } else {
            return Err(IcdError::MissingFunction("vkGetBufferMemoryRequirements"));
        }
    } else {
        return Err(IcdError::NoIcdLoaded);
    }

    let allocation_id = allocate_from_pool(device, &requirements, pool_type)?;
    let handle = get_allocation(allocation_id)?;

    // Bind the buffer to its sub-allocation; on failure, release the
    // sub-allocation before propagating the error.
    if let Some(icd) = super::icd_loader::get_icd() {
        if let Some(bind_fn) = icd.bind_buffer_memory {
            let result = bind_fn(device, buffer, handle.memory, handle.offset);
            if result != VkResult::Success {
                free_allocation(device, allocation_id)?;
                return Err(IcdError::VulkanError(result));
            }
        } else {
            free_allocation(device, allocation_id)?;
            return Err(IcdError::MissingFunction("vkBindBufferMemory"));
        }
    }

    Ok(allocation_id)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pool_type_flags() {
        assert_eq!(
            PoolType::DeviceLocal.required_flags(),
            VkMemoryPropertyFlags::DEVICE_LOCAL
        );
        assert!(PoolType::HostVisibleCoherent.should_map());
        assert!(!PoolType::DeviceLocal.should_map());
    }

    #[test]
    fn test_slab_allocation() {
        let memory = VkDeviceMemory::from_raw(0x1234);
        let mut slab = MemorySlab {
            memory,
            size: 1024,
            mapped_ptr: None,
            allocations: Vec::new(),
            free_space: 1024,
        };

        let offset1 = slab.allocate(256, 16).unwrap();
        assert_eq!(offset1, 0);
        assert_eq!(slab.free_space, 768);

        let offset2 = slab.allocate(256, 16).unwrap();
        assert_eq!(offset2, 256);
        assert_eq!(slab.free_space, 512);

        assert!(slab.free(offset1));
        assert_eq!(slab.free_space, 768);
    }
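
    // A small sketch of the free-then-reuse path: after the first block is
    // freed, a smaller request should land back in the freed hole rather
    // than past the live allocations.
    #[test]
    fn test_slab_hole_reuse() {
        let memory = VkDeviceMemory::from_raw(0x1234);
        let mut slab = MemorySlab {
            memory,
            size: 1024,
            mapped_ptr: None,
            allocations: Vec::new(),
            free_space: 1024,
        };

        let offset1 = slab.allocate(256, 16).unwrap();
        let _offset2 = slab.allocate(256, 16).unwrap();
        assert!(slab.free(offset1));

        // The freed [0, 256) hole is reused for a 128-byte request.
        let offset3 = slab.allocate(128, 16).unwrap();
        assert_eq!(offset3, 0);
    }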
}