kronos_compute/implementation/pool_allocator.rs

//! Three-pool memory allocator for zero allocations in steady state.
//!
//! Pools:
//! 1. DEVICE_LOCAL - GPU-only memory
//! 2. HOST_VISIBLE|COHERENT - Pinned staging memory, persistently mapped
//! 3. HOST_VISIBLE|CACHED - Readback memory
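//!
//! A minimal usage sketch (hypothetical caller; `device`, `physical_device`,
//! `staging_buffer`, and `src` are assumed to exist and be valid):
//!
//! ```ignore
//! unsafe {
//!     // Once per device, right after device creation:
//!     initialize_pools(device, physical_device)?;
//!
//!     // Staging: the HOST_VISIBLE|COHERENT pool is persistently mapped,
//!     // so uploads write straight through the returned pointer.
//!     let id = allocate_buffer_memory(device, staging_buffer, PoolType::HostVisibleCoherent)?;
//!     if let Some(ptr) = get_allocation(id)?.mapped_ptr() {
//!         std::ptr::copy_nonoverlapping(src.as_ptr(), ptr as *mut u8, src.len());
//!     }
//!     free_allocation(device, id)?;
//! }
//! ```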

use std::collections::HashMap;
use std::sync::Mutex;
use crate::sys::*;
use crate::core::*;
use crate::ffi::*;
use super::error::IcdError;

/// Slab size for suballocation (256 KiB default)
const SLAB_SIZE: VkDeviceSize = 256 * 1024;

/// Minimum allocation size (64 KiB)
#[allow(dead_code)]
const MIN_ALLOCATION_SIZE: VkDeviceSize = 64 * 1024;

/// Memory pool types
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PoolType {
    /// GPU-only memory
    DeviceLocal,
    /// Pinned staging memory (persistently mapped)
    HostVisibleCoherent,
    /// Readback memory
    HostVisibleCached,
}

impl PoolType {
    /// Get required memory property flags
    pub fn required_flags(&self) -> VkMemoryPropertyFlags {
        match self {
            PoolType::DeviceLocal => VkMemoryPropertyFlags::DEVICE_LOCAL,
            PoolType::HostVisibleCoherent => {
                VkMemoryPropertyFlags::HOST_VISIBLE | VkMemoryPropertyFlags::HOST_COHERENT
            }
            PoolType::HostVisibleCached => {
                VkMemoryPropertyFlags::HOST_VISIBLE | VkMemoryPropertyFlags::HOST_CACHED
            }
        }
    }
    
    /// Check if pool should be persistently mapped
    pub fn should_map(&self) -> bool {
        matches!(self, PoolType::HostVisibleCoherent | PoolType::HostVisibleCached)
    }
}

/// A single allocation within a slab
#[derive(Debug)]
struct SubAllocation {
    offset: VkDeviceSize,
    size: VkDeviceSize,
    in_use: bool,
}

/// A slab of memory that can be subdivided
struct MemorySlab {
    memory: VkDeviceMemory,
    size: VkDeviceSize,
    mapped_ptr: Option<*mut std::ffi::c_void>,
    allocations: Vec<SubAllocation>,
    free_space: VkDeviceSize,
}

// SAFETY: the raw mapped pointer is just an address into driver-owned
// memory that lives as long as the slab, and all mutation of the slab
// is serialized behind the global POOL_ALLOCATOR mutex.
unsafe impl Send for MemorySlab {}
unsafe impl Sync for MemorySlab {}

impl MemorySlab {
    /// Try to allocate `size` bytes from this slab using a first-fit scan.
    ///
    /// `alignment` must be a power of two (Vulkan guarantees this for
    /// memory requirements), so offsets are rounded up with
    /// `(offset + alignment - 1) & !(alignment - 1)`; e.g. offset 300 with
    /// alignment 16 rounds to 304. Note that `free_space` does not account
    /// for alignment padding, so the early-out below is an optimistic fast
    /// path, not an exact fit test.
    fn allocate(&mut self, size: VkDeviceSize, alignment: VkDeviceSize) -> Option<VkDeviceSize> {
        if self.free_space < size {
            return None;
        }
        
        // First-fit scan. Live allocations are kept sorted by offset, so
        // walking the list visits the gaps from low to high addresses.
        let mut current_offset = 0;
        
        for idx in 0..self.allocations.len() {
            if !self.allocations[idx].in_use {
                continue;
            }
            
            // Check if we can fit in the gap before this allocation
            let aligned_offset = (current_offset + alignment - 1) & !(alignment - 1);
            if aligned_offset + size <= self.allocations[idx].offset {
                // Found a spot. Insert in place (not push) so the list
                // stays sorted and later scans still see the gaps correctly.
                self.allocations.insert(idx, SubAllocation {
                    offset: aligned_offset,
                    size,
                    in_use: true,
                });
                self.free_space -= size;
                return Some(aligned_offset);
            }
            
            current_offset = self.allocations[idx].offset + self.allocations[idx].size;
        }
        
        // Check if we can fit at the end
        let aligned_offset = (current_offset + alignment - 1) & !(alignment - 1);
        if aligned_offset + size <= self.size {
            self.allocations.push(SubAllocation {
                offset: aligned_offset,
                size,
                in_use: true,
            });
            self.free_space -= size;
            Some(aligned_offset)
        } else {
            None
        }
    }
    
    /// Free an allocation by offset
    fn free(&mut self, offset: VkDeviceSize) -> bool {
        // Match only live entries: a stale (already freed) entry at the
        // same offset must not shadow the allocation being released.
        if let Some(alloc) = self.allocations.iter_mut().find(|a| a.offset == offset && a.in_use) {
            alloc.in_use = false;
            self.free_space += alloc.size;
            return true;
        }
        false
    }
}

/// Memory pool for a specific type
struct MemoryPool {
    device: VkDevice,
    pool_type: PoolType,
    memory_type_index: u32,
    slabs: Vec<MemorySlab>,
    total_allocated: VkDeviceSize,
}

impl MemoryPool {
    fn new(device: VkDevice, pool_type: PoolType, memory_type_index: u32) -> Self {
        Self {
            device,
            pool_type,
            memory_type_index,
            slabs: Vec::new(),
            total_allocated: 0,
        }
    }
    
    /// Allocate memory from the pool
    ///
    /// # Safety
    ///
    /// This function is unsafe because:
    /// - Calls vkAllocateMemory through ICD function pointer
    /// - May call vkMapMemory for host-visible memory types
    /// - The device must be a valid VkDevice handle
    /// - Returned memory must be freed with vkFreeMemory
    /// - Mapped pointers are only valid while memory is allocated
    /// - Size and alignment must be within device limits
    unsafe fn allocate(
        &mut self,
        size: VkDeviceSize,
        alignment: VkDeviceSize,
    ) -> Result<(VkDeviceMemory, VkDeviceSize, Option<*mut std::ffi::c_void>), IcdError> {
        // Try existing slabs first
        for slab in &mut self.slabs {
            if let Some(offset) = slab.allocate(size, alignment) {
                let mapped_ptr = slab.mapped_ptr.map(|ptr| {
                    (ptr as *mut u8).add(offset as usize) as *mut std::ffi::c_void
                });
                return Ok((slab.memory, offset, mapped_ptr));
            }
        }
        
        // Need a new slab; oversized requests get a dedicated slab.
        let slab_size = SLAB_SIZE.max(size);
        
        let alloc_info = VkMemoryAllocateInfo {
            sType: VkStructureType::MemoryAllocateInfo,
            pNext: std::ptr::null(),
            allocationSize: slab_size,
            memoryTypeIndex: self.memory_type_index,
        };
        
        let mut memory = VkDeviceMemory::NULL;
        
        if let Some(icd) = super::icd_loader::get_icd() {
            if let Some(alloc_fn) = icd.allocate_memory {
                let result = alloc_fn(self.device, &alloc_info, std::ptr::null(), &mut memory);
                if result != VkResult::Success {
                    return Err(IcdError::VulkanError(result));
                }
            } else {
                return Err(IcdError::MissingFunction("vkAllocateMemory"));
            }
        } else {
            return Err(IcdError::NoIcdLoaded);
        }
        
        // Persistently map host-visible slabs once, up front. If mapping
        // fails, the slab is still usable but callers will see `None`.
        let mapped_ptr = if self.pool_type.should_map() {
            let mut ptr = std::ptr::null_mut();
            if let Some(icd) = super::icd_loader::get_icd() {
                if let Some(map_fn) = icd.map_memory {
                    let result = map_fn(self.device, memory, 0, VK_WHOLE_SIZE, 0, &mut ptr);
                    if result == VkResult::Success {
                        Some(ptr)
                    } else {
                        None
                    }
                } else {
                    None
                }
            } else {
                None
            }
        } else {
            None
        };
        
        // Create new slab
        let mut slab = MemorySlab {
            memory,
            size: slab_size,
            mapped_ptr,
            allocations: Vec::new(),
            free_space: slab_size,
        };
        
        // Allocate from the new slab; offset 0 always fits because
        // slab_size >= size.
        let offset = slab.allocate(size, alignment)
            .expect("New slab should have space");
        
        let result_ptr = mapped_ptr.map(|ptr| {
            (ptr as *mut u8).add(offset as usize) as *mut std::ffi::c_void
        });
        
        self.slabs.push(slab);
        self.total_allocated += slab_size;
        
        Ok((memory, offset, result_ptr))
    }
    
    /// Free an allocation
    ///
    /// # Safety
    ///
    /// This function is unsafe because:
    /// - The memory and offset must correspond to a valid allocation
    /// - The allocation must not be in use by the GPU
    /// - After freeing, any mapped pointers become invalid
    /// - Double-free will corrupt the allocator state
    unsafe fn free(&mut self, memory: VkDeviceMemory, offset: VkDeviceSize) -> bool {
        for slab in &mut self.slabs {
            if slab.memory == memory {
                return slab.free(offset);
            }
        }
        false
    }
}

/// Allocation handle
#[derive(Debug, Clone, Copy)]
pub struct AllocationHandle {
    memory: VkDeviceMemory,
    offset: VkDeviceSize,
    size: VkDeviceSize,
    pool_type: PoolType,
    mapped_ptr: Option<*mut std::ffi::c_void>,
}

// SAFETY: the handle is plain data. The optional mapped pointer targets
// driver-owned memory, and synchronizing access through it is the
// caller's responsibility (see the safety contracts on the allocation
// functions).
unsafe impl Send for AllocationHandle {}
unsafe impl Sync for AllocationHandle {}

impl AllocationHandle {
    /// Get the device memory handle
    pub fn memory(&self) -> VkDeviceMemory {
        self.memory
    }
    
    /// Get the offset within the memory
    pub fn offset(&self) -> VkDeviceSize {
        self.offset
    }
    
    /// Get the allocation size
    pub fn size(&self) -> VkDeviceSize {
        self.size
    }
    
    /// Get mapped pointer if available
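    ///
    /// For suballocations from pools where [`PoolType::should_map`] is
    /// true, this points directly at the suballocation's bytes for the
    /// allocation's whole lifetime (hypothetical `src` and `len`):
    ///
    /// ```ignore
    /// if let Some(ptr) = handle.mapped_ptr() {
    ///     unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), ptr as *mut u8, len) };
    /// }
    /// ```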
    pub fn mapped_ptr(&self) -> Option<*mut std::ffi::c_void> {
        self.mapped_ptr
    }
}

/// Global pool allocator
pub struct PoolAllocator {
    pools: HashMap<(u64, PoolType), MemoryPool>,
    allocations: HashMap<u64, AllocationHandle>,
    next_id: u64,
}

lazy_static::lazy_static! {
    static ref POOL_ALLOCATOR: Mutex<PoolAllocator> = Mutex::new(PoolAllocator {
        pools: HashMap::new(),
        allocations: HashMap::new(),
        next_id: 1,
    });
}

/// Initialize pools for a device
///
/// # Safety
///
/// This function is unsafe because:
/// - Both device and physical_device must be valid Vulkan handles
/// - Calls vkGetPhysicalDeviceMemoryProperties through ICD
/// - The device must have been created from the physical device
/// - Pools must be cleaned up before device destruction
/// - Thread safety is provided by the global POOL_ALLOCATOR mutex
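///
/// Typically called once per device, right after device creation
/// (hypothetical caller; handles assumed valid):
///
/// ```ignore
/// unsafe { initialize_pools(device, physical_device)? };
/// ```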
pub unsafe fn initialize_pools(
    device: VkDevice,
    physical_device: VkPhysicalDevice,
) -> Result<(), IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;
    
    // Get memory properties; fail loudly instead of leaving the pools
    // silently uninitialized.
    let icd = super::icd_loader::get_icd().ok_or(IcdError::NoIcdLoaded)?;
    let get_props_fn = icd.get_physical_device_memory_properties
        .ok_or(IcdError::MissingFunction("vkGetPhysicalDeviceMemoryProperties"))?;
    let mut mem_props = VkPhysicalDeviceMemoryProperties::default();
    get_props_fn(physical_device, &mut mem_props);
    
    // Find a memory type for each pool. Taking the first match is the
    // usual heuristic: Vulkan orders memory types so that preferable
    // (more specific) types come first.
    for pool_type in &[PoolType::DeviceLocal, PoolType::HostVisibleCoherent, PoolType::HostVisibleCached] {
        let required_flags = pool_type.required_flags();
        
        for i in 0..mem_props.memoryTypeCount {
            let mem_type = &mem_props.memoryTypes[i as usize];
            if mem_type.propertyFlags.contains(required_flags) {
                let key = (device.as_raw(), *pool_type);
                allocator.pools.insert(key, MemoryPool::new(device, *pool_type, i));
                break;
            }
        }
    }
    
    Ok(())
}

/// Allocate memory from appropriate pool
///
/// # Safety
///
/// This function is unsafe because:
/// - The device must be a valid VkDevice handle
/// - Pools must be initialized for the device first
/// - The requirements must be valid (from vkGetBufferMemoryRequirements etc.)
/// - The returned allocation ID must be freed with free_allocation
/// - Memory allocated is not bound to any resource yet
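///
/// A hypothetical call site (requirements obtained through the ICD):
///
/// ```ignore
/// let id = unsafe { allocate_from_pool(device, &requirements, PoolType::DeviceLocal)? };
/// let handle = get_allocation(id)?;
/// // handle.memory() and handle.offset() are what vkBindBufferMemory needs.
/// ```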
pub unsafe fn allocate_from_pool(
    device: VkDevice,
    requirements: &VkMemoryRequirements,
    pool_type: PoolType,
) -> Result<u64, IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;
    
    let key = (device.as_raw(), pool_type);
    let pool = allocator.pools.get_mut(&key)
        .ok_or(IcdError::InvalidOperation("Pool not initialized"))?;
    
    // The resource must actually support the pool's memory type; binding
    // to a type outside memoryTypeBits is a Vulkan validity violation.
    if requirements.memoryTypeBits & (1u32 << pool.memory_type_index) == 0 {
        return Err(IcdError::InvalidOperation("Resource incompatible with pool memory type"));
    }
    
    let (memory, offset, mapped_ptr) = pool.allocate(requirements.size, requirements.alignment)?;
    
    let handle = AllocationHandle {
        memory,
        offset,
        size: requirements.size,
        pool_type,
        mapped_ptr,
    };
    
    let id = allocator.next_id;
    allocator.next_id += 1;
    allocator.allocations.insert(id, handle);
    
    Ok(id)
}

/// Get allocation handle
pub fn get_allocation(id: u64) -> Result<AllocationHandle, IcdError> {
    let allocator = POOL_ALLOCATOR.lock()?;
    allocator.allocations.get(&id)
        .copied()
        .ok_or(IcdError::InvalidOperation("Invalid allocation ID"))
}

/// Free allocation
///
/// # Safety
///
/// This function is unsafe because:
/// - The device must be a valid VkDevice handle
/// - The allocation ID must be valid and not already freed
/// - Any resources bound to this memory must be destroyed first
/// - Any mapped pointers from this allocation become invalid
/// - GPU must not be using the memory
pub unsafe fn free_allocation(device: VkDevice, id: u64) -> Result<(), IcdError> {
    let mut allocator = POOL_ALLOCATOR.lock()?;
    
    let handle = allocator.allocations.remove(&id)
        .ok_or(IcdError::InvalidOperation("Invalid allocation ID"))?;
    
    let key = (device.as_raw(), handle.pool_type);
    if let Some(pool) = allocator.pools.get_mut(&key) {
        pool.free(handle.memory, handle.offset);
    }
    
    Ok(())
}

/// Pool usage statistics
#[derive(Debug, Default)]
pub struct PoolStats {
    pub total_allocated: VkDeviceSize,
    pub total_slabs: usize,
    pub allocations_in_flight: usize,
}

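/// Get pool statistics for one device/pool pair.
///
/// A hedged steady-state check (hypothetical caller): after warm-up, the
/// slab count should stay flat from frame to frame; growth means the
/// pools are undersized for the workload.
///
/// ```ignore
/// let before = get_pool_stats(device, PoolType::DeviceLocal)?.total_slabs;
/// // ... run one frame of dispatches ...
/// let after = get_pool_stats(device, PoolType::DeviceLocal)?.total_slabs;
/// assert_eq!(before, after);
/// ```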
pub fn get_pool_stats(device: VkDevice, pool_type: PoolType) -> Result<PoolStats, IcdError> {
    let allocator = POOL_ALLOCATOR.lock()?;
    
    let key = (device.as_raw(), pool_type);
    if let Some(pool) = allocator.pools.get(&key) {
        Ok(PoolStats {
            total_allocated: pool.total_allocated,
            total_slabs: pool.slabs.len(),
            // Note: handles do not record their device, so this counts
            // in-flight allocations of this pool type across all devices.
            allocations_in_flight: allocator.allocations.values()
                .filter(|a| a.pool_type == pool_type)
                .count(),
        })
    } else {
        Ok(PoolStats::default())
    }
}

/// Helper to allocate buffer memory
///
/// # Safety
///
/// This function is unsafe because:
/// - Both device and buffer must be valid Vulkan handles
/// - Calls vkGetBufferMemoryRequirements and vkBindBufferMemory
/// - The buffer must not already have memory bound
/// - The pool type must be compatible with buffer usage
/// - If binding fails, the allocation is freed before the error is returned
/// - The returned allocation ID owns the memory binding
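///
/// A minimal readback sketch (hypothetical caller; `readback_buffer` and
/// `device` assumed valid, pools initialized):
///
/// ```ignore
/// let id = unsafe { allocate_buffer_memory(device, readback_buffer, PoolType::HostVisibleCached)? };
/// let handle = get_allocation(id)?;
/// // After the GPU work completes (and, for non-coherent memory, after
/// // vkInvalidateMappedMemoryRanges):
/// if let Some(ptr) = handle.mapped_ptr() {
///     let bytes = unsafe { std::slice::from_raw_parts(ptr as *const u8, handle.size() as usize) };
/// }
/// ```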
pub unsafe fn allocate_buffer_memory(
    device: VkDevice,
    buffer: VkBuffer,
    pool_type: PoolType,
) -> Result<u64, IcdError> {
    // Resolve the ICD entry points up front: defaulted requirements
    // (size 0, alignment 0) or a silently skipped bind would corrupt
    // the allocator's bookkeeping.
    let (get_reqs_fn, bind_fn) = {
        let icd = super::icd_loader::get_icd().ok_or(IcdError::NoIcdLoaded)?;
        (
            icd.get_buffer_memory_requirements
                .ok_or(IcdError::MissingFunction("vkGetBufferMemoryRequirements"))?,
            icd.bind_buffer_memory
                .ok_or(IcdError::MissingFunction("vkBindBufferMemory"))?,
        )
    };
    
    let mut requirements = VkMemoryRequirements::default();
    get_reqs_fn(device, buffer, &mut requirements);
    
    let allocation_id = allocate_from_pool(device, &requirements, pool_type)?;
    let handle = get_allocation(allocation_id)?;
    
    // Bind buffer to memory; on failure, release the suballocation so
    // it is not leaked.
    let result = bind_fn(device, buffer, handle.memory, handle.offset);
    if result != VkResult::Success {
        free_allocation(device, allocation_id)?;
        return Err(IcdError::VulkanError(result));
    }
    
    Ok(allocation_id)
}

#[cfg(test)]
mod tests {
    use super::*;
    
    #[test]
    fn test_pool_type_flags() {
        assert_eq!(
            PoolType::DeviceLocal.required_flags(),
            VkMemoryPropertyFlags::DEVICE_LOCAL
        );
        assert!(PoolType::HostVisibleCoherent.should_map());
        assert!(!PoolType::DeviceLocal.should_map());
    }
    
    #[test]
    fn test_slab_allocation() {
        let memory = VkDeviceMemory::from_raw(0x1234);
        let mut slab = MemorySlab {
            memory,
            size: 1024,
            mapped_ptr: None,
            allocations: Vec::new(),
            free_space: 1024,
        };
        
        // Test allocation
        let offset1 = slab.allocate(256, 16).unwrap();
        assert_eq!(offset1, 0);
        assert_eq!(slab.free_space, 768);
        
        let offset2 = slab.allocate(256, 16).unwrap();
        assert_eq!(offset2, 256);
        assert_eq!(slab.free_space, 512);
        
        // Test free
        assert!(slab.free(offset1));
        assert_eq!(slab.free_space, 768);
    }
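    
    // A regression sketch for the first-fit scan: freeing a middle block
    // opens a gap that later allocations claim front to back. This relies
    // on `allocate` keeping the list sorted by offset; a plain `push`
    // would let two allocations claim the same gap.
    #[test]
    fn test_first_fit_gap_reuse() {
        let mut slab = MemorySlab {
            memory: VkDeviceMemory::from_raw(0x1234),
            size: 1024,
            mapped_ptr: None,
            allocations: Vec::new(),
            free_space: 1024,
        };
        
        let a = slab.allocate(256, 16).unwrap();
        let b = slab.allocate(256, 16).unwrap();
        let c = slab.allocate(256, 16).unwrap();
        assert_eq!((a, b, c), (0, 256, 512));
        
        // Free the middle block, then carve two 128-byte blocks out of it.
        assert!(slab.free(b));
        let d = slab.allocate(128, 16).unwrap();
        let e = slab.allocate(128, 16).unwrap();
        assert_eq!(d, 256);
        assert_eq!(e, 384);
        assert_ne!(d, e, "the two allocations must not overlap");
    }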
}