use std::cmp;
use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;
use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::DeviceMemoryAllocError;
use memory::MappedDeviceMemory;
/// Memory pool that suballocates from a single host-visible memory type.
///
/// Device memory is allocated in large mapped blocks on demand; individual
/// allocations are carved out of those blocks and returned to the pool when
/// the corresponding `StdHostVisibleMemoryTypePoolAlloc` is dropped.
#[derive(Debug)]
pub struct StdHostVisibleMemoryTypePool {
// Device all blocks of this pool are allocated from.
device: Arc<Device>,
// Index of the memory type (within `device`'s physical device) to allocate from.
memory_type: u32,
// One element per allocated block: the mapped memory plus the list of
// currently-occupied byte ranges inside it, kept sorted by start offset.
occupied: Mutex<Vec<(Arc<MappedDeviceMemory>, Vec<Range<usize>>)>>,
}
impl StdHostVisibleMemoryTypePool {
#[inline]
pub fn new(device: Arc<Device>, memory_type: MemoryType) -> Arc<StdHostVisibleMemoryTypePool> {
assert_eq!(&**device.physical_device().instance() as *const Instance,
&**memory_type.physical_device().instance() as *const Instance);
assert_eq!(device.physical_device().index(),
memory_type.physical_device().index());
assert!(memory_type.is_host_visible());
Arc::new(StdHostVisibleMemoryTypePool {
device: device.clone(),
memory_type: memory_type.id(),
occupied: Mutex::new(Vec::new()),
})
}
pub fn alloc(me: &Arc<Self>, size: usize, alignment: usize)
-> Result<StdHostVisibleMemoryTypePoolAlloc, DeviceMemoryAllocError> {
assert!(size != 0);
assert!(alignment != 0);
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}
let mut occupied = me.occupied.lock().unwrap();
for &mut (ref dev_mem, ref mut entries) in occupied.iter_mut() {
for i in 0 .. entries.len().saturating_sub(1) {
let entry1 = entries[i].clone();
let entry1_end = align(entry1.end, alignment);
let entry2 = entries[i + 1].clone();
if entry1_end + size <= entry2.start {
entries.insert(i + 1, entry1_end .. entry1_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size: size,
});
}
}
let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
if last_end + size <= (**dev_mem).as_ref().size() {
entries.push(last_end .. last_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size: size,
});
}
}
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block =
DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc)?;
Arc::new(new_block)
};
occupied.push((new_block.clone(), vec![0 .. size]));
Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: new_block,
offset: 0,
size: size,
})
}
#[inline]
pub fn device(&self) -> &Arc<Device> {
&self.device
}
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device
.physical_device()
.memory_type_by_id(self.memory_type)
.unwrap()
}
}
/// A suballocation handed out by [`StdHostVisibleMemoryTypePool`].
///
/// Releases its byte range back to the pool when dropped.
#[derive(Debug)]
pub struct StdHostVisibleMemoryTypePoolAlloc {
// Pool the range must be returned to on drop.
pool: Arc<StdHostVisibleMemoryTypePool>,
// Mapped block this allocation lives in.
memory: Arc<MappedDeviceMemory>,
// Byte offset of the allocation within `memory`.
offset: usize,
// Size of the allocation in bytes.
size: usize,
}
impl StdHostVisibleMemoryTypePoolAlloc {
    /// Returns the mapped memory block that this allocation lives in.
    #[inline]
    pub fn memory(&self) -> &MappedDeviceMemory {
        // Explicit deref through the `Arc`.
        &*self.memory
    }

    /// Returns the byte offset of this allocation within its block.
    #[inline]
    pub fn offset(&self) -> usize {
        self.offset
    }

    /// Returns the size of this allocation in bytes.
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }
}
impl Drop for StdHostVisibleMemoryTypePoolAlloc {
fn drop(&mut self) {
let mut occupied = self.pool.occupied.lock().unwrap();
let entries = occupied
.iter_mut()
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory)
.unwrap();
entries.1.retain(|e| e.start != self.offset);
}
}