use std::error;
use std::fmt;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
#[cfg(target_os = "linux")]
use std::fs::File;
#[cfg(target_os = "linux")]
use std::os::unix::io::FromRawFd;
use check_errors;
use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::Content;
use memory::DedicatedAlloc;
use memory::ExternalMemoryHandleType;
use vk;
use Error;
use OomError;
use VulkanObject;
/// Represents memory that has been allocated from the device.
///
/// The allocation is freed (and the device's allocation counter decremented)
/// when this object is dropped.
pub struct DeviceMemory {
    // Raw Vulkan handle of the allocation.
    memory: vk::DeviceMemory,
    // Device the memory was allocated from; kept alive as long as the allocation.
    device: Arc<Device>,
    // Size in bytes that was requested at allocation time.
    size: usize,
    // Index of the memory type the allocation was made from.
    memory_type_index: u32,
    // External-memory handle types this allocation was declared exportable as
    // (none unless `DeviceMemoryBuilder::export_info` was used).
    handle_types: ExternalMemoryHandleType,
}
/// Builder for a `DeviceMemory` allocation, allowing optional extension
/// structures (dedicated allocation, export/import info) to be attached
/// before the allocation is performed.
pub struct DeviceMemoryBuilder<'a> {
    device: Arc<Device>,
    memory_type: MemoryType<'a>,
    // Core `vkAllocateMemory` parameters; head of the pNext chain.
    allocate: vk::MemoryAllocateInfo,
    // Optional VK_KHR_dedicated_allocation extension struct.
    dedicated_info: Option<vk::MemoryDedicatedAllocateInfoKHR>,
    // Optional VK_KHR_external_memory export struct.
    export_info: Option<vk::ExportMemoryAllocateInfo>,
    // Optional fd-import struct (no setter for it is visible in this file).
    import_info: Option<vk::ImportMemoryFdInfoKHR>,
    // Handle types recorded by `export_info`, copied into the resulting
    // `DeviceMemory`.
    handle_types: ExternalMemoryHandleType,
}
impl<'a> DeviceMemoryBuilder<'a> {
pub fn new(device: Arc<Device>, memory_type: MemoryType, size: usize) -> DeviceMemoryBuilder {
assert!(size > 0);
assert_eq!(
device.physical_device().internal_object(),
memory_type.physical_device().internal_object()
);
let allocate = vk::MemoryAllocateInfo {
sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
pNext: ptr::null(),
allocationSize: size as u64,
memoryTypeIndex: memory_type.id(),
};
DeviceMemoryBuilder {
device,
memory_type,
allocate,
dedicated_info: None,
export_info: None,
import_info: None,
handle_types: ExternalMemoryHandleType::none(),
}
}
pub fn dedicated_info(mut self, dedicated: DedicatedAlloc<'a>) -> DeviceMemoryBuilder {
assert!(self.dedicated_info.is_none());
if self.device.loaded_extensions().khr_dedicated_allocation {
self.dedicated_info = match dedicated {
DedicatedAlloc::Buffer(buffer) => Some(vk::MemoryDedicatedAllocateInfoKHR {
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
pNext: ptr::null(),
image: 0,
buffer: buffer.internal_object(),
}),
DedicatedAlloc::Image(image) => Some(vk::MemoryDedicatedAllocateInfoKHR {
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
pNext: ptr::null(),
image: image.internal_object(),
buffer: 0,
}),
DedicatedAlloc::None => return self,
};
let ptr = self
.dedicated_info
.as_ref()
.map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR)
.unwrap_or(ptr::null()) as *const _;
if let Some(ref mut export_info) = self.export_info {
export_info.pNext = ptr;
} else {
self.allocate.pNext = ptr;
}
}
self
}
pub fn export_info(
mut self,
handle_types: ExternalMemoryHandleType,
) -> DeviceMemoryBuilder<'a> {
assert!(self.export_info.is_none());
assert!(self.import_info.is_none());
assert!(self.device.loaded_extensions().khr_external_memory);
assert!(self.device.loaded_extensions().khr_external_memory_fd);
let handle_bits = handle_types.to_bits();
if handle_bits & vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT != 0 {
assert!(self.device.loaded_extensions().ext_external_memory_dmabuf);
}
let unsupported = handle_bits
& !(vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
| vk::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
assert!(unsupported == 0);
let export_info = vk::ExportMemoryAllocateInfo {
sType: vk::STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
pNext: ptr::null(),
handleTypes: handle_bits,
};
self.export_info = Some(export_info);
let ptr = self
.export_info
.as_ref()
.map(|i| i as *const vk::ExportMemoryAllocateInfo)
.unwrap_or(ptr::null()) as *const _;
if let Some(ref mut dedicated_info) = self.dedicated_info {
dedicated_info.pNext = ptr;
} else {
self.allocate.pNext = ptr;
}
self.handle_types = handle_types;
self
}
pub fn build(self) -> Result<DeviceMemory, DeviceMemoryAllocError> {
let reported_heap_size = self.memory_type.heap().size() as u64;
if reported_heap_size != 0 && self.allocate.allocationSize > reported_heap_size {
return Err(DeviceMemoryAllocError::OomError(
OomError::OutOfDeviceMemory,
));
}
let memory = unsafe {
let physical_device = self.device.physical_device();
let mut allocation_count = self
.device
.allocation_count()
.lock()
.expect("Poisoned mutex");
if *allocation_count >= physical_device.limits().max_memory_allocation_count() {
return Err(DeviceMemoryAllocError::TooManyObjects);
}
let vk = self.device.pointers();
let mut output = MaybeUninit::uninit();
check_errors(vk.AllocateMemory(
self.device.internal_object(),
&self.allocate,
ptr::null(),
output.as_mut_ptr(),
))?;
*allocation_count += 1;
output.assume_init()
};
Ok(DeviceMemory {
memory: memory,
device: self.device,
size: self.allocate.allocationSize as usize,
memory_type_index: self.memory_type.id(),
handle_types: self.handle_types,
})
}
}
impl DeviceMemory {
    /// Allocates a chunk of memory of `size` bytes from the given memory type
    /// of the device.
    ///
    /// # Panics
    ///
    /// - Panics if `size` is 0.
    /// - Panics if `memory_type` doesn't belong to the same physical device
    ///   as `device`.
    #[inline]
    pub fn alloc(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: usize,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        DeviceMemoryBuilder::new(device, memory_type, size).build()
    }

    /// Same as `alloc`, but also requests that the allocation be dedicated
    /// to the buffer or image in `resource`.
    #[inline]
    pub fn dedicated_alloc(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: usize,
        resource: DedicatedAlloc,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        DeviceMemoryBuilder::new(device, memory_type, size)
            .dedicated_info(resource)
            .build()
    }

    /// Same as `alloc`, but also maps the whole allocation into CPU address
    /// space.
    #[inline]
    pub fn alloc_and_map(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: usize,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        // Delegates to the dedicated variant with no dedicated resource.
        DeviceMemory::dedicated_alloc_and_map(device, memory_type, size, DedicatedAlloc::None)
    }

    /// Same as `dedicated_alloc`, but also maps the whole allocation into
    /// CPU address space.
    ///
    /// # Panics
    ///
    /// - Panics if `memory_type` is not host-visible (mapping would be
    ///   impossible otherwise).
    pub fn dedicated_alloc_and_map(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: usize,
        resource: DedicatedAlloc,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        let vk = device.pointers();

        assert!(memory_type.is_host_visible());
        let mem = DeviceMemory::dedicated_alloc(device.clone(), memory_type, size, resource)?;

        // Recorded so that later CPU accesses know whether they must
        // invalidate/flush ranges manually.
        let coherent = memory_type.is_host_coherent();

        let ptr = unsafe {
            let mut output = MaybeUninit::uninit();
            // Map the entire allocation (offset 0, full size, no flags).
            check_errors(vk.MapMemory(
                device.internal_object(),
                mem.memory,
                0,
                mem.size as vk::DeviceSize,
                0,
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        Ok(MappedDeviceMemory {
            memory: mem,
            pointer: ptr,
            coherent: coherent,
        })
    }

    /// Returns the memory type that this memory was allocated from.
    #[inline]
    pub fn memory_type(&self) -> MemoryType {
        // The index was validated at allocation time, so the lookup can't fail.
        self.device
            .physical_device()
            .memory_type_by_id(self.memory_type_index)
            .unwrap()
    }

    /// Returns the size in bytes of the memory allocation.
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }

    /// Exports the allocation as a file descriptor of the given handle type.
    ///
    /// # Panics
    ///
    /// - Panics if `handle_type` is not exactly opaque-fd or dma-buf.
    /// - Panics if `handle_type` was not declared via
    ///   `DeviceMemoryBuilder::export_info` when allocating.
    #[inline]
    #[cfg(target_os = "linux")]
    pub fn export_fd(
        &self,
        handle_type: ExternalMemoryHandleType,
    ) -> Result<File, DeviceMemoryAllocError> {
        let vk = self.device.pointers();

        // Exactly one of the two fd-based handle types must be requested.
        let bits = handle_type.to_bits();
        assert!(
            bits == vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
                || bits == vk::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT
        );
        // The requested type must have been declared at allocation time.
        assert!(handle_type.to_bits() & self.handle_types.to_bits() != 0);

        let fd = unsafe {
            let info = vk::MemoryGetFdInfoKHR {
                sType: vk::STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
                pNext: ptr::null(),
                memory: self.memory,
                handleType: handle_type.to_bits(),
            };

            let mut output = MaybeUninit::uninit();
            check_errors(vk.GetMemoryFdKHR(
                self.device.internal_object(),
                &info,
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        // The returned fd is owned by the caller; wrapping it in `File`
        // ensures it is closed on drop.
        let file = unsafe { File::from_raw_fd(fd) };
        Ok(file)
    }
}
unsafe impl DeviceOwned for DeviceMemory {
    /// Returns the device this memory was allocated from.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
impl fmt::Debug for DeviceMemory {
    /// Formats the allocation for debugging, showing its device, memory type
    /// and size; the raw Vulkan handle is not included.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut repr = fmt.debug_struct("DeviceMemory");
        repr.field("device", &*self.device);
        repr.field("memory_type", &self.memory_type());
        repr.field("size", &self.size);
        repr.finish()
    }
}
unsafe impl VulkanObject for DeviceMemory {
    type Object = vk::DeviceMemory;

    const TYPE: vk::ObjectType = vk::OBJECT_TYPE_DEVICE_MEMORY;

    /// Returns the raw `VkDeviceMemory` handle.
    #[inline]
    fn internal_object(&self) -> vk::DeviceMemory {
        self.memory
    }
}
impl Drop for DeviceMemory {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            // Free the allocation; the handle is never used again after this.
            let vk = self.device.pointers();
            vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
            // Keep the device-wide counter in sync with the increment
            // performed when the memory was allocated.
            let mut allocation_count = self
                .device
                .allocation_count()
                .lock()
                .expect("Poisoned mutex");
            *allocation_count -= 1;
        }
    }
}
/// A `DeviceMemory` whose entire range has been mapped into CPU address
/// space. Use `unmap` to get the allocation back.
pub struct MappedDeviceMemory {
    // The underlying allocation.
    memory: DeviceMemory,
    // Host address of the start of the mapping.
    pointer: *mut c_void,
    // Whether the memory type is host-coherent; if not, ranges must be
    // invalidated before reads and flushed after writes.
    coherent: bool,
}
impl MappedDeviceMemory {
    /// Unmaps the memory and returns the underlying (still allocated)
    /// `DeviceMemory`.
    pub fn unmap(self) -> DeviceMemory {
        unsafe {
            let device = self.memory.device();
            let vk = device.pointers();
            vk.UnmapMemory(device.internal_object(), self.memory.memory);
        }

        self.memory
    }

    /// Returns a `CpuAccess` giving read/write access to the bytes
    /// `range.start..range.end` of the mapping, reinterpreted as a `T`.
    ///
    /// For non-host-coherent memory, the range is invalidated first so that
    /// device writes become visible to the CPU.
    ///
    /// # Safety
    ///
    /// - `range` must lie within the mapped allocation and its bytes must be
    ///   a valid `T` (the `ref_from_ptr` result is unwrapped).
    /// - The caller must ensure there is no conflicting concurrent access to
    ///   the same bytes.
    ///
    /// NOTE(review): the offset/size handed to
    /// `vkInvalidateMappedMemoryRanges` are used as-is; confirm that callers
    /// respect the `nonCoherentAtomSize` alignment requirement.
    #[inline]
    pub unsafe fn read_write<T: ?Sized>(&self, range: Range<usize>) -> CpuAccess<T>
    where
        T: Content,
    {
        let vk = self.memory.device().pointers();
        // Byte offset into the mapping, with the element count derived from
        // the byte length of the requested range.
        let pointer = T::ref_from_ptr(
            (self.pointer as usize + range.start) as *mut _,
            range.end - range.start,
        )
        .unwrap();

        if !self.coherent {
            // This `range` shadows the byte range only inside this block.
            let range = vk::MappedMemoryRange {
                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                pNext: ptr::null(),
                memory: self.memory.internal_object(),
                offset: range.start as u64,
                size: (range.end - range.start) as u64,
            };
            // The driver result is not checked here.
            vk.InvalidateMappedMemoryRanges(self.memory.device().internal_object(), 1, &range);
        }

        CpuAccess {
            pointer: pointer,
            mem: self,
            coherent: self.coherent,
            range: range,
        }
    }
}
impl AsRef<DeviceMemory> for MappedDeviceMemory {
    /// Borrows the underlying allocation without unmapping it.
    #[inline]
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}
impl AsMut<DeviceMemory> for MappedDeviceMemory {
    /// Mutably borrows the underlying allocation without unmapping it.
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}
unsafe impl DeviceOwned for MappedDeviceMemory {
    /// Returns the device of the underlying allocation.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}
// SAFETY(review): the raw `pointer` field is what suppresses the auto impls.
// The mapping is only exposed through the `unsafe` `read_write` method, so
// sending/sharing the wrapper itself is presumably sound — confirm against
// how `read_write` callers synchronize access.
unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}
impl fmt::Debug for MappedDeviceMemory {
    /// Formats as a `MappedDeviceMemory(..)` tuple wrapping the inner
    /// allocation's debug output.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut repr = fmt.debug_tuple("MappedDeviceMemory");
        repr.field(&self.memory);
        repr.finish()
    }
}
/// Guard giving typed CPU access to a byte sub-range of a mapped allocation.
/// For non-host-coherent memory, the range is flushed when the guard drops.
pub struct CpuAccess<'a, T: ?Sized + 'a> {
    // Typed pointer into the mapping.
    pointer: *mut T,
    // The mapping this access was created from.
    mem: &'a MappedDeviceMemory,
    // Copied from the mapping; controls whether a flush happens on drop.
    coherent: bool,
    // Byte range of the access relative to the start of the mapping.
    range: Range<usize>,
}
impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
    /// Builds a new `CpuAccess` whose pointer is derived from the current
    /// one via `f`, keeping the same mapping, coherency flag and byte range.
    ///
    /// Note: `self` (a `Drop` type) is still dropped at the end of this
    /// call, exactly as in the original formulation.
    #[doc(hidden)]
    #[inline]
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U>
    where
        F: FnOnce(*mut T) -> *mut U,
    {
        let remapped = f(self.pointer);
        let tracked_range = self.range.clone();

        CpuAccess {
            pointer: remapped,
            mem: self.mem,
            coherent: self.coherent,
            range: tracked_range,
        }
    }
}
// NOTE(review): these impls are unconditional in `T`. Since `CpuAccess`
// derefs to `&T`/`&mut T`, bounds of `T: Send` / `T: Sync` would normally
// be expected here — confirm the unconditional form is intentional.
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}
impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
    type Target = T;

    /// Gives shared access to the pointed-to data.
    #[inline]
    fn deref(&self) -> &T {
        // Pointer validity is guaranteed by `read_write`'s safety contract.
        unsafe { &*self.pointer }
    }
}
impl<'a, T: ?Sized + 'a> DerefMut for CpuAccess<'a, T> {
    /// Gives exclusive access to the pointed-to data.
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // Pointer validity is guaranteed by `read_write`'s safety contract.
        unsafe { &mut *self.pointer }
    }
}
impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // For non-host-coherent memory, CPU writes must be flushed so the
        // device can see them. Host-coherent memory needs nothing.
        if !self.coherent {
            let vk = self.mem.as_ref().device().pointers();

            let range = vk::MappedMemoryRange {
                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                pNext: ptr::null(),
                memory: self.mem.as_ref().internal_object(),
                offset: self.range.start as u64,
                size: (self.range.end - self.range.start) as u64,
            };

            // Errors cannot be reported from `drop`, so the result of the
            // flush is ignored.
            unsafe {
                vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(), 1, &range);
            }
        }
    }
}
/// Error that can happen when allocating device memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DeviceMemoryAllocError {
    /// Not enough host or device memory was available.
    OomError(OomError),
    /// The implementation's maximum number of active allocations has been
    /// reached.
    TooManyObjects,
    /// Mapping the memory into CPU address space failed.
    MemoryMapFailed,
}
impl error::Error for DeviceMemoryAllocError {
    /// Returns the underlying `OomError`, if any.
    ///
    /// Implements `source` rather than the deprecated `cause`; the default
    /// `cause` delegates to `source`, so existing callers are unaffected.
    #[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match *self {
            DeviceMemoryAllocError::OomError(ref err) => Some(err),
            _ => None,
        }
    }
}
impl fmt::Display for DeviceMemoryAllocError {
    /// Writes a short human-readable description of the error.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        let description = match *self {
            DeviceMemoryAllocError::OomError(_) => "not enough memory available",
            DeviceMemoryAllocError::TooManyObjects => {
                "the maximum number of allocations has been exceeded"
            }
            DeviceMemoryAllocError::MemoryMapFailed => "memory map failed",
        };
        write!(fmt, "{}", description)
    }
}
impl From<Error> for DeviceMemoryAllocError {
#[inline]
fn from(err: Error) -> DeviceMemoryAllocError {
match err {
e @ Error::OutOfHostMemory | e @ Error::OutOfDeviceMemory => {
DeviceMemoryAllocError::OomError(e.into())
}
Error::TooManyObjects => DeviceMemoryAllocError::TooManyObjects,
Error::MemoryMapFailed => DeviceMemoryAllocError::MemoryMapFailed,
_ => panic!("unexpected error: {:?}", err),
}
}
}
impl From<OomError> for DeviceMemoryAllocError {
    /// Wraps an out-of-memory error into the allocation error type.
    #[inline]
    fn from(err: OomError) -> DeviceMemoryAllocError {
        DeviceMemoryAllocError::OomError(err)
    }
}
#[cfg(test)]
mod tests {
    use memory::DeviceMemory;
    use memory::DeviceMemoryAllocError;
    use OomError;

    #[test]
    fn create() {
        // A small allocation from the first reported memory type must succeed.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
    }

    #[test]
    fn zero_size() {
        // Zero-sized allocations are a programming error and must panic.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_should_panic!({
            let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0);
        });
    }

    #[test]
    #[cfg(target_pointer_width = "64")]
    fn oom_single() {
        // An allocation far larger than any heap must fail with OOM up front.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .find(|m| !m.is_lazily_allocated())
            .unwrap();

        match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
            Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => (),
            _ => panic!(),
        }
    }

    #[test]
    #[ignore]
    fn oom_multi() {
        // Allocating a third of the heap four times must eventually OOM.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .find(|m| !m.is_lazily_allocated())
            .unwrap();
        let heap_size = mem_ty.heap().size();

        // Successful allocations are kept alive so they keep counting
        // against the heap.
        let mut allocs = Vec::new();
        for _ in 0..4 {
            match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
                Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => return,
                Ok(a) => allocs.push(a),
                _ => (),
            }
        }

        panic!()
    }

    #[test]
    fn allocation_count() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 0);

        // `_mem1` (not a bare `_`) so the allocation stays alive until the
        // end of the test; `let _ = ...` would drop it immediately.
        let _mem1 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);

        {
            let _mem2 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
            assert_eq!(*device.allocation_count().lock().unwrap(), 2);
        }

        // `_mem2` was dropped at the end of the inner scope, which must have
        // decremented the counter.
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
    }
}