use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
use std::sync::Mutex;
#[cfg(target_os = "linux")]
use std::fs::File;
#[cfg(target_os = "linux")]
use std::os::unix::io::{FromRawFd, IntoRawFd};
use crate::check_errors;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::instance::MemoryType;
use crate::memory::Content;
use crate::memory::DedicatedAlloc;
use crate::memory::ExternalMemoryHandleType;
use crate::vk;
use crate::Error;
use crate::OomError;
use crate::VulkanObject;
/// Mirror of the C `VkBaseOutStructure`: the common header shared by every
/// extensible Vulkan struct (an `sType` tag followed by a `pNext` pointer).
/// Used to walk and splice `pNext` extension chains generically.
#[repr(C)]
pub struct BaseOutStructure {
    // Vulkan structure type tag (`VkStructureType`).
    pub s_type: i32,
    // Next struct in the `pNext` chain, or null at the tail.
    pub p_next: *mut BaseOutStructure,
}
/// Iterates over a Vulkan `pNext` chain starting at `ptr`, yielding each
/// link (including the head struct itself) as a `*mut BaseOutStructure`.
///
/// # Safety
/// `T` must be a `#[repr(C)]` Vulkan struct whose first two fields match
/// `BaseOutStructure` (`s_type` then `p_next`), and every `p_next` in the
/// chain must be null or point to another such struct.
pub(crate) unsafe fn ptr_chain_iter<T>(ptr: &mut T) -> impl Iterator<Item = *mut BaseOutStructure> {
    // Reinterpret the head struct as the common base header.
    let ptr: *mut BaseOutStructure = ptr as *mut T as _;
    (0..).scan(ptr, |p_ptr, _| {
        // `is_null` auto-derefs to the current cursor pointer.
        if p_ptr.is_null() {
            return None;
        }
        // Advance the cursor to the next link before yielding the current one.
        let n_ptr = (**p_ptr).p_next as *mut BaseOutStructure;
        let old = *p_ptr;
        *p_ptr = n_ptr;
        Some(old)
    })
}
/// Marker trait for structs that may be chained onto `VkMemoryAllocateInfo`
/// through its `pNext` pointer.
///
/// # Safety
/// Implementors must be `#[repr(C)]` structs that the Vulkan spec allows in
/// a `VkMemoryAllocateInfo` chain and that start with an `sType`/`pNext`
/// header compatible with `BaseOutStructure`.
pub unsafe trait ExtendsMemoryAllocateInfo {}
// SAFETY: these are extension structs the spec permits in the
// `VkMemoryAllocateInfo` `pNext` chain.
unsafe impl ExtendsMemoryAllocateInfo for vk::MemoryDedicatedAllocateInfoKHR {}
unsafe impl ExtendsMemoryAllocateInfo for vk::ExportMemoryAllocateInfo {}
unsafe impl ExtendsMemoryAllocateInfo for vk::ImportMemoryFdInfoKHR {}
/// A chunk of memory allocated from the device with `vkAllocateMemory`;
/// freed (and the device allocation counter decremented) on drop.
pub struct DeviceMemory {
    // Raw Vulkan handle.
    memory: vk::DeviceMemory,
    device: Arc<Device>,
    // Allocation size in bytes, as requested.
    size: usize,
    // Index into the physical device's memory types.
    memory_type_index: u32,
    // External-memory handle types this allocation may be exported as.
    handle_types: ExternalMemoryHandleType,
    // Whether a `DeviceMemoryMapping` currently maps this allocation.
    mapped: Mutex<bool>,
}
/// Incrementally builds a `VkMemoryAllocateInfo` (plus optional extension
/// structs chained through `pNext`) and performs the allocation in `build`.
pub struct DeviceMemoryBuilder<'a> {
    device: Arc<Device>,
    // Head of the allocation-info `pNext` chain.
    allocate: vk::MemoryAllocateInfo,
    // NOTE(review): `allocate.pNext` is linked to these structs while they
    // are still stack locals inside the builder methods, and they are moved
    // into these fields afterwards — confirm the chain is still valid by the
    // time `build` hands it to `vkAllocateMemory`.
    dedicated_info: Option<vk::MemoryDedicatedAllocateInfoKHR>,
    export_info: Option<vk::ExportMemoryAllocateInfo>,
    import_info: Option<vk::ImportMemoryFdInfoKHR>,
    // Ties borrowed dedicated-allocation resources to the builder's lifetime.
    marker: PhantomData<&'a ()>,
}
impl<'a> DeviceMemoryBuilder<'a> {
/// Starts building an allocation of `size` bytes from the memory type with
/// index `memory_index`, with an initially empty `pNext` chain.
pub fn new(device: Arc<Device>, memory_index: u32, size: usize) -> DeviceMemoryBuilder<'a> {
    DeviceMemoryBuilder {
        allocate: vk::MemoryAllocateInfo {
            sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            pNext: ptr::null(),
            allocationSize: size as u64,
            memoryTypeIndex: memory_index,
        },
        device,
        dedicated_info: None,
        export_info: None,
        import_info: None,
        marker: PhantomData,
    }
}
/// Requests a dedicated allocation bound to the given buffer or image
/// (`VK_KHR_dedicated_allocation`). A no-op for `DedicatedAlloc::None`.
///
/// # Panics
/// Panics if dedicated-allocation info was already set on this builder.
pub fn dedicated_info(mut self, dedicated: DedicatedAlloc<'a>) -> DeviceMemoryBuilder {
    assert!(self.dedicated_info.is_none());
    let mut dedicated_info = match dedicated {
        // Exactly one of `image`/`buffer` is set; 0 is the Vulkan null handle.
        DedicatedAlloc::Buffer(buffer) => vk::MemoryDedicatedAllocateInfoKHR {
            sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
            pNext: ptr::null(),
            image: 0,
            buffer: buffer.internal_object(),
        },
        DedicatedAlloc::Image(image) => vk::MemoryDedicatedAllocateInfoKHR {
            sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
            pNext: ptr::null(),
            image: image.internal_object(),
            buffer: 0,
        },
        DedicatedAlloc::None => return self,
    };
    // NOTE(review): the pNext chain is linked to the stack-local
    // `dedicated_info` here, which is then moved into `self.dedicated_info`
    // below — the chained pointer looks like it dangles after the move.
    // Verify before `build()` dereferences the chain.
    self = self.push_next(&mut dedicated_info);
    self.dedicated_info = Some(dedicated_info);
    self
}
/// Requests that the allocation be exportable as the given external-memory
/// handle types (`VkExportMemoryAllocateInfo`).
///
/// # Panics
/// Panics if export info was already set on this builder.
pub fn export_info(
    mut self,
    handle_types: ExternalMemoryHandleType,
) -> DeviceMemoryBuilder<'a> {
    assert!(self.export_info.is_none());
    let mut export_info = vk::ExportMemoryAllocateInfo {
        sType: vk::STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
        pNext: ptr::null(),
        handleTypes: handle_types.into(),
    };
    // NOTE(review): as with `dedicated_info`, the chain is linked to the
    // stack-local `export_info` before it is moved into the builder field —
    // verify the pointer is still valid when `build()` runs.
    self = self.push_next(&mut export_info);
    self.export_info = Some(export_info);
    self
}
/// Requests that the allocation import the memory behind `fd`
/// (`VkImportMemoryFdInfoKHR`). Ownership of the file descriptor is
/// transferred to Vulkan via `into_raw_fd`.
///
/// # Panics
/// Panics if import info was already set on this builder.
#[cfg(target_os = "linux")]
pub fn import_info(
    mut self,
    fd: File,
    handle_types: ExternalMemoryHandleType,
) -> DeviceMemoryBuilder<'a> {
    assert!(self.import_info.is_none());
    let mut import_info = vk::ImportMemoryFdInfoKHR {
        sType: vk::STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
        pNext: ptr::null(),
        handleType: handle_types.into(),
        fd: fd.into_raw_fd(),
    };
    // NOTE(review): the chain is linked to the stack-local `import_info`
    // before it is moved into the builder field — verify the pointer is
    // still valid when `build()` runs.
    self = self.push_next(&mut import_info);
    self.import_info = Some(import_info);
    self
}
/// Appends `next` to the end of the `pNext` chain rooted at `self.allocate`.
fn push_next<T: ExtendsMemoryAllocateInfo>(mut self, next: &mut T) -> DeviceMemoryBuilder<'a> {
    unsafe {
        let next_ptr = next as *mut T as *mut BaseOutStructure;
        // Walk the chain starting at the head `MemoryAllocateInfo` itself.
        // The head always exists, so the iterator yields at least one
        // element and `last()` cannot be `None`. (The previous code started
        // the walk from a local copy of `allocate.pNext`, which panicked on
        // an empty chain and reinterpreted the stack pointer variable itself
        // as a `BaseOutStructure`.)
        let last_next = ptr_chain_iter(&mut self.allocate).last().unwrap();
        (*last_next).p_next = next_ptr as _;
    }
    self
}
/// Validates the requested allocation against the Vulkan valid-usage rules
/// and required device extensions, then calls `vkAllocateMemory`.
///
/// # Errors
/// Returns `InvalidSize` for a zero-sized request, `SpecViolation` /
/// `ImplicitSpecViolation` for failed VUID checks, `MissingExtension` when a
/// required extension is not loaded, `TooManyObjects` when the device's
/// allocation limit is reached, or the mapped Vulkan error from the
/// allocation call itself.
pub fn build(self) -> Result<Arc<DeviceMemory>, DeviceMemoryAllocError> {
    // Zero-sized allocations are forbidden.
    if self.allocate.allocationSize == 0 {
        // `return Err(..)?` was redundant; return the error directly.
        return Err(DeviceMemoryAllocError::InvalidSize);
    }
    // The memory type index must exist on this physical device (VUID 1714).
    let memory_type = self
        .device
        .physical_device()
        .memory_type_by_id(self.allocate.memoryTypeIndex)
        .ok_or(DeviceMemoryAllocError::SpecViolation(1714))?;
    if self.device.physical_device().internal_object()
        != memory_type.physical_device().internal_object()
    {
        return Err(DeviceMemoryAllocError::SpecViolation(1714));
    }
    // The allocation must fit in the heap; a reported heap size of 0 means
    // the size is unknown and the check is skipped (VUID 1713).
    let reported_heap_size = memory_type.heap().size() as u64;
    if reported_heap_size != 0 && self.allocate.allocationSize > reported_heap_size {
        return Err(DeviceMemoryAllocError::SpecViolation(1713));
    }
    let mut export_handle_bits = 0;
    // Dedicated allocation requires VK_KHR_dedicated_allocation.
    if self.dedicated_info.is_some() {
        if !self.device.loaded_extensions().khr_dedicated_allocation {
            return Err(DeviceMemoryAllocError::MissingExtension(
                "khr_dedicated_allocation",
            ));
        }
    }
    // Export/import of external memory requires the matching extensions for
    // every handle type that was requested.
    if self.export_info.is_some() || self.import_info.is_some() {
        export_handle_bits = match self.export_info {
            Some(export_info) => export_info.handleTypes,
            None => 0,
        };
        let import_handle_bits = match self.import_info {
            Some(import_info) => import_info.handleType,
            None => 0,
        };
        if export_handle_bits & vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT != 0 {
            if !self.device.loaded_extensions().ext_external_memory_dmabuf {
                return Err(DeviceMemoryAllocError::MissingExtension(
                    "ext_external_memory_dmabuf",
                ));
            }
        }
        if export_handle_bits & vk::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT != 0 {
            if !self.device.loaded_extensions().khr_external_memory_fd {
                return Err(DeviceMemoryAllocError::MissingExtension(
                    "khr_external_memory_fd",
                ));
            }
        }
        if import_handle_bits & vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT != 0 {
            if !self.device.loaded_extensions().ext_external_memory_dmabuf {
                return Err(DeviceMemoryAllocError::MissingExtension(
                    "ext_external_memory_dmabuf",
                ));
            }
        }
        if import_handle_bits & vk::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT != 0 {
            if !self.device.loaded_extensions().khr_external_memory_fd {
                return Err(DeviceMemoryAllocError::MissingExtension(
                    "khr_external_memory_fd",
                ));
            }
        }
    }
    let memory = unsafe {
        let physical_device = self.device.physical_device();
        // Hold the counter lock across the allocation so the limit check
        // and the increment are atomic with respect to other allocations.
        let mut allocation_count = self
            .device
            .allocation_count()
            .lock()
            .expect("Poisoned mutex");
        if *allocation_count >= physical_device.limits().max_memory_allocation_count() {
            return Err(DeviceMemoryAllocError::TooManyObjects);
        }
        let vk = self.device.pointers();
        let mut output = MaybeUninit::uninit();
        // NOTE(review): `allocate.pNext` may reference extension structs
        // that were linked while still stack locals (see builder methods);
        // confirm the chain is valid here.
        check_errors(vk.AllocateMemory(
            self.device.internal_object(),
            &self.allocate,
            ptr::null(),
            output.as_mut_ptr(),
        ))?;
        *allocation_count += 1;
        output.assume_init()
    };
    Ok(Arc::new(DeviceMemory {
        memory,
        device: self.device,
        size: self.allocate.allocationSize as usize,
        memory_type_index: self.allocate.memoryTypeIndex,
        handle_types: ExternalMemoryHandleType::from(export_handle_bits),
        mapped: Mutex::new(false),
    }))
}
}
impl DeviceMemory {
/// Allocates `size` bytes from the given memory type.
#[inline]
pub fn alloc(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
    DeviceMemoryBuilder::new(device, memory_type.id(), size)
        .build()
        // The builder just created this Arc, so we hold the only reference
        // and unwrapping cannot fail.
        .map(|arc| Arc::try_unwrap(arc).unwrap())
}
/// Allocates `size` bytes dedicated to the given buffer or image.
#[inline]
pub fn dedicated_alloc(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
    resource: DedicatedAlloc,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
    DeviceMemoryBuilder::new(device, memory_type.id(), size)
        .dedicated_info(resource)
        .build()
        // The builder just created this Arc, so unwrapping cannot fail.
        .map(|arc| Arc::try_unwrap(arc).unwrap())
}
#[inline]
pub fn alloc_and_map(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
DeviceMemory::dedicated_alloc_and_map(device, memory_type, size, DedicatedAlloc::None)
}
/// Allocates host-visible memory (optionally dedicated to `resource`) and
/// maps the whole allocation.
///
/// # Panics
/// Panics if `memory_type` is not host-visible.
pub fn dedicated_alloc_and_map(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
    resource: DedicatedAlloc,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
    // The mapping below requires a host-visible memory type.
    assert!(memory_type.is_host_visible());
    let mem = DeviceMemory::dedicated_alloc(device.clone(), memory_type, size, resource)?;
    // The unused `device.pointers()` binding was removed, and `device` is
    // not borrowed here, so the final clone was redundant.
    Self::map_allocation(device, mem)
}
/// Same as `alloc`, but the allocation can later be exported as an opaque
/// POSIX file descriptor.
#[inline]
#[cfg(target_os = "linux")]
pub fn alloc_with_exportable_fd(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
    let handle_types = ExternalMemoryHandleType {
        opaque_fd: true,
        ..ExternalMemoryHandleType::none()
    };
    DeviceMemoryBuilder::new(device, memory_type.id(), size)
        .export_info(handle_types)
        .build()
        // The builder just created this Arc, so unwrapping cannot fail.
        .map(|arc| Arc::try_unwrap(arc).unwrap())
}
/// Same as `dedicated_alloc`, but the allocation can later be exported as an
/// opaque POSIX file descriptor.
#[inline]
#[cfg(target_os = "linux")]
pub fn dedicated_alloc_with_exportable_fd(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
    resource: DedicatedAlloc,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
    let handle_types = ExternalMemoryHandleType {
        opaque_fd: true,
        ..ExternalMemoryHandleType::none()
    };
    DeviceMemoryBuilder::new(device, memory_type.id(), size)
        .export_info(handle_types)
        .dedicated_info(resource)
        .build()
        // The builder just created this Arc, so unwrapping cannot fail.
        .map(|arc| Arc::try_unwrap(arc).unwrap())
}
#[inline]
#[cfg(target_os = "linux")]
pub fn alloc_and_map_with_exportable_fd(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
DeviceMemory::dedicated_alloc_and_map_with_exportable_fd(
device,
memory_type,
size,
DedicatedAlloc::None,
)
}
/// Same as `dedicated_alloc_and_map`, but the allocation can later be
/// exported as an opaque POSIX file descriptor.
///
/// # Panics
/// Panics if `memory_type` is not host-visible.
#[inline]
#[cfg(target_os = "linux")]
pub fn dedicated_alloc_and_map_with_exportable_fd(
    device: Arc<Device>,
    memory_type: MemoryType,
    size: usize,
    resource: DedicatedAlloc,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
    // The mapping below requires a host-visible memory type.
    assert!(memory_type.is_host_visible());
    let mem = DeviceMemory::dedicated_alloc_with_exportable_fd(
        device.clone(),
        memory_type,
        size,
        resource,
    )?;
    // The unused `device.pointers()` binding was removed, and `device` is
    // not borrowed here, so the final clone was redundant.
    Self::map_allocation(device, mem)
}
/// Maps the entire allocation with `vkMapMemory` and wraps it in a
/// `MappedDeviceMemory`. The caller must have checked that the memory type
/// is host-visible.
fn map_allocation(
    device: Arc<Device>,
    mem: DeviceMemory,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
    let vk = device.pointers();
    // Non-coherent memory requires explicit flush/invalidate around host
    // access; remember that here for `CpuAccess`.
    let coherent = mem.memory_type().is_host_coherent();
    let ptr = unsafe {
        let mut output = MaybeUninit::uninit();
        check_errors(vk.MapMemory(
            device.internal_object(),
            mem.memory,
            0,
            mem.size as vk::DeviceSize,
            0,
            output.as_mut_ptr(),
        ))?;
        output.assume_init()
    };
    Ok(MappedDeviceMemory {
        memory: mem,
        pointer: ptr,
        coherent,
    })
}
/// Returns the memory type this allocation was made from.
#[inline]
pub fn memory_type(&self) -> MemoryType {
    let physical = self.device.physical_device();
    // The index was validated at allocation time, so the lookup succeeds.
    physical.memory_type_by_id(self.memory_type_index).unwrap()
}
/// Returns the size of the allocation in bytes.
#[inline]
pub fn size(&self) -> usize {
    self.size
}
/// Exports the allocation as a POSIX file descriptor of the given handle
/// type (`vkGetMemoryFdKHR`). The returned `File` owns the descriptor.
///
/// # Errors
/// Returns `SpecViolation(672)` if `handle_type` is not exactly the dma-buf
/// or opaque-fd type, and `SpecViolation(671)` if the allocation was not
/// created as exportable with that type.
#[inline]
#[cfg(target_os = "linux")]
pub fn export_fd(
    &self,
    handle_type: ExternalMemoryHandleType,
) -> Result<File, DeviceMemoryAllocError> {
    let vk = self.device.pointers();
    // Exactly one of the two fd-backed handle types must be requested.
    let bits = vk::ExternalMemoryHandleTypeFlags::from(handle_type);
    if bits != vk::EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
        && bits != vk::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT
    {
        return Err(DeviceMemoryAllocError::SpecViolation(672))?;
    }
    // The requested type must be among the types declared at allocation.
    if bits & vk::ExternalMemoryHandleTypeFlags::from(self.handle_types) == 0 {
        return Err(DeviceMemoryAllocError::SpecViolation(671))?;
    }
    let fd = unsafe {
        let info = vk::MemoryGetFdInfoKHR {
            sType: vk::STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
            pNext: ptr::null(),
            memory: self.memory,
            handleType: handle_type.into(),
        };
        let mut output = MaybeUninit::uninit();
        check_errors(vk.GetMemoryFdKHR(
            self.device.internal_object(),
            &info,
            output.as_mut_ptr(),
        ))?;
        output.assume_init()
    };
    // SAFETY: Vulkan transferred ownership of a fresh descriptor to us.
    let file = unsafe { File::from_raw_fd(fd) };
    Ok(file)
}
}
// SAFETY: the allocation was created from, and is freed on, this device.
unsafe impl DeviceOwned for DeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
impl fmt::Debug for DeviceMemory {
    // Shows the owning device, memory type, and size; the raw handle is
    // intentionally omitted.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("DeviceMemory")
            .field("device", &*self.device)
            .field("memory_type", &self.memory_type())
            .field("size", &self.size)
            .finish()
    }
}
// SAFETY: `memory` is a valid `VkDeviceMemory` handle for the lifetime of
// this object.
unsafe impl VulkanObject for DeviceMemory {
    type Object = vk::DeviceMemory;
    const TYPE: vk::ObjectType = vk::OBJECT_TYPE_DEVICE_MEMORY;
    #[inline]
    fn internal_object(&self) -> vk::DeviceMemory {
        self.memory
    }
}
impl Drop for DeviceMemory {
    /// Frees the allocation and decrements the device's allocation counter.
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let vk = self.device.pointers();
            vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
            // NOTE(review): `expect` here can panic inside `drop` if the
            // counter mutex was poisoned — confirm that is acceptable.
            let mut allocation_count = self
                .device
                .allocation_count()
                .lock()
                .expect("Poisoned mutex");
            *allocation_count -= 1;
        }
    }
}
/// A `DeviceMemory` whose whole allocation is persistently mapped into host
/// address space. Unmapped by calling `unmap`.
pub struct MappedDeviceMemory {
    memory: DeviceMemory,
    // Host pointer to the start of the mapping.
    pointer: *mut c_void,
    // Whether the memory type is host-coherent (no explicit flushes needed).
    coherent: bool,
}
impl MappedDeviceMemory {
/// Unmaps the memory and returns the underlying `DeviceMemory`.
pub fn unmap(self) -> DeviceMemory {
    let device = self.memory.device();
    // SAFETY: the mapping was established by `vkMapMemory` on this device.
    unsafe {
        device
            .pointers()
            .UnmapMemory(device.internal_object(), self.memory.memory);
    }
    self.memory
}
/// Reinterprets `range` (in bytes from the start of the mapping) as a `T`
/// and returns an accessor that flushes the range on drop.
///
/// # Safety
/// The caller must guarantee that the range lies within the mapping, is
/// valid and properly aligned for `T`, and is not concurrently in use by
/// the device.
#[inline]
pub unsafe fn read_write<T: ?Sized>(&self, range: Range<usize>) -> CpuAccess<T>
where
    T: Content,
{
    let vk = self.memory.device().pointers();
    let pointer = T::ref_from_ptr(
        (self.pointer as usize + range.start) as *mut _,
        range.end - range.start,
    )
    .unwrap();
    if !self.coherent {
        // Make device writes visible to the host before reading.
        // NOTE(review): the call's result is ignored and offset/size are
        // not aligned to nonCoherentAtomSize — confirm this is acceptable.
        let range = vk::MappedMemoryRange {
            sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            pNext: ptr::null(),
            memory: self.memory.internal_object(),
            offset: range.start as u64,
            size: (range.end - range.start) as u64,
        };
        vk.InvalidateMappedMemoryRanges(self.memory.device().internal_object(), 1, &range);
    }
    CpuAccess {
        pointer: pointer,
        mem: self,
        coherent: self.coherent,
        range: range,
    }
}
}
impl AsRef<DeviceMemory> for MappedDeviceMemory {
    /// Borrows the underlying (still mapped) allocation.
    #[inline]
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}
impl AsMut<DeviceMemory> for MappedDeviceMemory {
    /// Mutably borrows the underlying (still mapped) allocation.
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}
// SAFETY: delegates to the wrapped allocation's owning device.
unsafe impl DeviceOwned for MappedDeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}
// SAFETY: the raw mapping pointer is the only non-auto-Send/Sync field;
// access to the mapped bytes is gated by the unsafe `read_write` API, whose
// callers take responsibility for synchronization.
unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}
impl fmt::Debug for MappedDeviceMemory {
    // Prints as a tuple wrapping the underlying allocation's debug output.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("MappedDeviceMemory")
            .field(&self.memory)
            .finish()
    }
}
// SAFETY: the raw mapping pointer is the only non-auto-Send/Sync field;
// callers of the unsafe `as_ptr` accessor take responsibility for
// synchronizing access to the mapped bytes.
unsafe impl Send for DeviceMemoryMapping {}
unsafe impl Sync for DeviceMemoryMapping {}
/// A host mapping of a shared `DeviceMemory`, unmapped on drop. The
/// allocation's `mapped` flag prevents two simultaneous mappings.
pub struct DeviceMemoryMapping {
    device: Arc<Device>,
    memory: Arc<DeviceMemory>,
    // Host pointer returned by `vkMapMemory`.
    pointer: *mut c_void,
    // Whether the memory type is host-coherent.
    coherent: bool,
}
impl DeviceMemoryMapping {
/// Maps a region of `memory` into host address space with `vkMapMemory`,
/// after checking the relevant valid-usage rules.
///
/// `size` may be `vk::WHOLE_SIZE` to map from `offset` to the end of the
/// allocation. `flags` must currently be 0 (the spec reserves it).
///
/// # Errors
/// Returns `SpecViolation`/`ImplicitSpecViolation` for failed VUID checks,
/// or the mapped Vulkan error from `vkMapMemory` itself.
pub fn new(
    device: Arc<Device>,
    memory: Arc<DeviceMemory>,
    offset: u64,
    size: u64,
    flags: u32,
) -> Result<DeviceMemoryMapping, DeviceMemoryAllocError> {
    // VUID 678: the memory must not already be host-mapped. Holding the
    // lock to the end also serializes concurrent mapping attempts.
    let mut mapped = memory.mapped.lock().expect("Poisoned mutex");
    if *mapped {
        return Err(DeviceMemoryAllocError::SpecViolation(678));
    }
    // VUIDs 679-681: offset/size must describe a region inside the allocation.
    if size != vk::WHOLE_SIZE && offset >= memory.size() as u64 {
        return Err(DeviceMemoryAllocError::SpecViolation(679));
    }
    if size != vk::WHOLE_SIZE && size == 0 {
        return Err(DeviceMemoryAllocError::SpecViolation(680));
    }
    if size != vk::WHOLE_SIZE && size > memory.size() as u64 - offset {
        return Err(DeviceMemoryAllocError::SpecViolation(681));
    }
    // VUID 682: only host-visible (here: host-coherent) memory can be mapped.
    let coherent = memory.memory_type().is_host_coherent();
    if !coherent {
        return Err(DeviceMemoryAllocError::SpecViolation(682));
    }
    if device.internal_object() != memory.device().internal_object() {
        return Err(DeviceMemoryAllocError::ImplicitSpecViolation(
            "VUID-vkMapMemory-memory-parent",
        ));
    }
    if flags != 0 {
        return Err(DeviceMemoryAllocError::ImplicitSpecViolation(
            "VUID-vkMapMemory-flags-zerobitmask",
        ));
    }
    let vk = device.pointers();
    let ptr = unsafe {
        let mut output = MaybeUninit::uninit();
        // Map exactly the requested window. The previous implementation
        // validated `offset`/`size`/`flags` above but then ignored them and
        // always mapped the whole allocation from offset 0.
        check_errors(vk.MapMemory(
            device.internal_object(),
            memory.memory,
            offset as vk::DeviceSize,
            size as vk::DeviceSize,
            flags,
            output.as_mut_ptr(),
        ))?;
        output.assume_init()
    };
    *mapped = true;
    Ok(DeviceMemoryMapping {
        device: device.clone(),
        memory: memory.clone(),
        pointer: ptr,
        coherent,
    })
}
/// Returns the host pointer to the start of the mapped region.
///
/// # Safety
/// The caller must not access the pointed-to bytes while the device is
/// using them, must stay within the mapped region, and must not use the
/// pointer after this mapping is dropped.
pub unsafe fn as_ptr(&self) -> *mut u8 {
    self.pointer as *mut u8
}
}
impl Drop for DeviceMemoryMapping {
    /// Unmaps the memory and clears the allocation's `mapped` flag so it can
    /// be mapped again.
    #[inline]
    fn drop(&mut self) {
        // Hold the flag lock across the unmap to serialize with `new`.
        let mut mapped = self.memory.mapped.lock().expect("Poisoned mutex");
        unsafe {
            let vk = self.device.pointers();
            vk.UnmapMemory(self.device.internal_object(), self.memory.memory);
        }
        *mapped = false;
    }
}
/// RAII guard over a typed view into mapped memory; for non-coherent memory
/// the covered byte range is flushed when the guard is dropped.
pub struct CpuAccess<'a, T: ?Sized + 'a> {
    // Typed pointer into the mapping.
    pointer: *mut T,
    mem: &'a MappedDeviceMemory,
    // Whether the memory is host-coherent (no flush needed on drop).
    coherent: bool,
    // Byte range of the mapping covered by this access.
    range: Range<usize>,
}
impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
    /// Builds a new accessor pointing to a subobject of the current one,
    /// keeping the same underlying mapping and byte range.
    ///
    /// NOTE(review): since `CpuAccess` implements `Drop`, the consumed
    /// `self` is still dropped at the end of this call, flushing the range
    /// once for non-coherent memory — presumably harmless, but confirm.
    #[doc(hidden)]
    #[inline]
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U>
    where
        F: FnOnce(*mut T) -> *mut U,
    {
        CpuAccess {
            pointer: f(self.pointer),
            mem: self.mem,
            coherent: self.coherent,
            range: self.range.clone(),
        }
    }
}
// NOTE(review): these impls place no `Send`/`Sync` bounds on `T`, so a
// `CpuAccess` over a non-thread-safe `T` would still be sendable across
// threads — verify this is intended; it looks unsound in the general case.
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}
impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
    type Target = T;
    // SAFETY-relevant: the pointer was validated by `read_write` and stays
    // valid for the guard's lifetime.
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.pointer }
    }
}
impl<'a, T: ?Sized + 'a> DerefMut for CpuAccess<'a, T> {
    // SAFETY-relevant: same pointer validity argument as `deref`.
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.pointer }
    }
}
impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
    /// For non-coherent memory, flushes the accessed byte range so host
    /// writes become visible to the device.
    #[inline]
    fn drop(&mut self) {
        if !self.coherent {
            let vk = self.mem.as_ref().device().pointers();
            let range = vk::MappedMemoryRange {
                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                pNext: ptr::null(),
                memory: self.mem.as_ref().internal_object(),
                offset: self.range.start as u64,
                size: (self.range.end - self.range.start) as u64,
            };
            // NOTE(review): the flush result is ignored — errors here are
            // silently dropped, which may be deliberate for a destructor.
            unsafe {
                vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(), 1, &range);
            }
        }
    }
}
/// Error that can happen when allocating, exporting, or mapping device
/// memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DeviceMemoryAllocError {
    /// Not enough host or device memory.
    OomError(OomError),
    /// The maximum number of live allocations was reached.
    TooManyObjects,
    /// `vkMapMemory` failed.
    MemoryMapFailed,
    /// The requested memory type index does not exist.
    MemoryIndexInvalid,
    /// The same extension struct was added to the `pNext` chain twice.
    StructureTypeAlreadyPresent,
    /// A numbered valid-usage rule from the Vulkan spec was violated.
    SpecViolation(u32),
    /// A named implicit valid-usage rule was violated.
    ImplicitSpecViolation(&'static str),
    /// A required device extension is not enabled.
    MissingExtension(&'static str),
    /// The requested allocation size was zero.
    InvalidSize,
}
impl error::Error for DeviceMemoryAllocError {
    /// Only the `OomError` variant wraps an underlying error source.
    #[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        if let DeviceMemoryAllocError::OomError(ref err) = *self {
            Some(err)
        } else {
            None
        }
    }
}
impl fmt::Display for DeviceMemoryAllocError {
    /// Human-readable description of each error variant.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            DeviceMemoryAllocError::OomError(_) => fmt.write_str("not enough memory available"),
            DeviceMemoryAllocError::TooManyObjects => {
                fmt.write_str("the maximum number of allocations has been exceeded")
            }
            DeviceMemoryAllocError::MemoryMapFailed => fmt.write_str("memory map failed"),
            DeviceMemoryAllocError::MemoryIndexInvalid => fmt.write_str("memory index invalid"),
            DeviceMemoryAllocError::StructureTypeAlreadyPresent => {
                fmt.write_str("structure type already present")
            }
            DeviceMemoryAllocError::SpecViolation(u) => {
                write!(fmt, "valid usage ID check {} failed", u)
            }
            DeviceMemoryAllocError::MissingExtension(s) => {
                write!(fmt, "Missing the following extension: {}", s)
            }
            DeviceMemoryAllocError::ImplicitSpecViolation(e) => {
                write!(fmt, "Implicit spec violation failed {}", e)
            }
            DeviceMemoryAllocError::InvalidSize => fmt.write_str("invalid size"),
        }
    }
}
impl From<Error> for DeviceMemoryAllocError {
    /// Maps a raw Vulkan error into the allocation error type.
    ///
    /// # Panics
    /// Panics on error codes that the wrapped memory entry points cannot
    /// legally return.
    #[inline]
    fn from(err: Error) -> DeviceMemoryAllocError {
        match err {
            e @ Error::OutOfHostMemory | e @ Error::OutOfDeviceMemory => {
                DeviceMemoryAllocError::OomError(e.into())
            }
            Error::TooManyObjects => DeviceMemoryAllocError::TooManyObjects,
            Error::MemoryMapFailed => DeviceMemoryAllocError::MemoryMapFailed,
            _ => panic!("unexpected error: {:?}", err),
        }
    }
}
impl From<OomError> for DeviceMemoryAllocError {
    /// Wraps an out-of-memory error directly.
    #[inline]
    fn from(err: OomError) -> DeviceMemoryAllocError {
        DeviceMemoryAllocError::OomError(err)
    }
}
#[cfg(test)]
mod tests {
    use crate::memory::DeviceMemory;
    use crate::memory::DeviceMemoryAllocError;
    use crate::OomError;

    /// A basic allocation from the first reported memory type succeeds.
    #[test]
    fn create() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
    }

    /// Zero-sized allocations are rejected.
    #[test]
    fn zero_size() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_should_panic!({
            let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0).unwrap();
        });
    }

    /// An absurdly large request fails heap-size validation.
    #[test]
    #[cfg(target_pointer_width = "64")]
    fn oom_single() {
        let (device, _) = gfx_dev_and_queue!();
        // Pick a memory type that is actually backed by committed storage.
        // (`find` replaces the old `filter(..).next()` chain.)
        let mem_ty = device
            .physical_device()
            .memory_types()
            .find(|m| !m.is_lazily_allocated())
            .unwrap();
        // `_` instead of the previously unused binding `u`.
        match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
            Err(DeviceMemoryAllocError::SpecViolation(_)) => (),
            _ => panic!(),
        }
    }

    /// Repeated large allocations eventually exhaust device memory.
    #[test]
    #[ignore]
    fn oom_multi() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .find(|m| !m.is_lazily_allocated())
            .unwrap();
        let heap_size = mem_ty.heap().size();
        let mut allocs = Vec::new();
        for _ in 0..4 {
            match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
                Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => return,
                // Keep successful allocations alive so memory accumulates.
                Ok(a) => allocs.push(a),
                _ => (),
            }
        }
        panic!()
    }

    /// The per-device allocation counter tracks live allocations.
    #[test]
    fn allocation_count() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 0);
        // Leading underscores silence unused-variable warnings while still
        // keeping the allocations alive for the counter assertions.
        let _mem1 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
        {
            let _mem2 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
            assert_eq!(*device.allocation_count().lock().unwrap(), 2);
        }
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
    }
}