use super::{DedicatedAllocation, DedicatedTo, DeviceAlignment};
use crate::{
device::{Device, DeviceOwned},
instance::InstanceOwnedDebugWrapper,
macros::{impl_id_counter, vulkan_bitflags, vulkan_bitflags_enum},
memory::{is_aligned, MemoryPropertyFlags},
DeviceSize, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version,
VulkanError, VulkanObject,
};
use std::{
ffi::c_void,
fs::File,
mem::MaybeUninit,
num::NonZeroU64,
ops::Range,
ptr::{self, NonNull},
slice,
sync::{atomic::Ordering, Arc},
};
/// A block of raw device memory (`VkDeviceMemory`), allocated from one of the
/// device's memory types, together with the bookkeeping needed to map, flush,
/// invalidate and free it.
#[derive(Debug)]
pub struct DeviceMemory {
    handle: ash::vk::DeviceMemory,
    device: InstanceOwnedDebugWrapper<Arc<Device>>,
    // Unique object id, assigned via `impl_id_counter!`.
    id: NonZeroU64,
    // Size in bytes requested at allocation time.
    allocation_size: DeviceSize,
    // Index into the physical device's `memory_types` array.
    memory_type_index: u32,
    // Set when the allocation is dedicated to a specific buffer or image.
    dedicated_to: Option<DedicatedTo>,
    // Handle types this memory may be exported as.
    export_handle_types: ExternalMemoryHandleTypes,
    // Set when the memory was imported rather than freshly allocated.
    imported_handle_type: Option<ExternalMemoryHandleType>,
    flags: MemoryAllocateFlags,
    // `Some` while the memory is currently host-mapped.
    mapping_state: Option<MappingState>,
    // Cached `non_coherent_atom_size` device property.
    atom_size: DeviceAlignment,
    // Cached: the memory type's `property_flags` contain `HOST_COHERENT`.
    is_coherent: bool,
}
impl DeviceMemory {
/// Allocates a new block of device memory according to `allocate_info`.
///
/// # Errors
///
/// Returns a validation error if `allocate_info` is invalid, or the error
/// reported by `vkAllocateMemory`.
#[inline]
pub fn allocate(
    device: Arc<Device>,
    mut allocate_info: MemoryAllocateInfo<'_>,
) -> Result<Self, Validated<VulkanError>> {
    // Dedicated allocation requires Vulkan 1.1 or `VK_KHR_dedicated_allocation`;
    // drop the request silently when neither is available instead of failing.
    if !(device.api_version() >= Version::V1_1
        || device.enabled_extensions().khr_dedicated_allocation)
    {
        allocate_info.dedicated_allocation = None;
    }
    Self::validate_allocate(&device, &allocate_info, None)?;
    unsafe { Ok(Self::allocate_unchecked(device, allocate_info, None)?) }
}

/// Imports an external memory handle as device memory.
///
/// # Safety
///
/// - The handle (or file) inside `import_info` must be a valid external
///   memory handle of the stated handle type; ownership is transferred to
///   the implementation.
#[inline]
pub unsafe fn import(
    device: Arc<Device>,
    mut allocate_info: MemoryAllocateInfo<'_>,
    import_info: MemoryImportInfo,
) -> Result<Self, Validated<VulkanError>> {
    // Same extension gate as in `allocate`.
    if !(device.api_version() >= Version::V1_1
        || device.enabled_extensions().khr_dedicated_allocation)
    {
        allocate_info.dedicated_allocation = None;
    }
    Self::validate_allocate(&device, &allocate_info, Some(&import_info))?;
    Ok(Self::allocate_unchecked(
        device,
        allocate_info,
        Some(import_info),
    )?)
}
/// Validates the parameters of [`Self::allocate`] / [`Self::import`].
///
/// Errors are tagged with the name of the offending argument.
#[inline(never)]
fn validate_allocate(
    device: &Device,
    allocate_info: &MemoryAllocateInfo<'_>,
    import_info: Option<&MemoryImportInfo>,
) -> Result<(), Box<ValidationError>> {
    // The allocation parameters are always checked.
    allocate_info
        .validate(device)
        .map_err(|err| err.add_context("allocate_info"))?;
    // The import parameters are only present when importing.
    match import_info {
        Some(info) => info
            .validate(device)
            .map_err(|err| err.add_context("import_info")),
        None => Ok(()),
    }
}
/// Raw allocation path shared by [`Self::allocate`] and [`Self::import`];
/// performs no validation.
///
/// # Safety
///
/// - If `import_info` is `Some`, the contained handle or file descriptor must
///   be a valid external memory handle of the stated type.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline(never)]
pub unsafe fn allocate_unchecked(
    device: Arc<Device>,
    mut allocate_info: MemoryAllocateInfo<'_>,
    import_info: Option<MemoryImportInfo>,
) -> Result<Self, VulkanError> {
    // Dedicated allocation requires Vulkan 1.1 or `VK_KHR_dedicated_allocation`.
    if !(device.api_version() >= Version::V1_1
        || device.enabled_extensions().khr_dedicated_allocation)
    {
        allocate_info.dedicated_allocation = None;
    }
    let MemoryAllocateInfo {
        allocation_size,
        memory_type_index,
        dedicated_allocation,
        export_handle_types,
        flags,
        _ne: _,
    } = allocate_info;
    let mut allocate_info_vk = ash::vk::MemoryAllocateInfo {
        allocation_size,
        memory_type_index,
        ..Default::default()
    };
    // These locals keep the extension structs alive while raw pointers to
    // them are chained into `allocate_info_vk.p_next` below.
    let mut dedicated_allocate_info_vk = None;
    let mut export_allocate_info_vk = None;
    let mut import_fd_info_vk = None;
    let mut import_win32_handle_info_vk = None;
    let mut flags_info_vk = None;
    if let Some(dedicated_allocation) = dedicated_allocation {
        let next = dedicated_allocate_info_vk.insert(match dedicated_allocation {
            DedicatedAllocation::Buffer(buffer) => ash::vk::MemoryDedicatedAllocateInfo {
                buffer: buffer.handle(),
                ..Default::default()
            },
            DedicatedAllocation::Image(image) => ash::vk::MemoryDedicatedAllocateInfo {
                image: image.handle(),
                ..Default::default()
            },
        });
        // Prepend to the p_next chain.
        next.p_next = allocate_info_vk.p_next;
        allocate_info_vk.p_next = next as *const _ as *const _;
    }
    if !export_handle_types.is_empty() {
        let next = export_allocate_info_vk.insert(ash::vk::ExportMemoryAllocateInfo {
            handle_types: export_handle_types.into(),
            ..Default::default()
        });
        next.p_next = allocate_info_vk.p_next;
        allocate_info_vk.p_next = next as *const _ as *const _;
    }
    // Remember which handle type (if any) the memory was imported from.
    let imported_handle_type = import_info.as_ref().map(|import_info| match import_info {
        MemoryImportInfo::Fd { handle_type, .. } => *handle_type,
        MemoryImportInfo::Win32 { handle_type, .. } => *handle_type,
    });
    if let Some(import_info) = import_info {
        match import_info {
            MemoryImportInfo::Fd { handle_type, file } => {
                // Ownership of the fd passes to Vulkan on success.
                #[cfg(unix)]
                let fd = {
                    use std::os::fd::IntoRawFd;
                    file.into_raw_fd()
                };
                // Fd import cannot actually occur on non-Unix; keep the code
                // compiling with a placeholder value.
                #[cfg(not(unix))]
                let fd = {
                    let _ = file;
                    -1
                };
                let next = import_fd_info_vk.insert(ash::vk::ImportMemoryFdInfoKHR {
                    handle_type: handle_type.into(),
                    fd,
                    ..Default::default()
                });
                next.p_next = allocate_info_vk.p_next;
                allocate_info_vk.p_next = next as *const _ as *const _;
            }
            MemoryImportInfo::Win32 {
                handle_type,
                handle,
            } => {
                let next = import_win32_handle_info_vk.insert(
                    ash::vk::ImportMemoryWin32HandleInfoKHR {
                        handle_type: handle_type.into(),
                        handle,
                        ..Default::default()
                    },
                );
                next.p_next = allocate_info_vk.p_next;
                allocate_info_vk.p_next = next as *const _ as *const _;
            }
        }
    }
    if !flags.is_empty() {
        let next = flags_info_vk.insert(ash::vk::MemoryAllocateFlagsInfo {
            flags: flags.into(),
            ..Default::default()
        });
        next.p_next = allocate_info_vk.p_next;
        allocate_info_vk.p_next = next as *const _ as *const _;
    }
    // Atomically reserve an allocation slot; fail with `TooManyObjects` if
    // the device-wide `maxMemoryAllocationCount` limit would be exceeded.
    let max_allocations = device
        .physical_device()
        .properties()
        .max_memory_allocation_count;
    device
        .allocation_count
        .fetch_update(Ordering::Acquire, Ordering::Relaxed, move |count| {
            (count < max_allocations).then_some(count + 1)
        })
        .map_err(|_| VulkanError::TooManyObjects)?;
    let handle = {
        let fns = device.fns();
        let mut output = MaybeUninit::uninit();
        (fns.v1_0.allocate_memory)(
            device.handle(),
            &allocate_info_vk,
            ptr::null(),
            output.as_mut_ptr(),
        )
        .result()
        .map_err(|e| {
            // Roll back the reserved slot on failure.
            device.allocation_count.fetch_sub(1, Ordering::Release);
            VulkanError::from(e)
        })?;
        output.assume_init()
    };
    // Cache the properties needed for mapping/flushing later.
    let atom_size = device.physical_device().properties().non_coherent_atom_size;
    let is_coherent = device.physical_device().memory_properties().memory_types
        [memory_type_index as usize]
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_COHERENT);
    Ok(DeviceMemory {
        handle,
        device: InstanceOwnedDebugWrapper(device),
        id: Self::next_id(),
        allocation_size,
        memory_type_index,
        dedicated_to: dedicated_allocation.map(Into::into),
        export_handle_types,
        imported_handle_type,
        flags,
        mapping_state: None,
        atom_size,
        is_coherent,
    })
}
/// Wraps an existing `VkDeviceMemory` handle in a `DeviceMemory`.
///
/// # Safety
///
/// - `handle` must be a valid, currently-unmapped device memory handle owned
///   by `device`, and `allocate_info` must match how it was allocated.
///
/// Note: unlike `allocate_unchecked`, this does not bump the device's
/// allocation counter, and `imported_handle_type` is always `None`.
#[inline]
pub unsafe fn from_handle(
    device: Arc<Device>,
    handle: ash::vk::DeviceMemory,
    allocate_info: MemoryAllocateInfo<'_>,
) -> Self {
    let MemoryAllocateInfo {
        allocation_size,
        memory_type_index,
        dedicated_allocation,
        export_handle_types,
        flags,
        _ne: _,
    } = allocate_info;
    // Cache the properties needed for mapping/flushing later.
    let atom_size = device.physical_device().properties().non_coherent_atom_size;
    let is_coherent = device.physical_device().memory_properties().memory_types
        [memory_type_index as usize]
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_COHERENT);
    DeviceMemory {
        handle,
        device: InstanceOwnedDebugWrapper(device),
        id: Self::next_id(),
        allocation_size,
        memory_type_index,
        dedicated_to: dedicated_allocation.map(Into::into),
        export_handle_types,
        imported_handle_type: None,
        flags,
        mapping_state: None,
        atom_size,
        is_coherent,
    }
}

/// Returns the index of the memory type that this memory was allocated from.
#[inline]
pub fn memory_type_index(&self) -> u32 {
    self.memory_type_index
}

/// Returns the size in bytes of the allocation.
#[inline]
pub fn allocation_size(&self) -> DeviceSize {
    self.allocation_size
}

/// Returns `true` if the memory is a dedicated allocation for a buffer or image.
#[inline]
pub fn is_dedicated(&self) -> bool {
    self.dedicated_to.is_some()
}

// Crate-internal: which resource (if any) this allocation is dedicated to.
pub(crate) fn dedicated_to(&self) -> Option<DedicatedTo> {
    self.dedicated_to
}

/// Returns the handle types that this memory can be exported as.
#[inline]
pub fn export_handle_types(&self) -> ExternalMemoryHandleTypes {
    self.export_handle_types
}

/// Returns the handle type the memory was imported from, if it was imported.
#[inline]
pub fn imported_handle_type(&self) -> Option<ExternalMemoryHandleType> {
    self.imported_handle_type
}

/// Returns the flags the memory was allocated with.
#[inline]
pub fn flags(&self) -> MemoryAllocateFlags {
    self.flags
}

/// Returns the current host-mapping state, or `None` if not mapped.
#[inline]
pub fn mapping_state(&self) -> Option<&MappingState> {
    self.mapping_state.as_ref()
}

// Crate-internal: cached `non_coherent_atom_size` device property.
pub(crate) fn atom_size(&self) -> DeviceAlignment {
    self.atom_size
}

// Crate-internal: cached host-coherency of the memory type.
pub(crate) fn is_coherent(&self) -> bool {
    self.is_coherent
}
/// Maps a range of the memory into host address space.
///
/// # Errors
///
/// Fails validation if the memory is already mapped, or if its memory type
/// is not host-visible.
#[inline]
pub fn map(&mut self, map_info: MemoryMapInfo) -> Result<(), Validated<VulkanError>> {
    self.validate_map(&map_info)?;
    unsafe { Ok(self.map_unchecked(map_info)?) }
}

// Validation for `map`: not already mapped, range valid, type host-visible.
fn validate_map(&self, map_info: &MemoryMapInfo) -> Result<(), Box<ValidationError>> {
    if self.mapping_state.is_some() {
        return Err(Box::new(ValidationError {
            problem: "this device memory is already host-mapped".into(),
            vuids: &["VUID-vkMapMemory-memory-00678"],
            ..Default::default()
        }));
    }
    map_info
        .validate(self)
        .map_err(|err| err.add_context("map_info"))?;
    let memory_type = &self
        .device()
        .physical_device()
        .memory_properties()
        .memory_types[self.memory_type_index() as usize];
    if !memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_VISIBLE)
    {
        return Err(Box::new(ValidationError {
            problem: "`self.memory_type_index()` refers to a memory type whose \
                `property_flags` does not contain `MemoryPropertyFlags::HOST_VISIBLE`"
                .into(),
            vuids: &["VUID-vkMapMemory-memory-00682"],
            ..Default::default()
        }));
    }
    Ok(())
}

/// Maps the memory without validation.
///
/// # Safety
///
/// - The preconditions checked by [`Self::map`] must hold.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn map_unchecked(&mut self, map_info: MemoryMapInfo) -> Result<(), VulkanError> {
    let MemoryMapInfo {
        offset,
        size,
        _ne: _,
    } = map_info;
    // The mapped size must fit in `isize` so that in-bounds pointer
    // arithmetic on the returned pointer is sound.
    assert!(size <= isize::MAX.try_into().unwrap());
    let device = self.device();
    let ptr = {
        let fns = device.fns();
        let mut output = MaybeUninit::uninit();
        // Prefer `vkMapMemory2KHR` when `VK_KHR_map_memory2` is enabled.
        if device.enabled_extensions().khr_map_memory2 {
            let map_info_vk = ash::vk::MemoryMapInfoKHR {
                flags: ash::vk::MemoryMapFlags::empty(),
                memory: self.handle(),
                offset,
                size,
                ..Default::default()
            };
            (fns.khr_map_memory2.map_memory2_khr)(
                device.handle(),
                &map_info_vk,
                output.as_mut_ptr(),
            )
            .result()
            .map_err(VulkanError::from)?;
        } else {
            (fns.v1_0.map_memory)(
                device.handle(),
                self.handle,
                offset,
                size,
                ash::vk::MemoryMapFlags::empty(),
                output.as_mut_ptr(),
            )
            .result()
            .map_err(VulkanError::from)?;
        }
        output.assume_init()
    };
    // A successful vkMapMemory never returns null.
    let ptr = NonNull::new(ptr).unwrap();
    let range = offset..offset + size;
    self.mapping_state = Some(MappingState { ptr, range });
    Ok(())
}
/// Unmaps the currently mapped range of the memory.
///
/// # Errors
///
/// Fails validation if the memory is not currently host-mapped.
#[inline]
pub fn unmap(&mut self, unmap_info: MemoryUnmapInfo) -> Result<(), Validated<VulkanError>> {
    self.validate_unmap(&unmap_info)?;
    unsafe { self.unmap_unchecked(unmap_info) }?;
    Ok(())
}

// Validation for `unmap`: the memory must currently be mapped.
fn validate_unmap(&self, unmap_info: &MemoryUnmapInfo) -> Result<(), Box<ValidationError>> {
    if self.mapping_state.is_none() {
        return Err(Box::new(ValidationError {
            problem: "this device memory is not currently host-mapped".into(),
            vuids: &["VUID-vkUnmapMemory-memory-00689"],
            ..Default::default()
        }));
    }
    unmap_info
        .validate(self)
        .map_err(|err| err.add_context("unmap_info"))?;
    Ok(())
}

/// Unmaps the memory without validation.
///
/// # Safety
///
/// - The memory must currently be host-mapped, and no references into the
///   mapping may remain in use.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn unmap_unchecked(
    &mut self,
    unmap_info: MemoryUnmapInfo,
) -> Result<(), VulkanError> {
    let MemoryUnmapInfo { _ne: _ } = unmap_info;
    let device = self.device();
    let fns = device.fns();
    // Prefer `vkUnmapMemory2KHR` when `VK_KHR_map_memory2` is enabled; the
    // plain `vkUnmapMemory` path cannot fail.
    if device.enabled_extensions().khr_map_memory2 {
        let unmap_info_vk = ash::vk::MemoryUnmapInfoKHR {
            flags: ash::vk::MemoryUnmapFlagsKHR::empty(),
            memory: self.handle(),
            ..Default::default()
        };
        (fns.khr_map_memory2.unmap_memory2_khr)(device.handle(), &unmap_info_vk)
            .result()
            .map_err(VulkanError::from)?;
    } else {
        (fns.v1_0.unmap_memory)(device.handle(), self.handle);
    }
    self.mapping_state = None;
    Ok(())
}
/// Invalidates the host cache for a range of mapped memory, making device
/// writes visible to the host.
///
/// # Safety
///
/// - There must be no operations pending or in progress that write to the
///   specified range from the device while the host reads it.
#[inline]
pub unsafe fn invalidate_range(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), Validated<VulkanError>> {
    self.validate_memory_range(&memory_range)?;
    Ok(self.invalidate_range_unchecked(memory_range)?)
}

/// Invalidates the range without validation.
///
/// # Safety
///
/// - Same as [`Self::invalidate_range`], plus the range must be valid.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn invalidate_range_unchecked(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), VulkanError> {
    // Host-coherent memory never needs explicit invalidation.
    if self.is_coherent {
        return Ok(());
    }
    let MappedMemoryRange {
        offset,
        size,
        _ne: _,
    } = memory_range;
    let memory_range_vk = ash::vk::MappedMemoryRange {
        memory: self.handle(),
        offset,
        size,
        ..Default::default()
    };
    let fns = self.device().fns();
    (fns.v1_0.invalidate_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk)
        .result()
        .map_err(VulkanError::from)?;
    Ok(())
}

/// Flushes the host cache for a range of mapped memory, making host writes
/// visible to the device.
///
/// # Safety
///
/// - There must be no operations pending or in progress that access the
///   specified range from the device while the host writes it.
#[inline]
pub unsafe fn flush_range(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), Validated<VulkanError>> {
    self.validate_memory_range(&memory_range)?;
    Ok(self.flush_range_unchecked(memory_range)?)
}

/// Flushes the range without validation.
///
/// # Safety
///
/// - Same as [`Self::flush_range`], plus the range must be valid.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn flush_range_unchecked(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), VulkanError> {
    // Host-coherent memory never needs explicit flushing.
    if self.is_coherent {
        return Ok(());
    }
    let MappedMemoryRange {
        offset,
        size,
        _ne: _,
    } = memory_range;
    let memory_range_vk = ash::vk::MappedMemoryRange {
        memory: self.handle(),
        offset,
        size,
        ..Default::default()
    };
    let fns = self.device().fns();
    (fns.v1_0.flush_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk)
        .result()
        .map_err(VulkanError::from)?;
    Ok(())
}

// Shared range validation for `invalidate_range` and `flush_range`.
fn validate_memory_range(
    &self,
    memory_range: &MappedMemoryRange,
) -> Result<(), Box<ValidationError>> {
    memory_range
        .validate(self)
        .map_err(|err| err.add_context("memory_range"))?;
    Ok(())
}
/// Queries the number of bytes currently committed to this lazily-allocated
/// memory.
///
/// # Errors
///
/// Fails validation if the memory type is not `LAZILY_ALLOCATED`.
#[inline]
pub fn commitment(&self) -> Result<DeviceSize, Box<ValidationError>> {
    self.validate_commitment()?;
    unsafe { Ok(self.commitment_unchecked()) }
}

// Validation for `commitment`: memory type must be lazily allocated.
fn validate_commitment(&self) -> Result<(), Box<ValidationError>> {
    let memory_type = &self
        .device
        .physical_device()
        .memory_properties()
        .memory_types[self.memory_type_index as usize];
    if !memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::LAZILY_ALLOCATED)
    {
        return Err(Box::new(ValidationError {
            problem: "the `property_flags` of the memory type does not contain the \
                `MemoryPropertyFlags::LAZILY_ALLOCATED` flag"
                .into(),
            vuids: &["VUID-vkGetDeviceMemoryCommitment-memory-00690"],
            ..Default::default()
        }));
    }
    Ok(())
}

/// Queries the commitment without validation.
///
/// # Safety
///
/// - The memory type must be `LAZILY_ALLOCATED`.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn commitment_unchecked(&self) -> DeviceSize {
    let mut output: DeviceSize = 0;
    let fns = self.device.fns();
    (fns.v1_0.get_device_memory_commitment)(self.device.handle(), self.handle, &mut output);
    output
}
/// Exports the memory as a POSIX file descriptor of the given handle type.
///
/// # Errors
///
/// Fails validation unless `handle_type` is `OpaqueFd` or `DmaBuf` and is
/// contained in this memory's `export_handle_types`.
#[inline]
pub fn export_fd(
    &self,
    handle_type: ExternalMemoryHandleType,
) -> Result<File, Validated<VulkanError>> {
    self.validate_export_fd(handle_type)?;
    unsafe { Ok(self.export_fd_unchecked(handle_type)?) }
}

// Validation for `export_fd`.
fn validate_export_fd(
    &self,
    handle_type: ExternalMemoryHandleType,
) -> Result<(), Box<ValidationError>> {
    handle_type.validate_device(&self.device).map_err(|err| {
        err.add_context("handle_type")
            .set_vuids(&["VUID-VkMemoryGetFdInfoKHR-handleType-parameter"])
    })?;
    // Only fd-based handle types can be exported as a file descriptor.
    if !matches!(
        handle_type,
        ExternalMemoryHandleType::OpaqueFd | ExternalMemoryHandleType::DmaBuf
    ) {
        return Err(Box::new(ValidationError {
            context: "handle_type".into(),
            problem: "is not `ExternalMemoryHandleType::OpaqueFd` or \
                `ExternalMemoryHandleType::DmaBuf`"
                .into(),
            vuids: &["VUID-VkMemoryGetFdInfoKHR-handleType-00672"],
            ..Default::default()
        }));
    }
    if !self.export_handle_types.contains_enum(handle_type) {
        return Err(Box::new(ValidationError {
            context: "handle_type".into(),
            problem: "is not contained in this memory's `export_handle_types`".into(),
            vuids: &["VUID-VkMemoryGetFdInfoKHR-handleType-00671"],
            ..Default::default()
        }));
    }
    Ok(())
}

/// Exports the memory as a file descriptor without validation.
///
/// # Safety
///
/// - The preconditions checked by [`Self::export_fd`] must hold.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn export_fd_unchecked(
    &self,
    handle_type: ExternalMemoryHandleType,
) -> Result<File, VulkanError> {
    let info_vk = ash::vk::MemoryGetFdInfoKHR {
        memory: self.handle,
        handle_type: handle_type.into(),
        ..Default::default()
    };
    let fns = self.device.fns();
    let mut output = MaybeUninit::uninit();
    (fns.khr_external_memory_fd.get_memory_fd_khr)(
        self.device.handle(),
        &info_vk,
        output.as_mut_ptr(),
    )
    .result()
    .map_err(VulkanError::from)?;
    // Ownership of the returned fd is transferred to the `File`.
    #[cfg(unix)]
    {
        use std::os::unix::io::FromRawFd;
        Ok(File::from_raw_fd(output.assume_init()))
    }
    // `VK_KHR_external_memory_fd` is Unix-only in practice.
    #[cfg(not(unix))]
    {
        let _ = output;
        unreachable!("`khr_external_memory_fd` was somehow enabled on a non-Unix system");
    }
}
}
impl Drop for DeviceMemory {
    #[inline]
    fn drop(&mut self) {
        // Free the Vulkan allocation and release the slot reserved in the
        // device-wide allocation counter.
        unsafe {
            let fns = self.device.fns();
            (fns.v1_0.free_memory)(self.device.handle(), self.handle, ptr::null());
            self.device.allocation_count.fetch_sub(1, Ordering::Release);
        }
    }
}
// SAFETY: `handle` is the raw `VkDeviceMemory` this struct owns.
unsafe impl VulkanObject for DeviceMemory {
    type Handle = ash::vk::DeviceMemory;
    #[inline]
    fn handle(&self) -> Self::Handle {
        self.handle
    }
}

// SAFETY: the memory was allocated from, and is freed on, this device.
unsafe impl DeviceOwned for DeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
impl_id_counter!(DeviceMemory);
/// Parameters to allocate a new [`DeviceMemory`].
#[derive(Clone, Debug)]
pub struct MemoryAllocateInfo<'d> {
    /// The number of bytes to allocate. Must not be zero.
    pub allocation_size: DeviceSize,
    /// Index of the memory type to allocate from.
    pub memory_type_index: u32,
    /// Buffer or image this allocation is dedicated to, if any.
    pub dedicated_allocation: Option<DedicatedAllocation<'d>>,
    /// Handle types the allocation may later be exported as.
    pub export_handle_types: ExternalMemoryHandleTypes,
    /// Additional allocation flags (e.g. `DEVICE_ADDRESS`).
    pub flags: MemoryAllocateFlags,
    pub _ne: crate::NonExhaustive,
}
impl Default for MemoryAllocateInfo<'static> {
#[inline]
fn default() -> Self {
Self {
allocation_size: 0,
memory_type_index: u32::MAX,
dedicated_allocation: None,
export_handle_types: ExternalMemoryHandleTypes::empty(),
flags: MemoryAllocateFlags::empty(),
_ne: crate::NonExhaustive(()),
}
}
}
impl<'d> MemoryAllocateInfo<'d> {
/// Returns a `MemoryAllocateInfo` dedicated to the given resource, with the
/// remaining fields set to the same placeholder values as `default()`.
/// `allocation_size` and `memory_type_index` must still be filled in.
#[inline]
pub fn dedicated_allocation(dedicated_allocation: DedicatedAllocation<'d>) -> Self {
    Self {
        allocation_size: 0,
        memory_type_index: u32::MAX,
        dedicated_allocation: Some(dedicated_allocation),
        export_handle_types: ExternalMemoryHandleTypes::empty(),
        flags: MemoryAllocateFlags::empty(),
        _ne: crate::NonExhaustive(()),
    }
}
// Validates this allocation info against the device's limits, enabled
// features/extensions, and the Vulkan valid-usage rules cited in `vuids`.
pub(crate) fn validate(&self, device: &Device) -> Result<(), Box<ValidationError>> {
    let &Self {
        allocation_size,
        memory_type_index,
        ref dedicated_allocation,
        export_handle_types,
        flags,
        _ne: _,
    } = self;
    let memory_properties = device.physical_device().memory_properties();
    // The memory type index must be in range.
    let memory_type = memory_properties
        .memory_types
        .get(memory_type_index as usize)
        .ok_or_else(|| {
            Box::new(ValidationError {
                context: "memory_type_index".into(),
                problem: "is not less than the number of memory types in the device".into(),
                vuids: &["VUID-vkAllocateMemory-pAllocateInfo-01714"],
                ..Default::default()
            })
        })?;
    let memory_heap = &memory_properties.memory_heaps[memory_type.heap_index as usize];
    // Protected memory requires the `protected_memory` feature.
    if memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::PROTECTED)
        && !device.enabled_features().protected_memory
    {
        return Err(Box::new(ValidationError {
            context: "memory_type_index".into(),
            problem: "refers to a memory type where `property_flags` contains \
                `MemoryPropertyFlags::PROTECTED`"
                .into(),
            requires_one_of: RequiresOneOf(&[RequiresAllOf(&[Requires::Feature(
                "protected_memory",
            )])]),
            vuids: &["VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872"],
        }));
    }
    // Device-coherent memory requires the `device_coherent_memory` feature.
    if memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::DEVICE_COHERENT)
        && !device.enabled_features().device_coherent_memory
    {
        return Err(Box::new(ValidationError {
            context: "memory_type_index".into(),
            problem: "refers to a memory type where `property_flags` contains \
                `MemoryPropertyFlags::DEVICE_COHERENT`"
                .into(),
            requires_one_of: RequiresOneOf(&[RequiresAllOf(&[Requires::Feature(
                "device_coherent_memory",
            )])]),
            vuids: &["VUID-vkAllocateMemory-deviceCoherentMemory-02790"],
        }));
    }
    if allocation_size == 0 {
        return Err(Box::new(ValidationError {
            context: "allocation_size".into(),
            problem: "is zero".into(),
            vuids: &["VUID-VkMemoryAllocateInfo-pNext-01874"],
            ..Default::default()
        }));
    }
    // A heap size of 0 means "unknown size", so only check when nonzero.
    if memory_heap.size != 0 && allocation_size > memory_heap.size {
        return Err(Box::new(ValidationError {
            context: "allocation_size".into(),
            problem: "is greater than the size of the memory heap".into(),
            vuids: &["VUID-vkAllocateMemory-pAllocateInfo-01713"],
            ..Default::default()
        }));
    }
    // For dedicated allocations the size must exactly match the resource's
    // memory requirements.
    if let Some(dedicated_allocation) = dedicated_allocation {
        match dedicated_allocation {
            DedicatedAllocation::Buffer(buffer) => {
                assert_eq!(device, buffer.device().as_ref());
                let required_size = buffer.memory_requirements().layout.size();
                if allocation_size != required_size {
                    return Err(Box::new(ValidationError {
                        problem: "`allocation_size` does not equal the size required for the \
                            buffer specified in `dedicated_allocation`"
                            .into(),
                        vuids: &["VUID-VkMemoryDedicatedAllocateInfo-buffer-02965"],
                        ..Default::default()
                    }));
                }
            }
            DedicatedAllocation::Image(image) => {
                assert_eq!(device, image.device().as_ref());
                // Index 0: requirements of the image's first/main memory plane.
                let required_size = image.memory_requirements()[0].layout.size();
                if allocation_size != required_size {
                    return Err(Box::new(ValidationError {
                        problem: "`allocation_size` does not equal the size required for the \
                            image specified in `dedicated_allocation`"
                            .into(),
                        vuids: &["VUID-VkMemoryDedicatedAllocateInfo-image-02964"],
                        ..Default::default()
                    }));
                }
            }
        }
    }
    // Exporting requires Vulkan 1.1 or `VK_KHR_external_memory`.
    if !export_handle_types.is_empty() {
        if !(device.api_version() >= Version::V1_1
            || device.enabled_extensions().khr_external_memory)
        {
            return Err(Box::new(ValidationError {
                context: "export_handle_types".into(),
                problem: "is not empty".into(),
                requires_one_of: RequiresOneOf(&[
                    RequiresAllOf(&[Requires::APIVersion(Version::V1_1)]),
                    RequiresAllOf(&[Requires::DeviceExtension("khr_external_memory")]),
                ]),
                ..Default::default()
            }));
        }
        export_handle_types.validate_device(device).map_err(|err| {
            err.add_context("export_handle_types")
                .set_vuids(&["VUID-VkExportMemoryAllocateInfo-handleTypes-parameter"])
        })?;
    }
    // Flags require Vulkan 1.1 or `VK_KHR_device_group`.
    // NOTE(review): this check reads the *physical device's* API version,
    // unlike the `export_handle_types` check above which uses the device's —
    // confirm this asymmetry is intended.
    if !flags.is_empty() {
        if !(device.physical_device().api_version() >= Version::V1_1
            || device.enabled_extensions().khr_device_group)
        {
            return Err(Box::new(ValidationError {
                context: "flags".into(),
                problem: "is not empty".into(),
                requires_one_of: RequiresOneOf(&[
                    RequiresAllOf(&[Requires::APIVersion(Version::V1_1)]),
                    RequiresAllOf(&[Requires::DeviceExtension("khr_device_group")]),
                ]),
                ..Default::default()
            }));
        }
        // DEVICE_ADDRESS additionally requires the `buffer_device_address`
        // feature (Vulkan 1.2 or `VK_KHR_buffer_device_address`).
        if flags.intersects(MemoryAllocateFlags::DEVICE_ADDRESS) {
            if !((device.api_version() >= Version::V1_2
                || device.enabled_extensions().khr_buffer_device_address)
                && device.enabled_features().buffer_device_address)
            {
                return Err(Box::new(ValidationError {
                    context: "flags".into(),
                    problem: "contains `MemoryAllocateFlags::DEVICE_ADDRESS`".into(),
                    requires_one_of: RequiresOneOf(&[
                        RequiresAllOf(&[
                            Requires::APIVersion(Version::V1_2),
                            Requires::Feature("buffer_device_address"),
                        ]),
                        RequiresAllOf(&[
                            Requires::DeviceExtension("khr_buffer_device_address"),
                            Requires::Feature("buffer_device_address"),
                        ]),
                    ]),
                    vuids: &["VUID-VkMemoryAllocateInfo-flags-03331"],
                }));
            }
        }
    }
    Ok(())
}
}
/// Parameters to import an external memory handle into a [`DeviceMemory`].
#[derive(Debug)]
#[non_exhaustive]
pub enum MemoryImportInfo {
    /// Import from a POSIX file descriptor.
    Fd {
        handle_type: ExternalMemoryHandleType,
        // Ownership of the underlying fd is transferred on import.
        file: File,
    },
    /// Import from a Win32 handle.
    Win32 {
        handle_type: ExternalMemoryHandleType,
        handle: ash::vk::HANDLE,
    },
}
impl MemoryImportInfo {
    // Validates that the required platform extension is enabled and that the
    // handle type is one that can be imported via this variant.
    pub(crate) fn validate(&self, device: &Device) -> Result<(), Box<ValidationError>> {
        match self {
            MemoryImportInfo::Fd {
                handle_type,
                file: _,
            } => {
                if !device.enabled_extensions().khr_external_memory_fd {
                    return Err(Box::new(ValidationError {
                        problem: "is `MemoryImportInfo::Fd`".into(),
                        requires_one_of: RequiresOneOf(&[RequiresAllOf(&[
                            Requires::DeviceExtension("khr_external_memory_fd"),
                        ])]),
                        ..Default::default()
                    }));
                }
                handle_type.validate_device(device).map_err(|err| {
                    err.add_context("handle_type")
                        .set_vuids(&["VUID-VkImportMemoryFdInfoKHR-handleType-parameter"])
                })?;
                // Only fd-based handle types are importable here.
                match handle_type {
                    ExternalMemoryHandleType::OpaqueFd => {
                    }
                    ExternalMemoryHandleType::DmaBuf => {}
                    _ => {
                        return Err(Box::new(ValidationError {
                            context: "handle_type".into(),
                            problem: "is not `ExternalMemoryHandleType::OpaqueFd` or \
                                `ExternalMemoryHandleType::DmaBuf`"
                                .into(),
                            vuids: &["VUID-VkImportMemoryFdInfoKHR-handleType-00669"],
                            ..Default::default()
                        }));
                    }
                }
            }
            MemoryImportInfo::Win32 {
                handle_type,
                handle: _,
            } => {
                if !device.enabled_extensions().khr_external_memory_win32 {
                    return Err(Box::new(ValidationError {
                        problem: "is `MemoryImportInfo::Win32`".into(),
                        requires_one_of: RequiresOneOf(&[RequiresAllOf(&[
                            Requires::DeviceExtension("khr_external_memory_win32"),
                        ])]),
                        ..Default::default()
                    }));
                }
                handle_type.validate_device(device).map_err(|err| {
                    err.add_context("handle_type")
                        .set_vuids(&["VUID-VkImportMemoryWin32HandleInfoKHR-handleType-parameter"])
                })?;
                // Only Win32-based handle types are importable here.
                match handle_type {
                    ExternalMemoryHandleType::OpaqueWin32
                    | ExternalMemoryHandleType::OpaqueWin32Kmt => {
                    }
                    _ => {
                        return Err(Box::new(ValidationError {
                            context: "handle_type".into(),
                            problem: "is not `ExternalMemoryHandleType::OpaqueWin32` or \
                                `ExternalMemoryHandleType::OpaqueWin32Kmt`"
                                .into(),
                            vuids: &["VUID-VkImportMemoryWin32HandleInfoKHR-handleType-00660"],
                            ..Default::default()
                        }));
                    }
                }
            }
        }
        Ok(())
    }
}
// Generates the `ExternalMemoryHandleTypes` bitflags and the matching
// `ExternalMemoryHandleType` enum, mirroring `VkExternalMemoryHandleTypeFlagBits`.
// Entries with a `RequiresOneOf` clause are only valid when the listed
// device extension is enabled.
vulkan_bitflags_enum! {
    #[non_exhaustive]
    ExternalMemoryHandleTypes,
    ExternalMemoryHandleType,
    = ExternalMemoryHandleTypeFlags(u32);
    OPAQUE_FD, OpaqueFd = OPAQUE_FD,
    OPAQUE_WIN32, OpaqueWin32 = OPAQUE_WIN32,
    OPAQUE_WIN32_KMT, OpaqueWin32Kmt = OPAQUE_WIN32_KMT,
    D3D11_TEXTURE, D3D11Texture = D3D11_TEXTURE,
    D3D11_TEXTURE_KMT, D3D11TextureKmt = D3D11_TEXTURE_KMT,
    D3D12_HEAP, D3D12Heap = D3D12_HEAP,
    D3D12_RESOURCE, D3D12Resource = D3D12_RESOURCE,
    DMA_BUF, DmaBuf = DMA_BUF_EXT
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(ext_external_memory_dma_buf)]),
    ]),
    ANDROID_HARDWARE_BUFFER, AndroidHardwareBuffer = ANDROID_HARDWARE_BUFFER_ANDROID
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(android_external_memory_android_hardware_buffer)]),
    ]),
    HOST_ALLOCATION, HostAllocation = HOST_ALLOCATION_EXT
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(ext_external_memory_host)]),
    ]),
    HOST_MAPPED_FOREIGN_MEMORY, HostMappedForeignMemory = HOST_MAPPED_FOREIGN_MEMORY_EXT
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(ext_external_memory_host)]),
    ]),
    ZIRCON_VMO, ZirconVmo = ZIRCON_VMO_FUCHSIA
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(fuchsia_external_memory)]),
    ]),
    RDMA_ADDRESS, RdmaAddress = RDMA_ADDRESS_NV
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(nv_external_memory_rdma)]),
    ]),
}

// Flags that can be passed when allocating memory, mirroring
// `VkMemoryAllocateFlagBits`.
vulkan_bitflags! {
    #[non_exhaustive]
    MemoryAllocateFlags = MemoryAllocateFlags(u32);
    DEVICE_ADDRESS = DEVICE_ADDRESS,
}
/// Parameters for [`DeviceMemory::map`].
#[derive(Debug)]
pub struct MemoryMapInfo {
    /// Byte offset into the memory at which the mapping starts.
    pub offset: DeviceSize,
    /// Number of bytes to map. Must not be zero.
    pub size: DeviceSize,
    pub _ne: crate::NonExhaustive,
}
impl MemoryMapInfo {
    // Validates the mapping range against the memory's size and, for
    // non-coherent memory, the `non_coherent_atom_size` alignment rules.
    pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
        let &Self {
            offset,
            size,
            _ne: _,
        } = self;
        if !(offset < memory.allocation_size()) {
            return Err(Box::new(ValidationError {
                context: "offset".into(),
                problem: "is not less than `self.allocation_size()`".into(),
                vuids: &["VUID-vkMapMemory-offset-00679"],
                ..Default::default()
            }));
        }
        if size == 0 {
            return Err(Box::new(ValidationError {
                context: "size".into(),
                problem: "is zero".into(),
                vuids: &["VUID-vkMapMemory-size-00680"],
                ..Default::default()
            }));
        }
        // The subtraction cannot underflow: `offset < allocation_size` was
        // checked above.
        if !(size <= memory.allocation_size() - offset) {
            return Err(Box::new(ValidationError {
                context: "size".into(),
                problem: "is not less than or equal to `self.allocation_size()` minus `offset`"
                    .into(),
                vuids: &["VUID-vkMapMemory-size-00681"],
                ..Default::default()
            }));
        }
        let atom_size = memory.atom_size();
        // Non-coherent mappings must start on an atom boundary and end either
        // on an atom boundary or at the end of the allocation.
        if !memory.is_coherent
            && (!is_aligned(offset, atom_size)
                || (!is_aligned(size, atom_size) && offset + size != memory.allocation_size()))
        {
            return Err(Box::new(ValidationError {
                problem: "`self.memory_type_index()` refers to a memory type whose \
                    `property_flags` does not contain `MemoryPropertyFlags::HOST_COHERENT`, and \
                    `offset` and/or `size` are not aligned to the `non_coherent_atom_size` device \
                    property"
                    .into(),
                ..Default::default()
            }));
        }
        Ok(())
    }
}
impl Default for MemoryMapInfo {
    /// Returns a zeroed info; `size` must be set to a nonzero value before
    /// it passes validation.
    #[inline]
    fn default() -> Self {
        Self {
            offset: 0,
            size: 0,
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// Parameters for [`DeviceMemory::unmap`]. Currently carries no options.
#[derive(Debug)]
pub struct MemoryUnmapInfo {
    pub _ne: crate::NonExhaustive,
}
impl MemoryUnmapInfo {
    /// Validates the unmap parameters. There is currently nothing to check;
    /// the destructuring only guards against future fields being ignored.
    pub(crate) fn validate(&self, _memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
        let Self { _ne: _ } = *self;
        Ok(())
    }
}
impl Default for MemoryUnmapInfo {
    /// Returns the only possible value.
    #[inline]
    fn default() -> Self {
        Self {
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// Describes the currently host-mapped region of a [`DeviceMemory`].
#[derive(Debug)]
pub struct MappingState {
    // Host pointer returned by `vkMapMemory` / `vkMapMemory2KHR`.
    ptr: NonNull<c_void>,
    // The mapped byte range, expressed in device-memory offsets.
    range: Range<DeviceSize>,
}
// SAFETY: `MappingState` is only bookkeeping around a raw mapping pointer
// owned by the `DeviceMemory`; it performs no unsynchronized access itself,
// so it is presumed sound to send/share across threads — the raw pointer
// field alone is what blocks the auto-impls.
unsafe impl Send for MappingState {}
unsafe impl Sync for MappingState {}
impl MappingState {
    /// Returns the host pointer to the start of the mapped range.
    #[inline]
    pub fn ptr(&self) -> NonNull<c_void> {
        self.ptr
    }
    /// Returns the offset (in bytes, into the device memory) at which the
    /// mapping starts.
    #[inline]
    pub fn offset(&self) -> DeviceSize {
        self.range.start
    }
    /// Returns the size of the mapped range in bytes.
    #[inline]
    pub fn size(&self) -> DeviceSize {
        self.range.end - self.range.start
    }
    /// Returns a pointer to the given subrange (in device-memory offsets),
    /// or `None` if `range` is not fully contained in the mapping.
    #[inline]
    pub fn slice(&self, range: Range<DeviceSize>) -> Option<NonNull<[u8]>> {
        if self.range.start <= range.start
            && range.start <= range.end
            && range.end <= self.range.end
        {
            Some(unsafe { self.slice_unchecked(range) })
        } else {
            None
        }
    }
    /// Returns a pointer to the given subrange without bounds checking.
    ///
    /// # Safety
    ///
    /// - `range` must be non-decreasing and contained in `self.range`.
    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn slice_unchecked(&self, range: Range<DeviceSize>) -> NonNull<[u8]> {
        let ptr = self.ptr.as_ptr();
        // Translate from a device-memory offset to an offset into the mapping.
        let ptr = ptr.add((range.start - self.range.start) as usize);
        let len = (range.end - range.start) as usize;
        let ptr = ptr::slice_from_raw_parts_mut(<*mut c_void>::cast::<u8>(ptr), len);
        NonNull::new_unchecked(ptr)
    }
}
/// A byte range of a [`DeviceMemory`] to flush or invalidate.
#[derive(Debug)]
pub struct MappedMemoryRange {
    /// Byte offset into the memory. Must be within the mapped range.
    pub offset: DeviceSize,
    /// Number of bytes, starting at `offset`.
    pub size: DeviceSize,
    pub _ne: crate::NonExhaustive,
}
impl MappedMemoryRange {
    // Validates that the range lies within the memory's current mapping and
    // satisfies the `non_coherent_atom_size` alignment rules.
    pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
        let &Self {
            offset,
            size,
            _ne: _,
        } = self;
        if let Some(state) = &memory.mapping_state {
            // `offset <= state.range.end` must be established before the
            // subtraction: `DeviceSize` is unsigned, so with an offset past
            // the end of the mapping `state.range.end - offset` would
            // underflow — panicking in debug builds, and wrapping to a huge
            // value in release builds, which would wrongly let the bounds
            // check pass.
            if !(state.range.start <= offset
                && offset <= state.range.end
                && size <= state.range.end - offset)
            {
                return Err(Box::new(ValidationError {
                    problem: "is not contained within the mapped range of this device memory"
                        .into(),
                    vuids: &["VUID-VkMappedMemoryRange-size-00685"],
                    ..Default::default()
                }));
            }
        } else {
            return Err(Box::new(ValidationError {
                problem: "this device memory is not currently host-mapped".into(),
                vuids: &["VUID-VkMappedMemoryRange-memory-00684"],
                ..Default::default()
            }));
        }
        if !is_aligned(offset, memory.atom_size()) {
            return Err(Box::new(ValidationError {
                context: "offset".into(),
                problem: "is not aligned to the `non_coherent_atom_size` device property".into(),
                vuids: &["VUID-VkMappedMemoryRange-offset-00687"],
                ..Default::default()
            }));
        }
        // `size` must either be atom-aligned or extend exactly to the end of
        // the allocation. The subtraction is safe here: the mapping check
        // above guarantees `offset <= state.range.end <= allocation_size`.
        if !(is_aligned(size, memory.atom_size()) || size == memory.allocation_size() - offset) {
            return Err(Box::new(ValidationError {
                context: "size".into(),
                problem: "is not aligned to the `non_coherent_atom_size` device property nor \
                    equal to `self.allocation_size()` minus `offset`"
                    .into(),
                vuids: &["VUID-VkMappedMemoryRange-size-01390"],
                ..Default::default()
            }));
        }
        Ok(())
    }
}
impl Default for MappedMemoryRange {
    /// Returns an empty range starting at offset zero.
    #[inline]
    fn default() -> Self {
        Self {
            offset: 0,
            size: 0,
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// A [`DeviceMemory`] that has been host-mapped for its whole lifetime.
#[derive(Debug)]
#[deprecated(
    since = "0.34.0",
    note = "use the methods provided directly on `DeviceMemory` instead"
)]
pub struct MappedDeviceMemory {
    memory: DeviceMemory,
    // Host pointer to the start of the mapping.
    pointer: *mut c_void,
    // The mapped byte range, in device-memory offsets.
    range: Range<DeviceSize>,
    // Cached `non_coherent_atom_size` device property.
    atom_size: DeviceAlignment,
    // Cached: the memory type has `HOST_COHERENT`.
    is_coherent: bool,
}
#[allow(deprecated)]
impl MappedDeviceMemory {
/// Maps `range` of `memory` and wraps it in a `MappedDeviceMemory`.
///
/// # Errors
///
/// Fails validation if `range` is empty or out of bounds, the memory type is
/// not host-visible, or the memory is already mapped.
#[inline]
pub fn new(
    memory: DeviceMemory,
    range: Range<DeviceSize>,
) -> Result<Self, Validated<VulkanError>> {
    Self::validate_new(&memory, range.clone())?;
    unsafe { Ok(Self::new_unchecked(memory, range)?) }
}

// Validation for `new`: range, visibility, not-already-mapped, alignment.
fn validate_new(
    memory: &DeviceMemory,
    range: Range<DeviceSize>,
) -> Result<(), Box<ValidationError>> {
    if range.is_empty() {
        return Err(Box::new(ValidationError {
            context: "range".into(),
            problem: "is empty".into(),
            vuids: &["VUID-vkMapMemory-size-00680"],
            ..Default::default()
        }));
    }
    let device = memory.device();
    let memory_type = &device.physical_device().memory_properties().memory_types
        [memory.memory_type_index() as usize];
    if !memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_VISIBLE)
    {
        return Err(Box::new(ValidationError {
            context: "memory".into(),
            problem: "has a memory type whose `property_flags` does not contain \
                `MemoryPropertyFlags::HOST_VISIBLE`"
                .into(),
            vuids: &["VUID-vkMapMemory-memory-00682"],
            ..Default::default()
        }));
    }
    if memory.mapping_state().is_some() {
        return Err(Box::new(ValidationError {
            context: "memory".into(),
            problem: "is already host-mapped".into(),
            vuids: &["VUID-vkMapMemory-memory-00678"],
            ..Default::default()
        }));
    }
    if range.end > memory.allocation_size {
        return Err(Box::new(ValidationError {
            problem: "`range.end` is greater than `memory.allocation_size()`".into(),
            vuids: &[
                "VUID-vkMapMemory-offset-00679",
                "VUID-vkMapMemory-size-00681",
            ],
            ..Default::default()
        }));
    }
    let is_coherent = memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_COHERENT);
    let atom_size = device.physical_device().properties().non_coherent_atom_size;
    // Non-coherent mappings must start on an atom boundary and end either on
    // an atom boundary or at the end of the allocation.
    if !is_coherent
        && (!is_aligned(range.start, atom_size)
            || (!is_aligned(range.end, atom_size) && range.end != memory.allocation_size))
    {
        return Err(Box::new(ValidationError {
            problem: "`memory` has a memory type whose `property_flags` does not contain \
                `MemoryPropertyFlags::HOST_COHERENT`, and `range.start` and/or `range.end` \
                are not aligned to the `non_coherent_atom_size` device property"
                .into(),
            ..Default::default()
        }));
    }
    Ok(())
}

/// Maps the memory without validation.
///
/// # Safety
///
/// - The preconditions checked by [`Self::new`] must hold.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn new_unchecked(
    memory: DeviceMemory,
    range: Range<DeviceSize>,
) -> Result<Self, VulkanError> {
    // The mapped length must fit in `isize` for sound pointer arithmetic.
    assert!(range.end - range.start <= isize::MAX.try_into().unwrap());
    let device = memory.device();
    let pointer = unsafe {
        let fns = device.fns();
        let mut output = MaybeUninit::uninit();
        (fns.v1_0.map_memory)(
            device.handle(),
            memory.handle,
            range.start,
            range.end - range.start,
            ash::vk::MemoryMapFlags::empty(),
            output.as_mut_ptr(),
        )
        .result()
        .map_err(VulkanError::from)?;
        output.assume_init()
    };
    // Cache the properties needed for flush/invalidate decisions.
    let atom_size = device.physical_device().properties().non_coherent_atom_size;
    let memory_type = &device.physical_device().memory_properties().memory_types
        [memory.memory_type_index() as usize];
    let is_coherent = memory_type
        .property_flags
        .intersects(MemoryPropertyFlags::HOST_COHERENT);
    Ok(MappedDeviceMemory {
        memory,
        pointer,
        range,
        atom_size,
        is_coherent,
    })
}

/// Unmaps the memory and returns the underlying [`DeviceMemory`].
#[inline]
pub fn unmap(self) -> DeviceMemory {
    unsafe {
        let device = self.memory.device();
        let fns = device.fns();
        (fns.v1_0.unmap_memory)(device.handle(), self.memory.handle);
    }
    self.memory
}

/// Invalidates the host cache for `range`, making device writes visible to
/// the host.
///
/// # Safety
///
/// - There must be no operations pending or in progress that write to the
///   specified range from the device while the host reads it.
#[inline]
pub unsafe fn invalidate_range(
    &self,
    range: Range<DeviceSize>,
) -> Result<(), Validated<VulkanError>> {
    self.validate_range(range.clone())?;
    Ok(self.invalidate_range_unchecked(range)?)
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn invalidate_range_unchecked(
&self,
range: Range<DeviceSize>,
) -> Result<(), VulkanError> {
if self.is_coherent {
return Ok(());
}
let range = ash::vk::MappedMemoryRange {
memory: self.memory.handle(),
offset: range.start,
size: range.end - range.start,
..Default::default()
};
let fns = self.memory.device().fns();
(fns.v1_0.invalidate_mapped_memory_ranges)(self.memory.device().handle(), 1, &range)
.result()
.map_err(VulkanError::from)?;
Ok(())
}
#[inline]
pub unsafe fn flush_range(
&self,
range: Range<DeviceSize>,
) -> Result<(), Validated<VulkanError>> {
self.validate_range(range.clone())?;
Ok(self.flush_range_unchecked(range)?)
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn flush_range_unchecked(
&self,
range: Range<DeviceSize>,
) -> Result<(), VulkanError> {
if self.is_coherent {
return Ok(());
}
let range = ash::vk::MappedMemoryRange {
memory: self.memory.handle(),
offset: range.start,
size: range.end - range.start,
..Default::default()
};
let fns = self.device().fns();
(fns.v1_0.flush_mapped_memory_ranges)(self.memory.device().handle(), 1, &range)
.result()
.map_err(VulkanError::from)?;
Ok(())
}
#[inline]
pub unsafe fn read(&self, range: Range<DeviceSize>) -> Result<&[u8], Box<ValidationError>> {
self.validate_range(range.clone())?;
Ok(self.read_unchecked(range))
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn read_unchecked(&self, range: Range<DeviceSize>) -> &[u8] {
slice::from_raw_parts(
self.pointer.add((range.start - self.range.start) as usize) as *const u8,
(range.end - range.start) as usize,
)
}
#[inline]
pub unsafe fn write(
&self,
range: Range<DeviceSize>,
) -> Result<&mut [u8], Box<ValidationError>> {
self.validate_range(range.clone())?;
Ok(self.write_unchecked(range))
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
#[allow(clippy::mut_from_ref)]
pub unsafe fn write_unchecked(&self, range: Range<DeviceSize>) -> &mut [u8] {
slice::from_raw_parts_mut(
self.pointer.add((range.start - self.range.start) as usize) as *mut u8,
(range.end - range.start) as usize,
)
}
#[inline]
fn validate_range(&self, range: Range<DeviceSize>) -> Result<(), Box<ValidationError>> {
if range.is_empty() {
return Err(Box::new(ValidationError {
context: "range".into(),
problem: "is empty".into(),
..Default::default()
}));
}
if range.start < self.range.start || range.end > self.range.end {
return Err(Box::new(ValidationError {
context: "range".into(),
problem: "is not within the mapped range of this mapped device memory".into(),
vuids: &["VUID-VkMappedMemoryRange-size-00685"],
..Default::default()
}));
}
if !self.is_coherent {
if !is_aligned(range.start, self.atom_size)
|| (!is_aligned(range.end, self.atom_size)
&& range.end != self.memory.allocation_size)
{
return Err(Box::new(ValidationError {
problem: "this mapped device memory has a memory type whose `property_flags` \
does not contain `MemoryPropertyFlags::HOST_COHERENT`, and \
`range.start` and/or `range.end` are not aligned to the \
`non_coherent_atom_size` device property"
.into(),
vuids: &[
"VUID-VkMappedMemoryRange-offset-00687",
"VUID-VkMappedMemoryRange-size-01390",
],
..Default::default()
}));
}
}
Ok(())
}
}
#[allow(deprecated)]
impl AsRef<DeviceMemory> for MappedDeviceMemory {
    /// Borrows the underlying device memory.
    #[inline]
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}

#[allow(deprecated)]
impl AsMut<DeviceMemory> for MappedDeviceMemory {
    /// Mutably borrows the underlying device memory.
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}

#[allow(deprecated)]
unsafe impl DeviceOwned for MappedDeviceMemory {
    /// Delegates to the underlying device memory's device.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}

// SAFETY: the raw `pointer` field is what suppresses the auto traits here; it refers to a
// mapping owned by this object. NOTE(review): soundness across threads also relies on the
// documented unsafe contracts of `read`/`write` being upheld by callers — confirm.
#[allow(deprecated)]
unsafe impl Send for MappedDeviceMemory {}

#[allow(deprecated)]
unsafe impl Sync for MappedDeviceMemory {}
#[cfg(test)]
mod tests {
    use super::MemoryAllocateInfo;
    use crate::memory::{DeviceMemory, MemoryPropertyFlags};

    /// Allocating a small block of memory should succeed.
    #[test]
    fn create() {
        let (device, _) = gfx_dev_and_queue!();
        let _ = DeviceMemory::allocate(
            device,
            MemoryAllocateInfo {
                allocation_size: 256,
                memory_type_index: 0,
                ..Default::default()
            },
        )
        .unwrap();
    }

    /// Allocating zero bytes is invalid and must fail validation.
    #[test]
    fn zero_size() {
        let (device, _) = gfx_dev_and_queue!();
        assert_should_panic!({
            let _ = DeviceMemory::allocate(
                device.clone(),
                MemoryAllocateInfo {
                    allocation_size: 0,
                    memory_type_index: 0,
                    ..Default::default()
                },
            )
            .unwrap();
        });
    }

    /// A single absurdly large allocation must return an error rather than succeed.
    #[test]
    #[cfg(target_pointer_width = "64")]
    fn oom_single() {
        let (device, _) = gfx_dev_and_queue!();
        // Pick any memory type that is not lazily allocated.
        let memory_type_index = device
            .physical_device()
            .memory_properties()
            .memory_types
            .iter()
            .position(|memory_type| {
                !memory_type
                    .property_flags
                    .intersects(MemoryPropertyFlags::LAZILY_ALLOCATED)
            })
            .unwrap() as u32;

        let result = DeviceMemory::allocate(
            device,
            MemoryAllocateInfo {
                allocation_size: 0xffffffffffffffff,
                memory_type_index,
                ..Default::default()
            },
        );
        assert!(result.is_err());
    }

    /// Four allocations of a third of the heap cannot all fit; at least one must fail.
    #[test]
    #[ignore]
    fn oom_multi() {
        let (device, _) = gfx_dev_and_queue!();
        // Pick any memory type that is not lazily allocated, and note which heap it uses.
        let (memory_type_index, memory_type) = device
            .physical_device()
            .memory_properties()
            .memory_types
            .iter()
            .enumerate()
            .find(|(_, memory_type)| {
                !memory_type
                    .property_flags
                    .intersects(MemoryPropertyFlags::LAZILY_ALLOCATED)
            })
            .map(|(index, memory_type)| (index as u32, memory_type))
            .unwrap();
        let heap_size = device.physical_device().memory_properties().memory_heaps
            [memory_type.heap_index as usize]
            .size;

        let mut allocs = Vec::new();
        for _ in 0..4 {
            let result = DeviceMemory::allocate(
                device.clone(),
                MemoryAllocateInfo {
                    allocation_size: heap_size / 3,
                    memory_type_index,
                    ..Default::default()
                },
            );
            match result {
                Ok(alloc) => allocs.push(alloc),
                // An allocation failed: the test has passed.
                Err(_) => return,
            }
        }

        panic!()
    }

    /// The device's allocation counter must track live `DeviceMemory` objects.
    #[test]
    fn allocation_count() {
        let (device, _) = gfx_dev_and_queue!();
        assert_eq!(device.allocation_count(), 0);

        let _first = DeviceMemory::allocate(
            device.clone(),
            MemoryAllocateInfo {
                allocation_size: 256,
                memory_type_index: 0,
                ..Default::default()
            },
        )
        .unwrap();
        assert_eq!(device.allocation_count(), 1);

        {
            let _second = DeviceMemory::allocate(
                device.clone(),
                MemoryAllocateInfo {
                    allocation_size: 256,
                    memory_type_index: 0,
                    ..Default::default()
                },
            )
            .unwrap();
            assert_eq!(device.allocation_count(), 2);
        }
        // Dropping the second allocation frees it and decrements the counter.
        assert_eq!(device.allocation_count(), 1);
    }
}