use self::allocator::{
align_up, AllocationHandle, AllocationType, DeviceLayout, MemoryAlloc, MemoryAllocator,
Suballocation,
};
pub use self::{alignment::*, device_memory::*};
use crate::{
buffer::{sys::RawBuffer, Subbuffer},
device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
image::{sys::RawImage, Image, ImageAspects},
macros::vulkan_bitflags,
sync::{semaphore::Semaphore, HostAccessError},
DeviceSize, Validated, ValidationError, VulkanError,
};
use std::{
cmp,
mem::ManuallyDrop,
num::NonZeroU64,
ops::{Bound, Range, RangeBounds, RangeTo},
ptr::NonNull,
sync::Arc,
};
mod alignment;
pub mod allocator;
mod device_memory;
/// Memory backing a resource, either as a whole dedicated block of
/// [`DeviceMemory`] or as a suballocation handed out by a [`MemoryAllocator`].
#[derive(Debug)]
pub struct ResourceMemory {
    // Wrapped in `ManuallyDrop` so the `Drop` impl can move the `Arc` out and
    // hand it back to the allocator inside a rebuilt `MemoryAlloc`.
    device_memory: ManuallyDrop<DeviceOwnedDebugWrapper<Arc<DeviceMemory>>>,
    // Byte offset of this allocation within `device_memory`; always 0 for
    // dedicated allocations (see `new_dedicated_unchecked`).
    offset: DeviceSize,
    // Size of this allocation in bytes.
    size: DeviceSize,
    allocation_type: AllocationType,
    // Opaque handle the allocator associated with the whole allocation.
    allocation_handle: AllocationHandle,
    // `Some` when this memory is a suballocation; `None` when it covers the
    // whole `DeviceMemory` block.
    suballocation_handle: Option<AllocationHandle>,
    // Allocator to return the memory to on drop. `None` for dedicated
    // allocations created without an allocator, in which case dropping the
    // `Arc<DeviceMemory>` is all that happens.
    allocator: Option<Arc<dyn MemoryAllocator>>,
}
impl ResourceMemory {
    /// Creates a `ResourceMemory` that covers the whole of `device_memory` as
    /// a dedicated allocation, with no allocator attached.
    pub fn new_dedicated(device_memory: DeviceMemory) -> Self {
        unsafe { Self::new_dedicated_unchecked(Arc::new(device_memory)) }
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn new_dedicated_unchecked(device_memory: Arc<DeviceMemory>) -> Self {
        ResourceMemory {
            offset: 0,
            // A dedicated allocation spans the entire memory block.
            size: device_memory.allocation_size(),
            allocation_type: AllocationType::Unknown,
            allocation_handle: AllocationHandle::null(),
            suballocation_handle: None,
            // No allocator: nothing to return the memory to on drop.
            allocator: None,
            device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(device_memory)),
        }
    }

    /// Converts a [`MemoryAlloc`] produced by `allocator` into a
    /// `ResourceMemory`. If the allocation carries a suballocation, its
    /// offset/size/type are adopted; otherwise the whole `DeviceMemory`
    /// block is used. The memory is handed back to `allocator` on drop.
    ///
    /// # Safety
    ///
    /// - `allocation` should originate from `allocator` — the `Drop` impl
    ///   passes the reconstructed `MemoryAlloc` to `allocator.deallocate`.
    ///   (NOTE(review): inferred from the `Drop` impl; confirm against the
    ///   allocator's documented contract.)
    #[inline]
    pub unsafe fn from_allocation(
        allocator: Arc<dyn MemoryAllocator>,
        allocation: MemoryAlloc,
    ) -> Self {
        if let Some(suballocation) = allocation.suballocation {
            ResourceMemory {
                offset: suballocation.offset,
                size: suballocation.size,
                allocation_type: suballocation.allocation_type,
                allocation_handle: allocation.allocation_handle,
                suballocation_handle: Some(suballocation.handle),
                allocator: Some(allocator),
                device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
            }
        } else {
            // No suballocation: treat the whole block as the allocation.
            ResourceMemory {
                offset: 0,
                size: allocation.device_memory.allocation_size(),
                allocation_type: AllocationType::Unknown,
                allocation_handle: allocation.allocation_handle,
                suballocation_handle: None,
                allocator: Some(allocator),
                device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
            }
        }
    }

    /// Returns the underlying block of [`DeviceMemory`].
    #[inline]
    pub fn device_memory(&self) -> &Arc<DeviceMemory> {
        &self.device_memory
    }

    /// Returns the byte offset of this allocation within the `DeviceMemory`
    /// block. Always 0 for dedicated allocations.
    #[inline]
    pub fn offset(&self) -> DeviceSize {
        self.offset
    }

    /// Returns the size of this allocation in bytes.
    #[inline]
    pub fn size(&self) -> DeviceSize {
        self.size
    }

    /// Returns the [`AllocationType`] of this allocation.
    #[inline]
    pub fn allocation_type(&self) -> AllocationType {
        self.allocation_type
    }

    // Reconstructs the `Suballocation` this memory was created from, if any.
    // Used by `Drop` to rebuild the original `MemoryAlloc` for deallocation.
    fn suballocation(&self) -> Option<Suballocation> {
        self.suballocation_handle.map(|handle| Suballocation {
            offset: self.offset,
            size: self.size,
            allocation_type: self.allocation_type,
            handle,
        })
    }

    /// Returns a pointer to the host-mapped bytes of this allocation covered
    /// by `range`. `range` is relative to this allocation, not to the
    /// underlying `DeviceMemory` block.
    ///
    /// Returns `None` if `range` is out of bounds of the allocation, and
    /// `Some(Err(..))` if the memory is not host-mapped or the translated
    /// range falls outside the mapped region.
    #[inline]
    pub fn mapped_slice(
        &self,
        range: impl RangeBounds<DeviceSize>,
    ) -> Option<Result<NonNull<[u8]>, HostAccessError>> {
        // Resolve the caller's bounds against `0..self.size()`.
        let mut range = self::range(range, ..self.size())?;
        // Translate from allocation-relative to block-relative offsets.
        range.start += self.offset();
        range.end += self.offset();
        let res = if let Some(state) = self.device_memory().mapping_state() {
            state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
        } else {
            Err(HostAccessError::NotHostMapped)
        };
        Some(res)
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn mapped_slice_unchecked(
        &self,
        range: impl RangeBounds<DeviceSize>,
    ) -> Result<NonNull<[u8]>, HostAccessError> {
        // Same as `mapped_slice`, but the bounds of `range` are only
        // `debug_assert`ed (see `range_unchecked`), not validated.
        let mut range = self::range_unchecked(range, ..self.size());
        range.start += self.offset();
        range.end += self.offset();
        if let Some(state) = self.device_memory().mapping_state() {
            state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
        } else {
            Err(HostAccessError::NotHostMapped)
        }
    }

    // Returns the memory's atom size when the memory is not host-coherent
    // (i.e. explicit flush/invalidate is needed); `None` for coherent memory.
    pub(crate) fn atom_size(&self) -> Option<DeviceAlignment> {
        let memory = self.device_memory();
        (!memory.is_coherent()).then_some(memory.atom_size())
    }

    /// Invalidates the host cache for `memory_range`, which is relative to
    /// this allocation. Validates that the range fits in the allocation
    /// before delegating to [`DeviceMemory::invalidate_range`].
    ///
    /// # Safety
    ///
    /// See [`DeviceMemory::invalidate_range`] for the caller's obligations.
    #[inline]
    pub unsafe fn invalidate_range(
        &self,
        memory_range: MappedMemoryRange,
    ) -> Result<(), Validated<VulkanError>> {
        self.validate_memory_range(&memory_range)?;
        self.device_memory()
            .invalidate_range(self.create_memory_range(memory_range))
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn invalidate_range_unchecked(
        &self,
        memory_range: MappedMemoryRange,
    ) -> Result<(), VulkanError> {
        self.device_memory()
            .invalidate_range_unchecked(self.create_memory_range(memory_range))
    }

    /// Flushes host writes in `memory_range`, which is relative to this
    /// allocation. Validates that the range fits in the allocation before
    /// delegating to [`DeviceMemory::flush_range`].
    ///
    /// # Safety
    ///
    /// See [`DeviceMemory::flush_range`] for the caller's obligations.
    #[inline]
    pub unsafe fn flush_range(
        &self,
        memory_range: MappedMemoryRange,
    ) -> Result<(), Validated<VulkanError>> {
        self.validate_memory_range(&memory_range)?;
        self.device_memory()
            .flush_range(self.create_memory_range(memory_range))
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    #[inline]
    pub unsafe fn flush_range_unchecked(
        &self,
        memory_range: MappedMemoryRange,
    ) -> Result<(), VulkanError> {
        self.device_memory()
            .flush_range_unchecked(self.create_memory_range(memory_range))
    }

    // Checks that `memory_range` lies entirely within this allocation. The
    // condition is phrased as `offset <= size && size <= size - offset`
    // (rather than `offset + size <= size`) to avoid overflow in the sum.
    fn validate_memory_range(
        &self,
        memory_range: &MappedMemoryRange,
    ) -> Result<(), Box<ValidationError>> {
        let &MappedMemoryRange {
            offset,
            size,
            _ne: _,
        } = memory_range;
        if !(offset <= self.size() && size <= self.size() - offset) {
            return Err(Box::new(ValidationError {
                context: "memory_range".into(),
                problem: "is not contained within the allocation".into(),
                ..Default::default()
            }));
        }
        Ok(())
    }

    // Translates `memory_range` from allocation-relative to
    // `DeviceMemory`-relative coordinates. If the range reaches the very end
    // of this allocation, the size is rounded up to the memory's atom size,
    // clamped to the end of the memory block — presumably to satisfy the
    // atom-size alignment rules for flush/invalidate ranges (confirm against
    // `VkMappedMemoryRange` valid usage).
    fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange {
        let MappedMemoryRange {
            mut offset,
            mut size,
            _ne: _,
        } = memory_range;
        let memory = self.device_memory();
        offset += self.offset();
        // `memory_range.offset` is the original, un-translated offset, so
        // this tests whether the range extends to the end of the allocation.
        if memory_range.offset + size == self.size() {
            let end = cmp::min(
                align_up(offset + size, memory.atom_size()),
                memory.allocation_size(),
            );
            size = end - offset;
        }
        MappedMemoryRange {
            offset,
            size,
            _ne: crate::NonExhaustive(()),
        }
    }
}
impl Drop for ResourceMemory {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: the `ManuallyDrop` field is taken exactly once here and is
        // never accessed afterwards; `drop` runs at most once.
        let device_memory = unsafe { ManuallyDrop::take(&mut self.device_memory) }.0;
        if let Some(allocator) = &self.allocator {
            // Rebuild the `MemoryAlloc` this `ResourceMemory` was created
            // from and return it to the allocator that produced it.
            let allocation = MemoryAlloc {
                device_memory,
                suballocation: self.suballocation(),
                allocation_handle: self.allocation_handle,
            };
            unsafe { allocator.deallocate(allocation) };
        }
        // With no allocator (dedicated allocation), the `Arc<DeviceMemory>`
        // moved out above is simply dropped here.
    }
}
unsafe impl DeviceOwned for ResourceMemory {
    /// Returns the device that owns the underlying `DeviceMemory`.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        let memory = self.device_memory();
        memory.device()
    }
}
/// The memory types and memory heaps available on a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryProperties {
    /// The available memory types; each one points into `memory_heaps` via
    /// its `heap_index`.
    pub memory_types: Vec<MemoryType>,
    /// The available memory heaps.
    pub memory_heaps: Vec<MemoryHeap>,
}
impl From<ash::vk::PhysicalDeviceMemoryProperties> for MemoryProperties {
    /// Converts the raw ash struct, keeping only the first `*_count` entries
    /// of its fixed-size `memory_types` / `memory_heaps` arrays.
    #[inline]
    fn from(val: ash::vk::PhysicalDeviceMemoryProperties) -> Self {
        let type_count = val.memory_type_count as usize;
        let heap_count = val.memory_heap_count as usize;

        let memory_types = val.memory_types[..type_count]
            .iter()
            .map(|ty| MemoryType {
                property_flags: ty.property_flags.into(),
                heap_index: ty.heap_index,
            })
            .collect();

        let memory_heaps = val.memory_heaps[..heap_count]
            .iter()
            .map(|heap| MemoryHeap {
                size: heap.size,
                flags: heap.flags.into(),
            })
            .collect();

        Self {
            memory_types,
            memory_heaps,
        }
    }
}
/// A memory type in which memory can be allocated.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryType {
    /// Properties of memory allocated from this type.
    pub property_flags: MemoryPropertyFlags,
    /// Index into [`MemoryProperties::memory_heaps`] of the heap this memory
    /// type draws from.
    pub heap_index: u32,
}
// NOTE: `//` line comments are used (rather than doc comments) inside this
// macro invocation so no `#[doc]` tokens are fed to the macro matcher.
vulkan_bitflags! {
    #[non_exhaustive]

    // Properties of a memory type; corresponds to `VkMemoryPropertyFlagBits`.
    MemoryPropertyFlags = MemoryPropertyFlags(u32);

    // Maps to `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
    DEVICE_LOCAL = DEVICE_LOCAL,

    // Maps to `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
    HOST_VISIBLE = HOST_VISIBLE,

    // Maps to `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
    HOST_COHERENT = HOST_COHERENT,

    // Maps to `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
    HOST_CACHED = HOST_CACHED,

    // Maps to `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
    LAZILY_ALLOCATED = LAZILY_ALLOCATED,

    // Maps to `VK_MEMORY_PROPERTY_PROTECTED_BIT`; requires Vulkan 1.1.
    PROTECTED = PROTECTED
    RequiresOneOf([
        RequiresAllOf([APIVersion(V1_1)]),
    ]),

    // AMD extension bit; requires `amd_device_coherent_memory`.
    DEVICE_COHERENT = DEVICE_COHERENT_AMD
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(amd_device_coherent_memory)]),
    ]),

    // AMD extension bit; requires `amd_device_coherent_memory`.
    DEVICE_UNCACHED = DEVICE_UNCACHED_AMD
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(amd_device_coherent_memory)]),
    ]),

    // NVIDIA extension bit; requires `nv_external_memory_rdma`.
    RDMA_CAPABLE = RDMA_CAPABLE_NV
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(nv_external_memory_rdma)]),
    ]),
}
/// A memory heap from which memory types allocate.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryHeap {
    /// Size of the heap in bytes.
    pub size: DeviceSize,
    /// Properties of the heap.
    pub flags: MemoryHeapFlags,
}
// NOTE: `//` line comments are used (rather than doc comments) inside this
// macro invocation so no `#[doc]` tokens are fed to the macro matcher.
vulkan_bitflags! {
    #[non_exhaustive]

    // Properties of a memory heap; corresponds to `VkMemoryHeapFlagBits`.
    MemoryHeapFlags = MemoryHeapFlags(u32);

    // Maps to `VK_MEMORY_HEAP_DEVICE_LOCAL_BIT`.
    DEVICE_LOCAL = DEVICE_LOCAL,

    // Maps to `VK_MEMORY_HEAP_MULTI_INSTANCE_BIT`; requires Vulkan 1.1 or
    // the `khr_device_group_creation` instance extension.
    MULTI_INSTANCE = MULTI_INSTANCE
    RequiresOneOf([
        RequiresAllOf([APIVersion(V1_1)]),
        RequiresAllOf([InstanceExtension(khr_device_group_creation)]),
    ]),
}
/// The memory requirements of a resource.
#[derive(Clone, Copy, Debug)]
pub struct MemoryRequirements {
    /// Required size and alignment, combined into a [`DeviceLayout`].
    pub layout: DeviceLayout,
    /// Bitmask over [`MemoryProperties::memory_types`]: bit `i` set means
    /// memory type `i` can back the resource (Vulkan `memoryTypeBits`
    /// convention).
    pub memory_type_bits: u32,
    /// Whether a dedicated allocation is preferred (but not required).
    pub prefers_dedicated_allocation: bool,
    /// Whether a dedicated allocation is required.
    pub requires_dedicated_allocation: bool,
}
/// The resource that a dedicated memory allocation is tied to.
#[derive(Clone, Copy, Debug)]
pub enum DedicatedAllocation<'a> {
    /// The allocation is dedicated to a buffer.
    Buffer(&'a RawBuffer),
    /// The allocation is dedicated to an image.
    Image(&'a RawImage),
}
// Owned, id-based counterpart of `DedicatedAllocation`: stores the resource's
// `id()` instead of a borrow, so no lifetime has to be carried around.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum DedicatedTo {
    // Id of the dedicated buffer.
    Buffer(NonZeroU64),
    // Id of the dedicated image.
    Image(NonZeroU64),
}
impl From<DedicatedAllocation<'_>> for DedicatedTo {
fn from(dedicated_allocation: DedicatedAllocation<'_>) -> Self {
match dedicated_allocation {
DedicatedAllocation::Buffer(buffer) => Self::Buffer(buffer.id()),
DedicatedAllocation::Image(image) => Self::Image(image.id()),
}
}
}
/// Capabilities of an external memory handle type.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct ExternalMemoryProperties {
    /// Whether memory using this handle type must be a dedicated allocation.
    pub dedicated_only: bool,
    /// Whether memory can be exported via this handle type.
    pub exportable: bool,
    /// Whether memory can be imported via this handle type.
    pub importable: bool,
    /// Handle types that imported memory of this type can be exported as.
    pub export_from_imported_handle_types: ExternalMemoryHandleTypes,
    /// Handle types that can be used alongside this one.
    pub compatible_handle_types: ExternalMemoryHandleTypes,
}
impl From<ash::vk::ExternalMemoryProperties> for ExternalMemoryProperties {
    /// Unpacks the raw ash feature flags and handle-type masks.
    #[inline]
    fn from(val: ash::vk::ExternalMemoryProperties) -> Self {
        use ash::vk::ExternalMemoryFeatureFlags as Features;

        // All three booleans come from the same flags field; bind it once.
        let features = val.external_memory_features;

        Self {
            dedicated_only: features.intersects(Features::DEDICATED_ONLY),
            exportable: features.intersects(Features::EXPORTABLE),
            importable: features.intersects(Features::IMPORTABLE),
            export_from_imported_handle_types: val.export_from_imported_handle_types.into(),
            compatible_handle_types: val.compatible_handle_types.into(),
        }
    }
}
/// Parameters for a sparse memory binding operation.
#[derive(Clone, Debug)]
pub struct BindSparseInfo {
    /// Semaphores to wait on before the binding operation executes.
    pub wait_semaphores: Vec<Arc<Semaphore>>,
    /// For each buffer, the list of sparse memory binds to apply to it.
    pub buffer_binds: Vec<(Subbuffer<[u8]>, Vec<SparseBufferMemoryBind>)>,
    /// For each image, the list of opaque (non-subresource) memory binds.
    pub image_opaque_binds: Vec<(Arc<Image>, Vec<SparseImageOpaqueMemoryBind>)>,
    /// For each image, the list of subresource-region memory binds.
    pub image_binds: Vec<(Arc<Image>, Vec<SparseImageMemoryBind>)>,
    /// Semaphores to signal once the binding operation completes.
    pub signal_semaphores: Vec<Arc<Semaphore>>,
    pub _ne: crate::NonExhaustive,
}
impl Default for BindSparseInfo {
#[inline]
fn default() -> Self {
Self {
wait_semaphores: Vec::new(),
buffer_binds: Vec::new(),
image_opaque_binds: Vec::new(),
image_binds: Vec::new(),
signal_semaphores: Vec::new(),
_ne: crate::NonExhaustive(()),
}
}
}
/// A single sparse memory bind for a region of a buffer.
#[derive(Clone, Debug, Default)]
pub struct SparseBufferMemoryBind {
    /// Byte offset into the buffer where the bound region starts.
    pub offset: DeviceSize,
    /// Size of the bound region in bytes.
    pub size: DeviceSize,
    /// The memory to bind and the byte offset into it. `None` presumably
    /// leaves/makes the region unbound — confirm against the bind-sparse
    /// implementation.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,
}
/// A single opaque (non-subresource) sparse memory bind for an image.
#[derive(Clone, Debug, Default)]
pub struct SparseImageOpaqueMemoryBind {
    /// Byte offset into the image's opaque memory where the region starts.
    pub offset: DeviceSize,
    /// Size of the bound region in bytes.
    pub size: DeviceSize,
    /// The memory to bind and the byte offset into it. `None` presumably
    /// leaves/makes the region unbound — confirm against the bind-sparse
    /// implementation.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,
    /// Whether this bind targets the image's metadata aspect
    /// (NOTE(review): inferred from the field name — confirm).
    pub metadata: bool,
}
/// A single sparse memory bind for a region of an image subresource.
#[derive(Clone, Debug, Default)]
pub struct SparseImageMemoryBind {
    /// The image aspects to bind.
    pub aspects: ImageAspects,
    /// Mip level of the subresource.
    pub mip_level: u32,
    /// Array layer of the subresource.
    pub array_layer: u32,
    /// Offset (x, y, z) of the bound region within the subresource.
    pub offset: [u32; 3],
    /// Extent (width, height, depth) of the bound region.
    pub extent: [u32; 3],
    /// The memory to bind and the byte offset into it. `None` presumably
    /// leaves/makes the region unbound — confirm against the bind-sparse
    /// implementation.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,
}
// Returns whether `offset` is a multiple of `alignment`, using the low-bits
// mask trick. Assumes the alignment is a power of two — presumably the
// invariant `DeviceAlignment` guarantees; confirm in the `alignment` module.
#[inline(always)]
pub(crate) fn is_aligned(offset: DeviceSize, alignment: DeviceAlignment) -> bool {
    let mask = alignment.as_devicesize() - 1;

    (offset & mask) == 0
}
// Resolves arbitrary `RangeBounds` against `..bounds.end` into a concrete
// half-open `Range`, following slice-indexing semantics. Returns `None` when
// a bound overflows `DeviceSize` or the resolved range is out of bounds.
pub(crate) fn range(
    range: impl RangeBounds<DeviceSize>,
    bounds: RangeTo<DeviceSize>,
) -> Option<Range<DeviceSize>> {
    let len = bounds.end;

    let start = match range.start_bound() {
        Bound::Unbounded => 0,
        Bound::Included(&start) => start,
        // An excluded start means the range begins one past it; `checked_add`
        // turns overflow into `None` rather than a panic.
        Bound::Excluded(&start) => start.checked_add(1)?,
    };
    let end = match range.end_bound() {
        Bound::Unbounded => len,
        // An included end is one-past-the-last, hence the +1.
        Bound::Included(&end) => end.checked_add(1)?,
        Bound::Excluded(&end) => end,
    };

    if start > end || end > len {
        return None;
    }

    Some(start..end)
}
// Like `range`, but the caller guarantees the bounds are valid: overflow and
// out-of-bounds are only caught by `debug_assert!` in debug builds.
pub(crate) fn range_unchecked(
    range: impl RangeBounds<DeviceSize>,
    bounds: RangeTo<DeviceSize>,
) -> Range<DeviceSize> {
    let len = bounds.end;

    let start = match range.start_bound() {
        Bound::Unbounded => 0,
        Bound::Included(&start) => start,
        Bound::Excluded(&start) => start + 1,
    };
    let end = match range.end_bound() {
        Bound::Unbounded => len,
        Bound::Included(&end) => end + 1,
        Bound::Excluded(&end) => end,
    };

    debug_assert!(start <= end && end <= len);

    start..end
}