pub use self::{subbuffer::*, sys::*, usage::*};
use crate::{
device::{physical::PhysicalDevice, Device, DeviceOwned},
macros::{vulkan_bitflags, vulkan_enum},
memory::{
allocator::{
AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator,
MemoryAllocatorError,
},
DedicatedAllocation, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
ExternalMemoryProperties, MemoryRequirements, ResourceMemory,
},
range_map::RangeMap,
sync::{future::AccessError, AccessConflict, CurrentAccess, Sharing},
DeviceSize, NonNullDeviceAddress, NonZeroDeviceSize, Requires, RequiresAllOf, RequiresOneOf,
Validated, ValidationError, Version, VulkanError, VulkanObject,
};
use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
use std::{
error::Error,
fmt::{Display, Formatter},
hash::{Hash, Hasher},
ops::Range,
sync::Arc,
};
pub mod allocator;
pub mod subbuffer;
pub mod sys;
mod usage;
pub mod view;
/// A Vulkan buffer resource, together with the memory backing it and the
/// CPU/GPU access state of its byte ranges.
#[derive(Debug)]
pub struct Buffer {
    // The raw Vulkan buffer handle plus its creation parameters.
    inner: RawBuffer,
    // How this buffer is backed: a bound allocation, or sparse binding.
    memory: BufferMemory,
    // Per-range read/write lock counters, used by the `check_*`/`*_lock`
    // methods of `BufferState` to detect access conflicts.
    state: Mutex<BufferState>,
}
/// The kind of memory backing a [`Buffer`].
#[derive(Debug)]
pub enum BufferMemory {
    /// The buffer is backed by a single bound memory allocation.
    Normal(ResourceMemory),
    /// The buffer uses sparse binding; memory is bound in pages rather than
    /// as one allocation.
    Sparse,
}
impl Buffer {
    /// Creates a buffer sized for a single `T`, writes `data` into it, and
    /// returns a [`Subbuffer`] spanning the whole buffer.
    ///
    /// # Panics
    ///
    /// - Panics if `create_info.size` is not zero (the size is derived from `T`).
    /// - Panics if writing `data` fails — NOTE(review): the `write().unwrap()`
    ///   below assumes the allocation ends up host-mappable; confirm against
    ///   `Subbuffer::write`'s contract.
    pub fn from_data<T>(
        allocator: Arc<dyn MemoryAllocator>,
        create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
        data: T,
    ) -> Result<Subbuffer<T>, Validated<AllocateBufferError>>
    where
        T: BufferContents,
    {
        let buffer = Buffer::new_sized(allocator, create_info, allocation_info)?;
        // Scope the write guard so the CPU write lock is released before we
        // hand the buffer back to the caller.
        {
            let mut write_guard = buffer.write().unwrap();
            *write_guard = data;
        }
        Ok(buffer)
    }
    /// Creates a buffer sized to hold all elements of `iter` and fills it from
    /// the iterator, returning a [`Subbuffer`] over the resulting slice.
    ///
    /// # Panics
    ///
    /// - Panics if `iter` is empty (a zero-length buffer is rejected by
    ///   `new_unsized`).
    /// - Panics if `create_info.size` is not zero.
    /// - Panics if writing the elements fails (see `from_data`).
    pub fn from_iter<T, I>(
        allocator: Arc<dyn MemoryAllocator>,
        create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
        iter: I,
    ) -> Result<Subbuffer<[T]>, Validated<AllocateBufferError>>
    where
        T: BufferContents,
        I: IntoIterator<Item = T>,
        // `ExactSizeIterator` is required so the buffer length is known up front.
        I::IntoIter: ExactSizeIterator,
    {
        let iter = iter.into_iter();
        let buffer = Buffer::new_slice(
            allocator,
            create_info,
            allocation_info,
            iter.len().try_into().unwrap(),
        )?;
        // Scope the write guard so the CPU write lock is released on return.
        {
            let mut write_guard = buffer.write().unwrap();
            for (o, i) in write_guard.iter_mut().zip(iter) {
                *o = i;
            }
        }
        Ok(buffer)
    }
    /// Creates an uninitialized buffer sized for a single `T`.
    ///
    /// # Panics
    ///
    /// - Panics if `create_info.size` is not zero.
    pub fn new_sized<T>(
        allocator: Arc<dyn MemoryAllocator>,
        create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
    ) -> Result<Subbuffer<T>, Validated<AllocateBufferError>>
    where
        T: BufferContents,
    {
        let layout = T::LAYOUT.unwrap_sized();
        let buffer = Subbuffer::new(Buffer::new(
            allocator,
            create_info,
            allocation_info,
            layout,
        )?);
        // SAFETY-adjacent: the buffer was allocated with exactly `T`'s layout,
        // so reinterpreting the raw subbuffer as `T` is valid.
        Ok(unsafe { buffer.reinterpret_unchecked() })
    }
    /// Creates an uninitialized buffer for a slice of `len` elements of `T`.
    ///
    /// # Panics
    ///
    /// - Panics if `len` is zero.
    /// - Panics if `create_info.size` is not zero.
    pub fn new_slice<T>(
        allocator: Arc<dyn MemoryAllocator>,
        create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
        len: DeviceSize,
    ) -> Result<Subbuffer<[T]>, Validated<AllocateBufferError>>
    where
        T: BufferContents,
    {
        // A slice is just the unsized case specialized to `[T]`.
        Buffer::new_unsized(allocator, create_info, allocation_info, len)
    }
    /// Creates an uninitialized buffer for unsized contents `T` with `len`
    /// trailing elements.
    ///
    /// # Panics
    ///
    /// - Panics if `len` is zero.
    /// - Panics if `create_info.size` is not zero.
    pub fn new_unsized<T>(
        allocator: Arc<dyn MemoryAllocator>,
        create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
        len: DeviceSize,
    ) -> Result<Subbuffer<T>, Validated<AllocateBufferError>>
    where
        T: BufferContents + ?Sized,
    {
        let len = NonZeroDeviceSize::new(len).expect("empty slices are not valid buffer contents");
        let layout = T::LAYOUT.layout_for_len(len).unwrap();
        let buffer = Subbuffer::new(Buffer::new(
            allocator,
            create_info,
            allocation_info,
            layout,
        )?);
        // The buffer matches `T`'s layout for `len` elements, so the
        // reinterpretation is valid.
        Ok(unsafe { buffer.reinterpret_unchecked() })
    }
    /// Creates a raw buffer with the given `layout`, allocates memory for it
    /// and binds that memory to it.
    ///
    /// # Panics
    ///
    /// - Panics if `layout.alignment()` is greater than 64.
    /// - Panics if `create_info.size` is not zero — the `Buffer::new*`
    ///   constructors compute the size from the layout themselves.
    pub fn new(
        allocator: Arc<dyn MemoryAllocator>,
        mut create_info: BufferCreateInfo,
        allocation_info: AllocationCreateInfo,
        layout: DeviceLayout,
    ) -> Result<Arc<Self>, Validated<AllocateBufferError>> {
        // NOTE(review): 64 appears to be an upper bound on supported buffer
        // alignments here — confirm where this limit comes from.
        assert!(layout.alignment().as_devicesize() <= 64);
        assert_eq!(
            create_info.size, 0,
            "`Buffer::new*` functions set the `create_info.size` field themselves, you should not \
            set it yourself"
        );
        create_info.size = layout.size();
        let raw_buffer =
            RawBuffer::new(allocator.device().clone(), create_info).map_err(|err| match err {
                Validated::Error(err) => Validated::Error(AllocateBufferError::CreateBuffer(err)),
                Validated::ValidationError(err) => err.into(),
            })?;
        // Tighten the implementation-reported requirements to also satisfy the
        // caller-requested alignment.
        let mut requirements = *raw_buffer.memory_requirements();
        requirements.layout = requirements.layout.align_to(layout.alignment()).unwrap();
        let allocation = allocator
            .allocate(
                requirements,
                AllocationType::Linear,
                allocation_info,
                // Hint that this allocation may be dedicated to this buffer.
                Some(DedicatedAllocation::Buffer(&raw_buffer)),
            )
            .map_err(AllocateBufferError::AllocateMemory)?;
        let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };
        let buffer = raw_buffer.bind_memory(allocation).map_err(|(err, _, _)| {
            err.map(AllocateBufferError::BindMemory)
                .map_validation(|err| err.add_context("RawBuffer::bind_memory"))
        })?;
        Ok(Arc::new(buffer))
    }
    /// Assembles a `Buffer` from an already-created raw buffer and its backing
    /// memory, initializing the access-tracking state for the full size.
    fn from_raw(inner: RawBuffer, memory: BufferMemory) -> Self {
        let state = Mutex::new(BufferState::new(inner.size()));
        Buffer {
            inner,
            memory,
            state,
        }
    }
    /// Returns the type of memory that is backing this buffer.
    #[inline]
    pub fn memory(&self) -> &BufferMemory {
        &self.memory
    }
    /// Returns the memory requirements for this buffer.
    #[inline]
    pub fn memory_requirements(&self) -> &MemoryRequirements {
        self.inner.memory_requirements()
    }
    /// Returns the flags the buffer was created with.
    #[inline]
    pub fn flags(&self) -> BufferCreateFlags {
        self.inner.flags()
    }
    /// Returns the size of the buffer in bytes.
    #[inline]
    pub fn size(&self) -> DeviceSize {
        self.inner.size()
    }
    /// Returns the usage the buffer was created with.
    #[inline]
    pub fn usage(&self) -> BufferUsage {
        self.inner.usage()
    }
    /// Returns the sharing mode (exclusive or concurrent across queue families).
    #[inline]
    pub fn sharing(&self) -> &Sharing<SmallVec<[u32; 4]>> {
        self.inner.sharing()
    }
    /// Returns the external memory handle types the buffer was created with.
    #[inline]
    pub fn external_memory_handle_types(&self) -> ExternalMemoryHandleTypes {
        self.inner.external_memory_handle_types()
    }
    /// Returns the device address of this buffer, for use in shaders.
    ///
    /// # Errors
    ///
    /// Returns a validation error if the `buffer_device_address` feature is
    /// not enabled, or the buffer lacks `BufferUsage::SHADER_DEVICE_ADDRESS`.
    pub fn device_address(&self) -> Result<NonNullDeviceAddress, Box<ValidationError>> {
        self.validate_device_address()?;
        unsafe { Ok(self.device_address_unchecked()) }
    }
    // Checks the Vulkan validity rules for vkGetBufferDeviceAddress.
    fn validate_device_address(&self) -> Result<(), Box<ValidationError>> {
        let device = self.device();
        if !device.enabled_features().buffer_device_address {
            return Err(Box::new(ValidationError {
                requires_one_of: RequiresOneOf(&[RequiresAllOf(&[Requires::Feature(
                    "buffer_device_address",
                )])]),
                vuids: &["VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324"],
                ..Default::default()
            }));
        }
        if !self.usage().intersects(BufferUsage::SHADER_DEVICE_ADDRESS) {
            return Err(Box::new(ValidationError {
                context: "self.usage()".into(),
                problem: "does not contain `BufferUsage::SHADER_DEVICE_ADDRESS`".into(),
                vuids: &["VUID-VkBufferDeviceAddressInfo-buffer-02601"],
                ..Default::default()
            }));
        }
        Ok(())
    }
    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn device_address_unchecked(&self) -> NonNullDeviceAddress {
        let device = self.device();
        let info_vk = ash::vk::BufferDeviceAddressInfo {
            buffer: self.handle(),
            ..Default::default()
        };
        let ptr = {
            let fns = device.fns();
            // Pick the core 1.2 entry point when available, otherwise fall
            // back to the KHR and then the EXT extension variants.
            let f = if device.api_version() >= Version::V1_2 {
                fns.v1_2.get_buffer_device_address
            } else if device.enabled_extensions().khr_buffer_device_address {
                fns.khr_buffer_device_address.get_buffer_device_address_khr
            } else {
                fns.ext_buffer_device_address.get_buffer_device_address_ext
            };
            f(device.handle(), &info_vk)
        };
        NonNullDeviceAddress::new(ptr).unwrap()
    }
    // Locks and returns the per-range access state of this buffer.
    pub(crate) fn state(&self) -> MutexGuard<'_, BufferState> {
        self.state.lock()
    }
}
unsafe impl VulkanObject for Buffer {
    type Handle = ash::vk::Buffer;
    /// Returns the raw Vulkan buffer handle.
    #[inline]
    fn handle(&self) -> Self::Handle {
        self.inner.handle()
    }
}
unsafe impl DeviceOwned for Buffer {
    /// Returns the device this buffer belongs to.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}
// Equality and hashing are both delegated to the inner `RawBuffer`, keeping
// the `Eq`/`Hash` invariant (equal values hash equally) intact.
impl PartialEq for Buffer {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner
    }
}
impl Eq for Buffer {}
impl Hash for Buffer {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}
/// Error that can happen when creating a buffer with memory allocated for it.
/// Each variant identifies which of the three steps failed.
#[derive(Clone, Debug)]
pub enum AllocateBufferError {
    /// Creating the raw Vulkan buffer object failed.
    CreateBuffer(VulkanError),
    /// Allocating memory to back the buffer failed.
    AllocateMemory(MemoryAllocatorError),
    /// Binding the allocated memory to the buffer failed.
    BindMemory(VulkanError),
}
impl Error for AllocateBufferError {
    /// Exposes the underlying step-specific error as this error's source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Every variant wraps exactly one inner error; coerce it to the trait
        // object once and wrap in `Some` at the end.
        let inner: &(dyn Error + 'static) = match self {
            Self::CreateBuffer(err) => err,
            Self::AllocateMemory(err) => err,
            Self::BindMemory(err) => err,
        };
        Some(inner)
    }
}
impl Display for AllocateBufferError {
    /// Writes a short human-readable description of the failed step.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Select the message first, then emit it with a single write call.
        let msg = match self {
            Self::CreateBuffer(_) => "creating the buffer failed",
            Self::AllocateMemory(_) => "allocating memory for the buffer failed",
            Self::BindMemory(_) => "binding memory to the buffer failed",
        };
        f.write_str(msg)
    }
}
impl From<AllocateBufferError> for Validated<AllocateBufferError> {
    /// Wraps the error in `Validated::Error`, so `?` can convert the result
    /// of a fallible allocation step directly into the public error type.
    fn from(err: AllocateBufferError) -> Self {
        Self::Error(err)
    }
}
/// The current access state of every byte range of a buffer, used to detect
/// CPU/GPU access conflicts. State is tracked per range so that disjoint
/// subranges of one buffer can be locked independently.
#[derive(Debug)]
pub(crate) struct BufferState {
    ranges: RangeMap<DeviceSize, BufferRangeState>,
}
impl BufferState {
    /// Creates a state covering `0..size` with no readers and no writers.
    fn new(size: DeviceSize) -> Self {
        BufferState {
            ranges: [(
                0..size,
                BufferRangeState {
                    current_access: CurrentAccess::Shared {
                        cpu_reads: 0,
                        gpu_reads: 0,
                    },
                },
            )]
            .into_iter()
            .collect(),
        }
    }
    /// Checks whether the CPU may read `range`: fails if any overlapping
    /// subrange is exclusively held for writing by the CPU or the GPU.
    pub(crate) fn check_cpu_read(&self, range: Range<DeviceSize>) -> Result<(), AccessConflict> {
        for (_range, state) in self.ranges.range(&range) {
            match &state.current_access {
                CurrentAccess::CpuExclusive { .. } => return Err(AccessConflict::HostWrite),
                CurrentAccess::GpuExclusive { .. } => return Err(AccessConflict::DeviceWrite),
                CurrentAccess::Shared { .. } => (),
            }
        }
        Ok(())
    }
    /// Increments the CPU read count over `range`.
    ///
    /// # Safety
    ///
    /// The caller must have verified with `check_cpu_read` that the range is
    /// not being written; otherwise this hits `unreachable!`.
    pub(crate) unsafe fn cpu_read_lock(&mut self, range: Range<DeviceSize>) {
        // Split the map at the range boundaries so the counters are updated
        // exactly over `range` and not over larger enclosing ranges.
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                CurrentAccess::Shared { cpu_reads, .. } => {
                    *cpu_reads += 1;
                }
                _ => unreachable!("Buffer is being written by the CPU or GPU"),
            }
        }
    }
    /// Decrements the CPU read count over `range`.
    ///
    /// # Safety
    ///
    /// The range must currently hold a CPU read lock taken via `cpu_read_lock`.
    pub(crate) unsafe fn cpu_read_unlock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                CurrentAccess::Shared { cpu_reads, .. } => *cpu_reads -= 1,
                _ => unreachable!("Buffer was not locked for CPU read"),
            }
        }
    }
    /// Checks whether the CPU may write `range`: the range must have no
    /// CPU/GPU writers and no CPU/GPU readers at all.
    pub(crate) fn check_cpu_write(&self, range: Range<DeviceSize>) -> Result<(), AccessConflict> {
        for (_range, state) in self.ranges.range(&range) {
            match &state.current_access {
                CurrentAccess::CpuExclusive => return Err(AccessConflict::HostWrite),
                CurrentAccess::GpuExclusive { .. } => return Err(AccessConflict::DeviceWrite),
                // Fully idle range: writing is allowed.
                CurrentAccess::Shared {
                    cpu_reads: 0,
                    gpu_reads: 0,
                } => (),
                CurrentAccess::Shared { cpu_reads, .. } if *cpu_reads > 0 => {
                    return Err(AccessConflict::HostRead);
                }
                // Remaining case: gpu_reads > 0.
                CurrentAccess::Shared { .. } => return Err(AccessConflict::DeviceRead),
            }
        }
        Ok(())
    }
    /// Marks `range` as exclusively written by the CPU.
    ///
    /// # Safety
    ///
    /// The caller must have verified with `check_cpu_write` that the range is
    /// completely idle.
    pub(crate) unsafe fn cpu_write_lock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            state.current_access = CurrentAccess::CpuExclusive;
        }
    }
    /// Releases a CPU write lock, returning `range` to the idle shared state.
    ///
    /// # Safety
    ///
    /// The range must currently hold a CPU write lock taken via `cpu_write_lock`.
    pub(crate) unsafe fn cpu_write_unlock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                CurrentAccess::CpuExclusive => {
                    state.current_access = CurrentAccess::Shared {
                        cpu_reads: 0,
                        gpu_reads: 0,
                    }
                }
                _ => unreachable!("Buffer was not locked for CPU write"),
            }
        }
    }
    /// Checks whether the GPU may read `range`: only `Shared` ranges allow it.
    /// Note that `GpuExclusive` also rejects — GPU reads may not overlap a
    /// pending GPU write here.
    pub(crate) fn check_gpu_read(&self, range: Range<DeviceSize>) -> Result<(), AccessError> {
        for (_range, state) in self.ranges.range(&range) {
            match &state.current_access {
                CurrentAccess::Shared { .. } => (),
                _ => return Err(AccessError::AlreadyInUse),
            }
        }
        Ok(())
    }
    /// Increments the GPU read count over `range`.
    ///
    /// # Safety
    ///
    /// The range must not be CPU-locked. GPU reads may stack on top of an
    /// existing `GpuExclusive` state (both variants carry a `gpu_reads` count).
    pub(crate) unsafe fn gpu_read_lock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                CurrentAccess::GpuExclusive { gpu_reads, .. }
                | CurrentAccess::Shared { gpu_reads, .. } => *gpu_reads += 1,
                _ => unreachable!("Buffer is being written by the CPU"),
            }
        }
    }
    /// Decrements the GPU read count over `range`.
    ///
    /// # Safety
    ///
    /// The range must currently hold a GPU read lock taken via `gpu_read_lock`.
    pub(crate) unsafe fn gpu_read_unlock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                CurrentAccess::GpuExclusive { gpu_reads, .. } => *gpu_reads -= 1,
                CurrentAccess::Shared { gpu_reads, .. } => *gpu_reads -= 1,
                _ => unreachable!("Buffer was not locked for GPU read"),
            }
        }
    }
    /// Checks whether the GPU may write `range`: the range must be completely
    /// idle (no CPU or GPU readers or writers).
    pub(crate) fn check_gpu_write(&self, range: Range<DeviceSize>) -> Result<(), AccessError> {
        for (_range, state) in self.ranges.range(&range) {
            match &state.current_access {
                CurrentAccess::Shared {
                    cpu_reads: 0,
                    gpu_reads: 0,
                } => (),
                _ => return Err(AccessError::AlreadyInUse),
            }
        }
        Ok(())
    }
    /// Takes or stacks a GPU write lock over `range`, preserving any existing
    /// GPU read count.
    ///
    /// # Safety
    ///
    /// The range must not be accessed by the CPU in any way.
    pub(crate) unsafe fn gpu_write_lock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                // Already GPU-exclusive: just count one more writer.
                CurrentAccess::GpuExclusive { gpu_writes, .. } => *gpu_writes += 1,
                // Idle-for-CPU shared state: promote to exclusive, carrying
                // over the GPU read count.
                &mut CurrentAccess::Shared {
                    cpu_reads: 0,
                    gpu_reads,
                } => {
                    state.current_access = CurrentAccess::GpuExclusive {
                        gpu_reads,
                        gpu_writes: 1,
                    }
                }
                _ => unreachable!("Buffer is being accessed by the CPU"),
            }
        }
    }
    /// Releases one GPU write lock; when the last writer releases, the range
    /// demotes back to `Shared`, keeping the GPU read count.
    ///
    /// # Safety
    ///
    /// The range must currently hold a GPU write lock taken via `gpu_write_lock`.
    pub(crate) unsafe fn gpu_write_unlock(&mut self, range: Range<DeviceSize>) {
        self.ranges.split_at(&range.start);
        self.ranges.split_at(&range.end);
        for (_range, state) in self.ranges.range_mut(&range) {
            match &mut state.current_access {
                // Last writer: demote to shared, preserving the reader count.
                &mut CurrentAccess::GpuExclusive {
                    gpu_reads,
                    gpu_writes: 1,
                } => {
                    state.current_access = CurrentAccess::Shared {
                        cpu_reads: 0,
                        gpu_reads,
                    }
                }
                CurrentAccess::GpuExclusive { gpu_writes, .. } => *gpu_writes -= 1,
                _ => unreachable!("Buffer was not locked for GPU write"),
            }
        }
    }
}
/// The access state of a single byte range within a buffer.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct BufferRangeState {
    // Who currently holds this range, and with how many readers/writers.
    current_access: CurrentAccess,
}
// Flags that can be set when creating a buffer; mirrors Vulkan's
// `VkBufferCreateFlags`. Generated via the crate's `vulkan_bitflags!` macro.
vulkan_bitflags! {
    #[non_exhaustive]
    BufferCreateFlags = BufferCreateFlags(u32);
}
/// Parameters describing a prospective buffer, used to query a physical
/// device's external memory capabilities for that buffer configuration.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExternalBufferInfo {
    /// The flags the buffer would be created with.
    pub flags: BufferCreateFlags,
    /// The usage the buffer would be created with. Must not be empty.
    pub usage: BufferUsage,
    /// The external handle type that would be used with the buffer.
    pub handle_type: ExternalMemoryHandleType,
    // Prevents exhaustive construction so fields can be added later.
    pub _ne: crate::NonExhaustive,
}
impl ExternalBufferInfo {
    /// Returns an `ExternalBufferInfo` with the given `handle_type`, empty
    /// flags and empty usage.
    #[inline]
    pub fn handle_type(handle_type: ExternalMemoryHandleType) -> Self {
        Self {
            flags: BufferCreateFlags::empty(),
            usage: BufferUsage::empty(),
            handle_type,
            _ne: crate::NonExhaustive(()),
        }
    }
    /// Validates the fields against `physical_device` per the VUIDs of
    /// `VkPhysicalDeviceExternalBufferInfo`.
    pub(crate) fn validate(
        &self,
        physical_device: &PhysicalDevice,
    ) -> Result<(), Box<ValidationError>> {
        // Destructure so a newly added field causes a compile error here,
        // reminding us to validate it.
        let &Self {
            flags,
            usage,
            handle_type,
            _ne: _,
        } = self;
        flags
            .validate_physical_device(physical_device)
            .map_err(|err| {
                err.add_context("flags")
                    .set_vuids(&["VUID-VkPhysicalDeviceExternalBufferInfo-flags-parameter"])
            })?;
        usage
            .validate_physical_device(physical_device)
            .map_err(|err| {
                err.add_context("usage")
                    .set_vuids(&["VUID-VkPhysicalDeviceExternalBufferInfo-usage-parameter"])
            })?;
        // Usage must contain at least one bit.
        if usage.is_empty() {
            return Err(Box::new(ValidationError {
                context: "usage".into(),
                problem: "is empty".into(),
                vuids: &["VUID-VkPhysicalDeviceExternalBufferInfo-usage-requiredbitmask"],
                ..Default::default()
            }));
        }
        handle_type
            .validate_physical_device(physical_device)
            .map_err(|err| {
                err.add_context("handle_type")
                    .set_vuids(&["VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter"])
            })?;
        Ok(())
    }
}
/// The external memory properties supported for buffers with a given
/// configuration, as reported by a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct ExternalBufferProperties {
    /// The properties of the queried external memory handle type.
    pub external_memory_properties: ExternalMemoryProperties,
}
// The type of the indices in an index buffer; mirrors Vulkan's `VkIndexType`.
// `U8` requires the `ext_index_type_uint8` device extension.
vulkan_enum! {
    #[non_exhaustive]
    IndexType = IndexType(i32);
    U8 = UINT8_EXT
    RequiresOneOf([
        RequiresAllOf([DeviceExtension(ext_index_type_uint8)]),
    ]),
    U16 = UINT16,
    U32 = UINT32,
}
impl IndexType {
#[inline]
pub fn size(self) -> DeviceSize {
match self {
IndexType::U8 => 1,
IndexType::U16 => 2,
IndexType::U32 => 4,
}
}
}
/// A buffer of indices, paired with its index type.
#[derive(Clone, Debug)]
pub enum IndexBuffer {
    /// A buffer of `u8` indices (`IndexType::U8`).
    U8(Subbuffer<[u8]>),
    /// A buffer of `u16` indices (`IndexType::U16`).
    U16(Subbuffer<[u16]>),
    /// A buffer of `u32` indices (`IndexType::U32`).
    U32(Subbuffer<[u32]>),
}
impl IndexBuffer {
    /// Returns the `IndexType` corresponding to this buffer's element type.
    #[inline]
    pub fn index_type(&self) -> IndexType {
        match self {
            Self::U8(_) => IndexType::U8,
            Self::U16(_) => IndexType::U16,
            Self::U32(_) => IndexType::U32,
        }
    }
    /// Returns the buffer reinterpreted as a slice of raw bytes.
    #[inline]
    pub fn as_bytes(&self) -> &Subbuffer<[u8]> {
        // Consistency fix: use `Self::` paths like `index_type` above, instead
        // of the mixed `IndexBuffer::` paths the original arms used.
        match self {
            Self::U8(buffer) => buffer.as_bytes(),
            Self::U16(buffer) => buffer.as_bytes(),
            Self::U32(buffer) => buffer.as_bytes(),
        }
    }
    /// Returns the number of indices in the buffer.
    #[inline]
    pub fn len(&self) -> DeviceSize {
        match self {
            Self::U8(buffer) => buffer.len(),
            Self::U16(buffer) => buffer.len(),
            Self::U32(buffer) => buffer.len(),
        }
    }
}
// Conversions from typed index subbuffers; the variant records the index type.
impl From<Subbuffer<[u8]>> for IndexBuffer {
    #[inline]
    fn from(value: Subbuffer<[u8]>) -> Self {
        Self::U8(value)
    }
}
impl From<Subbuffer<[u16]>> for IndexBuffer {
    #[inline]
    fn from(value: Subbuffer<[u16]>) -> Self {
        Self::U16(value)
    }
}
impl From<Subbuffer<[u32]>> for IndexBuffer {
    #[inline]
    fn from(value: Subbuffer<[u32]>) -> Self {
        Self::U32(value)
    }
}