use super::{allocator::Arena, Buffer, BufferMemory};
use crate::{
device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
macros::try_opt,
memory::{
self,
allocator::{align_down, align_up, DeviceLayout},
is_aligned, DeviceAlignment, MappedMemoryRange,
},
sync::HostAccessError,
DeviceSize, NonNullDeviceAddress, NonZeroDeviceSize, ValidationError,
};
use bytemuck::AnyBitPattern;
use std::{
alloc::Layout,
cmp,
hash::{Hash, Hasher},
marker::PhantomData,
mem::{self, align_of, size_of},
ops::{Deref, DerefMut, Range, RangeBounds},
ptr::{self, NonNull},
sync::Arc,
thread,
};
#[cfg(feature = "macros")]
pub use vulkano_macros::BufferContents;
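/// A subpart of a buffer.
///
/// A `Subbuffer<T>` is a view of `size` bytes of a parent buffer, starting at `offset` from the
/// beginning of that buffer, reinterpreted as a value of type `T`. The parent is either a whole
/// [`Buffer`] or an arena managed by a subbuffer allocator.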
#[derive(Debug)]
#[repr(C)]
pub struct Subbuffer<T: ?Sized> {
offset: DeviceSize,
size: DeviceSize,
parent: SubbufferParent,
marker: PhantomData<Arc<T>>,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum SubbufferParent {
Arena(Arc<Arena>),
Buffer(DeviceOwnedDebugWrapper<Arc<Buffer>>),
}
impl<T: ?Sized> Subbuffer<T> {
pub(super) fn from_arena(arena: Arc<Arena>, offset: DeviceSize, size: DeviceSize) -> Self {
Subbuffer {
offset,
size,
parent: SubbufferParent::Arena(arena),
marker: PhantomData,
}
}
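    /// Returns the offset of the subbuffer, in bytes, relative to the start of its parent
    /// buffer.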
pub fn offset(&self) -> DeviceSize {
self.offset
}
fn memory_offset(&self) -> DeviceSize {
let allocation = match self.buffer().memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => unreachable!(),
};
allocation.offset() + self.offset
}
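    /// Returns the size of the subbuffer in bytes.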
pub fn size(&self) -> DeviceSize {
self.size
}
pub(crate) fn range(&self) -> Range<DeviceSize> {
self.offset..self.offset + self.size
}
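    /// Returns the buffer that this subbuffer is a part of.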
pub fn buffer(&self) -> &Arc<Buffer> {
match &self.parent {
SubbufferParent::Arena(arena) => arena.buffer(),
SubbufferParent::Buffer(buffer) => buffer,
}
}
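    /// Returns a pointer to the mapped memory backing this subbuffer, or an error if the
    /// memory is not mapped for host access.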
pub fn mapped_slice(&self) -> Result<NonNull<[u8]>, HostAccessError> {
match self.buffer().memory() {
BufferMemory::Normal(allocation) => {
unsafe { allocation.mapped_slice_unchecked(self.range()) }
}
BufferMemory::Sparse => unreachable!(),
}
}
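    /// Returns the device address of this subbuffer: the device address of the parent buffer
    /// plus this subbuffer's offset.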
pub fn device_address(&self) -> Result<NonNullDeviceAddress, Box<ValidationError>> {
self.buffer().device_address().map(|ptr| {
unsafe { NonNullDeviceAddress::new_unchecked(ptr.get() + self.offset) }
})
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn device_address_unchecked(&self) -> NonNullDeviceAddress {
NonNullDeviceAddress::new_unchecked(
self.buffer().device_address_unchecked().get() + self.offset,
)
}
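    /// Casts the subbuffer to a slice of raw bytes, keeping the same offset and size.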
pub fn into_bytes(self) -> Subbuffer<[u8]> {
unsafe { self.reinterpret_unchecked_inner() }
}
pub fn as_bytes(&self) -> &Subbuffer<[u8]> {
unsafe { self.reinterpret_unchecked_ref_inner() }
}
#[inline(always)]
    unsafe fn reinterpret_unchecked_inner<U: ?Sized>(self) -> Subbuffer<U> {
        // `mem::transmute` is rejected between generic types even when the layout doesn't
        // depend on the type parameter, so the fields are moved over instead.
        let Subbuffer { offset, size, parent, marker: _ } = self;
        Subbuffer { offset, size, parent, marker: PhantomData }
    }
#[inline(always)]
unsafe fn reinterpret_unchecked_ref_inner<U: ?Sized>(&self) -> &Subbuffer<U> {
assert!(size_of::<Subbuffer<T>>() == size_of::<Subbuffer<U>>());
assert!(align_of::<Subbuffer<T>>() == align_of::<Subbuffer<U>>());
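        // SAFETY: `Subbuffer` is `#[repr(C)]` and its layout doesn't depend on `T`, which the
        // asserts above spot-check, so reinterpreting the reference is sound.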
mem::transmute::<&Subbuffer<T>, &Subbuffer<U>>(self)
}
}
impl<T> Subbuffer<T>
where
T: BufferContents + ?Sized,
{
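    /// Changes the `T` generic parameter of the subbuffer to the desired type.
    ///
    /// # Panics
    ///
    /// - Panics if the memory offset of the subbuffer is not aligned for `U`.
    /// - If `U` is sized, panics if the size of the subbuffer doesn't match the size of `U`
    ///   exactly.
    /// - If `U` is unsized, panics if the subbuffer isn't larger than the head size of `U`, if
    ///   the remainder isn't a multiple of the element size of `U`, or if the size isn't
    ///   aligned for `U`.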
pub fn reinterpret<U>(self) -> Subbuffer<U>
where
U: BufferContents + ?Sized,
{
self.validate_reinterpret(U::LAYOUT);
unsafe { self.reinterpret_unchecked_inner() }
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn reinterpret_unchecked<U>(self) -> Subbuffer<U>
where
U: BufferContents + ?Sized,
{
#[cfg(debug_assertions)]
self.validate_reinterpret(U::LAYOUT);
self.reinterpret_unchecked_inner()
}
pub fn reinterpret_ref<U>(&self) -> &Subbuffer<U>
where
U: BufferContents + ?Sized,
{
self.validate_reinterpret(U::LAYOUT);
unsafe { self.reinterpret_unchecked_ref_inner() }
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn reinterpret_unchecked_ref<U>(&self) -> &Subbuffer<U>
where
U: BufferContents + ?Sized,
{
#[cfg(debug_assertions)]
self.validate_reinterpret(U::LAYOUT);
self.reinterpret_unchecked_ref_inner()
}
fn validate_reinterpret(&self, new_layout: BufferContentsLayout) {
assert!(is_aligned(self.memory_offset(), new_layout.alignment()));
if new_layout.is_sized() {
assert!(self.size == new_layout.unwrap_sized().size());
} else {
assert!(self.size > new_layout.head_size());
assert!((self.size - new_layout.head_size()) % new_layout.element_size().unwrap() == 0);
assert!(is_aligned(self.size(), new_layout.alignment()));
}
}
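    /// Locks the subbuffer in order to read its contents from the host.
    ///
    /// This returns an error if the device currently holds a conflicting lock on the range.
    /// For non-coherent memory, the locked range is invalidated first so that writes made by
    /// the device are visible to the host. The lock is released when the returned guard is
    /// dropped.
    ///
    /// A minimal usage sketch (not compiled here), assuming `subbuffer` is a host-visible
    /// `Subbuffer<[u32]>` that the device is not currently writing to:
    ///
    /// ```ignore
    /// let guard = subbuffer.read()?;
    /// // The guard dereferences to `[u32]`.
    /// println!("first element: {}", guard[0]);
    /// // Dropping the guard releases the read lock.
    /// ```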
pub fn read(&self) -> Result<BufferReadGuard<'_, T>, HostAccessError> {
let allocation = match self.buffer().memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => todo!("`Subbuffer::read` doesn't support sparse binding yet"),
};
let range = if let Some(atom_size) = allocation.atom_size() {
let start = align_down(self.offset, atom_size);
let end = cmp::min(
align_up(self.offset + self.size, atom_size),
allocation.size(),
);
Range { start, end }
} else {
self.range()
};
let mut state = self.buffer().state();
state
.check_cpu_read(range.clone())
.map_err(HostAccessError::AccessConflict)?;
unsafe { state.cpu_read_lock(range.clone()) };
let mapped_slice = self.mapped_slice()?;
if allocation.atom_size().is_some() {
let memory_range = MappedMemoryRange {
offset: range.start,
size: range.end - range.start,
_ne: crate::NonExhaustive(()),
};
unsafe { allocation.invalidate_range_unchecked(memory_range) }
.map_err(HostAccessError::Invalidate)?;
}
let data = unsafe { &*T::ptr_from_slice(mapped_slice) };
Ok(BufferReadGuard {
subbuffer: self,
data,
range,
})
}
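    /// Locks the subbuffer in order to write its contents from the host.
    ///
    /// This returns an error if the device or a host read currently holds a conflicting lock
    /// on the range. When the returned guard is dropped, non-coherent memory is flushed so
    /// that the device sees the writes, and the lock is released.
    ///
    /// A minimal usage sketch (not compiled here), assuming `subbuffer` is a host-visible
    /// `Subbuffer<[u32]>` that is not in use:
    ///
    /// ```ignore
    /// {
    ///     let mut guard = subbuffer.write()?;
    ///     guard[0] = 42;
    /// } // Flushed (if non-coherent) and unlocked here.
    /// ```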
pub fn write(&self) -> Result<BufferWriteGuard<'_, T>, HostAccessError> {
let allocation = match self.buffer().memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => todo!("`Subbuffer::write` doesn't support sparse binding yet"),
};
let range = if let Some(atom_size) = allocation.atom_size() {
let start = align_down(self.offset, atom_size);
let end = cmp::min(
align_up(self.offset + self.size, atom_size),
allocation.size(),
);
Range { start, end }
} else {
self.range()
};
let mut state = self.buffer().state();
state
.check_cpu_write(range.clone())
.map_err(HostAccessError::AccessConflict)?;
unsafe { state.cpu_write_lock(range.clone()) };
let mapped_slice = self.mapped_slice()?;
if allocation.atom_size().is_some() {
let memory_range = MappedMemoryRange {
offset: range.start,
size: range.end - range.start,
_ne: crate::NonExhaustive(()),
};
unsafe { allocation.invalidate_range_unchecked(memory_range) }
.map_err(HostAccessError::Invalidate)?;
}
let data = unsafe { &mut *T::ptr_from_slice(mapped_slice) };
Ok(BufferWriteGuard {
subbuffer: self,
data,
range,
})
}
}
impl<T> Subbuffer<T> {
pub fn into_slice(self) -> Subbuffer<[T]> {
unsafe { self.reinterpret_unchecked_inner() }
}
pub fn as_slice(&self) -> &Subbuffer<[T]> {
unsafe { self.reinterpret_unchecked_ref_inner() }
}
}
impl<T> Subbuffer<[T]> {
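    /// Returns the number of elements in the slice.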
pub fn len(&self) -> DeviceSize {
debug_assert!(self.size % size_of::<T>() as DeviceSize == 0);
self.size / size_of::<T>() as DeviceSize
}
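    /// Returns a subbuffer containing only the element at `index`.
    ///
    /// # Panics
    ///
    /// - Panics if `index` is out of bounds.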
pub fn index(self, index: DeviceSize) -> Subbuffer<T> {
        assert!(index < self.len());
unsafe { self.index_unchecked(index) }
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn index_unchecked(self, index: DeviceSize) -> Subbuffer<T> {
Subbuffer {
offset: self.offset + index * size_of::<T>() as DeviceSize,
size: size_of::<T>() as DeviceSize,
parent: self.parent,
marker: PhantomData,
}
}
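    /// Returns a subbuffer containing the given `range` of elements of `self`.
    ///
    /// # Panics
    ///
    /// - Panics if `range` is out of bounds.
    /// - Panics if `range` is empty, since zero-sized subbuffers aren't allowed.
    ///
    /// A quick sketch, assuming `data` is a `Subbuffer<[u32]>` with six elements:
    ///
    /// ```ignore
    /// let middle = data.clone().slice(2..4);
    /// assert_eq!(middle.len(), 2);
    /// ```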
pub fn slice(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
let Range { start, end } = memory::range(range, ..self.len()).unwrap();
self.offset += start * size_of::<T>() as DeviceSize;
self.size = (end - start) * size_of::<T>() as DeviceSize;
assert!(self.size != 0);
self
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn slice_unchecked(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
let Range { start, end } = memory::range_unchecked(range, ..self.len());
self.offset += start * size_of::<T>() as DeviceSize;
self.size = (end - start) * size_of::<T>() as DeviceSize;
debug_assert!(self.size != 0);
self
}
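    /// Splits the subbuffer into two at the element index `mid`: the first covering `..mid`
    /// and the second covering `mid..`.
    ///
    /// # Panics
    ///
    /// - Panics if `mid` is zero or not less than the length, since zero-sized subbuffers
    ///   aren't allowed.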
pub fn split_at(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
assert!(0 < mid && mid < self.len());
unsafe { self.split_at_unchecked(mid) }
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn split_at_unchecked(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
(
self.clone().slice_unchecked(..mid),
self.slice_unchecked(mid..),
)
}
}
impl Subbuffer<[u8]> {
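    /// Creates a new `Subbuffer<[u8]>` spanning the whole buffer.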
#[inline]
pub fn new(buffer: Arc<Buffer>) -> Self {
Subbuffer {
offset: 0,
size: buffer.size(),
parent: SubbufferParent::Buffer(DeviceOwnedDebugWrapper(buffer)),
marker: PhantomData,
}
}
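    /// Casts the slice of bytes to a typed slice of `T`, aligning the start to the alignment
    /// of `T` and trimming the size down to a multiple of the size of `T`.
    ///
    /// A quick sketch, assuming `bytes` is a `Subbuffer<[u8]>` created elsewhere:
    ///
    /// ```ignore
    /// let ints = bytes.cast_aligned::<u32>();
    /// // The front was padded to a 4-byte boundary, the tail trimmed to whole elements.
    /// assert_eq!(ints.size() % 4, 0);
    /// ```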
pub fn cast_aligned<T>(self) -> Subbuffer<[T]>
where
T: BufferContents,
{
let layout = DeviceLayout::from_layout(Layout::new::<T>()).unwrap();
let aligned = self.align_to(layout);
unsafe { aligned.reinterpret_unchecked() }
}
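    /// Aligns the subbuffer to the given `layout` by padding the front to `layout.alignment()`
    /// and trimming the size down to a multiple of `layout.size()`.
    ///
    /// # Panics
    ///
    /// - Panics if `layout.alignment()` exceeds 64.
    /// - Panics if the padding needed at the front exceeds the size of the subbuffer.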
#[inline]
pub fn align_to(mut self, layout: DeviceLayout) -> Subbuffer<[u8]> {
assert!(layout.alignment().as_devicesize() <= 64);
let offset = self.memory_offset();
let padding_front = align_up(offset, layout.alignment()) - offset;
self.offset += padding_front;
self.size = self.size.checked_sub(padding_front).unwrap();
self.size -= self.size % layout.size();
self
}
}
impl From<Arc<Buffer>> for Subbuffer<[u8]> {
#[inline]
fn from(buffer: Arc<Buffer>) -> Self {
Self::new(buffer)
}
}
impl<T: ?Sized> Clone for Subbuffer<T> {
fn clone(&self) -> Self {
Subbuffer {
parent: self.parent.clone(),
..*self
}
}
}
unsafe impl<T: ?Sized> DeviceOwned for Subbuffer<T> {
fn device(&self) -> &Arc<Device> {
self.buffer().device()
}
}
impl<T: ?Sized> PartialEq for Subbuffer<T> {
fn eq(&self, other: &Self) -> bool {
self.parent == other.parent && self.offset == other.offset && self.size == other.size
}
}
impl<T: ?Sized> Eq for Subbuffer<T> {}
impl<T: ?Sized> Hash for Subbuffer<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.parent.hash(state);
self.offset.hash(state);
self.size.hash(state);
}
}
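/// RAII structure used to release the CPU read lock of a subbuffer when dropped.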
#[derive(Debug)]
pub struct BufferReadGuard<'a, T: ?Sized> {
subbuffer: &'a Subbuffer<T>,
data: &'a T,
range: Range<DeviceSize>,
}
impl<T: ?Sized> Drop for BufferReadGuard<'_, T> {
fn drop(&mut self) {
let mut state = self.subbuffer.buffer().state();
unsafe { state.cpu_read_unlock(self.range.clone()) };
}
}
impl<T: ?Sized> Deref for BufferReadGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data
}
}
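/// RAII structure used to release the CPU write lock of a subbuffer when dropped.
///
/// For non-coherent memory, the written range is flushed on drop, unless the thread is
/// panicking.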
#[derive(Debug)]
pub struct BufferWriteGuard<'a, T: ?Sized> {
subbuffer: &'a Subbuffer<T>,
data: &'a mut T,
range: Range<DeviceSize>,
}
impl<T: ?Sized> Drop for BufferWriteGuard<'_, T> {
fn drop(&mut self) {
let allocation = match self.subbuffer.buffer().memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => unreachable!(),
};
if allocation.atom_size().is_some() && !thread::panicking() {
let memory_range = MappedMemoryRange {
offset: self.range.start,
size: self.range.end - self.range.start,
_ne: crate::NonExhaustive(()),
};
unsafe { allocation.flush_range_unchecked(memory_range).unwrap() };
}
let mut state = self.subbuffer.buffer().state();
unsafe { state.cpu_write_unlock(self.range.clone()) };
}
}
impl<T: ?Sized> Deref for BufferWriteGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data
}
}
impl<T: ?Sized> DerefMut for BufferWriteGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data
}
}
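/// Trait for types of data that can be put in a buffer.
///
/// # Safety
///
/// Implementors must ensure that `LAYOUT` matches the actual layout of the type and that
/// `ptr_from_slice` returns a pointer valid for the whole slice. Rather than implementing it by
/// hand, derive it, as the tests below do.
///
/// A minimal sketch of deriving the trait (requires the `macros` feature; the struct and field
/// names are illustrative):
///
/// ```ignore
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
///     scale: f32,
///     // An unsized tail of elements is allowed as the last field.
///     values: [u32],
/// }
/// ```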
pub unsafe trait BufferContents: Send + Sync + 'static {
const LAYOUT: BufferContentsLayout;
#[doc(hidden)]
unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self;
}
unsafe impl<T> BufferContents for T
where
T: AnyBitPattern + Send + Sync,
{
const LAYOUT: BufferContentsLayout =
if let Some(layout) = BufferContentsLayout::from_sized(Layout::new::<T>()) {
layout
} else {
panic!("zero-sized types are not valid buffer contents");
};
#[inline(always)]
unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
debug_assert!(slice.len() == size_of::<T>());
<*mut [u8]>::cast::<T>(slice.as_ptr())
}
}
unsafe impl<T> BufferContents for [T]
where
T: BufferContents,
{
const LAYOUT: BufferContentsLayout = BufferContentsLayout(BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout: T::LAYOUT.unwrap_sized(),
});
#[inline(always)]
unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
let data = <*mut [u8]>::cast::<T>(slice.as_ptr());
let len = slice.len() / size_of::<T>();
debug_assert!(slice.len() % size_of::<T>() == 0);
ptr::slice_from_raw_parts_mut(data, len)
}
}
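/// The layout of the contents of a buffer: either a statically sized layout, or an unsized
/// layout consisting of an optional sized head followed by a slice of elements.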
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferContentsLayout(BufferContentsLayoutInner);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum BufferContentsLayoutInner {
Sized(DeviceLayout),
Unsized {
head_layout: Option<DeviceLayout>,
element_layout: DeviceLayout,
},
}
impl BufferContentsLayout {
#[inline]
pub const fn head_size(&self) -> DeviceSize {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => sized.size(),
BufferContentsLayoutInner::Unsized {
head_layout: None, ..
} => 0,
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
..
} => head_layout.size(),
}
}
#[inline]
pub const fn element_size(&self) -> Option<DeviceSize> {
match &self.0 {
BufferContentsLayoutInner::Sized(_) => None,
BufferContentsLayoutInner::Unsized { element_layout, .. } => {
Some(element_layout.size())
}
}
}
#[inline]
pub const fn alignment(&self) -> DeviceAlignment {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => sized.alignment(),
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => element_layout.alignment(),
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
..
} => head_layout.alignment(),
}
}
#[inline]
pub const fn layout_for_len(&self, len: NonZeroDeviceSize) -> Option<DeviceLayout> {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => Some(*sized),
BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
} => {
let (tail_layout, _) = try_opt!(element_layout.repeat(len));
if let Some(head_layout) = head_layout {
let (layout, _) = try_opt!(head_layout.extend(tail_layout));
Some(layout.pad_to_alignment())
} else {
Some(tail_layout)
}
}
}
}
#[doc(hidden)]
#[inline]
pub const fn from_sized(sized: Layout) -> Option<Self> {
assert!(
sized.align() <= 64,
"types with alignments above 64 are not valid buffer contents",
);
if let Ok(sized) = DeviceLayout::from_layout(sized) {
Some(Self(BufferContentsLayoutInner::Sized(sized)))
} else {
None
}
}
#[doc(hidden)]
#[inline]
pub const fn from_head_element_layout(
head_layout: Layout,
element_layout: Layout,
) -> Option<Self> {
if head_layout.align() > 64 || element_layout.align() > 64 {
panic!("types with alignments above 64 are not valid buffer contents");
}
let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(head_layout) {
Some(head_layout)
} else {
None
};
if let Ok(element_layout) = DeviceLayout::from_layout(element_layout) {
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
}))
} else {
None
}
}
#[doc(hidden)]
#[inline]
pub const fn extend_from_layout(self, previous: &Layout) -> Option<Self> {
assert!(
previous.align() <= 64,
"types with alignments above 64 are not valid buffer contents",
);
match self.0 {
BufferContentsLayoutInner::Sized(sized) => {
let (sized, _) = try_opt!(sized.extend_from_layout(previous));
Some(Self(BufferContentsLayoutInner::Sized(sized)))
}
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => {
let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(*previous) {
Some(head_layout)
} else {
None
};
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
}))
}
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
} => {
let (head_layout, _) = try_opt!(head_layout.extend_from_layout(previous));
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
}))
}
}
}
#[doc(hidden)]
#[inline]
pub const fn pad_to_alignment(&self) -> Option<Self> {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => Some(Self(
BufferContentsLayoutInner::Sized(sized.pad_to_alignment()),
)),
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout: *element_layout,
})),
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
} => {
let padded_head_size =
head_layout.size() + head_layout.padding_needed_for(element_layout.alignment());
let padded_head_size =
unsafe { NonZeroDeviceSize::new_unchecked(padded_head_size) };
let alignment =
DeviceAlignment::max(head_layout.alignment(), element_layout.alignment());
if let Some(head_layout) = DeviceLayout::new(padded_head_size, alignment) {
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout: *element_layout,
}))
} else {
None
}
}
}
}
fn is_sized(&self) -> bool {
matches!(
self,
BufferContentsLayout(BufferContentsLayoutInner::Sized(..)),
)
}
pub(super) const fn unwrap_sized(self) -> DeviceLayout {
match self.0 {
BufferContentsLayoutInner::Sized(sized) => sized,
BufferContentsLayoutInner::Unsized { .. } => {
panic!("called `BufferContentsLayout::unwrap_sized` on an unsized layout");
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
buffer::{
sys::{BufferCreateInfo, RawBuffer},
BufferUsage,
},
memory::{
allocator::{
AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator,
StandardMemoryAllocator,
},
MemoryRequirements, ResourceMemory,
},
};
#[test]
fn derive_buffer_contents() {
#[derive(BufferContents)]
#[repr(C)]
struct Test1(u32, u64, u8);
assert_eq!(Test1::LAYOUT.head_size() as usize, size_of::<Test1>());
assert_eq!(Test1::LAYOUT.element_size(), None);
assert_eq!(
Test1::LAYOUT.alignment().as_devicesize() as usize,
align_of::<Test1>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Composite1(Test1, [f32; 10], Test1);
assert_eq!(
Composite1::LAYOUT.head_size() as usize,
size_of::<Composite1>(),
);
assert_eq!(Composite1::LAYOUT.element_size(), None);
assert_eq!(
Composite1::LAYOUT.alignment().as_devicesize() as usize,
align_of::<Composite1>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Test2(u64, u8, [u32]);
assert_eq!(
Test2::LAYOUT.head_size() as usize,
size_of::<u64>() + size_of::<u32>(),
);
assert_eq!(
Test2::LAYOUT.element_size().unwrap() as usize,
size_of::<u32>(),
);
assert_eq!(
Test2::LAYOUT.alignment().as_devicesize() as usize,
align_of::<u64>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Composite2(Test1, [f32; 10], Test2);
assert_eq!(
Composite2::LAYOUT.head_size() as usize,
size_of::<Test1>() + size_of::<[f32; 10]>() + size_of::<u64>() + size_of::<u32>(),
);
assert_eq!(
Composite2::LAYOUT.element_size().unwrap() as usize,
size_of::<u32>(),
);
assert_eq!(
Composite2::LAYOUT.alignment().as_devicesize() as usize,
align_of::<u64>(),
);
}
#[test]
fn split_at() {
let (device, _) = gfx_dev_and_queue!();
let allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<u32>(
allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
},
AllocationCreateInfo::default(),
6,
)
.unwrap();
{
let (left, right) = buffer.clone().split_at(2);
assert!(left.len() == 2);
assert!(right.len() == 4);
}
{
let (left, right) = buffer.clone().split_at(5);
assert!(left.len() == 5);
assert!(right.len() == 1);
}
{
assert_should_panic!({ buffer.clone().split_at(0) });
}
{
assert_should_panic!({ buffer.split_at(6) });
}
}
#[test]
fn cast_aligned() {
let (device, _) = gfx_dev_and_queue!();
let allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let raw_buffer = RawBuffer::new(
device,
BufferCreateInfo {
size: 32,
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
},
)
.unwrap();
let requirements = MemoryRequirements {
layout: DeviceLayout::from_size_alignment(32, 1).unwrap(),
memory_type_bits: 1,
prefers_dedicated_allocation: false,
requires_dedicated_allocation: false,
};
let _junk = allocator
.allocate(
MemoryRequirements {
layout: DeviceLayout::from_size_alignment(17, 1).unwrap(),
..requirements
},
AllocationType::Linear,
AllocationCreateInfo::default(),
None,
)
.unwrap();
let allocation = allocator
.allocate(
requirements,
AllocationType::Linear,
AllocationCreateInfo::default(),
None,
)
.unwrap();
let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };
let buffer = Buffer::from_raw(raw_buffer, BufferMemory::Normal(allocation));
let buffer = Subbuffer::from(Arc::new(buffer));
assert!(buffer.memory_offset() >= 17);
{
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C, align(16))]
struct Test([u8; 16]);
let aligned = buffer.clone().cast_aligned::<Test>();
assert_eq!(aligned.memory_offset() % 16, 0);
assert_eq!(aligned.size(), 16);
}
{
let aligned = buffer.clone().cast_aligned::<[u8; 16]>();
assert_eq!(aligned.size() % 16, 0);
}
{
let layout = DeviceLayout::from_size_alignment(32, 16).unwrap();
let aligned = buffer.clone().align_to(layout);
assert!(is_aligned(aligned.memory_offset(), layout.alignment()));
assert_eq!(aligned.size(), 0);
}
{
let layout = DeviceLayout::from_size_alignment(1, 64).unwrap();
assert_should_panic!({ buffer.align_to(layout) });
}
}
}