// vulkano/buffer/subbuffer.rs
1//! A subpart of a buffer.
2
3use super::{allocator::Arena, Buffer, BufferMemory};
4use crate::{
5    device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
6    macros::try_opt,
7    memory::{
8        self,
9        allocator::{align_down, align_up, DeviceLayout},
10        is_aligned, DeviceAlignment, MappedMemoryRange,
11    },
12    sync::HostAccessError,
13    DeviceSize, NonNullDeviceAddress, NonZeroDeviceSize, ValidationError,
14};
15use bytemuck::AnyBitPattern;
16use std::{
17    alloc::Layout,
18    cmp,
19    hash::{Hash, Hasher},
20    marker::PhantomData,
21    mem::{self, align_of, size_of},
22    ops::{Deref, DerefMut, Range, RangeBounds},
23    ptr::{self, NonNull},
24    sync::Arc,
25    thread,
26};
27#[cfg(feature = "macros")]
28pub use vulkano_macros::BufferContents;
29
/// A subpart of a buffer.
///
/// This type doesn't correspond to any Vulkan object, it exists for API convenience. Most Vulkan
/// functions that work with buffers take the buffer as argument as well as an offset and size
/// within the buffer, which we can represent with a single subbuffer instead.
///
/// `Subbuffer` also has a type parameter, which is a hint for how the data is going to be
/// interpreted by the host or device (or both). This is useful so that we can allocate
/// (sub)buffers that are correctly aligned and have the correct size for their content, and for
/// type-safety. For example, when reading/writing a subbuffer from the host, you can use
/// [`Subbuffer::read`]/[`Subbuffer::write`] without worrying about the alignment and size being
/// correct and about converting your data from/to raw bytes.
///
/// There are two ways to get a `Subbuffer`:
///
/// - By using the functions on [`Buffer`], which create a new buffer and memory allocation each
///   time, and give you a `Subbuffer` that has an entire `Buffer` dedicated to it.
/// - By using the [`SubbufferAllocator`], which creates `Subbuffer`s by suballocating existing
///   `Buffer`s such that the `Buffer`s can keep being reused.
///
/// Alternatively, you can also create a `Buffer` manually and convert it to a `Subbuffer<[u8]>`.
///
/// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
#[derive(Debug)]
// `repr(C)` fixes the field layout independently of `T`, which is what makes the `transmute`s
// in `reinterpret_unchecked_inner`/`reinterpret_unchecked_ref_inner` sound.
#[repr(C)]
pub struct Subbuffer<T: ?Sized> {
    // Offset of the subbuffer in bytes, relative to the start of the parent buffer.
    offset: DeviceSize,
    // Size of the subbuffer in bytes.
    size: DeviceSize,
    // Keeps the backing buffer (or arena suballocation) alive for as long as this subbuffer.
    parent: SubbufferParent,
    // `Arc<T>` expresses shared ownership of a `T` without storing one directly.
    marker: PhantomData<Arc<T>>,
}
61
/// The owner of the memory that a `Subbuffer` points into.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum SubbufferParent {
    /// Suballocated from an arena of the `SubbufferAllocator`.
    Arena(Arc<Arena>),
    /// Has a whole `Buffer` to itself.
    Buffer(DeviceOwnedDebugWrapper<Arc<Buffer>>),
}
67
impl<T: ?Sized> Subbuffer<T> {
    // Used by the `SubbufferAllocator` to hand out suballocations of an arena's buffer.
    pub(super) fn from_arena(arena: Arc<Arena>, offset: DeviceSize, size: DeviceSize) -> Self {
        Subbuffer {
            offset,
            size,
            parent: SubbufferParent::Arena(arena),
            marker: PhantomData,
        }
    }

    /// Returns the offset of the subbuffer, in bytes, relative to the buffer.
    pub fn offset(&self) -> DeviceSize {
        self.offset
    }

    /// Returns the offset of the subbuffer, in bytes, relative to the [`DeviceMemory`] block.
    fn memory_offset(&self) -> DeviceSize {
        let allocation = match self.buffer().memory() {
            BufferMemory::Normal(a) => a,
            // NOTE(review): sparse/external buffers have no single memory offset; callers are
            // presumably guaranteed to only reach this for normally-allocated buffers — confirm.
            BufferMemory::Sparse | BufferMemory::External => unreachable!(),
        };

        // The allocation's own offset within the `DeviceMemory` block, plus ours within the
        // buffer.
        allocation.offset() + self.offset
    }

    /// Returns the size of the subbuffer in bytes.
    pub fn size(&self) -> DeviceSize {
        self.size
    }

    /// Returns the range the subbuffer occupies, in bytes, relative to the buffer.
    pub(crate) fn range(&self) -> Range<DeviceSize> {
        self.offset..self.offset + self.size
    }

    /// Returns the buffer that this subbuffer is a part of.
    pub fn buffer(&self) -> &Arc<Buffer> {
        match &self.parent {
            SubbufferParent::Arena(arena) => arena.buffer(),
            SubbufferParent::Buffer(buffer) => buffer,
        }
    }

    /// Returns the mapped pointer to the range of memory of `self`.
    ///
    /// The subbuffer must fall within the range of the memory mapping given to
    /// [`DeviceMemory::map`].
    ///
    /// See [`MappingState::slice`] for the safety invariants of the returned pointer.
    ///
    /// [`DeviceMemory::map`]: memory::DeviceMemory::map
    /// [`MappingState::slice`]: memory::MappingState::slice
    pub fn mapped_slice(&self) -> Result<NonNull<[u8]>, HostAccessError> {
        match self.buffer().memory() {
            BufferMemory::Normal(allocation) => {
                // SAFETY: `self.range()` is in bounds of the allocation.
                unsafe { allocation.mapped_slice_unchecked(self.range()) }
            }
            // Only normally-allocated buffers can have a host mapping managed by vulkano.
            BufferMemory::Sparse | BufferMemory::External => unreachable!(),
        }
    }

    /// Returns the device address for this subbuffer.
    pub fn device_address(&self) -> Result<NonNullDeviceAddress, Box<ValidationError>> {
        self.buffer().device_address().map(|ptr| {
            // SAFETY: The original address came from the Vulkan implementation, and allocation
            // sizes are guaranteed to not exceed `DeviceLayout::MAX_SIZE`, so the offset better be
            // in range.
            unsafe { NonNullDeviceAddress::new_unchecked(ptr.get() + self.offset) }
        })
    }

    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
    pub unsafe fn device_address_unchecked(&self) -> NonNullDeviceAddress {
        let buffer_device_address = unsafe { self.buffer().device_address_unchecked() };

        // SAFETY: The original address came from the Vulkan implementation, and allocation
        // sizes are guaranteed to not exceed `DeviceLayout::MAX_SIZE`, so the offset better be
        // in range.
        unsafe { NonNullDeviceAddress::new_unchecked(buffer_device_address.get() + self.offset) }
    }

    /// Casts the subbuffer to a slice of raw bytes.
    pub fn into_bytes(self) -> Subbuffer<[u8]> {
        unsafe { self.reinterpret_unchecked_inner() }
    }

    /// Same as [`into_bytes`], except it works with a reference to the subbuffer.
    ///
    /// [`into_bytes`]: Self::into_bytes
    pub fn as_bytes(&self) -> &Subbuffer<[u8]> {
        unsafe { self.reinterpret_unchecked_ref_inner() }
    }

    // Transmutes the element type without any validation; callers must uphold `U`'s layout
    // requirements themselves.
    #[inline(always)]
    unsafe fn reinterpret_unchecked_inner<U: ?Sized>(self) -> Subbuffer<U> {
        // SAFETY: All `Subbuffer`s share the same layout.
        unsafe { mem::transmute::<Subbuffer<T>, Subbuffer<U>>(self) }
    }

    #[inline(always)]
    unsafe fn reinterpret_unchecked_ref_inner<U: ?Sized>(&self) -> &Subbuffer<U> {
        // Defensive checks: `Subbuffer` is `#[repr(C)]` with a layout independent of `T`, so
        // these can never fire.
        assert_eq!(size_of::<Subbuffer<T>>(), size_of::<Subbuffer<U>>());
        assert_eq!(align_of::<Subbuffer<T>>(), align_of::<Subbuffer<U>>());

        // SAFETY: All `Subbuffer`s share the same layout.
        unsafe { mem::transmute::<&Subbuffer<T>, &Subbuffer<U>>(self) }
    }

    // Only the `device_address` arm of the union is initialized; callers must treat the value
    // as a device address, never a host pointer.
    pub(crate) fn to_vk_device_or_host_address(&self) -> ash::vk::DeviceOrHostAddressKHR {
        ash::vk::DeviceOrHostAddressKHR {
            device_address: self
                .device_address()
                .expect("Can't get device address. Is the extension enabled?")
                .into(),
        }
    }

    // Same as `to_vk_device_or_host_address`, for the `*Const` flavor of the union.
    pub(crate) fn to_vk_device_or_host_address_const(
        &self,
    ) -> ash::vk::DeviceOrHostAddressConstKHR {
        ash::vk::DeviceOrHostAddressConstKHR {
            device_address: self
                .device_address()
                .expect("Can't get device address. Is the extension enabled?")
                .into(),
        }
    }
}
197
198impl<T> Subbuffer<T>
199where
200    T: BufferContents + ?Sized,
201{
202    /// Changes the `T` generic parameter of the subbuffer to the desired type.
203    ///
204    /// # Panics
205    ///
206    /// - Panics if the memory offset of the subbuffer is not a multiple of the alignment of `U`.
207    /// - If `U` is sized, then panics if the subbuffer size doesn't match the size of `U` exactly.
208    /// - If `U` is unsized, then panics if
209    ///   - the subbuffer size isn't greater than the size of the head (sized part) of `U`,
210    ///   - the subbuffer would have slop when reinterpreted as `U`, meaning that the subbuffer
211    ///     size minus the the size of the head of `U` isn't divisible by the element size of `U`,
212    ///     or
213    ///   - the subbuffer size isn't a multiple of the alignment of `U`.
214    pub fn reinterpret<U>(self) -> Subbuffer<U>
215    where
216        U: BufferContents + ?Sized,
217    {
218        self.validate_reinterpret(U::LAYOUT);
219
220        unsafe { self.reinterpret_unchecked_inner() }
221    }
222
223    /// Changes the `T` generic parameter of the subbuffer to the desired type without checking if
224    /// the contents are correctly aligned and sized.
225    ///
226    /// **NEVER use this function** unless you absolutely have to, and even then, open an issue on
227    /// GitHub instead. **An unaligned / incorrectly sized subbuffer is undefined behavior _both on
228    /// the Rust and the Vulkan side!_**
229    ///
230    /// # Safety
231    ///
232    /// - The memory offset of the subbuffer must be a multiple of the alignment of `U`.
233    /// - If `U` is sized, then the subbuffer size must match the size of `U` exactly.
234    /// - If `U` is unsized, then
235    ///   - the subbuffer size must be greater than the size of the head (sized part) of `U`,
236    ///   - the subbuffer must not have slop when reinterpreted as `U`, meaning that the subbuffer
237    ///     size minus the the size of the head of `U` is divisible by the element size of `U`, and
238    ///   - the subbuffer size must be a multiple of the alignment of `U`.
239    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
240    pub unsafe fn reinterpret_unchecked<U>(self) -> Subbuffer<U>
241    where
242        U: BufferContents + ?Sized,
243    {
244        #[cfg(debug_assertions)]
245        self.validate_reinterpret(U::LAYOUT);
246
247        unsafe { self.reinterpret_unchecked_inner() }
248    }
249
250    /// Same as [`reinterpret`], except it works with a reference to the subbuffer.
251    ///
252    /// [`reinterpret`]: Self::reinterpret
253    pub fn reinterpret_ref<U>(&self) -> &Subbuffer<U>
254    where
255        U: BufferContents + ?Sized,
256    {
257        self.validate_reinterpret(U::LAYOUT);
258
259        unsafe { self.reinterpret_unchecked_ref_inner() }
260    }
261
262    /// Same as [`reinterpret_unchecked`], except it works with a reference to the subbuffer.
263    ///
264    /// # Safety
265    ///
266    /// Please read the safety docs on [`reinterpret_unchecked`] carefully.
267    ///
268    /// [`reinterpret_unchecked`]: Self::reinterpret_unchecked
269    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
270    pub unsafe fn reinterpret_unchecked_ref<U>(&self) -> &Subbuffer<U>
271    where
272        U: BufferContents + ?Sized,
273    {
274        #[cfg(debug_assertions)]
275        self.validate_reinterpret(U::LAYOUT);
276
277        unsafe { self.reinterpret_unchecked_ref_inner() }
278    }
279
280    fn validate_reinterpret(&self, new_layout: BufferContentsLayout) {
281        assert!(is_aligned(self.memory_offset(), new_layout.alignment()));
282
283        if new_layout.is_sized() {
284            assert_eq!(self.size, new_layout.unwrap_sized().size());
285        } else {
286            assert!(self.size > new_layout.head_size());
287            assert_eq!(
288                (self.size - new_layout.head_size()) % new_layout.element_size().unwrap(),
289                0,
290            );
291            assert!(is_aligned(self.size(), new_layout.alignment()));
292        }
293    }
294
295    /// Locks the subbuffer in order to read its content from the host.
296    ///
297    /// If the subbuffer is currently used in exclusive mode by the device, this function will
298    /// return an error. Similarly if you called [`write`] on the buffer and haven't dropped the
299    /// lock, this function will return an error as well.
300    ///
301    /// After this function successfully locks the subbuffer, any attempt to submit a command
302    /// buffer that uses it in exclusive mode will fail. You can still submit this subbuffer
303    /// for non-exclusive accesses (ie. reads).
304    ///
305    /// If the memory backing the buffer is not [host-coherent], then this function will lock a
306    /// range that is potentially larger than the subbuffer, because the range given to
307    /// [`invalidate_range`] must be aligned to the [`non_coherent_atom_size`]. This means that for
308    /// example if your Vulkan implementation reports an atom size of 64, and you tried to put 2
309    /// subbuffers of size 32 in the same buffer, one at offset 0 and one at offset 32, while the
310    /// buffer is backed by non-coherent memory, then invalidating one subbuffer would also
311    /// invalidate the other subbuffer. This can lead to data races and is therefore not allowed.
312    /// What you should do in that case is ensure that each subbuffer is aligned to the
313    /// non-coherent atom size, so in this case one would be at offset 0 and the other at offset
314    /// 64. [`SubbufferAllocator`] does this automatically.
315    ///
316    /// If the memory backing the buffer is not managed by vulkano, (i.e. this buffer was created
317    /// from [`RawBuffer::assume_bound`]), then it can't be read from using this function.
318    ///
319    /// [host-coherent]: memory::MemoryPropertyFlags::HOST_COHERENT
320    /// [`invalidate_range`]: memory::ResourceMemory::invalidate_range
321    /// [`non_coherent_atom_size`]: crate::device::DeviceProperties::non_coherent_atom_size
322    /// [`write`]: Self::write
323    /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
324    /// [`RawBuffer::assume_bound`]: crate::buffer::sys::RawBuffer::assume_bound
325    pub fn read(&self) -> Result<BufferReadGuard<'_, T>, HostAccessError> {
326        assert!(T::LAYOUT.alignment().as_devicesize() <= 64);
327
328        let allocation = match self.buffer().memory() {
329            BufferMemory::Normal(a) => a,
330            BufferMemory::Sparse => todo!("`Subbuffer::read` doesn't support sparse binding yet"),
331            BufferMemory::External => return Err(HostAccessError::Unmanaged),
332        };
333
334        let range = if let Some(atom_size) = allocation.atom_size() {
335            // This works because the memory allocator must align allocations to the non-coherent
336            // atom size when the memory is host-visible but not host-coherent.
337            let start = align_down(self.offset, atom_size);
338            let end = cmp::min(
339                align_up(self.offset + self.size, atom_size),
340                allocation.size(),
341            );
342
343            Range { start, end }
344        } else {
345            self.range()
346        };
347
348        let mut state = self.buffer().state();
349        state
350            .check_cpu_read(range.clone())
351            .map_err(HostAccessError::AccessConflict)?;
352        unsafe { state.cpu_read_lock(range.clone()) };
353
354        let mapped_slice = self.mapped_slice()?;
355
356        if allocation.atom_size().is_some() {
357            let memory_range = MappedMemoryRange {
358                offset: range.start,
359                size: range.end - range.start,
360                _ne: crate::NonExhaustive(()),
361            };
362
363            // If there are other read locks being held at this point, they also called
364            // `invalidate_range_unchecked` when locking. The device can't write data while the
365            // host holds a read lock, so there will be no new data and this call will do nothing.
366            // TODO: probably still more efficient to call it only if we're the first to acquire a
367            // read lock, but the number of host locks isn't currently tracked anywhere.
368            //
369            // SAFETY:
370            // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls
371            //   within the mapped range of the memory.
372            // - We ensure that memory mappings are always aligned to the non-coherent atom size for
373            //   non-host-coherent memory, therefore the subbuffer's range aligned to the
374            //   non-coherent atom size must fall within the mapped range of the memory.
375            unsafe { allocation.invalidate_range_unchecked(memory_range) }
376                .map_err(HostAccessError::Invalidate)?;
377        }
378
379        // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
380        let data_ptr = unsafe { T::ptr_from_slice(mapped_slice) };
381        let data = unsafe { &*data_ptr };
382
383        Ok(BufferReadGuard {
384            subbuffer: self,
385            data,
386            range,
387        })
388    }
389
390    /// Locks the subbuffer in order to write its content from the host.
391    ///
392    /// If the subbuffer is currently in use by the device, this function will return an error.
393    /// Similarly if you called [`read`] on the subbuffer and haven't dropped the lock, this
394    /// function will return an error as well.
395    ///
396    /// After this function successfully locks the buffer, any attempt to submit a command buffer
397    /// that uses it and any attempt to call `read` will return an error.
398    ///
399    /// If the memory backing the buffer is not [host-coherent], then this function will lock a
400    /// range that is potentially larger than the subbuffer, because the range given to
401    /// [`flush_range`] must be aligned to the [`non_coherent_atom_size`]. This means that for
402    /// example if your Vulkan implementation reports an atom size of 64, and you tried to put 2
403    /// subbuffers of size 32 in the same buffer, one at offset 0 and one at offset 32, while the
404    /// buffer is backed by non-coherent memory, then flushing one subbuffer would also flush the
405    /// other subbuffer. This can lead to data races and is therefore not allowed. What you should
406    /// do in that case is ensure that each subbuffer is aligned to the non-coherent atom size, so
407    /// in this case one would be at offset 0 and the other at offset 64. [`SubbufferAllocator`]
408    /// does this automatically.
409    ///
410    /// If the memory backing the buffer is not managed by vulkano, (i.e. this buffer was created
411    /// from [`RawBuffer::assume_bound`]), then it can't be written to using this function.
412    ///
413    /// [host-coherent]: memory::MemoryPropertyFlags::HOST_COHERENT
414    /// [`flush_range`]: memory::ResourceMemory::flush_range
415    /// [`non_coherent_atom_size`]: crate::device::DeviceProperties::non_coherent_atom_size
416    /// [`read`]: Self::read
417    /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
418    /// [`RawBuffer::assume_bound`]: crate::buffer::sys::RawBuffer::assume_bound
419    pub fn write(&self) -> Result<BufferWriteGuard<'_, T>, HostAccessError> {
420        assert!(T::LAYOUT.alignment().as_devicesize() <= 64);
421
422        let allocation = match self.buffer().memory() {
423            BufferMemory::Normal(a) => a,
424            BufferMemory::Sparse => todo!("`Subbuffer::write` doesn't support sparse binding yet"),
425            BufferMemory::External => return Err(HostAccessError::Unmanaged),
426        };
427
428        let range = if let Some(atom_size) = allocation.atom_size() {
429            // This works because the memory allocator must align allocations to the non-coherent
430            // atom size when the memory is host-visible but not host-coherent.
431            let start = align_down(self.offset, atom_size);
432            let end = cmp::min(
433                align_up(self.offset + self.size, atom_size),
434                allocation.size(),
435            );
436
437            Range { start, end }
438        } else {
439            self.range()
440        };
441
442        let mut state = self.buffer().state();
443        state
444            .check_cpu_write(range.clone())
445            .map_err(HostAccessError::AccessConflict)?;
446        unsafe { state.cpu_write_lock(range.clone()) };
447
448        let mapped_slice = self.mapped_slice()?;
449
450        if allocation.atom_size().is_some() {
451            let memory_range = MappedMemoryRange {
452                offset: range.start,
453                size: range.end - range.start,
454                _ne: crate::NonExhaustive(()),
455            };
456
457            // SAFETY:
458            // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls
459            //   within the mapped range of the memory.
460            // - We ensure that memory mappings are always aligned to the non-coherent atom size for
461            //   non-host-coherent memory, therefore the subbuffer's range aligned to the
462            //   non-coherent atom size must fall within the mapped range of the memory.
463            unsafe { allocation.invalidate_range_unchecked(memory_range) }
464                .map_err(HostAccessError::Invalidate)?;
465        }
466
467        // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
468        let data_ptr = unsafe { T::ptr_from_slice(mapped_slice) };
469        let data = unsafe { &mut *data_ptr };
470
471        Ok(BufferWriteGuard {
472            subbuffer: self,
473            data,
474            range,
475        })
476    }
477}
478
479impl<T> Subbuffer<T> {
480    /// Converts the subbuffer to a slice of one element.
481    pub fn into_slice(self) -> Subbuffer<[T]> {
482        unsafe { self.reinterpret_unchecked_inner() }
483    }
484
485    /// Same as [`into_slice`], except it works with a reference to the subbuffer.
486    ///
487    /// [`into_slice`]: Self::into_slice
488    pub fn as_slice(&self) -> &Subbuffer<[T]> {
489        unsafe { self.reinterpret_unchecked_ref_inner() }
490    }
491}
492
493impl<T> Subbuffer<[T]> {
494    /// Returns the number of elements in the slice.
495    pub fn len(&self) -> DeviceSize {
496        debug_assert_eq!(self.size % size_of::<T>() as DeviceSize, 0);
497
498        self.size / size_of::<T>() as DeviceSize
499    }
500
501    /// Reduces the subbuffer to just one element of the slice.
502    ///
503    /// # Panics
504    ///
505    /// - Panics if `index` is out of bounds.
506    pub fn index(self, index: DeviceSize) -> Subbuffer<T> {
507        assert!(index <= self.len());
508
509        unsafe { self.index_unchecked(index) }
510    }
511
512    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
513    pub unsafe fn index_unchecked(self, index: DeviceSize) -> Subbuffer<T> {
514        Subbuffer {
515            offset: self.offset + index * size_of::<T>() as DeviceSize,
516            size: size_of::<T>() as DeviceSize,
517            parent: self.parent,
518            marker: PhantomData,
519        }
520    }
521
522    /// Reduces the subbuffer to just a range of the slice.
523    ///
524    /// # Panics
525    ///
526    /// - Panics if `range` is out of bounds.
527    /// - Panics if `range` is empty.
528    pub fn slice(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
529        let Range { start, end } = memory::range(range, ..self.len()).unwrap();
530
531        self.offset += start * size_of::<T>() as DeviceSize;
532        self.size = (end - start) * size_of::<T>() as DeviceSize;
533        assert_ne!(self.size, 0);
534
535        self
536    }
537
538    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
539    pub unsafe fn slice_unchecked(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
540        let Range { start, end } = memory::range_unchecked(range, ..self.len());
541
542        self.offset += start * size_of::<T>() as DeviceSize;
543        self.size = (end - start) * size_of::<T>() as DeviceSize;
544        debug_assert!(self.size != 0);
545
546        self
547    }
548
549    /// Splits the subbuffer into two at an index.
550    ///
551    /// # Panics
552    ///
553    /// - Panics if `mid` is not greater than `0`.
554    /// - Panics if `mid` is not less than `self.len()`.
555    pub fn split_at(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
556        assert!(0 < mid && mid < self.len());
557
558        unsafe { self.split_at_unchecked(mid) }
559    }
560
561    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
562    pub unsafe fn split_at_unchecked(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
563        let first = unsafe { self.clone().slice_unchecked(..mid) };
564        let second = unsafe { self.slice_unchecked(mid..) };
565        (first, second)
566    }
567}
568
impl Subbuffer<[u8]> {
    /// Creates a new `Subbuffer<[u8]>` spanning the whole buffer.
    #[inline]
    pub fn new(buffer: Arc<Buffer>) -> Self {
        Subbuffer {
            offset: 0,
            size: buffer.size(),
            parent: SubbufferParent::Buffer(DeviceOwnedDebugWrapper(buffer)),
            marker: PhantomData,
        }
    }

    /// Casts the slice to a different element type while ensuring correct alignment for the type.
    ///
    /// The offset of the subbuffer is rounded up to the alignment of `T` and the size adjusted for
    /// the padding, then the size is rounded down to the nearest multiple of `T`'s size.
    ///
    /// # Panics
    ///
    /// - Panics if the aligned offset would be out of bounds.
    pub fn cast_aligned<T>(self) -> Subbuffer<[T]>
    where
        T: BufferContents,
    {
        // NOTE(review): `DeviceLayout` sizes are non-zero, so this `unwrap` would panic for a
        // zero-sized `T` — presumably `BufferContents` types are never zero-sized; confirm.
        let layout = DeviceLayout::from_layout(Layout::new::<T>()).unwrap();
        let aligned = self.align_to(layout);

        // SAFETY: `align_to` made the offset and size conform to `T`'s layout.
        unsafe { aligned.reinterpret_unchecked() }
    }

    /// Aligns the subbuffer to the given `layout` by rounding the offset up to
    /// `layout.alignment()` and adjusting the size for the padding, and then rounding the size
    /// down to the nearest multiple of `layout.size()`.
    ///
    /// # Panics
    ///
    /// - Panics if the aligned offset would be out of bounds.
    /// - Panics if `layout.alignment()` exceeds `64`.
    #[inline]
    pub fn align_to(mut self, layout: DeviceLayout) -> Subbuffer<[u8]> {
        assert!(layout.alignment().as_devicesize() <= 64);

        // Alignment is computed against the offset within the device memory block, not the
        // offset within the buffer.
        let offset = self.memory_offset();
        let padding_front = align_up(offset, layout.alignment()) - offset;

        self.offset += padding_front;
        // `checked_sub` panics (via `unwrap`) if the front padding exceeds the subbuffer's size,
        // i.e. if the aligned offset would be out of bounds.
        self.size = self.size.checked_sub(padding_front).unwrap();
        // Round the size down to a whole number of `layout.size()` elements.
        self.size -= self.size % layout.size();

        self
    }
}
621
impl From<Arc<Buffer>> for Subbuffer<[u8]> {
    // Delegates to `Subbuffer::new`, spanning the whole buffer as raw bytes.
    #[inline]
    fn from(buffer: Arc<Buffer>) -> Self {
        Self::new(buffer)
    }
}
628
629impl<T: ?Sized> Clone for Subbuffer<T> {
630    fn clone(&self) -> Self {
631        Subbuffer {
632            parent: self.parent.clone(),
633            ..*self
634        }
635    }
636}
637
// A subbuffer is owned by whatever device its parent buffer is owned by.
unsafe impl<T: ?Sized> DeviceOwned for Subbuffer<T> {
    fn device(&self) -> &Arc<Device> {
        self.buffer().device()
    }
}
643
644impl<T: ?Sized> PartialEq for Subbuffer<T> {
645    fn eq(&self, other: &Self) -> bool {
646        self.parent == other.parent && self.offset == other.offset && self.size == other.size
647    }
648}
649
650impl<T: ?Sized> Eq for Subbuffer<T> {}
651
impl<T: ?Sized> Hash for Subbuffer<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hashes exactly the fields compared by `PartialEq`, keeping `Hash` consistent with `Eq`.
        self.parent.hash(state);
        self.offset.hash(state);
        self.size.hash(state);
    }
}
659
/// RAII structure used to release the CPU access of a subbuffer when dropped.
///
/// This structure is created by the [`read`] method on [`Subbuffer`].
///
/// [`read`]: Subbuffer::read
#[derive(Debug)]
pub struct BufferReadGuard<'a, T: ?Sized> {
    // The locked subbuffer; used to reach the buffer's lock state on drop.
    subbuffer: &'a Subbuffer<T>,
    // Reference into the mapped memory, interpreted as `T`.
    data: &'a T,
    // The exact byte range that was read-locked (possibly widened to atom-size boundaries).
    range: Range<DeviceSize>,
}
671
impl<T: ?Sized> Drop for BufferReadGuard<'_, T> {
    fn drop(&mut self) {
        let mut state = self.subbuffer.buffer().state();
        // SAFETY: `self.range` is the same range that was locked in `Subbuffer::read`.
        unsafe { state.cpu_read_unlock(self.range.clone()) };
    }
}
678
impl<T: ?Sized> Deref for BufferReadGuard<'_, T> {
    type Target = T;

    // Gives shared access to the locked contents for as long as the guard lives.
    fn deref(&self) -> &Self::Target {
        self.data
    }
}
686
/// RAII structure used to release the CPU write access of a subbuffer when dropped.
///
/// This structure is created by the [`write`] method on [`Subbuffer`].
///
/// [`write`]: Subbuffer::write
#[derive(Debug)]
pub struct BufferWriteGuard<'a, T: ?Sized> {
    // The locked subbuffer; used to reach the buffer's memory and lock state on drop.
    subbuffer: &'a Subbuffer<T>,
    // Mutable reference into the mapped memory, interpreted as `T`.
    data: &'a mut T,
    // The exact byte range that was write-locked (possibly widened to atom-size boundaries).
    range: Range<DeviceSize>,
}
698
impl<T: ?Sized> Drop for BufferWriteGuard<'_, T> {
    fn drop(&mut self) {
        let allocation = match self.subbuffer.buffer().memory() {
            BufferMemory::Normal(a) => a,
            // `Subbuffer::write` only succeeds for normally-allocated memory, so no other
            // variant can be observed through a live guard.
            BufferMemory::Sparse => unreachable!(),
            BufferMemory::External => unreachable!(),
        };

        // Non-host-coherent memory (atom size present) must be flushed for the host's writes to
        // become visible. Skipped while panicking so the `unwrap` below can't cause a double
        // panic (and thus an abort) during unwinding.
        if allocation.atom_size().is_some() && !thread::panicking() {
            let memory_range = MappedMemoryRange {
                offset: self.range.start,
                size: self.range.end - self.range.start,
                _ne: crate::NonExhaustive(()),
            };

            // SAFETY: the same range was successfully invalidated in `Subbuffer::write`, so it
            // lies within the mapped range of the memory.
            unsafe { allocation.flush_range_unchecked(memory_range) }.unwrap();
        }

        let mut state = self.subbuffer.buffer().state();
        // SAFETY: `self.range` is the same range that was locked in `Subbuffer::write`.
        unsafe { state.cpu_write_unlock(self.range.clone()) };
    }
}
721
impl<T: ?Sized> Deref for BufferWriteGuard<'_, T> {
    type Target = T;

    // Gives shared (read) access to the locked contents.
    fn deref(&self) -> &Self::Target {
        self.data
    }
}
729
impl<T: ?Sized> DerefMut for BufferWriteGuard<'_, T> {
    // Gives exclusive (write) access to the locked contents; changes are flushed on drop for
    // non-host-coherent memory.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.data
    }
}
735
736/// Trait for types of data that can be put in a buffer.
737///
738/// This trait is not intended to be implemented manually (ever) and attempting so will make you
739/// one sad individual very quickly. Rather you should use [the derive macro]. Note also that there
740/// are blanket implementations of this trait: you don't need to implement it if the type in
741/// question already implements bytemuck's [`AnyBitPattern`]. Most if not all linear algebra crates
742/// have a feature flag that you can enable for bytemuck support. The trait is also already
743/// implemented for all slices where the element type implements `BufferContents`.
744///
745/// # Examples
746///
747/// Deriving the trait for sized types:
748///
749/// ```
750/// # use vulkano::buffer::BufferContents;
751/// #[derive(BufferContents)]
752/// #[repr(C)]
753/// struct MyData {
754///     x: f32,
755///     y: f32,
756///     array: [i32; 12],
757/// }
758/// ```
759///
760/// Deriving the trait for unsized types works the same:
761///
762/// ```
763/// # use vulkano::buffer::BufferContents;
764/// #[derive(BufferContents)]
765/// #[repr(C)]
766/// struct MyData {
767///     x: f32,
768///     y: f32,
769///     slice: [i32],
770/// }
771/// ```
772///
773/// This even works if the last field is a user-defined DST too:
774///
775/// ```
776/// # use vulkano::buffer::BufferContents;
777/// #[derive(BufferContents)]
778/// #[repr(C)]
779/// struct MyData {
780///     x: f32,
781///     y: f32,
782///     other: OtherData,
783/// }
784///
785/// #[derive(BufferContents)]
786/// #[repr(C)]
787/// struct OtherData {
788///     slice: [i32],
789/// }
790/// ```
791///
792/// You can also use generics if you please:
793///
794/// ```
795/// # use vulkano::buffer::BufferContents;
796/// #[derive(BufferContents)]
797/// #[repr(C)]
798/// struct MyData<T, U> {
799///     x: T,
800///     y: T,
801///     slice: [U],
802/// }
803/// ```
804///
805/// This even works with dependently-sized types:
806///
807/// ```
808/// # use vulkano::buffer::BufferContents;
809/// #[derive(BufferContents)]
810/// #[repr(C)]
811/// struct MyData<T>
812/// where
813///     T: ?Sized,
814/// {
815///     x: f32,
816///     y: f32,
817///     z: T,
818/// }
819/// ```
820///
821/// [the derive macro]: vulkano_macros::BufferContents
822//
823// If you absolutely *must* implement this trait by hand, here are the safety requirements (but
824// please open an issue on GitHub instead):
825//
826// - The type must be a struct and all fields must implement `BufferContents`.
827// - `LAYOUT` must be the correct layout for the type, which also means the type must either be
828//   sized or if it's unsized then its metadata must be the same as that of a slice. Implementing
//   `BufferContents` for any other kind of DST is instantly, horrifically undefined behavior.
830// - `ptr_from_slice` must create a pointer with the same address as the `slice` parameter that is
831//   passed in. The pointer is expected to be aligned properly already.
832// - `ptr_from_slice` must create a pointer that is expected to be valid for reads (and potentially
833//   writes) for exactly `slice.len()` bytes. The `slice.len()` is expected to be valid for the
834//   `LAYOUT`.
pub unsafe trait BufferContents: Send + Sync + 'static {
    /// The layout of the contents.
    const LAYOUT: BufferContentsLayout;

    /// Creates a pointer to `Self` from a pointer to a range of mapped memory.
    ///
    /// # Safety
    ///
    /// - `slice` must be a pointer that's valid for reads and writes of the entire slice.
    /// - `slice.len()` must be a valid size for [`Self::LAYOUT`] (for unsized types, a head plus
    ///   a whole number of elements).
    #[doc(hidden)]
    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self;
}
847
848unsafe impl<T> BufferContents for T
849where
850    T: AnyBitPattern + Send + Sync,
851{
852    const LAYOUT: BufferContentsLayout = BufferContentsLayout::from_sized(Layout::new::<T>());
853
854    #[inline(always)]
855    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
856        <*mut [u8]>::cast::<T>(slice.as_ptr())
857    }
858}
859
860unsafe impl<T> BufferContents for [T]
861where
862    T: BufferContents,
863{
864    const LAYOUT: BufferContentsLayout = BufferContentsLayout::from_slice(Layout::new::<T>());
865
866    #[inline(always)]
867    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
868        let data = <*mut [u8]>::cast::<T>(slice.as_ptr());
869        let len = slice.len() / size_of::<T>();
870        debug_assert_eq!(slice.len() % size_of::<T>(), 0);
871
872        ptr::slice_from_raw_parts_mut(data, len)
873    }
874}
875
876/// Describes the layout required for a type so that it can be read from/written to a buffer. This
877/// is used to allocate (sub)buffers generically.
878///
879/// This is similar to [`DeviceLayout`] except that this exists for the sole purpose of describing
880/// the layout of buffer contents specifically. Which means for example that the sizedness of the
881/// type is captured, as well as the layout of the head and tail if the layout is for unsized data,
882/// in order to be able to represent everything that Vulkan can stuff in a buffer.
883///
884/// `BufferContentsLayout` also has an additional invariant compared to `DeviceLayout`: the
885/// alignment of the data must not exceed `64`. This is because that's the guaranteed alignment
886/// that all `DeviceMemory` blocks must be aligned to at minimum, and hence any greater alignment
887/// can't be guaranteed. Other than that, the invariant that sizes must be non-zero applies here as
888/// well, for both sized data and the element type of unsized data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferContentsLayout(BufferContentsLayoutInner);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum BufferContentsLayoutInner {
    /// The data has a statically-known size; the layout describes the whole type.
    Sized(DeviceLayout),
    /// The data ends in a slice whose element count is only known at runtime.
    Unsized {
        // Layout of the sized fields preceding the slice, if the type has any.
        head_layout: Option<DeviceLayout>,
        // Layout of a single slice element; per the invariants above, its size is non-zero.
        element_layout: DeviceLayout,
    },
}
900
impl BufferContentsLayout {
    /// Returns the size of the head (sized part). If the data has no sized part, then this will
    /// return 0.
    #[inline]
    pub const fn head_size(&self) -> DeviceSize {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized.size(),
            BufferContentsLayoutInner::Unsized {
                head_layout: None, ..
            } => 0,
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                ..
            } => head_layout.size(),
        }
    }

    /// Returns the size of the element type if the data is unsized, or returns [`None`].
    /// Guaranteed to be non-zero.
    #[inline]
    pub const fn element_size(&self) -> Option<DeviceSize> {
        match &self.0 {
            BufferContentsLayoutInner::Sized(_) => None,
            BufferContentsLayoutInner::Unsized { element_layout, .. } => {
                Some(element_layout.size())
            }
        }
    }

    /// Returns the alignment required for the data. Guaranteed to not exceed `64`.
    #[inline]
    pub const fn alignment(&self) -> DeviceAlignment {
        match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized.alignment(),
            // Headless unsized data is aligned like its elements.
            BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            } => element_layout.alignment(),
            // `pad_to_alignment` already folds the element alignment into the head's
            // alignment, so the head alone determines the overall alignment here.
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                ..
            } => head_layout.alignment(),
        }
    }

    /// Returns the [`DeviceLayout`] for the data for the given `len`, or returns [`None`] if `len`
    /// is zero or if the total size would exceed [`DeviceLayout::MAX_SIZE`].
    #[inline]
    pub const fn layout_for_len(&self, len: DeviceSize) -> Option<DeviceLayout> {
        match &self.0 {
            // Sized data has one fixed layout; `len` is ignored.
            BufferContentsLayoutInner::Sized(sized) => Some(*sized),
            BufferContentsLayoutInner::Unsized {
                head_layout,
                element_layout,
            } => {
                // Layout of the slice part: `len` repetitions of the element layout.
                let (tail_layout, _) = try_opt!(element_layout.repeat(len));

                if let Some(head_layout) = head_layout {
                    // Place the tail after the head and pad the whole to its alignment,
                    // mirroring what rustc does for a `repr(C)` struct.
                    let (layout, _) = try_opt!(head_layout.extend(tail_layout));

                    Some(layout.pad_to_alignment())
                } else {
                    Some(tail_layout)
                }
            }
        }
    }

    /// Creates a new `BufferContentsLayout` from a sized layout. This is intended for use by the
    /// derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn from_sized(sized: Layout) -> Self {
        assert!(
            sized.align() <= 64,
            "types with alignments above 64 are not valid buffer contents",
        );

        // NOTE(review): `DeviceLayout::from_layout` presumably fails only for layouts that
        // violate `DeviceLayout`'s non-zero-size invariant; such types hit this
        // `unreachable!()` at const-evaluation time — TODO confirm.
        if let Some(sized) = DeviceLayout::from_layout(sized) {
            Self(BufferContentsLayoutInner::Sized(sized))
        } else {
            unreachable!()
        }
    }

    /// Creates a new `BufferContentsLayout` from an element layout. This is intended for use by
    /// the derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn from_slice(element_layout: Layout) -> Self {
        assert!(
            element_layout.align() <= 64,
            "types with alignments above 64 are not valid buffer contents",
        );

        // See the note in `from_sized` regarding this `unreachable!()`.
        if let Some(element_layout) = DeviceLayout::from_layout(element_layout) {
            Self(BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            })
        } else {
            unreachable!()
        }
    }

    /// Creates a new `BufferContentsLayout` from the given field layouts. This is intended for use
    /// by the derive macro only.
    #[doc(hidden)]
    #[inline]
    pub const fn from_field_layouts(field_layouts: &[Layout], last_field_layout: Self) -> Self {
        // `repr(C)`-style extension of one layout with the next; the field offset returned by
        // `extend` is not needed here.
        const fn extend(previous: DeviceLayout, next: DeviceLayout) -> DeviceLayout {
            match previous.extend(next) {
                Some((layout, _)) => layout,
                None => unreachable!(),
            }
        }

        // Fold every field preceding the last one into a single head layout.
        // (A `while` loop because `for` is not allowed in `const fn`.)
        let mut head_layout = None;
        let mut i = 0;

        while i < field_layouts.len() {
            head_layout = match DeviceLayout::from_layout(field_layouts[i]) {
                Some(field_layout) => Some(match head_layout {
                    Some(layout) => extend(layout, field_layout),
                    None => field_layout,
                }),
                None => unreachable!(),
            };

            i += 1;
        }

        // Append the last field. If it is unsized, its own head (if any) is appended to the
        // accumulated head and its element layout is carried over unchanged.
        let layout = Self(match last_field_layout.0 {
            BufferContentsLayoutInner::Sized(field_layout) => {
                BufferContentsLayoutInner::Sized(match head_layout {
                    Some(layout) => extend(layout, field_layout),
                    None => field_layout,
                })
            }
            BufferContentsLayoutInner::Unsized {
                head_layout: field_head_layout,
                element_layout,
            } => BufferContentsLayoutInner::Unsized {
                head_layout: match (head_layout, field_head_layout) {
                    (Some(layout), Some(field_layout)) => Some(extend(layout, field_layout)),
                    (Some(layout), None) => Some(layout),
                    (None, Some(field_layout)) => Some(field_layout),
                    (None, None) => None,
                },
                element_layout,
            },
        });

        assert!(
            layout.alignment().as_devicesize() <= 64,
            "types with alignments above 64 are not valid buffer contents",
        );

        layout.pad_to_alignment()
    }

    /// Creates a new `BufferContentsLayout` by rounding up the size of the head to the nearest
    /// multiple of its alignment if the layout is sized, or by rounding up the size of the head to
    /// the nearest multiple of the alignment of the element type and aligning the head to the
    /// alignment of the element type if there is a sized part. Doesn't do anything if there is no
    /// sized part.
    const fn pad_to_alignment(&self) -> Self {
        Self(match &self.0 {
            BufferContentsLayoutInner::Sized(sized) => {
                BufferContentsLayoutInner::Sized(sized.pad_to_alignment())
            }
            // No sized part: nothing to pad.
            BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout,
            } => BufferContentsLayoutInner::Unsized {
                head_layout: None,
                element_layout: *element_layout,
            },
            BufferContentsLayoutInner::Unsized {
                head_layout: Some(head_layout),
                element_layout,
            } => {
                // We must pad the head to the alignment of the element type, *not* the alignment
                // of the head.
                //
                // Consider a head layout of `(u8, u8, u8)` and an element layout of `u32`. If we
                // padded the head to its own alignment, like is the case for sized layouts, it
                // wouldn't change the size. Yet there is padding between the head and the first
                // element of the slice.
                //
                // The reverse is true: consider a head layout of `(u16, u8)` and an element layout
                // of `u8`. If we padded the head to its own alignment, it would be too large.
                let padded_head_size =
                    head_layout.size() + head_layout.padding_needed_for(element_layout.alignment());

                // SAFETY: `BufferContentsLayout`'s invariant guarantees that the alignment of the
                // element type doesn't exceed 64, which together with the overflow invariant of
                // `DeviceLayout` means that this can't overflow.
                let padded_head_size =
                    unsafe { NonZeroDeviceSize::new_unchecked(padded_head_size) };

                // We have to align the head to the alignment of the element type, so that the
                // struct as a whole is aligned correctly when a different struct is extended with
                // this one.
                //
                // Note that this is *not* the same as aligning the head to the alignment of the
                // element type and then padding the layout to its alignment. Consider the same
                // layout from above, with a head layout of `(u16, u8)` and an element layout of
                // `u8`. If we aligned the head to the element type and then padded it to its own
                // alignment, we would get the same wrong result as above. This instead ensures the
                // head is padded to the element and aligned to it, without the alignment of the
                // head interfering.
                let alignment =
                    DeviceAlignment::max(head_layout.alignment(), element_layout.alignment());

                if let Some(head_layout) = DeviceLayout::new(padded_head_size, alignment) {
                    BufferContentsLayoutInner::Unsized {
                        head_layout: Some(head_layout),
                        element_layout: *element_layout,
                    }
                } else {
                    unreachable!()
                }
            }
        })
    }

    /// Returns whether the layout describes sized data.
    fn is_sized(&self) -> bool {
        matches!(
            self,
            BufferContentsLayout(BufferContentsLayoutInner::Sized(..)),
        )
    }

    /// Returns the layout of the sized data.
    ///
    /// Panics if the layout is unsized.
    pub(crate) const fn unwrap_sized(self) -> DeviceLayout {
        match self.0 {
            BufferContentsLayoutInner::Sized(sized) => sized,
            BufferContentsLayoutInner::Unsized { .. } => {
                panic!("called `BufferContentsLayout::unwrap_sized` on an unsized layout");
            }
        }
    }
}
1144
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        buffer::{
            sys::{BufferCreateInfo, RawBuffer},
            BufferUsage,
        },
        memory::{
            allocator::{
                AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator,
                StandardMemoryAllocator,
            },
            MemoryRequirements, ResourceMemory,
        },
    };

    /// Checks that the layouts produced by the `BufferContents` derive macro agree with the
    /// layouts rustc computes for the same `repr(C)` types.
    #[test]
    fn derive_buffer_contents() {
        // Sized struct: the whole type is the head; there is no element type.
        #[derive(BufferContents)]
        #[repr(C)]
        struct Test1(u32, u64, u8);

        assert_eq!(Test1::LAYOUT.head_size() as usize, size_of::<Test1>());
        assert_eq!(Test1::LAYOUT.element_size(), None);
        assert_eq!(
            Test1::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<Test1>(),
        );

        // Sized struct composed of other sized types.
        #[derive(BufferContents)]
        #[repr(C)]
        struct Composite1(Test1, [f32; 9], Test1);

        assert_eq!(
            Composite1::LAYOUT.head_size() as usize,
            size_of::<Composite1>(),
        );
        assert_eq!(Composite1::LAYOUT.element_size(), None);
        assert_eq!(
            Composite1::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<Composite1>(),
        );

        // Unsized struct: the head is `u64` plus `u8` padded to the `u32` element's alignment
        // (8 + 4 bytes); the element type is `u32`.
        #[derive(BufferContents)]
        #[repr(C)]
        struct Test2(u64, u8, [u32]);

        assert_eq!(
            Test2::LAYOUT.head_size() as usize,
            size_of::<u64>() + size_of::<u32>(),
        );
        assert_eq!(
            Test2::LAYOUT.element_size().unwrap() as usize,
            size_of::<u32>(),
        );
        assert_eq!(
            Test2::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<u64>(),
        );

        // Unsized struct whose last field is itself an unsized struct.
        #[derive(BufferContents)]
        #[repr(C)]
        struct Composite2(Test1, [f32; 9], Test2);

        assert_eq!(
            Composite2::LAYOUT.head_size() as usize,
            // `[f32; 9]` (36 bytes) is padded to `Test2`'s alignment of 8, adding one more
            // `f32`-sized slot — hence `[f32; 10]` here.
            size_of::<Test1>() + size_of::<[f32; 10]>() + size_of::<u64>() + size_of::<u32>(),
        );
        assert_eq!(
            Composite2::LAYOUT.element_size().unwrap() as usize,
            size_of::<u32>(),
        );
        assert_eq!(
            Composite2::LAYOUT.alignment().as_devicesize() as usize,
            align_of::<u64>(),
        );
    }

    /// Checks `split_at` on a 6-element slice buffer: in-bounds splits produce the expected
    /// lengths, while splitting at the very start or the very end must panic.
    #[test]
    fn split_at() {
        let (device, _) = gfx_dev_and_queue!();
        let allocator = Arc::new(StandardMemoryAllocator::new_default(device));

        let buffer = Buffer::new_slice::<u32>(
            allocator,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
            AllocationCreateInfo::default(),
            6,
        )
        .unwrap();

        {
            let (left, right) = buffer.clone().split_at(2);
            assert_eq!(left.len(), 2);
            assert_eq!(right.len(), 4);
        }

        {
            let (left, right) = buffer.clone().split_at(5);
            assert_eq!(left.len(), 5);
            assert_eq!(right.len(), 1);
        }

        {
            // Splitting at 0 would leave the left half empty.
            assert_should_panic!({ buffer.clone().split_at(0) });
        }

        {
            // Splitting at `len` would leave the right half empty.
            assert_should_panic!({ buffer.split_at(6) });
        }
    }

    /// Checks `cast_aligned`/`align_to` on a buffer that deliberately does not start at the
    /// beginning of its memory block.
    #[test]
    fn cast_aligned() {
        let (device, _) = gfx_dev_and_queue!();
        let allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

        let raw_buffer = RawBuffer::new(
            device,
            BufferCreateInfo {
                size: 32,
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
        )
        .unwrap();

        let requirements = MemoryRequirements {
            layout: DeviceLayout::from_size_alignment(32, 1).unwrap(),
            memory_type_bits: 1,
            prefers_dedicated_allocation: false,
            requires_dedicated_allocation: false,
        };

        // Allocate some junk in the same block as the buffer.
        let _junk = allocator
            .allocate(
                MemoryRequirements {
                    layout: DeviceLayout::from_size_alignment(17, 1).unwrap(),
                    ..requirements
                },
                AllocationType::Linear,
                AllocationCreateInfo::default(),
                None,
            )
            .unwrap();

        let allocation = allocator
            .allocate(
                requirements,
                AllocationType::Linear,
                AllocationCreateInfo::default(),
                None,
            )
            .unwrap();
        let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };

        let buffer = Buffer::from_raw(raw_buffer, BufferMemory::Normal(allocation));
        let buffer = Subbuffer::from(Arc::new(buffer));

        // The 17-byte junk allocation pushes the buffer past offset 0 of the block, so the
        // alignment adjustments below actually have work to do.
        assert!(buffer.memory_offset() >= 17);

        {
            // Casting to an over-aligned type must round the offset up to that alignment.
            #[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
            #[repr(C, align(16))]
            struct Test([u8; 16]);

            let aligned = buffer.clone().cast_aligned::<Test>();
            assert_eq!(aligned.memory_offset() % 16, 0);
            assert_eq!(aligned.size(), 16);
        }

        {
            // `[u8; 16]` has alignment 1, so only the size gets trimmed to a multiple of 16.
            let aligned = buffer.clone().cast_aligned::<[u8; 16]>();
            assert_eq!(aligned.size() % 16, 0);
        }

        {
            // Aligning to 16 within a 32-byte buffer that starts misaligned leaves no room
            // for a full 32-byte layout, hence size 0.
            let layout = DeviceLayout::from_size_alignment(32, 16).unwrap();
            let aligned = buffer.clone().align_to(layout);
            assert!(is_aligned(aligned.memory_offset(), layout.alignment()));
            assert_eq!(aligned.size(), 0);
        }

        {
            // An alignment of 64 cannot be satisfied inside this subbuffer; must panic.
            let layout = DeviceLayout::from_size_alignment(1, 64).unwrap();
            assert_should_panic!({ buffer.align_to(layout) });
        }
    }
}