use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
error, fmt,
ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};
use crate::util::Mutex;
use crate::*;
/// Handle to a GPU buffer.
///
/// Clones share the same underlying dispatch handle and the same map-state
/// bookkeeping (`map_context` is behind an `Arc<Mutex<_>>`).
#[derive(Debug, Clone)]
pub struct Buffer {
    // Backend-dispatched buffer handle.
    pub(crate) inner: dispatch::DispatchBuffer,
    // Tracks the currently mapped range and any live mapped views.
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    // Size in bytes, cached so `slice()` can validate without a backend call.
    pub(crate) size: wgt::BufferAddress,
    // Usage flags the buffer was created with.
    pub(crate) usage: BufferUsages,
}
// On targets gated by `send_sync`, `Buffer` must be usable across threads.
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);
// Identity-based Eq/Ord/Hash, proxied through the dispatch handle.
crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
impl Buffer {
    /// Returns the entire buffer as a [`BindingResource`], for use when
    /// creating a bind group.
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Returns a [`BufferBinding`] spanning the whole buffer
    /// (offset 0, `size: None` meaning "to the end of the buffer").
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    /// Returns the backend-specific hal buffer for API `A`, or `None` when
    /// this buffer was not created by the `wgpu_core` backend.
    ///
    /// # Safety
    /// NOTE(review): the caller-side contract is defined by
    /// `buffer_as_hal`; presumably the handle must not be used to bypass
    /// wgpu's synchronization or outlive the buffer — confirm against the
    /// dispatch layer's documentation.
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        // `as_core_opt` yields `None` for non-core backends.
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a slice of the buffer covering the byte range `bounds`.
    ///
    /// # Panics
    /// Panics if the range is empty or does not fit in the buffer
    /// (validated by `range_to_offset_size` / `check_buffer_bounds`).
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// # Panics
    /// Panics if any `BufferView`/`BufferViewMut` obtained from this buffer
    /// is still alive (enforced by `MapContext::reset`).
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroys the buffer; forwards to the backend's `destroy`.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Size of the buffer in bytes, as recorded at creation.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Usage flags the buffer was created with.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Maps the given range asynchronously; convenience for
    /// `self.slice(bounds).map_async(mode, callback)`.
    ///
    /// # Panics
    /// Panics if the bounds are invalid or the buffer is already mapped.
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Read-only view of a mapped range; convenience for
    /// `self.slice(bounds).get_mapped_range()`.
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView {
        self.slice(bounds).get_mapped_range()
    }

    /// Writable view of a mapped range; convenience for
    /// `self.slice(bounds).get_mapped_range_mut()`.
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferViewMut {
        self.slice(bounds).get_mapped_range_mut()
    }

    /// Downcasts to a custom backend implementation, if one is in use.
    #[cfg(custom)]
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}
/// A borrowed byte range of a [`Buffer`].
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    // Byte offset from the start of `buffer`.
    pub(crate) offset: BufferAddress,
    // Length in bytes; `BufferSize` cannot represent zero, so slices are
    // never empty (see `range_to_offset_size`).
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);
impl<'a> BufferSlice<'a> {
    /// Returns a sub-slice; `bounds` are relative to this slice's own start.
    ///
    /// # Panics
    /// Panics if `bounds` are empty or do not fit within this slice.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        // Validate against this slice's length, then rebase the offset onto
        // the underlying buffer.
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset,
            size,
        }
    }

    /// Starts asynchronously mapping this range; `callback` fires when the
    /// map attempt completes.
    ///
    /// # Panics
    /// Panics if the buffer is already mapped — a buffer tracks exactly one
    /// mapped range at a time.
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.mapped_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.mapped_range = self.offset..end;
        // Note: the map-context lock is still held during the backend call,
        // so no other map can be recorded in between.
        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Returns a read-only view of this slice's mapped bytes.
    ///
    /// # Panics
    /// Panics (via `MapContext::validate_and_add`) if the range is not
    /// mapped or conflicts with an existing mutable view.
    #[track_caller]
    pub fn get_mapped_range(&self) -> BufferView {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        // Register the range first so aliasing violations are rejected
        // before touching the backend.
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Returns a writable view of this slice's mapped bytes.
    ///
    /// # Panics
    /// Panics (via `MapContext::validate_and_add`) if the range is not
    /// mapped or overlaps any other live view.
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
            // Reading through the view is only "free" if MAP_READ was
            // requested; the Deref impl warns otherwise.
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }

    /// The buffer this slice borrows from.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Byte offset of the slice within the buffer.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Length of the slice in bytes.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}
impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
fn from(value: BufferSlice<'a>) -> Self {
BufferBinding {
buffer: value.buffer,
offset: value.offset,
size: Some(value.size),
}
}
}
impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
fn from(value: BufferSlice<'a>) -> Self {
crate::BindingResource::Buffer(crate::BufferBinding::from(value))
}
}
/// Returns `true` iff the half-open ranges `a` and `b` share at least one
/// index. Ranges that merely touch at an endpoint do not overlap.
fn range_overlaps(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    // Disjoint exactly when one range ends at or before the other starts.
    !(a.end <= b.start || b.end <= a.start)
}
/// Whether a handed-out mapped view may mutate its range.
#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    // Exclusive read-write access, like `&mut [u8]`.
    Mutable,
    // Shared read-only access, like `&[u8]`.
    Immutable,
}
impl RangeMappingKind {
fn allowed_concurrently_with(self, other: Self) -> bool {
matches!(
(self, other),
(RangeMappingKind::Immutable, RangeMappingKind::Immutable)
)
}
}
/// A currently-borrowed sub-range of the mapped region, plus its access kind.
#[derive(Debug, Clone)]
struct Subrange {
    // Byte range within the buffer.
    index: Range<BufferAddress>,
    kind: RangeMappingKind,
}
impl Subrange {
    /// Builds a subrange covering `offset .. offset + size`.
    fn new(offset: BufferAddress, size: BufferSize, kind: RangeMappingKind) -> Self {
        let end = offset + size.get();
        Self {
            index: offset..end,
            kind,
        }
    }
}
impl fmt::Display for Subrange {
    /// Formats as `start..end (Kind)`, used in map-validation panic messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { index, kind } = self;
        write!(f, "{}..{} ({:?})", index.start, index.end, kind)
    }
}
/// Per-buffer bookkeeping of the mapped range and the views borrowed from it.
#[derive(Debug)]
pub(crate) struct MapContext {
    // The single currently-mapped range; `0..0` means "not mapped".
    mapped_range: Range<BufferAddress>,
    // Ranges handed out via `get_mapped_range(_mut)`; removed on view drop.
    sub_ranges: Vec<Subrange>,
}
impl MapContext {
    /// Creates the bookkeeping state. Pass `Some(range)` for buffers mapped
    /// at creation, `None` for unmapped buffers.
    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
        Self {
            mapped_range: mapped_range.unwrap_or(0..0),
            sub_ranges: Vec::new(),
        }
    }

    /// Marks the buffer as unmapped.
    ///
    /// # Panics
    /// Panics if any view created by `get_mapped_range(_mut)` is still
    /// alive — its slice would dangle once the buffer is unmapped.
    fn reset(&mut self) {
        self.mapped_range = 0..0;
        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Validates that `new_sub` may be handed out as a view, then records it.
    ///
    /// # Panics
    /// Panics if the buffer is unmapped, if `new_sub` is not entirely inside
    /// the mapped range, or if it overlaps an existing view in a way that
    /// would break Rust's aliasing rules.
    #[track_caller]
    fn validate_and_add(&mut self, new_sub: Subrange) {
        if self.mapped_range.is_empty() {
            panic!("tried to call get_mapped_range(_mut) on an unmapped buffer");
        }
        // BUG FIX: this must be a *containment* check, not an overlap check.
        // The previous `!range_overlaps(..)` guard accepted any range that
        // merely intersected the mapped range, so a request extending past
        // it would reach the backend and hand out access to unmapped memory
        // — contradicting the "entirely mapped" promise in the message below.
        if new_sub.index.start < self.mapped_range.start
            || self.mapped_range.end < new_sub.index.end
        {
            panic!(
                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
                Attempted to get range {}, but the mapped range is {}..{}",
                new_sub, self.mapped_range.start, self.mapped_range.end
            );
        }
        for sub in self.sub_ranges.iter() {
            if range_overlaps(&sub.index, &new_sub.index)
                && !sub.kind.allowed_concurrently_with(new_sub.kind)
            {
                panic!(
                    "tried to call get_mapped_range(_mut) on a range that has already \
                    been mapped and would break Rust memory aliasing rules. Attempted \
                    to get range {}, and the conflicting range is {}",
                    new_sub, sub
                );
            }
        }
        self.sub_ranges.push(new_sub);
    }

    /// Unregisters a view's range when it is dropped.
    ///
    /// # Panics
    /// Panics if the exact range was never registered (an internal bug).
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();
        let index = self
            .sub_ranges
            .iter()
            .position(|r| r.index == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}
/// Describes a [`Buffer`]; the `wgt` descriptor specialized to this crate's
/// label type.
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);
/// Error delivered to the `map_async` callback when mapping fails.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fixed message; no fields to interpolate.
        f.write_str("Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}
/// How a mapped buffer range will be accessed by the host.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map for reading.
    Read,
    /// Map for writing.
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);
/// Read-only view into a mapped region of a [`Buffer`].
///
/// Registered in the buffer's `MapContext` on creation and unregistered in
/// `Drop`; `Buffer::unmap` panics while any such view is alive.
#[derive(Debug)]
pub struct BufferView {
    buffer: Buffer,
    // Byte offset of this view within the buffer.
    offset: BufferAddress,
    // Length of the view in bytes.
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}
#[cfg(webgpu)]
impl BufferView {
    /// On the WebGPU backend, exposes the mapped range as a JS `Uint8Array`.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}
impl core::ops::Deref for BufferView {
    type Target = [u8];

    /// Exposes the mapped bytes as a plain byte slice.
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.inner.slice()
    }
}
impl AsRef<[u8]> for BufferView {
    /// Same bytes as `Deref`, for APIs that take `AsRef<[u8]>`.
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.deref()
    }
}
/// Writable view into a mapped region of a [`Buffer`].
///
/// Registered in the buffer's `MapContext` on creation and unregistered in
/// `Drop`.
#[derive(Debug)]
pub struct BufferViewMut {
    buffer: Buffer,
    // Byte offset of this view within the buffer.
    offset: BufferAddress,
    // Length of the view in bytes.
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
    // True when the buffer has MAP_READ; the Deref impl warns on reads
    // through a view that lacks it.
    readable: bool,
}
impl AsMut<[u8]> for BufferViewMut {
    /// Mutable access to the mapped bytes, for APIs taking `AsMut<[u8]>`.
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.deref_mut()
    }
}
impl Deref for BufferViewMut {
    type Target = [u8];

    /// Read access to the mapped bytes. Warns when the buffer lacks
    /// `MAP_READ` (see `readable`), since such reads are slow.
    fn deref(&self) -> &Self::Target {
        if self.readable {
            return self.inner.slice();
        }
        log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        self.inner.slice()
    }
}
impl DerefMut for BufferViewMut {
    // Mutable access to the mapped bytes; no read-path warning needed here.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}
impl Drop for BufferView {
    /// Unregisters this view's subrange so the buffer can be unmapped again.
    fn drop(&mut self) {
        let mut ctx = self.buffer.map_context.lock();
        ctx.remove(self.offset, self.size);
    }
}
impl Drop for BufferViewMut {
    /// Unregisters this view's subrange so the buffer can be unmapped again.
    fn drop(&mut self) {
        let mut ctx = self.buffer.map_context.lock();
        ctx.remove(self.offset, self.size);
    }
}
/// Panics unless `slice_offset .. slice_offset + slice_size` lies entirely
/// within a buffer of `buffer_size` bytes (overflow-safe).
#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // The slice must start strictly inside the buffer; slices are non-empty,
    // so an offset at or past the end can never be valid.
    assert!(
        slice_offset < buffer_size,
        "slice offset {} is out of range for buffer of size {}",
        slice_offset,
        buffer_size
    );
    // The end must neither overflow u64 nor run past the buffer.
    match slice_offset.checked_add(slice_size.get()) {
        Some(end) if end <= buffer_size => {}
        _ => panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        ),
    }
}
/// Converts a `RangeBounds` over a buffer of `whole_size` bytes into an
/// `(offset, non-zero length)` pair.
///
/// # Panics
/// Panics if the resolved length is zero ("buffer slices can not be empty")
/// or if the bound arithmetic over/underflows in debug builds.
#[track_caller]
pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let start = match bounds.start_bound() {
        Bound::Unbounded => 0,
        Bound::Included(&b) => b,
        // Exclusive start bounds begin one byte later.
        Bound::Excluded(&b) => b + 1,
    };
    let len = match bounds.end_bound() {
        Bound::Unbounded => whole_size - start,
        Bound::Included(&b) => b + 1 - start,
        Bound::Excluded(&b) => b - start,
    };
    let size = BufferSize::new(len).expect("buffer slices can not be empty");
    (start, size)
}
#[cfg(test)]
mod tests {
    use super::{
        check_buffer_bounds, range_overlaps, range_to_offset_size, BufferAddress, BufferSize,
    };

    /// Shorthand: build a `BufferSize` from a known-nonzero value.
    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let len = 100;
        assert_eq!(range_to_offset_size(0..2, len), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, len), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., len), (0, bs(len)));
        assert_eq!(range_to_offset_size(21.., len), (21, bs(len - 21)));
        assert_eq!(range_to_offset_size(0.., len), (0, bs(len)));
        assert_eq!(range_to_offset_size(..21, len), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        // In-range interior and exact-end slices, including u64 extremes.
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        // offset + size overflows u64; must be caught, not wrapped.
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }

    #[test]
    fn range_overlapping() {
        // Touching endpoints do not overlap; any shared index does.
        assert!(!range_overlaps(&(0..1), &(1..3)));
        assert!(range_overlaps(&(0..2), &(1..3)));
        assert!(range_overlaps(&(1..2), &(0..3)));
        assert!(range_overlaps(&(0..3), &(1..2)));
        assert!(range_overlaps(&(1..3), &(0..2)));
        assert!(!range_overlaps(&(2..3), &(0..2)));
    }
}