use core::{borrow, cell, cmp, mem, ops, sync::atomic};
use alloc::borrow::ToOwned;
use alloc::rc::Rc;
use alloc::sync::Arc;
use alloc::vec::Vec;
use crate::texel::{constants::MAX, AtomicPart, MaxAligned, MaxAtomic, MaxCell, Texel, MAX_ALIGN};
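/// An owned, maximally aligned byte buffer.
///
/// Contents are stored as whole `MaxAligned` chunks so that any slice of
/// texels can be viewed into the buffer without copying or re-allocation.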
#[derive(Clone, Default)]
pub struct Buffer {
inner: Vec<MaxAligned>,
}
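/// A shared byte buffer whose contents may be mutated through atomic
/// operations, permitting concurrent use from multiple threads.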
#[derive(Clone, Default)]
pub struct AtomicBuffer {
inner: Arc<[MaxAtomic]>,
}
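/// A shared byte buffer based on `Cell`, permitting shared mutation within a
/// single thread.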
#[derive(Clone, Default)]
pub struct CellBuffer {
inner: Rc<[MaxCell]>,
}
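/// An aligned slice of bytes, the borrowed form of [`Buffer`].
///
/// The start of the slice is aligned to `MAX_ALIGN`, which makes casts to
/// slices of arbitrary texel types sound.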
#[repr(transparent)]
#[allow(non_camel_case_types)]
pub struct buf([u8]);
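/// An aligned slice of atomically mutable bytes, the borrowed form of
/// [`AtomicBuffer`].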
#[repr(transparent)]
#[allow(non_camel_case_types)]
pub struct atomic_buf(pub(crate) [AtomicPart]);
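/// An aligned slice of `Cell`-wrapped bytes, the borrowed form of
/// [`CellBuffer`].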
#[repr(transparent)]
#[allow(non_camel_case_types)]
pub struct cell_buf(cell::Cell<[u8]>);
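/// A reference to a range of texels within an [`atomic_buf`].
///
/// `start` and `end` are byte offsets into the underlying buffer; the bytes
/// in between are interpreted through `texel`.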
pub struct AtomicSliceRef<'lt, P = u8> {
pub(crate) buf: &'lt atomic_buf,
pub(crate) texel: Texel<P>,
pub(crate) start: usize,
pub(crate) end: usize,
}
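/// A reference to a single texel within an [`atomic_buf`], addressed by the
/// byte offset `start`.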
pub struct AtomicRef<'lt, P = u8> {
pub(crate) buf: &'lt atomic_buf,
pub(crate) texel: Texel<P>,
pub(crate) start: usize,
}
impl Buffer {
const ELEMENT: MaxAligned = MaxAligned([0; MAX_ALIGN]);
pub fn as_buf(&self) -> &buf {
buf::new(self.inner.as_slice())
}
pub fn as_buf_mut(&mut self) -> &mut buf {
buf::new_mut(self.inner.as_mut_slice())
}
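/// Allocate a zero-initialized buffer holding at least `length` bytes.
///
/// The allocation is rounded up to a whole number of `MaxAligned` chunks.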
pub fn new(length: usize) -> Self {
let alloc_len = Self::alloc_len(length);
let inner = alloc::vec![Self::ELEMENT; alloc_len];
Buffer { inner }
}
pub fn capacity(&self) -> usize {
self.inner.capacity() * mem::size_of::<MaxAligned>()
}
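/// Ensure the buffer holds at least `bytes` bytes, zero-extending if
/// necessary. This never shrinks the buffer.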
pub fn grow_to(&mut self, bytes: usize) {
let new_len = Self::alloc_len(bytes);
if self.inner.len() < new_len {
self.inner.resize(new_len, Self::ELEMENT);
}
}
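/// Resize the buffer to hold exactly `bytes` bytes, rounded up to whole
/// chunks, releasing excess capacity where possible.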
pub fn resize_to(&mut self, bytes: usize) {
let new_len = Self::alloc_len(bytes);
self.inner.resize(new_len, Self::ELEMENT);
self.inner.shrink_to_fit()
}
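/// The number of `MaxAligned` chunks required to hold `length` bytes, that
/// is, the byte length divided by the chunk size and rounded up.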
fn alloc_len(length: usize) -> usize {
const CHUNK_SIZE: usize = mem::size_of::<MaxAligned>();
assert!(CHUNK_SIZE > 1);
length / CHUNK_SIZE + usize::from(length % CHUNK_SIZE != 0)
}
}
impl CellBuffer {
const ELEMENT: MaxCell = MaxCell::zero();
pub fn new(length: usize) -> Self {
let alloc_len = Buffer::alloc_len(length);
let inner: Vec<_> = (0..alloc_len).map(|_| Self::ELEMENT).collect();
CellBuffer {
inner: inner.into(),
}
}
pub fn with_buffer(buffer: Buffer) -> Self {
let inner: Vec<_> = buffer.inner.into_iter().map(MaxCell::new).collect();
CellBuffer {
inner: inner.into(),
}
}
pub fn ptr_eq(&self, other: &Self) -> bool {
Rc::ptr_eq(&self.inner, &other.inner)
}
pub fn capacity(&self) -> usize {
core::mem::size_of_val(&*self.inner)
}
pub fn get_mut(&mut self) -> Option<&mut cell_buf> {
Rc::get_mut(&mut self.inner).map(cell_buf::from_slice_mut)
}
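/// Get mutable access to the contents, cloning them first if the buffer is
/// shared. The cell analogue of `Rc::make_mut`.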
pub fn make_mut(&mut self) -> &mut cell_buf {
if Rc::get_mut(&mut self.inner).is_none() {
*self = self.to_owned().into();
}
Rc::get_mut(&mut self.inner)
.map(cell_buf::from_slice_mut)
.expect("we just made a mutable copy")
}
pub fn to_owned(&self) -> Buffer {
let inner = self.inner.iter().map(|cell| cell.get()).collect();
Buffer { inner }
}
pub fn to_resized(&self, bytes: usize) -> Self {
let mut working_copy = self.to_owned();
working_copy.resize_to(bytes);
Self::with_buffer(working_copy)
}
}
impl AtomicBuffer {
const ELEMENT: MaxAtomic = MaxAtomic::zero();
pub fn new(length: usize) -> Self {
let alloc_len = Buffer::alloc_len(length);
let inner: Vec<_> = (0..alloc_len).map(|_| Self::ELEMENT).collect();
AtomicBuffer {
inner: inner.into(),
}
}
pub fn with_buffer(buffer: Buffer) -> Self {
let inner: Vec<_> = buffer.inner.into_iter().map(MaxAtomic::new).collect();
AtomicBuffer {
inner: inner.into(),
}
}
pub fn ptr_eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.inner, &other.inner)
}
pub fn capacity(&self) -> usize {
core::mem::size_of_val(&*self.inner)
}
pub fn get_mut(&mut self) -> Option<&mut atomic_buf> {
Arc::get_mut(&mut self.inner).map(atomic_buf::from_slice_mut)
}
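/// Get mutable access to the contents, cloning them first if the buffer is
/// shared. The atomic analogue of `Arc::make_mut`; the copy reads with
/// relaxed ordering, as in `to_owned`.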
pub fn make_mut(&mut self) -> &mut atomic_buf {
if Arc::get_mut(&mut self.inner).is_none() {
*self = self.to_owned().into();
}
Arc::get_mut(&mut self.inner)
.map(atomic_buf::from_slice_mut)
.expect("we just made a mutable copy")
}
pub fn to_owned(&self) -> Buffer {
let inner = self
.inner
.iter()
.map(|cell| cell.load(atomic::Ordering::Relaxed))
.collect();
Buffer { inner }
}
pub fn to_resized(&self, bytes: usize) -> Self {
let mut working_copy = self.to_owned();
working_copy.resize_to(bytes);
Self::with_buffer(working_copy)
}
}
impl buf {
pub fn new<T>(data: &T) -> &Self
where
T: AsRef<[MaxAligned]> + ?Sized,
{
let bytes = MAX.to_bytes(data.as_ref());
Self::from_bytes(bytes).unwrap()
}
pub fn new_mut<T>(data: &mut T) -> &mut Self
where
T: AsMut<[MaxAligned]> + ?Sized,
{
let bytes = MAX.to_mut_bytes(data.as_mut());
Self::from_bytes_mut(bytes).unwrap()
}
#[must_use = "Does not mutate self"]
#[track_caller]
pub fn truncate(&self, at: usize) -> &Self {
Self::from_bytes(&self.as_bytes()[..at]).unwrap()
}
#[must_use = "Does not mutate self"]
#[track_caller]
pub fn truncate_mut(&mut self, at: usize) -> &mut Self {
Self::from_bytes_mut(&mut self.as_bytes_mut()[..at]).unwrap()
}
pub fn as_bytes(&self) -> &[u8] {
&self.0
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
&mut self.0
}
#[track_caller]
pub fn split_at(&self, at: usize) -> (&Self, &Self) {
assert!(at % MAX_ALIGN == 0);
let (a, b) = self.0.split_at(at);
let a = MAX.try_to_slice(a).expect("was previously aligned");
let b = MAX.try_to_slice(b).expect("asserted to be aligned");
(Self::new(a), Self::new(b))
}
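/// Split off the tail at `at`, shrinking `*this` to the head in place.
///
/// This relies on `Default for &mut buf` so the reference can be moved out
/// of `this` with `mem::take`.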
pub(crate) fn take_at_mut<'a>(this: &mut &'a mut Self, at: usize) -> &'a mut Self {
let (pre, post) = buf::split_at_mut(core::mem::take(this), at);
*this = pre;
post
}
pub fn split_at_mut(&mut self, at: usize) -> (&mut Self, &mut Self) {
assert!(at % MAX_ALIGN == 0);
let (a, b) = self.0.split_at_mut(at);
let a = MAX.try_to_slice_mut(a).expect("was previously aligned");
let b = MAX.try_to_slice_mut(b).expect("asserted to be aligned");
(Self::new_mut(a), Self::new_mut(b))
}
pub fn as_texels<P>(&self, pixel: Texel<P>) -> &[P] {
pixel.cast_buf(self)
}
pub fn as_mut_texels<P>(&mut self, pixel: Texel<P>) -> &mut [P] {
pixel.cast_mut_buf(self)
}
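/// Map texels from the range `src` (indices in units of `P`) through `f`,
/// writing the results starting at index `dest` (in units of `Q`), in place.
///
/// The source and destination ranges may overlap; the traversal order is
/// chosen such that no source texel is overwritten before it is read.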
pub fn map_within<P, Q>(
&mut self,
src: impl ops::RangeBounds<usize>,
dest: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
TexelMappingBuffer::map_within(self, src, dest, f, p, q)
}
}
impl TexelMappingBuffer for buf {
fn map_forward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
for idx in 0..len {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.copy_val(&self.as_texels(p)[source_idx]);
let target = f(source);
self.as_mut_texels(q)[target_idx] = target;
}
}
fn map_backward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
for idx in (0..len).rev() {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.copy_val(&self.as_texels(p)[source_idx]);
let target = f(source);
self.as_mut_texels(q)[target_idx] = target;
}
}
fn texel_len<P>(&self, texel: Texel<P>) -> usize {
self.as_texels(texel).len()
}
}
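/// The strategy backing the `map_within` methods of the three buffer
/// flavors: an overlapping, in-place texel conversion split into a forward
/// and a backward pass.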
trait TexelMappingBuffer {
fn map_forward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
);
fn map_backward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
);
fn texel_len<P>(&self, texel: Texel<P>) -> usize;
fn map_within<P, Q>(
&mut self,
src: impl ops::RangeBounds<usize>,
dest: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
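// Returns the smallest index at which `start_byte_diff - idx * size_diff`
// is no longer positive, i.e. where the per-element byte offsets of source
// and destination cross; `None` if they never cross. The callers use this
// to decide how many elements must be handled by the descending pass so
// that no texel is overwritten before it has been read.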
fn backwards_past_the_end(start_byte_diff: isize, size_diff: isize) -> Option<usize> {
assert!(size_diff >= 0);
if size_diff == 0 {
if start_byte_diff > 0 {
// The destination stays past the source at every index.
None
} else {
Some(0)
}
} else if start_byte_diff < 0 {
Some(0)
} else {
let floor = start_byte_diff / size_diff;
let ceil = (floor as usize) + usize::from(start_byte_diff % size_diff != 0);
Some(ceil)
}
}
let p_start = match src.start_bound() {
ops::Bound::Included(&bound) => bound,
ops::Bound::Excluded(&bound) => bound
.checked_add(1)
.expect("range start bound overflows"),
ops::Bound::Unbounded => 0,
};
let p_end = match src.end_bound() {
ops::Bound::Excluded(&bound) => bound,
ops::Bound::Included(&bound) => bound
.checked_add(1)
.expect("range end bound overflows"),
ops::Bound::Unbounded => self.texel_len(p),
};
let len = p_end.checked_sub(p_start).expect("range start exceeds range end");
let q_start = dest;
let _ = self
.texel_len(p)
.checked_sub(p_start)
.and_then(|slice| slice.checked_sub(len))
.expect("Source out of bounds");
let _ = self
.texel_len(q)
.checked_sub(q_start)
.and_then(|slice| slice.checked_sub(len))
.expect("Destination out of bounds");
assert!(p.size() as isize > 0);
assert!(q.size() as isize > 0);
if p.size() >= q.size() {
let start_diff = (q.size() * q_start).wrapping_sub(p.size() * p_start) as isize;
let size_diff = p.size() as isize - q.size() as isize;
let backwards_end = backwards_past_the_end(start_diff, size_diff)
.unwrap_or(len)
.min(len);
self.map_backward(p_start, q_start, backwards_end, &f, p, q);
self.map_forward(
p_start + backwards_end,
q_start + backwards_end,
len - backwards_end,
&f,
p,
q,
);
} else {
let start_diff = (p.size() * p_start).wrapping_sub(q.size() * q_start) as isize;
let size_diff = q.size() as isize - p.size() as isize;
let backwards_end = backwards_past_the_end(start_diff, size_diff)
.unwrap_or(len)
.min(len);
self.map_backward(
p_start + backwards_end,
q_start + backwards_end,
len - backwards_end,
&f,
p,
q,
);
self.map_forward(p_start, q_start, backwards_end, &f, p, q);
}
}
}
impl From<&'_ [u8]> for Buffer {
fn from(content: &'_ [u8]) -> Self {
let mut buffer = Buffer::new(content.len());
buffer[..content.len()].copy_from_slice(content);
buffer
}
}
impl From<&'_ [u8]> for AtomicBuffer {
fn from(values: &'_ [u8]) -> Self {
let chunks = values.chunks_exact(MAX_ALIGN);
let remainder = chunks.remainder();
let capacity = Buffer::alloc_len(values.len());
let mut buffer = Vec::with_capacity(capacity);
buffer.extend(chunks.map(|arr| {
let mut data = MaxAligned([0; MAX_ALIGN]);
data.0.copy_from_slice(arr);
MaxAtomic::new(data)
}));
if !remainder.is_empty() {
let mut data = MaxAligned([0; MAX_ALIGN]);
data.0[..remainder.len()].copy_from_slice(remainder);
buffer.push(MaxAtomic::new(data));
}
AtomicBuffer {
inner: buffer.into(),
}
}
}
impl From<Buffer> for AtomicBuffer {
fn from(values: Buffer) -> Self {
Self::from(values.as_bytes())
}
}
impl From<&'_ [u8]> for CellBuffer {
fn from(values: &'_ [u8]) -> Self {
let chunks = values.chunks_exact(MAX_ALIGN);
let remainder = chunks.remainder();
let capacity = Buffer::alloc_len(values.len());
let mut buffer = Vec::with_capacity(capacity);
buffer.extend(chunks.map(|arr| {
let mut data = [0; MAX_ALIGN];
data.copy_from_slice(arr);
MaxCell(cell::Cell::new(data))
}));
if !remainder.is_empty() {
let mut data = [0; MAX_ALIGN];
data[..remainder.len()].copy_from_slice(remainder);
buffer.push(MaxCell(cell::Cell::new(data)));
}
CellBuffer {
inner: buffer.into(),
}
}
}
impl From<Buffer> for CellBuffer {
fn from(values: Buffer) -> Self {
Self::from(values.as_bytes())
}
}
impl From<&'_ buf> for Buffer {
fn from(content: &'_ buf) -> Self {
content.to_owned()
}
}
impl Default for &'_ buf {
fn default() -> Self {
buf::new(&[])
}
}
impl Default for &'_ mut buf {
fn default() -> Self {
buf::new_mut(&mut [])
}
}
impl borrow::Borrow<buf> for Buffer {
fn borrow(&self) -> &buf {
&**self
}
}
impl borrow::BorrowMut<buf> for Buffer {
fn borrow_mut(&mut self) -> &mut buf {
&mut **self
}
}
impl alloc::borrow::ToOwned for buf {
type Owned = Buffer;
fn to_owned(&self) -> Buffer {
let mut buffer = Buffer::new(self.len());
buffer.as_bytes_mut().copy_from_slice(self);
buffer
}
}
impl ops::Deref for Buffer {
type Target = buf;
fn deref(&self) -> &buf {
self.as_buf()
}
}
impl ops::DerefMut for Buffer {
fn deref_mut(&mut self) -> &mut buf {
self.as_buf_mut()
}
}
impl ops::Deref for AtomicBuffer {
type Target = atomic_buf;
fn deref(&self) -> &atomic_buf {
atomic_buf::from_slice(&self.inner)
}
}
impl ops::Deref for CellBuffer {
type Target = cell_buf;
fn deref(&self) -> &cell_buf {
cell_buf::from_slice(&self.inner)
}
}
impl ops::Deref for buf {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.as_bytes()
}
}
impl ops::DerefMut for buf {
fn deref_mut(&mut self) -> &mut [u8] {
self.as_bytes_mut()
}
}
impl cmp::PartialEq for buf {
fn eq(&self, other: &buf) -> bool {
self.as_bytes() == other.as_bytes()
}
}
impl cmp::Eq for buf {}
impl cmp::PartialEq for Buffer {
fn eq(&self, other: &Buffer) -> bool {
self.as_bytes() == other.as_bytes()
}
}
impl cmp::Eq for Buffer {}
impl ops::Index<ops::RangeTo<usize>> for buf {
type Output = buf;
fn index(&self, idx: ops::RangeTo<usize>) -> &buf {
self.truncate(idx.end)
}
}
impl ops::IndexMut<ops::RangeTo<usize>> for buf {
fn index_mut(&mut self, idx: ops::RangeTo<usize>) -> &mut buf {
self.truncate_mut(idx.end)
}
}
impl cell_buf {
pub fn new<T>(data: &T) -> &Self
where
T: AsRef<[MaxCell]> + ?Sized,
{
cell_buf::from_slice(data.as_ref())
}
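/// The length of this buffer in bytes.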
pub fn len(&self) -> usize {
self.0.as_slice_of_cells().len()
}
#[must_use = "Does not mutate self"]
#[track_caller]
pub fn truncate(&self, at: usize) -> &Self {
Self::from_bytes(&self.0.as_slice_of_cells()[..at]).unwrap()
}
#[track_caller]
pub fn split_at(&self, at: usize) -> (&Self, &Self) {
assert!(at % MAX_ALIGN == 0);
let (a, b) = self.0.as_slice_of_cells().split_at(at);
let a = Self::from_bytes(a).expect("was previously aligned");
let b = Self::from_bytes(b).expect("asserted to be aligned");
(a, b)
}
pub fn as_texels<P>(&self, texel: Texel<P>) -> &cell::Cell<[P]> {
let slice = self.0.as_slice_of_cells();
texel
.try_to_cell(slice)
.expect("A cell_buf is always aligned")
}
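/// In-place overlapping texel mapping; see [`buf::map_within`] for the
/// semantics of the arguments.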
pub fn map_within<P, Q>(
&self,
src: impl ops::RangeBounds<usize>,
dest: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let mut that = self;
TexelMappingBuffer::map_within(&mut that, src, dest, f, p, q)
}
}
impl cmp::PartialEq for cell_buf {
fn eq(&self, other: &Self) -> bool {
crate::texels::U8.cell_memory_eq(self.0.as_slice_of_cells(), other.0.as_slice_of_cells())
}
}
impl cmp::PartialEq<[u8]> for cell_buf {
fn eq(&self, other: &[u8]) -> bool {
crate::texels::U8.cell_bytes_eq(self.0.as_slice_of_cells(), other)
}
}
impl cmp::Eq for cell_buf {}
impl cmp::PartialEq for CellBuffer {
fn eq(&self, other: &Self) -> bool {
**self == **other
}
}
impl cmp::Eq for CellBuffer {}
impl TexelMappingBuffer for &'_ cell_buf {
fn map_forward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let src_buffer = self.as_texels(p).as_slice_of_cells();
let target_buffer = self.as_texels(q).as_slice_of_cells();
for idx in 0..len {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.copy_cell(&src_buffer[source_idx]);
let target = f(source);
target_buffer[target_idx].set(target);
}
}
fn map_backward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let src_buffer = self.as_texels(p).as_slice_of_cells();
let target_buffer = self.as_texels(q).as_slice_of_cells();
for idx in (0..len).rev() {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.copy_cell(&src_buffer[source_idx]);
let target = f(source);
target_buffer[target_idx].set(target);
}
}
fn texel_len<P>(&self, texel: Texel<P>) -> usize {
self.as_texels(texel).as_slice_of_cells().len()
}
}
impl atomic_buf {
pub fn new<T>(data: &T) -> &Self
where
T: AsRef<[MaxAtomic]> + ?Sized,
{
atomic_buf::from_slice(data.as_ref())
}
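/// The length of this buffer in bytes.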
pub fn len(&self) -> usize {
core::mem::size_of_val(self)
}
pub fn as_buf_mut(&mut self) -> &mut buf {
buf::from_bytes_mut(atomic_buf::part_mut_slice(&mut self.0)).unwrap()
}
#[track_caller]
pub fn split_at(&self, at: usize) -> (&Self, &Self) {
use crate::texels::U8;
assert!(at % MAX_ALIGN == 0);
let slice = self.as_texels(U8);
let (a, b) = slice.split_at(at);
let left = atomic_buf::from_bytes(a).expect("was previously aligned");
let right = atomic_buf::from_bytes(b).expect("was previously aligned");
(left, right)
}
pub fn as_texels<P>(&self, texel: Texel<P>) -> AtomicSliceRef<P> {
use crate::texels::U8;
let buffer = AtomicSliceRef {
buf: self,
start: 0,
end: core::mem::size_of_val(self),
texel: U8,
};
texel
.try_to_atomic(buffer)
.expect("An atomic_buf is always aligned")
}
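/// In-place overlapping texel mapping; see [`buf::map_within`] for the
/// semantics of the arguments.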
pub fn map_within<P, Q>(
&self,
src: impl ops::RangeBounds<usize>,
dest: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let mut that = self;
TexelMappingBuffer::map_within(&mut that, src, dest, f, p, q)
}
}
impl cmp::PartialEq for atomic_buf {
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
if (self as *const atomic_buf).addr() == (other as *const atomic_buf).addr() {
return true;
}
let lhs = self.0.iter();
let rhs = other.0.iter();
lhs.zip(rhs)
.all(|(a, b)| a.load(atomic::Ordering::Relaxed) == b.load(atomic::Ordering::Relaxed))
}
}
impl cmp::PartialEq<[u8]> for atomic_buf {
fn eq(&self, other: &[u8]) -> bool {
if self.len() != other.len() {
return false;
}
let lhs = self.0.iter();
let rhs = other.chunks_exact(mem::size_of::<AtomicPart>());
lhs.zip(rhs)
.all(|(a, b)| a.load(atomic::Ordering::Relaxed).to_ne_bytes() == *b)
}
}
impl cmp::Eq for atomic_buf {}
impl cmp::PartialEq for AtomicBuffer {
fn eq(&self, other: &Self) -> bool {
**self == **other
}
}
impl cmp::Eq for AtomicBuffer {}
impl TexelMappingBuffer for &'_ atomic_buf {
fn map_forward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let src_buffer = self.as_texels(p);
let target_buffer = self.as_texels(q);
for idx in 0..len {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.load_atomic(src_buffer.index_one(source_idx));
let target = f(source);
q.store_atomic(target_buffer.index_one(target_idx), target);
}
}
fn map_backward<P, Q>(
&mut self,
src: usize,
dest: usize,
len: usize,
f: impl Fn(P) -> Q,
p: Texel<P>,
q: Texel<Q>,
) {
let src_buffer = self.as_texels(p);
let target_buffer = self.as_texels(q);
for idx in (0..len).rev() {
let source_idx = idx + src;
let target_idx = idx + dest;
let source = p.load_atomic(src_buffer.index_one(source_idx));
let target = f(source);
q.store_atomic(target_buffer.index_one(target_idx), target);
}
}
fn texel_len<P>(&self, texel: Texel<P>) -> usize {
self.as_texels(texel).len()
}
}
impl<'lt, P> AtomicSliceRef<'lt, P> {
#[track_caller]
pub fn index_one(self, idx: usize) -> AtomicRef<'lt, P> {
assert!(idx < self.len());
AtomicRef {
buf: self.buf,
start: self.start + idx * self.texel.size(),
texel: self.texel,
}
}
pub fn get_bounds(self, bounds: (ops::Bound<usize>, ops::Bound<usize>)) -> Option<Self> {
let (start, end) = bounds;
let len = self.len();
let start = match start {
ops::Bound::Included(start) => start,
ops::Bound::Excluded(start) => start.checked_add(1)?,
ops::Bound::Unbounded => 0,
};
let end = match end {
ops::Bound::Included(end) => end.checked_add(1)?,
ops::Bound::Excluded(end) => end,
ops::Bound::Unbounded => len,
};
if start > end || end > len {
None
} else {
Some(AtomicSliceRef {
buf: self.buf,
start: start * self.texel.size(),
end: end * self.texel.size(),
texel: self.texel,
})
}
}
pub fn get(self, bounds: impl core::ops::RangeBounds<usize>) -> Option<Self> {
let start = bounds.start_bound().cloned();
let end = bounds.end_bound().cloned();
self.get_bounds((start, end))
}
#[track_caller]
pub fn index(self, bounds: impl core::ops::RangeBounds<usize>) -> Self {
#[cold]
fn panic_on_bounds() -> ! {
panic!("Bounds are out of range");
}
match self.get(bounds) {
Some(some) => some,
None => panic_on_bounds(),
}
}
#[track_caller]
pub fn read_from_slice(&self, data: &[P]) {
self.texel.store_atomic_slice(*self, data);
}
#[track_caller]
pub fn write_to_slice(&self, data: &mut [P]) {
self.texel.load_atomic_slice(*self, data);
}
#[track_caller]
pub fn split_at(self, at: usize) -> (Self, Self) {
let left = self.index(..at);
let right = self.index(at..);
(left, right)
}
#[must_use = "Does not mutate self"]
#[track_caller]
pub fn truncate_bytes(self, at: usize) -> Self {
let len = (self.end - self.start).min(at);
AtomicSliceRef {
end: self.start + len,
..self
}
}
pub(crate) fn from_ref(value: AtomicRef<'lt, P>) -> Self {
AtomicSliceRef {
buf: value.buf,
start: value.start,
end: value.start + value.texel.size(),
texel: value.texel,
}
}
pub fn len(&self) -> usize {
self.end.saturating_sub(self.start) / self.texel.size()
}
}
impl<P> Clone for AtomicSliceRef<'_, P> {
fn clone(&self) -> Self {
AtomicSliceRef { ..*self }
}
}
impl<P> Copy for AtomicSliceRef<'_, P> {}
impl<P> Clone for AtomicRef<'_, P> {
fn clone(&self) -> Self {
AtomicRef { ..*self }
}
}
impl<P> Copy for AtomicRef<'_, P> {}
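/// A typed index range into a [`buf`], stored as byte offsets in units of
/// the texel alignment so that indexing can recover a well-aligned slice.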
#[derive(Clone, Copy, Debug)]
pub struct TexelRange<T> {
texel: Texel<T>,
start_per_align: usize,
end_per_align: usize,
}
impl<T> TexelRange<T> {
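/// Construct a range from texel indices, or `None` on arithmetic overflow.
///
/// A start beyond the end is clamped to the end, yielding an empty range.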
pub fn new(texel: Texel<T>, range: ops::Range<usize>) -> Option<Self> {
let end_byte = range
.end
.checked_mul(texel.size())
.filter(|&n| n <= isize::MAX as usize)?;
let start_byte = (range.start.min(range.end))
.checked_mul(texel.size())
.filter(|&n| n <= isize::MAX as usize)?;
debug_assert!(
end_byte % texel.align() == 0,
"Texel must be valid for its type layout"
);
debug_assert!(
start_byte % texel.align() == 0,
"Texel must be valid for its type layout"
);
Some(TexelRange {
texel,
start_per_align: start_byte / texel.align(),
end_per_align: end_byte / texel.align(),
})
}
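/// Construct a range from byte offsets, or `None` if either offset is not
/// aligned to the texel type.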
pub fn from_byte_range(texel: Texel<T>, range: ops::Range<usize>) -> Option<Self> {
let start_byte = range.start;
let end_byte = range.end;
if start_byte % texel.align() != 0 || end_byte % texel.align() != 0 {
return None;
}
Some(TexelRange {
texel,
start_per_align: start_byte / texel.align(),
end_per_align: end_byte / texel.align(),
})
}
}
impl<T> core::ops::Index<TexelRange<T>> for buf {
type Output = [T];
fn index(&self, index: TexelRange<T>) -> &Self::Output {
let scale = index.texel.align();
let bytes = &self.0[scale * index.start_per_align..scale * index.end_per_align];
let slice = index.texel.try_to_slice(bytes);
slice.expect("byte indices validly aligned")
}
}
impl<T> core::ops::IndexMut<TexelRange<T>> for buf {
fn index_mut(&mut self, index: TexelRange<T>) -> &mut Self::Output {
let scale = index.texel.align();
let bytes = &mut self.0[scale * index.start_per_align..scale * index.end_per_align];
let slice = index.texel.try_to_slice_mut(bytes);
slice.expect("byte indices validly aligned")
}
}
impl Default for &'_ cell_buf {
fn default() -> Self {
cell_buf::new(&[])
}
}
impl Default for &'_ atomic_buf {
fn default() -> Self {
atomic_buf::new(&[])
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::texels::{MAX, U16, U32, U8};
// `repr(C)` pins the zero-sized aligned field to offset 0, so the payload
// in field 1 is guaranteed to be maximally aligned as the tests assume.
#[repr(C)]
struct AlignMeUp<N>([MaxAligned; 0], N);
#[test]
fn single_max_element() {
let mut buffer = Buffer::new(mem::size_of::<MaxAligned>());
let slice = buffer.as_mut_texels(MAX);
assert!(slice.len() == 1);
}
#[test]
fn growing() {
let mut buffer = Buffer::new(0);
assert_eq!(buffer.capacity(), 0);
buffer.grow_to(mem::size_of::<MaxAligned>());
let capacity = buffer.capacity();
assert!(buffer.capacity() > 0);
buffer.grow_to(capacity);
assert_eq!(buffer.capacity(), capacity);
buffer.grow_to(0);
assert_eq!(buffer.capacity(), capacity);
buffer.grow_to(capacity + 1);
assert!(buffer.capacity() > capacity);
}
#[test]
fn reinterpret() {
let mut buffer = Buffer::new(mem::size_of::<u32>());
assert!(buffer.as_mut_texels(U32).len() >= 1);
buffer
.as_mut_texels(U16)
.iter_mut()
.for_each(|p| *p = 0x0f0f);
buffer
.as_texels(U32)
.iter()
.for_each(|p| assert_eq!(*p, 0x0f0f0f0f));
buffer
.as_texels(U8)
.iter()
.for_each(|p| assert_eq!(*p, 0x0f));
buffer
.as_mut_texels(U8)
.iter_mut()
.enumerate()
.for_each(|(idx, p)| *p = idx as u8);
assert_eq!(u32::from_be(buffer.as_texels(U32)[0]), 0x00010203);
}
#[test]
fn mapping_great_to_small() {
const LEN: usize = 10;
let mut buffer = Buffer::new(LEN * mem::size_of::<u32>());
buffer
.as_mut_texels(U32)
.iter_mut()
.enumerate()
.for_each(|(idx, p)| *p = idx as u32);
buffer.map_within(..LEN, 0, |n: u32| n as u8, U32, U8);
buffer.map_within(..LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
buffer.as_texels(U32)[..LEN].to_vec(),
(0..LEN as u32).collect::<Vec<_>>()
);
buffer.map_within(0..LEN, 3 * LEN, |n: u32| n as u8, U32, U8);
buffer.map_within(3 * LEN..4 * LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
buffer.as_texels(U32)[..LEN].to_vec(),
(0..LEN as u32).collect::<Vec<_>>()
);
}
#[test]
fn cell_buffer() {
let data = [0, 0, 255, 0, 255, 0, 255, 0, 0];
let buffer = CellBuffer::from(&data[..]);
assert_eq!(buffer.capacity(), Buffer::alloc_len(data.len()) * MAX_ALIGN);
let alternative = CellBuffer::with_buffer(buffer.to_owned());
assert_eq!(buffer.capacity(), alternative.capacity());
let contents: &cell_buf = &*buffer;
let slice: &[cell::Cell<u8>] = contents.as_texels(U8).as_slice_of_cells();
assert!(cell_buf::from_bytes(slice).is_some());
}
#[test]
fn atomic_buffer() {
let data = [0, 0, 255, 0, 255, 0, 255, 0, 0];
let buffer = AtomicBuffer::from(&data[..]);
assert_eq!(buffer.capacity(), Buffer::alloc_len(data.len()) * MAX_ALIGN);
let alternative = AtomicBuffer::with_buffer(buffer.to_owned());
assert_eq!(buffer.capacity(), alternative.capacity());
let contents: &atomic_buf = &*buffer;
let slice: AtomicSliceRef<u8> = contents.as_texels(U8);
assert!(atomic_buf::from_bytes(slice).is_some());
}
#[test]
fn mapping_cells() {
const LEN: usize = 10;
let buffer = CellBuffer::new(LEN * mem::size_of::<u32>());
let output_tap = buffer.clone();
assert!(buffer.ptr_eq(&output_tap));
buffer
.as_texels(U32)
.as_slice_of_cells()
.iter()
.enumerate()
.for_each(|(idx, p)| p.set(idx as u32));
buffer.map_within(..LEN, 0, |n: u32| n as u8, U32, U8);
buffer.map_within(..LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
output_tap.as_texels(U32).as_slice_of_cells()[..LEN]
.iter()
.map(cell::Cell::get)
.collect::<Vec<_>>(),
(0..LEN as u32).collect::<Vec<_>>()
);
buffer.map_within(0..LEN, 3 * LEN, |n: u32| n as u8, U32, U8);
buffer.map_within(3 * LEN..4 * LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
output_tap.as_texels(U32).as_slice_of_cells()[..LEN]
.iter()
.map(cell::Cell::get)
.collect::<Vec<_>>(),
(0..LEN as u32).collect::<Vec<_>>()
);
}
#[test]
fn mapping_atomics() {
const LEN: usize = 10;
let mut initial_state = Buffer::new(LEN * mem::size_of::<u32>());
initial_state
.as_mut_texels(U32)
.iter_mut()
.enumerate()
.for_each(|(idx, p)| *p = idx as u32);
let buffer = AtomicBuffer::with_buffer(initial_state);
let output_tap = buffer.clone();
buffer.map_within(..LEN, 0, |n: u32| n as u8, U32, U8);
buffer.map_within(..LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
output_tap.to_owned().as_texels(U32)[..LEN].to_vec(),
(0..LEN as u32).collect::<Vec<_>>()
);
buffer.map_within(0..LEN, 3 * LEN, |n: u32| n as u8, U32, U8);
buffer.map_within(3 * LEN..4 * LEN, 0, |n: u8| n as u32, U8, U32);
assert_eq!(
output_tap.to_owned().as_texels(U32)[..LEN].to_vec(),
(0..LEN as u32).collect::<Vec<_>>()
);
}
#[test]
fn cell_construction() {
let data = [const { MaxCell::zero() }; 10];
let _empty = cell_buf::new(&data[..0]);
let cell = cell_buf::new(&data);
let (first, tail) = cell.split_at(MAX_ALIGN);
let another_first = cell_buf::new(&data[..1]);
let data: Vec<_> = (0u8..).take(MAX_ALIGN).collect();
U8.store_cell_slice(first.as_texels(U8).as_slice_of_cells(), &data);
let mut alternative: Vec<_> = (1u8..).take(MAX_ALIGN).collect();
U8.load_cell_slice(
another_first.as_texels(U8).as_slice_of_cells(),
&mut alternative,
);
assert_eq!(data, alternative);
U8.load_cell_slice(
tail.truncate(MAX_ALIGN).as_texels(U8).as_slice_of_cells(),
&mut alternative,
);
assert_ne!(data, alternative);
}
#[test]
#[should_panic]
fn cell_unaligned_split() {
let data = [const { MaxCell::zero() }; 10];
cell_buf::new(&data).split_at(1);
}
#[test]
#[should_panic]
fn cell_oob_split() {
let data = [const { MaxCell::zero() }; 1];
cell_buf::new(&data).split_at(2 * MAX_ALIGN);
}
#[test]
fn cell_empty() {
let empty = cell_buf::new(&[]);
assert_eq!(empty.len(), 0);
}
#[test]
fn cell_from_bytes() {
const SIZE: usize = 16;
let data = [0u8; SIZE].map(cell::Cell::new);
let data: AlignMeUp<[_; SIZE]> = AlignMeUp([], data);
let aligned = cell_buf::from_bytes(&data.1[..]).expect("this was properly aligned");
assert_eq!(aligned.len(), SIZE);
}
#[test]
fn cell_unaligned_from_bytes() {
let data = [const { MaxCell::zero() }; 1];
let unaligned = &cell_buf::new(&data).as_texels(U8).as_slice_of_cells()[1..];
assert!(cell_buf::from_bytes(unaligned).is_none());
}
#[test]
fn cell_from_mut_bytes() {
const SIZE: usize = 16;
let mut data: AlignMeUp<[_; SIZE]> = AlignMeUp([], [0u8; SIZE]);
let aligned = cell_buf::from_bytes_mut(&mut data.1[..]).expect("this was properly aligned");
assert_eq!(aligned.len(), SIZE);
}
#[test]
fn cell_unaligned_from_mut_bytes() {
const SIZE: usize = 16;
let mut data: AlignMeUp<[_; SIZE]> = AlignMeUp([], [0; SIZE]);
let unaligned = &mut data.1[1..];
assert!(cell_buf::from_bytes_mut(unaligned).is_none());
}
#[test]
fn cell_equality() {
let data = [const { MaxCell::zero() }; 3];
let lhs = cell_buf::new(&data[0..1]);
let rhs = cell_buf::new(&data[1..2]);
let uneq = cell_buf::new(&data[2..3]);
uneq.as_texels(U8).as_slice_of_cells()[0].set(1);
assert!(lhs == lhs, "Must be equal with itself");
assert!(lhs == rhs, "Must be equal with same data");
assert!(lhs != uneq, "Must only be equal with same data");
let mut buffer = [0x42; mem::size_of::<MaxCell>()];
assert!(*lhs != buffer[..], "Must only be equal with its data");
U8.load_cell_slice(lhs.as_texels(U8).as_slice_of_cells(), &mut buffer);
assert!(*lhs == buffer[..], "Must be equal with its data");
}
#[test]
fn atomic_empty() {
let empty = atomic_buf::new(&[]);
assert_eq!(empty.len(), 0);
}
#[test]
fn atomic_construction() {
let data = [const { MaxAtomic::zero() }; 10];
let cell = atomic_buf::new(&data);
let (first, tail) = cell.split_at(MAX_ALIGN);
let another_first = atomic_buf::new(&data[..1]);
assert_eq!(another_first.as_texels(U8).len(), MAX_ALIGN);
assert_eq!(first.as_texels(U8).len(), MAX_ALIGN);
let data: Vec<_> = (0u8..).take(MAX_ALIGN).collect();
first.as_texels(U8).read_from_slice(&data);
let mut alternative: Vec<_> = (1u8..).take(MAX_ALIGN).collect();
another_first.as_texels(U8).write_to_slice(&mut alternative);
assert_eq!(data, alternative);
tail.as_texels(U8)
.index(..MAX_ALIGN)
.write_to_slice(&mut alternative);
assert_ne!(data, alternative);
let another_first = atomic_buf::from_bytes(first.as_texels(U8))
.expect("the whole buffer is always aligned");
another_first.as_texels(U8).write_to_slice(&mut alternative);
assert_eq!(data, alternative);
}
#[test]
fn atomic_from_bytes() {
let data = [const { MaxAtomic::zero() }; 1];
let cell = atomic_buf::new(&data);
let data = cell.as_texels(U8);
let new_buf = atomic_buf::from_bytes(data).expect("this was properly aligned");
assert_eq!(new_buf.len(), MAX_ALIGN);
}
#[test]
fn atomic_unaligned_from_bytes() {
let data = [const { MaxAtomic::zero() }; 1];
let cell = atomic_buf::new(&data);
let unaligned = cell.as_texels(U8).index(1..);
assert!(atomic_buf::from_bytes(unaligned).is_none());
}
#[test]
fn atomic_from_mut_bytes() {
const SIZE: usize = MAX_ALIGN * 2;
let mut data: AlignMeUp<[_; SIZE]> = AlignMeUp([], [0u8; SIZE]);
let aligned = atomic_buf::from_bytes_mut(&mut data.1[..]).expect("this was properly aligned");
assert_eq!(aligned.len(), SIZE);
}
#[test]
fn atomic_too_small_from_mut_bytes() {
const SIZE: usize = MAX_ALIGN / 2;
let mut data: AlignMeUp<[_; SIZE]> = AlignMeUp([], [0; SIZE]);
let unaligned = &mut data.1[1..];
assert!(atomic_buf::from_bytes_mut(unaligned).is_none());
}
#[test]
fn atomic_unaligned_from_mut_bytes() {
const SIZE: usize = 16;
let mut data: AlignMeUp<[_; SIZE]> = AlignMeUp([], [0; SIZE]);
let unaligned = &mut data.1[1..];
assert!(atomic_buf::from_bytes_mut(unaligned).is_none());
}
#[test]
fn atomic_equality() {
let data = [const { MaxAtomic::zero() }; 3];
let lhs = atomic_buf::new(&data[0..1]);
let rhs = atomic_buf::new(&data[1..2]);
let uneq = atomic_buf::new(&data[2..3]);
U8.store_atomic(uneq.as_texels(U8).index_one(0), 1);
assert!(lhs == lhs, "Must be equal with itself");
assert!(lhs == rhs, "Must be equal with same data");
assert!(lhs != uneq, "Must only be equal with same data");
let mut buffer = [0x42; mem::size_of::<MaxAtomic>()];
assert!(*lhs != buffer[..], "Must only be equal with its data");
U8.load_atomic_slice(lhs.as_texels(U8), &mut buffer);
assert!(*lhs == buffer[..], "Must be equal with its data");
}
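// Usage sketches for the helpers documented above: the rounding performed
// by `Buffer::alloc_len`, and typed indexing through `TexelRange`. Both
// rely only on behavior established in this module.
#[test]
fn alloc_len_rounds_up() {
const CHUNK: usize = mem::size_of::<MaxAligned>();
// Any non-zero remainder adds one more chunk to the allocation.
assert_eq!(Buffer::alloc_len(0), 0);
assert_eq!(Buffer::alloc_len(1), 1);
assert_eq!(Buffer::alloc_len(CHUNK), 1);
assert_eq!(Buffer::alloc_len(CHUNK + 1), 2);
}
#[test]
fn texel_range_indexing() {
let mut buffer = Buffer::new(4 * mem::size_of::<u32>());
buffer
.as_mut_texels(U32)
.iter_mut()
.enumerate()
.for_each(|(idx, p)| *p = idx as u32);
// Index by texel positions instead of raw byte offsets.
let range = TexelRange::new(U32, 1..3).expect("in bounds and aligned");
assert_eq!(buffer[range], [1, 2]);
}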
}