use std::{ptr, slice::SliceIndex, sync::atomic::Ordering};
use portable_atomic::AtomicU8;
use crate::{
Context, JsResult,
builtins::typed_array::{ClampedU8, Element, TypedArrayElement, TypedArrayKind},
object::JsObject,
};
use super::ArrayBuffer;
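
/// A const pointer to the bytes of a buffer, which can be plain bytes or shared
/// (atomic) bytes.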
#[derive(Clone, Copy)]
pub(crate) enum BytesConstPtr {
Bytes(*const u8),
AtomicBytes(*const AtomicU8),
}
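
/// A mutable pointer to the bytes of a buffer. The shared variant stays a
/// `*const AtomicU8`, since atomic bytes are written through shared references.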
#[derive(Clone, Copy)]
pub(crate) enum BytesMutPtr {
Bytes(*mut u8),
AtomicBytes(*const AtomicU8),
}
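
/// An immutable reference to the bytes of a (possibly shared) buffer.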
#[derive(Debug, Clone, Copy)]
pub(crate) enum SliceRef<'a> {
Slice(&'a [u8]),
AtomicSlice(&'a [AtomicU8]),
}

impl SliceRef<'_> {
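    /// Gets the byte length of this slice.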
pub(crate) fn len(&self) -> usize {
match self {
Self::Slice(buf) => buf.len(),
Self::AtomicSlice(buf) => buf.len(),
}
}
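
    /// Gets a subslice of this slice.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.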
pub(crate) fn subslice<I>(&self, index: I) -> SliceRef<'_>
where
I: SliceIndex<[u8], Output = [u8]> + SliceIndex<[AtomicU8], Output = [AtomicU8]>,
{
match self {
Self::Slice(buffer) => SliceRef::Slice(buffer.get(index).expect("index out of bounds")),
Self::AtomicSlice(buffer) => {
SliceRef::AtomicSlice(buffer.get(index).expect("index out of bounds"))
}
}
}
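
    /// Gets the starting address of this slice. Only used in debug assertions.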
#[cfg(debug_assertions)]
pub(crate) fn addr(&self) -> usize {
match self {
Self::Slice(buf) => buf.as_ptr().addr(),
Self::AtomicSlice(buf) => buf.as_ptr().addr(),
}
}
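
    /// Gets a pointer to the first byte of this slice.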
pub(crate) fn as_ptr(&self) -> BytesConstPtr {
match self {
SliceRef::Slice(s) => BytesConstPtr::Bytes(s.as_ptr()),
SliceRef::AtomicSlice(s) => BytesConstPtr::AtomicBytes(s.as_ptr()),
}
}
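
    /// Loads a value of the given `TypedArrayKind` from the start of this slice.
    ///
    /// # Safety
    ///
    /// - There must be enough bytes in the slice to read an element of type `kind`.
    /// - The slice must be properly aligned for an element of type `kind`.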
pub(crate) unsafe fn get_value(
&self,
kind: TypedArrayKind,
order: Ordering,
) -> TypedArrayElement {
unsafe fn read_elem<T: Element>(buffer: SliceRef<'_>, order: Ordering) -> T {
#[cfg(debug_assertions)]
{
assert!(buffer.len() >= size_of::<T>());
assert_eq!(buffer.addr() % align_of::<T>(), 0);
}
unsafe { T::read(buffer).load(order) }
}
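        // `SliceRef` is `Copy`, so pass a copy of the slice down to `read_elem`.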
let buffer = *self;
unsafe {
match kind {
TypedArrayKind::Int8 => read_elem::<i8>(buffer, order).into(),
TypedArrayKind::Uint8 => read_elem::<u8>(buffer, order).into(),
TypedArrayKind::Uint8Clamped => read_elem::<ClampedU8>(buffer, order).into(),
TypedArrayKind::Int16 => read_elem::<i16>(buffer, order).into(),
TypedArrayKind::Uint16 => read_elem::<u16>(buffer, order).into(),
TypedArrayKind::Int32 => read_elem::<i32>(buffer, order).into(),
TypedArrayKind::Uint32 => read_elem::<u32>(buffer, order).into(),
TypedArrayKind::BigInt64 => read_elem::<i64>(buffer, order).into(),
TypedArrayKind::BigUint64 => read_elem::<u64>(buffer, order).into(),
#[cfg(feature = "float16")]
TypedArrayKind::Float16 => {
read_elem::<crate::builtins::typed_array::Float16>(buffer, order).into()
}
TypedArrayKind::Float32 => read_elem::<f32>(buffer, order).into(),
TypedArrayKind::Float64 => read_elem::<f64>(buffer, order).into(),
}
}
}
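
    /// Copies the contents of this slice into a newly allocated `ArrayBuffer`,
    /// roughly corresponding to the `CloneArrayBuffer` abstract operation.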
pub(crate) fn clone(&self, context: &mut Context) -> JsResult<JsObject<ArrayBuffer>> {
let target_buffer = ArrayBuffer::allocate(
&context
.realm()
.intrinsics()
.constructors()
.array_buffer()
.constructor()
.into(),
self.len() as u64,
None,
context,
)?;
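        // Scope the mutable borrow so that `target_buffer` can be returned below.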
{
let mut target_buffer = target_buffer.borrow_mut();
let target_block = target_buffer
.data_mut()
.bytes_mut()
.expect("ArrayBuffer cannot be detached here");
unsafe {
memcpy(
self.as_ptr(),
BytesMutPtr::Bytes(target_block.as_mut_ptr()),
self.len(),
);
}
}
Ok(target_buffer)
}
}

impl<'a> From<&'a [u8]> for SliceRef<'a> {
fn from(value: &'a [u8]) -> Self {
Self::Slice(value)
}
}

impl<'a> From<&'a [AtomicU8]> for SliceRef<'a> {
fn from(value: &'a [AtomicU8]) -> Self {
Self::AtomicSlice(value)
}
}
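
/// A mutable reference to the bytes of a buffer, or a reference to the bytes of a
/// shared (atomic) buffer.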
#[derive(Debug)]
pub(crate) enum SliceRefMut<'a> {
Slice(&'a mut [u8]),
AtomicSlice(&'a [AtomicU8]),
}

impl SliceRefMut<'_> {
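    /// Gets the byte length of this slice.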
pub(crate) fn len(&self) -> usize {
match self {
Self::Slice(buf) => buf.len(),
Self::AtomicSlice(buf) => buf.len(),
}
}
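
    /// Gets an immutable subslice of this slice.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.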
#[expect(unused, reason = "could still be useful in the future")]
pub(crate) fn subslice<I>(&self, index: I) -> SliceRef<'_>
where
I: SliceIndex<[u8], Output = [u8]> + SliceIndex<[AtomicU8], Output = [AtomicU8]>,
{
match self {
Self::Slice(buffer) => SliceRef::Slice(buffer.get(index).expect("index out of bounds")),
Self::AtomicSlice(buffer) => {
SliceRef::AtomicSlice(buffer.get(index).expect("index out of bounds"))
}
}
}
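
    /// Gets a mutable subslice of this slice.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.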
pub(crate) fn subslice_mut<I>(&mut self, index: I) -> SliceRefMut<'_>
where
I: SliceIndex<[u8], Output = [u8]> + SliceIndex<[AtomicU8], Output = [AtomicU8]>,
{
match self {
Self::Slice(buffer) => {
SliceRefMut::Slice(buffer.get_mut(index).expect("index out of bounds"))
}
Self::AtomicSlice(buffer) => {
SliceRefMut::AtomicSlice(buffer.get(index).expect("index out of bounds"))
}
}
}
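
    /// Gets the starting address of this slice. Only used in debug assertions.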
#[cfg(debug_assertions)]
pub(crate) fn addr(&self) -> usize {
match self {
Self::Slice(buf) => buf.as_ptr().addr(),
Self::AtomicSlice(buf) => buf.as_ptr().addr(),
}
}
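
    /// Gets a mutable pointer to the first byte of this slice.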
pub(crate) fn as_ptr(&mut self) -> BytesMutPtr {
match self {
Self::Slice(s) => BytesMutPtr::Bytes(s.as_mut_ptr()),
Self::AtomicSlice(s) => BytesMutPtr::AtomicBytes(s.as_ptr()),
}
}
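
    /// Writes `value` at the start of this slice.
    ///
    /// # Safety
    ///
    /// - There must be enough bytes in the slice to write an element of `value`'s type.
    /// - The slice must be properly aligned for an element of `value`'s type.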
pub(crate) unsafe fn set_value(&mut self, value: TypedArrayElement, order: Ordering) {
unsafe fn write_elem<T: Element>(buffer: SliceRefMut<'_>, value: T, order: Ordering) {
#[cfg(debug_assertions)]
{
assert!(buffer.len() >= size_of::<T>());
assert_eq!(buffer.addr() % align_of::<T>(), 0);
}
unsafe {
T::read_mut(buffer).store(value, order);
}
}
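        // Reborrow `self` as a by-value `SliceRefMut` with a shorter lifetime,
        // since `write_elem` takes the slice by value.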
let buffer = match self {
SliceRefMut::Slice(buf) => SliceRefMut::Slice(buf),
SliceRefMut::AtomicSlice(buf) => SliceRefMut::AtomicSlice(buf),
};
unsafe {
match value {
TypedArrayElement::Int8(e) => write_elem(buffer, e, order),
TypedArrayElement::Uint8(e) => write_elem(buffer, e, order),
TypedArrayElement::Uint8Clamped(e) => write_elem(buffer, e, order),
TypedArrayElement::Int16(e) => write_elem(buffer, e, order),
TypedArrayElement::Uint16(e) => write_elem(buffer, e, order),
TypedArrayElement::Int32(e) => write_elem(buffer, e, order),
TypedArrayElement::Uint32(e) => write_elem(buffer, e, order),
TypedArrayElement::BigInt64(e) => write_elem(buffer, e, order),
TypedArrayElement::BigUint64(e) => write_elem(buffer, e, order),
#[cfg(feature = "float16")]
TypedArrayElement::Float16(e) => write_elem(buffer, e, order),
TypedArrayElement::Float32(e) => write_elem(buffer, e, order),
TypedArrayElement::Float64(e) => write_elem(buffer, e, order),
}
}
}
}

impl<'a> From<&'a mut [u8]> for SliceRefMut<'a> {
fn from(value: &'a mut [u8]) -> Self {
Self::Slice(value)
}
}

impl<'a> From<&'a [AtomicU8]> for SliceRefMut<'a> {
fn from(value: &'a [AtomicU8]) -> Self {
Self::AtomicSlice(value)
}
}
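
/// Copies `count` bytes from the shared buffer `src` into the shared buffer `dest`,
/// one byte at a time with relaxed atomic loads and stores.
///
/// # Safety
///
/// - `src` must be valid for `count` reads of `AtomicU8`.
/// - `dest` must be valid for `count` writes of `AtomicU8`.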
pub(super) unsafe fn copy_shared_to_shared(
src: *const AtomicU8,
dest: *const AtomicU8,
count: usize,
) {
for i in 0..count {
unsafe {
(*dest.add(i)).store((*src.add(i)).load(Ordering::Relaxed), Ordering::Relaxed);
}
}
}
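
/// Like [`copy_shared_to_shared`], but copies from the last byte to the first, which
/// makes it usable when `dest` overlaps `src` at a higher address.
///
/// # Safety
///
/// - `src` must be valid for `count` reads of `AtomicU8`.
/// - `dest` must be valid for `count` writes of `AtomicU8`.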
unsafe fn copy_shared_to_shared_backwards(
src: *const AtomicU8,
dest: *const AtomicU8,
count: usize,
) {
for i in (0..count).rev() {
unsafe {
(*dest.add(i)).store((*src.add(i)).load(Ordering::Relaxed), Ordering::Relaxed);
}
}
}
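
/// Copies `count` bytes from `src` into `dest`.
///
/// # Safety
///
/// - `src` must be valid for `count` reads of bytes.
/// - `dest` must be valid for `count` writes of bytes.
/// - When both `src` and `dest` point to plain (non-atomic) bytes, the two regions
///   must not overlap, since that case uses [`ptr::copy_nonoverlapping`].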
pub(crate) unsafe fn memcpy(src: BytesConstPtr, dest: BytesMutPtr, count: usize) {
match (src, dest) {
(BytesConstPtr::Bytes(src), BytesMutPtr::Bytes(dest)) => unsafe {
ptr::copy_nonoverlapping(src, dest, count);
},
(BytesConstPtr::Bytes(src), BytesMutPtr::AtomicBytes(dest)) => unsafe {
for i in 0..count {
(*dest.add(i)).store(*src.add(i), Ordering::Relaxed);
}
},
(BytesConstPtr::AtomicBytes(src), BytesMutPtr::Bytes(dest)) => unsafe {
for i in 0..count {
*dest.add(i) = (*src.add(i)).load(Ordering::Relaxed);
}
},
(BytesConstPtr::AtomicBytes(src), BytesMutPtr::AtomicBytes(dest)) => unsafe {
copy_shared_to_shared(src, dest, count);
},
}
}
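
/// Copies `count` bytes from the offset `from` to the offset `to` within `ptr`,
/// one byte at a time starting from the lowest index.
///
/// # Safety
///
/// - `ptr` must be valid for `count` reads of bytes starting at offset `from`.
/// - `ptr` must be valid for `count` writes of bytes starting at offset `to`.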
pub(crate) unsafe fn memmove_naive(ptr: BytesMutPtr, from: usize, to: usize, count: usize) {
match ptr {
BytesMutPtr::Bytes(ptr) => unsafe {
for i in 0..count {
ptr::copy(ptr.add(from + i), ptr.add(to + i), 1);
}
},
BytesMutPtr::AtomicBytes(ptr) => unsafe {
let src = ptr.add(from);
let dest = ptr.add(to);
copy_shared_to_shared(src, dest, count);
},
}
}
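
/// Copies `count` bytes from the offset `from` to the offset `to` within `ptr`,
/// handling overlapping regions.
///
/// # Safety
///
/// - `ptr` must be valid for `count` reads of bytes starting at offset `from`.
/// - `ptr` must be valid for `count` writes of bytes starting at offset `to`.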
pub(crate) unsafe fn memmove(ptr: BytesMutPtr, from: usize, to: usize, count: usize) {
match ptr {
BytesMutPtr::Bytes(ptr) => unsafe {
let src = ptr.add(from);
let dest = ptr.add(to);
ptr::copy(src, dest, count);
},
BytesMutPtr::AtomicBytes(ptr) => unsafe {
let src = ptr.add(from);
let dest = ptr.add(to);
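            // The regions may overlap; when copying towards a higher address,
            // iterate backwards so source bytes aren't overwritten before being read.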
if src < dest {
copy_shared_to_shared_backwards(src, dest, count);
} else {
copy_shared_to_shared(src, dest, count);
}
},
}
}