extern crate alloc;
use crate::{
alloc::{GLOBAL_ALLOCATOR, NSTD_ALLOCATOR},
core::{
alloc::{
nstd_core_alloc_layout_array, nstd_core_alloc_layout_array_unchecked,
NSTDAllocError::{self, NSTD_ALLOC_ERROR_NONE},
NSTDAllocator,
},
def::{NSTDByte, NSTDErrorCode},
mem::{nstd_core_mem_copy, nstd_core_mem_copy_overlapping, nstd_core_mem_dangling_mut},
optional::NSTDOptional,
slice::{
nstd_core_slice_align, nstd_core_slice_as_ptr, nstd_core_slice_len,
nstd_core_slice_mut_new_unchecked, nstd_core_slice_new_unchecked,
nstd_core_slice_stride, NSTDSlice, NSTDSliceMut,
},
},
NSTDAny, NSTDAnyMut, NSTDBool, NSTDUInt, NSTD_NULL,
};
use alloc::vec::Vec;
use core::ptr::addr_of;
use nstdapi::nstdapi;
/// A dynamically sized, heap-allocated contiguous array type for the C API.
///
/// Element size (`stride`) and alignment are tracked at runtime rather than
/// through a generic parameter, so one struct layout serves every element
/// type across the FFI boundary.
#[nstdapi]
pub struct NSTDVec<'a> {
    /// The allocator used to manage (and eventually free) the buffer.
    allocator: &'a NSTDAllocator,
    /// Pointer to the buffer; dangling (but aligned) before the first allocation.
    ptr: NSTDAnyMut,
    /// The size of each element in bytes.
    stride: NSTDUInt,
    /// The alignment of each element in bytes.
    align: NSTDUInt,
    /// The number of initialized elements.
    len: NSTDUInt,
    /// The number of elements the buffer can hold without reallocating.
    cap: NSTDUInt,
}
impl<'a> NSTDVec<'a> {
    /// Takes ownership of a Rust [`Vec`], reusing its heap buffer.
    ///
    /// The buffer is leaked out of `vec` and will later be freed through
    /// `GLOBAL_ALLOCATOR`. NOTE(review): soundness depends on
    /// `GLOBAL_ALLOCATOR` matching the allocator that `Vec` used — confirm
    /// in the `alloc` module.
    #[allow(dead_code)]
    pub(crate) fn from_vec<T>(vec: Vec<T>) -> NSTDVec<'a> {
        // Capture the capacity before `leak` turns the `Vec` into a slice.
        let cap = vec.capacity();
        let data = vec.leak();
        NSTDVec {
            allocator: &GLOBAL_ALLOCATOR,
            ptr: data.as_mut_ptr().cast(),
            stride: core::mem::size_of::<T>(),
            align: core::mem::align_of::<T>(),
            len: data.len(),
            cap,
        }
    }

    /// Returns `true` if the vector has nonzero capacity (`cap > 0`).
    #[inline]
    const fn has_allocated(&self) -> NSTDBool {
        self.cap > 0
    }

    /// The number of initialized bytes (`len * stride`).
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    const fn byte_len(&self) -> usize {
        self.len * self.stride
    }

    /// The total size of the buffer in bytes (`cap * stride`).
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    const fn buffer_byte_len(&self) -> usize {
        self.cap * self.stride
    }

    /// Views the vector's contents as a Rust slice of `T`.
    ///
    /// # Safety
    ///
    /// The elements must actually be valid, initialized values of type `T`;
    /// only the stride is checked against `size_of::<T>()`.
    #[inline]
    #[allow(dead_code)]
    pub(crate) unsafe fn as_slice<T>(&self) -> &[T] {
        assert!(self.stride == core::mem::size_of::<T>());
        core::slice::from_raw_parts(self.ptr as _, self.len)
    }

    /// A pointer to one byte past the last initialized element.
    #[inline]
    fn end(&mut self) -> NSTDAnyMut {
        // SAFETY: `len <= cap` is maintained by every mutating operation, so
        // `byte_len()` never exceeds the allocated buffer size.
        unsafe { self.ptr.add(self.byte_len()) }
    }

    /// Ensures room for at least one more element, growing by roughly half
    /// the current capacity when full.
    #[inline]
    fn try_reserve(&mut self) -> NSTDAllocError {
        if self.len == self.cap {
            // Grow by half the current capacity, plus one so that an empty
            // vector still gains room.
            #[allow(clippy::arithmetic_side_effects)]
            let additional = 1 + self.cap / 2;
            #[allow(unused_unsafe)]
            return unsafe { nstd_vec_reserve(self, additional) };
        }
        NSTD_ALLOC_ERROR_NONE
    }
}
impl Drop for NSTDVec<'_> {
    /// Frees the heap buffer (if any). Element destructors are NOT run here;
    /// callers that need them use `nstd_vec_drop`.
    #[inline]
    fn drop(&mut self) {
        let buffer_len = self.buffer_byte_len();
        // A zero-sized buffer (cap == 0 or stride == 0) was never allocated.
        if buffer_len > 0 {
            // SAFETY: `stride`/`align`/`cap` describe the layout the buffer
            // was originally allocated with.
            let layout = unsafe {
                nstd_core_alloc_layout_array_unchecked(self.stride, self.align, self.cap)
            };
            unsafe { (self.allocator.deallocate)(self.allocator.state, self.ptr, layout) };
        }
    }
}
impl<A> FromIterator<A> for NSTDVec<'_> {
    /// Collects an iterator into an `NSTDVec` backed by `NSTD_ALLOCATOR`.
    ///
    /// # Panics
    ///
    /// Panics if pushing an element fails to allocate.
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
        let size = core::mem::size_of::<A>();
        let align = core::mem::align_of::<A>();
        #[allow(unused_unsafe)]
        let mut s = unsafe { nstd_vec_new(&NSTD_ALLOCATOR, size, align) };
        let mut errc;
        for v in iter {
            // SAFETY: `v` is a valid value whose size matches the vector's stride.
            errc = unsafe { nstd_vec_push(&mut s, addr_of!(v).cast()) };
            assert!(errc == NSTD_ALLOC_ERROR_NONE);
            // `v`'s bytes were copied into the vector; forget the original so
            // its destructor does not run on the moved-from value.
            core::mem::forget(v);
        }
        s
    }
}
// SAFETY: `NSTDVec` owns its buffer exclusively. NOTE(review): this also
// requires the referenced `NSTDAllocator` to be usable from other threads —
// confirm against the allocator's own `Send`/`Sync` guarantees.
unsafe impl Send for NSTDVec<'_> {}
// SAFETY: `NSTDVec` exposes no interior mutability through `&self`.
unsafe impl Sync for NSTDVec<'_> {}
/// A possibly-absent [`NSTDVec`], used as a fallible return type.
pub type NSTDOptionalVec<'a> = NSTDOptional<NSTDVec<'a>>;
/// Creates a new, empty `NSTDVec` with no backing allocation.
///
/// # Panics
///
/// Panics when `align` is not a power of two or `stride` is not a multiple
/// of `align`.
#[inline]
#[nstdapi]
#[allow(clippy::arithmetic_side_effects)]
pub const fn nstd_vec_new(
    allocator: &NSTDAllocator,
    stride: NSTDUInt,
    align: NSTDUInt,
) -> NSTDVec<'_> {
    // Reject element layouts the allocator could never satisfy.
    assert!(crate::core::mem::is_power_of_two(align) && stride % align == 0);
    // No allocation yet: a dangling (aligned) pointer with zero capacity.
    NSTDVec {
        len: 0,
        cap: 0,
        align,
        stride,
        ptr: nstd_core_mem_dangling_mut(),
        allocator,
    }
}
/// Creates a new `NSTDVec` with capacity for `cap` elements reserved up front.
///
/// Returns `NSTDOptional::None` when the element layout is invalid or the
/// allocation fails.
#[nstdapi]
pub fn nstd_vec_new_with_cap(
    allocator: &NSTDAllocator,
    stride: NSTDUInt,
    align: NSTDUInt,
    cap: NSTDUInt,
) -> NSTDOptionalVec<'_> {
    // A zero-sized buffer never touches the allocator; only the layout needs
    // to be validated.
    if stride == 0 || cap == 0 {
        #[allow(clippy::arithmetic_side_effects)]
        if !crate::core::mem::is_power_of_two(align) || stride % align != 0 {
            return NSTDOptional::None;
        }
        return NSTDOptional::Some(NSTDVec {
            allocator,
            ptr: nstd_core_mem_dangling_mut(),
            stride,
            align,
            cap,
            len: 0,
        });
    }
    // Non-zero buffer: compute the array layout (this also validates it).
    let NSTDOptional::Some(layout) = nstd_core_alloc_layout_array(stride, align, cap) else {
        return NSTDOptional::None;
    };
    // SAFETY: `layout` was just produced by the layout constructor.
    let ptr = unsafe { (allocator.allocate)(allocator.state, layout) };
    if ptr.is_null() {
        return NSTDOptional::None;
    }
    NSTDOptional::Some(NSTDVec {
        allocator,
        ptr,
        stride,
        align,
        cap,
        len: 0,
    })
}
/// Creates a new `NSTDVec` containing a byte-wise copy of `slice`'s elements.
///
/// Returns `NSTDOptional::None` when the buffer allocation fails or the
/// slice's layout is invalid.
///
/// # Safety
///
/// `slice`'s data must remain valid for reads during the call.
#[nstdapi]
pub unsafe fn nstd_vec_from_slice<'a>(
    allocator: &'a NSTDAllocator,
    slice: &NSTDSlice,
) -> NSTDOptionalVec<'a> {
    let stride = nstd_core_slice_stride(slice);
    let align = nstd_core_slice_align(slice);
    let len = nstd_core_slice_len(slice);
    // Empty slice: no allocation needed, hand back an empty vector.
    if len == 0 {
        return NSTDOptional::Some(NSTDVec {
            allocator,
            ptr: nstd_core_mem_dangling_mut(),
            stride,
            align,
            cap: 0,
            len: 0,
        });
    }
    match nstd_vec_new_with_cap(allocator, stride, align, len) {
        NSTDOptional::Some(mut vec) => {
            #[allow(clippy::arithmetic_side_effects)]
            let bytes = len * stride;
            // Copy the slice's raw bytes into the freshly allocated buffer.
            nstd_core_mem_copy(vec.ptr.cast(), nstd_core_slice_as_ptr(slice).cast(), bytes);
            vec.len = len;
            NSTDOptional::Some(vec)
        }
        // Fix: previously allocation failure (or an invalid layout) fell
        // through to the empty-vector return below, silently discarding the
        // slice's data instead of reporting the failure.
        NSTDOptional::None => NSTDOptional::None,
    }
}
/// Creates a deep byte-wise copy of `vec` using the same allocator.
///
/// Returns `NSTDOptional::None` if allocating the new buffer fails.
#[nstdapi]
pub fn nstd_vec_clone<'a>(vec: &NSTDVec<'a>) -> NSTDOptionalVec<'a> {
    if vec.len == 0 {
        // Nothing to copy; hand back a fresh empty vector.
        return NSTDOptional::Some(nstd_vec_new(vec.allocator, vec.stride, vec.align));
    }
    match nstd_vec_new_with_cap(vec.allocator, vec.stride, vec.align, vec.len) {
        NSTDOptional::Some(mut cloned) => {
            // SAFETY: both buffers are at least `byte_len()` bytes long.
            unsafe { nstd_core_mem_copy(cloned.ptr.cast(), vec.ptr.cast(), vec.byte_len()) };
            cloned.len = vec.len;
            NSTDOptional::Some(cloned)
        }
        NSTDOptional::None => NSTDOptional::None,
    }
}
/// Returns an immutable reference to `vec`'s allocator.
#[inline]
#[nstdapi]
pub const fn nstd_vec_allocator<'a>(vec: &NSTDVec<'a>) -> &'a NSTDAllocator {
    vec.allocator
}
/// Returns the number of initialized elements in `vec`.
#[inline]
#[nstdapi]
pub const fn nstd_vec_len(vec: &NSTDVec<'_>) -> NSTDUInt {
    vec.len
}
/// Returns the number of elements `vec` can hold without reallocating.
#[inline]
#[nstdapi]
pub const fn nstd_vec_cap(vec: &NSTDVec<'_>) -> NSTDUInt {
    vec.cap
}
/// Returns the size in bytes of each element in `vec`.
#[inline]
#[nstdapi]
pub const fn nstd_vec_stride(vec: &NSTDVec<'_>) -> NSTDUInt {
    vec.stride
}
/// Returns the number of elements that can be appended to `vec` without
/// reallocating (`cap - len`).
#[inline]
#[nstdapi]
#[allow(clippy::arithmetic_side_effects)]
pub const fn nstd_vec_reserved(vec: &NSTDVec<'_>) -> NSTDUInt {
    vec.cap - vec.len
}
/// Returns an immutable `NSTDSlice` view over `vec`'s initialized elements.
#[inline]
#[nstdapi]
pub const fn nstd_vec_as_slice(vec: &NSTDVec<'_>) -> NSTDSlice {
    // SAFETY: `ptr`, `stride`, `align` and `len` always describe the vector's
    // initialized contents.
    unsafe { nstd_core_slice_new_unchecked(vec.ptr, vec.stride, vec.align, vec.len) }
}
/// Returns a mutable `NSTDSliceMut` view over `vec`'s initialized elements.
#[inline]
#[nstdapi]
pub fn nstd_vec_as_slice_mut(vec: &mut NSTDVec<'_>) -> NSTDSliceMut {
    // SAFETY: `ptr`, `stride`, `align` and `len` always describe the vector's
    // initialized contents.
    unsafe { nstd_core_slice_mut_new_unchecked(vec.ptr, vec.stride, vec.align, vec.len) }
}
/// Returns an immutable raw pointer to `vec`'s buffer; dangling (but aligned)
/// if the vector has never allocated.
#[inline]
#[nstdapi]
pub const fn nstd_vec_as_ptr(vec: &NSTDVec<'_>) -> NSTDAny {
    vec.ptr
}
/// Returns a mutable raw pointer to `vec`'s buffer; dangling (but aligned)
/// if the vector has never allocated.
#[inline]
#[nstdapi]
pub fn nstd_vec_as_ptr_mut(vec: &mut NSTDVec<'_>) -> NSTDAnyMut {
    vec.ptr
}
/// Returns an immutable pointer one byte past `vec`'s last initialized element.
#[inline]
#[nstdapi]
pub const fn nstd_vec_end(vec: &NSTDVec<'_>) -> NSTDAny {
    // SAFETY: `len <= cap`, so `byte_len()` stays within the buffer.
    unsafe { vec.ptr.add(vec.byte_len()) }
}
/// Returns a mutable pointer one byte past `vec`'s last initialized element.
#[inline]
#[nstdapi]
pub fn nstd_vec_end_mut(vec: &mut NSTDVec<'_>) -> NSTDAnyMut {
    // SAFETY: `len <= cap`, so `byte_len()` stays within the buffer.
    unsafe { vec.ptr.add(vec.byte_len()) }
}
/// Returns an immutable pointer to the element at `pos`, or null when `pos`
/// is out of bounds.
#[inline]
#[nstdapi]
pub const fn nstd_vec_get(vec: &NSTDVec<'_>, pos: NSTDUInt) -> NSTDAny {
    if pos >= vec.len {
        return NSTD_NULL;
    }
    // Convert the element index into a byte offset.
    #[allow(clippy::arithmetic_side_effects)]
    let offset = pos * vec.stride;
    // SAFETY: `pos < len`, so the offset lands inside the initialized buffer.
    unsafe { vec.ptr.add(offset) }
}
/// Returns a mutable pointer to the element at `pos`, or null when `pos` is
/// out of bounds.
#[inline]
#[nstdapi]
pub fn nstd_vec_get_mut(vec: &mut NSTDVec<'_>, pos: NSTDUInt) -> NSTDAnyMut {
    nstd_vec_get(vec, pos).cast_mut()
}
/// Appends a copy of the value at `value` (exactly `vec.stride` bytes) to
/// the end of `vec`, growing the buffer if necessary.
///
/// # Safety
///
/// `value` must point to at least `vec.stride` readable bytes.
#[inline]
#[nstdapi]
pub unsafe fn nstd_vec_push(vec: &mut NSTDVec<'_>, value: NSTDAny) -> NSTDAllocError {
    match vec.try_reserve() {
        NSTD_ALLOC_ERROR_NONE => {
            // Copy the raw element bytes into the first free slot.
            nstd_core_mem_copy(vec.end().cast(), value.cast(), vec.stride);
            match vec.len.checked_add(1) {
                Some(len) => {
                    vec.len = len;
                    NSTD_ALLOC_ERROR_NONE
                }
                _ => NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
            }
        }
        // Propagate the reservation failure untouched.
        errc => errc,
    }
}
/// Removes the last element and returns a pointer to it, or null when `vec`
/// is empty. The pointed-to bytes remain valid until `vec` is next modified.
#[inline]
#[nstdapi]
pub fn nstd_vec_pop(vec: &mut NSTDVec<'_>) -> NSTDAny {
    match vec.len.checked_sub(1) {
        Some(new_len) => {
            // Shrink the initialized region; the old last element now sits
            // exactly at `end()`.
            vec.len = new_len;
            vec.end()
        }
        None => NSTD_NULL,
    }
}
/// Inserts a copy of the value at `value` into `vec` at `index`, shifting
/// the elements at `index..len` up one slot.
///
/// Returns `1` when `index` is out of bounds (`> len`), `2` when growing the
/// buffer fails, and `0` on success.
///
/// # Safety
///
/// `value` must point to at least `vec.stride` readable bytes.
#[nstdapi]
pub unsafe fn nstd_vec_insert(
    vec: &mut NSTDVec<'_>,
    value: NSTDAny,
    mut index: NSTDUInt,
) -> NSTDErrorCode {
    if index > vec.len {
        // Inserting at `len` (an append) is allowed; anything past it is not.
        1
    }
    else if vec.try_reserve() != NSTD_ALLOC_ERROR_NONE {
        2
    }
    else {
        #[allow(clippy::arithmetic_side_effects)]
        if vec.stride > 0 {
            let stride = vec.stride;
            // Shift the tail (`len - index` elements) one slot to the right.
            let bytes_to_copy = (vec.len - index) * stride;
            index *= stride;
            let idxptr = vec.ptr.add(index).cast::<NSTDByte>();
            let dest = idxptr.add(stride);
            nstd_core_mem_copy_overlapping(dest, idxptr, bytes_to_copy);
            // Write the new element into the vacated slot.
            nstd_core_mem_copy(idxptr, value.cast(), stride);
            vec.len += 1;
        } else {
            // Zero-sized elements: nothing to move, just bump the length.
            vec.len = match vec.len.checked_add(1) {
                Some(len) => len,
                _ => return 2,
            };
        }
        0
    }
}
/// Removes the element at `index` from `vec`, shifting the elements after it
/// down one slot.
///
/// Returns `0` on success or `1` when `index` is out of bounds. The removed
/// element's destructor is not run.
#[nstdapi]
pub fn nstd_vec_remove(vec: &mut NSTDVec<'_>, mut index: NSTDUInt) -> NSTDErrorCode {
    #[allow(clippy::arithmetic_side_effects)]
    if index < vec.len {
        if vec.stride > 0 {
            let stride = vec.stride;
            // Shift the tail (`len - index - 1` elements) one slot to the
            // left, overwriting the removed element.
            let bytes_to_copy = (vec.len - index - 1) * stride;
            index *= stride;
            // SAFETY: `index < len`, so every byte touched is initialized.
            unsafe {
                let idxptr = vec.ptr.add(index).cast::<NSTDByte>();
                let src = idxptr.add(stride);
                nstd_core_mem_copy_overlapping(idxptr, src, bytes_to_copy);
            }
        }
        vec.len -= 1;
        0
    } else {
        1
    }
}
/// Appends a byte-wise copy of every element in `values` to the end of `vec`.
///
/// # Panics
///
/// Panics when `values`'s stride differs from `vec`'s.
///
/// # Safety
///
/// `values`'s data must remain valid for reads during the call.
#[nstdapi]
pub unsafe fn nstd_vec_extend(vec: &mut NSTDVec<'_>, values: &NSTDSlice) -> NSTDAllocError {
    assert!(vec.stride == nstd_core_slice_stride(values));
    let len = nstd_core_slice_len(values);
    let mut errc = NSTD_ALLOC_ERROR_NONE;
    let reserved = nstd_vec_reserved(vec);
    // Grow only by the shortfall, not by the full slice length.
    if reserved < len {
        #[allow(clippy::arithmetic_side_effects)]
        let additional = len - reserved;
        errc = nstd_vec_reserve(vec, additional);
    }
    if errc == NSTD_ALLOC_ERROR_NONE {
        let ptr = nstd_core_slice_as_ptr(values).cast();
        // Copy the slice's raw bytes into the free tail of the buffer.
        nstd_core_mem_copy(vec.end().cast(), ptr, values.byte_len());
        vec.len = match vec.len.checked_add(len) {
            Some(len) => len,
            _ => return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
        };
    }
    errc
}
/// Shortens `vec` to at most `len` elements; a no-op when `vec` is already
/// that short. Truncated elements' destructors are not run.
#[inline]
#[nstdapi]
pub fn nstd_vec_truncate(vec: &mut NSTDVec<'_>, len: NSTDUInt) {
    // `min` keeps the current length whenever it is already <= `len`.
    vec.len = vec.len.min(len);
}
/// Sets `vec`'s length without touching the buffer.
///
/// # Safety
///
/// `len` must not exceed the vector's capacity, and the first `len` elements
/// must be initialized.
#[inline]
#[nstdapi]
pub unsafe fn nstd_vec_set_len(vec: &mut NSTDVec<'_>, len: NSTDUInt) {
    vec.len = len;
}
/// Reserves space for at least `size` additional elements.
///
/// Returns `NSTD_ALLOC_ERROR_NONE` on success; on failure the capacity and
/// buffer are left unchanged and the error is returned.
#[nstdapi]
pub fn nstd_vec_reserve(vec: &mut NSTDVec<'_>, size: NSTDUInt) -> NSTDAllocError {
    // Number of extra bytes the requested elements need.
    let Some(bytes_to_alloc) = size.checked_mul(vec.stride) else {
        return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT;
    };
    // Zero-sized request (size == 0 or stride == 0): just grow the
    // bookkeeping capacity, no allocator call needed.
    if bytes_to_alloc == 0 {
        vec.cap = match vec.cap.checked_add(size) {
            Some(cap) => cap,
            _ => return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
        };
        return NSTD_ALLOC_ERROR_NONE;
    }
    if vec.has_allocated() {
        // Grow the existing buffer through the allocator's `reallocate`.
        let Some(new_cap) = vec.cap.checked_add(size) else {
            return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT;
        };
        let new_layout = match nstd_core_alloc_layout_array(vec.stride, vec.align, new_cap) {
            NSTDOptional::Some(new_layout) => new_layout,
            NSTDOptional::None => return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
        };
        // SAFETY: the current buffer was allocated with exactly this layout.
        let old_layout =
            unsafe { nstd_core_alloc_layout_array_unchecked(vec.stride, vec.align, vec.cap) };
        let errc = unsafe {
            (vec.allocator.reallocate)(vec.allocator.state, &mut vec.ptr, old_layout, new_layout)
        };
        // Only commit the new capacity once the reallocation has succeeded.
        if errc == NSTD_ALLOC_ERROR_NONE {
            vec.cap = new_cap;
        }
        errc
    } else {
        // First allocation for this vector.
        let layout = match nstd_core_alloc_layout_array(vec.stride, vec.align, size) {
            NSTDOptional::Some(layout) => layout,
            NSTDOptional::None => return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
        };
        let mem = unsafe { (vec.allocator.allocate)(vec.allocator.state, layout) };
        if !mem.is_null() {
            vec.ptr = mem;
            vec.cap = size;
            return NSTD_ALLOC_ERROR_NONE;
        }
        NSTDAllocError::NSTD_ALLOC_ERROR_OUT_OF_MEMORY
    }
}
/// Shrinks `vec`'s buffer down to its length (but never below one element,
/// so an existing allocation is never freed here).
///
/// Returns `NSTD_ALLOC_ERROR_NONE` when no shrinking was needed or the
/// reallocation succeeded.
#[nstdapi]
pub fn nstd_vec_shrink(vec: &mut NSTDVec<'_>) -> NSTDAllocError {
    if vec.cap > vec.len {
        // Keep room for at least one element so the buffer stays allocated.
        let new_cap = vec.len.max(1);
        if vec.stride > 0 {
            let new_layout = match nstd_core_alloc_layout_array(vec.stride, vec.align, new_cap) {
                NSTDOptional::Some(new_layout) => new_layout,
                NSTDOptional::None => return NSTDAllocError::NSTD_ALLOC_ERROR_INVALID_LAYOUT,
            };
            // SAFETY: the current buffer was allocated with exactly this layout.
            let old_layout =
                unsafe { nstd_core_alloc_layout_array_unchecked(vec.stride, vec.align, vec.cap) };
            let errc = unsafe {
                (vec.allocator.reallocate)(
                    vec.allocator.state,
                    &mut vec.ptr,
                    old_layout,
                    new_layout,
                )
            };
            // Only commit the new capacity once the reallocation succeeded.
            if errc == NSTD_ALLOC_ERROR_NONE {
                vec.cap = new_cap;
            }
            return errc;
        }
        // Zero-sized elements never allocate; adjust the bookkeeping only.
        vec.cap = new_cap;
    }
    NSTD_ALLOC_ERROR_NONE
}
/// Sets `vec`'s length to zero. Neither the buffer nor the elements are
/// freed, and element destructors are not run.
#[inline]
#[nstdapi]
pub fn nstd_vec_clear(vec: &mut NSTDVec<'_>) {
    vec.len = 0;
}
/// Frees `vec` by taking ownership of it; the buffer is released by
/// `NSTDVec`'s `Drop` implementation. Element destructors are not run.
#[inline]
#[nstdapi]
#[allow(
    unused_variables,
    clippy::missing_const_for_fn,
    clippy::needless_pass_by_value
)]
pub fn nstd_vec_free(vec: NSTDVec<'_>) {}
/// Frees `vec` after invoking `callback` on each element in order.
///
/// # Safety
///
/// `callback` must be safe to call with a pointer to each element.
/// NOTE(review): when `stride == 0`, `end()` equals the start pointer, so
/// zero-sized elements receive no callbacks — confirm this is intended.
#[nstdapi]
pub unsafe fn nstd_vec_drop(mut vec: NSTDVec<'_>, callback: unsafe extern "C" fn(NSTDAnyMut)) {
    let mut ptr = nstd_vec_as_ptr_mut(&mut vec);
    let end = nstd_vec_end_mut(&mut vec);
    // Walk the initialized elements, handing each to the caller's destructor.
    while ptr < end {
        callback(ptr);
        ptr = ptr.add(vec.stride);
    }
    // `vec` is dropped here, deallocating the buffer.
}