#[cfg(feature = "alloc")]
use core::{marker::PhantomData, ptr::NonNull};
use core::{mem::{self, MaybeUninit}, num::NonZero};
use crate::{Flat, emitter::Pos, list::Segment};
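/// A contiguous, growable byte buffer used as the backing store for flat
/// encoding.
///
/// # Safety
///
/// Implementors must keep the storage contiguous, aligned to at least
/// `Self::ALIGN`, and with the first `len()` bytes initialized; the
/// raw-pointer helpers in this module rely on these invariants.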
pub unsafe trait Buf: Sized {
const ALIGN: usize;
fn empty() -> Self;
fn as_ptr(&self) -> *const u8;
fn as_mut_ptr(&mut self) -> *mut u8;
fn as_bytes(&self) -> &[u8];
fn len(&self) -> u32;
fn is_empty(&self) -> bool {
self.len() == 0
}
fn capacity(&self) -> u32;
fn resize(&mut self, new_len: u32, fill: u8);
fn reserve(&mut self, additional: u32);
fn extend_from_slice(&mut self, data: &[u8]);
fn align_to(&mut self, align: usize) {
let rem = (self.len() as usize) % align;
if rem != 0 {
let pad = (align - rem) as u32;
self.resize(self.len().checked_add(pad).expect("buffer length overflow"), 0);
}
}
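/// Reserves zero-filled, correctly aligned space for one `U` at the end of
/// the buffer and returns its position.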
fn alloc<U: Flat>(&mut self) -> Pos {
const {
assert!(align_of::<U>() <= Self::ALIGN, "allocated type alignment exceeds buffer alignment");
}
self.align_to(align_of::<U>());
let pos = Pos(self.len());
let size = size_of::<U>() as u32;
self.resize(self.len().checked_add(size).expect("buffer length overflow"), 0);
pos
}
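/// Exposes the buffer address's provenance so that raw pointers into it
/// can later be reconstructed with `core::ptr::with_exposed_provenance`.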
fn expose_provenance(&self) {
let _ = self.as_ptr().expose_provenance();
}
}
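/// A fixed-capacity buffer of `N` bytes stored inline and aligned to 8
/// bytes. Growth operations panic once the capacity is exhausted.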
#[repr(C, align(8))]
pub struct FixedBuf<const N: usize> {
data: [MaybeUninit<u8>; N],
len: u32,
}
impl<const N: usize> FixedBuf<N> {
#[must_use]
pub const fn new() -> Self {
Self { data: [MaybeUninit::uninit(); N], len: 0 }
}
}
impl<const N: usize> Default for FixedBuf<N> {
fn default() -> Self {
Self::new()
}
}
unsafe impl<const N: usize> Buf for FixedBuf<N> {
const ALIGN: usize = 8;
fn empty() -> Self {
Self::new()
}
fn as_ptr(&self) -> *const u8 {
self.data.as_ptr().cast()
}
fn as_mut_ptr(&mut self) -> *mut u8 {
self.data.as_mut_ptr().cast()
}
fn as_bytes(&self) -> &[u8] {
if self.len == 0 {
return &[];
}
unsafe { core::slice::from_raw_parts(self.data.as_ptr().cast(), self.len as usize) }
}
fn len(&self) -> u32 {
self.len
}
fn capacity(&self) -> u32 {
N as u32
}
fn resize(&mut self, new_len: u32, fill: u8) {
assert!(new_len as usize <= N, "FixedBuf capacity exceeded: requested {new_len}, capacity {N}");
if new_len > self.len {
unsafe {
core::ptr::write_bytes(
self.data.as_mut_ptr().add(self.len as usize).cast::<u8>(),
fill,
(new_len - self.len) as usize,
);
}
}
self.len = new_len;
}
fn reserve(&mut self, additional: u32) {
let required = self.len.checked_add(additional).expect("capacity overflow");
assert!(required as usize <= N, "FixedBuf capacity exceeded: need {required}, capacity {N}");
}
fn extend_from_slice(&mut self, data: &[u8]) {
let n = data.len() as u32;
self.reserve(n);
unsafe {
core::ptr::copy_nonoverlapping(
data.as_ptr(),
self.data.as_mut_ptr().add(self.len as usize).cast(),
data.len(),
);
}
self.len += n;
}
}
impl<const N: usize> Clone for FixedBuf<N> {
fn clone(&self) -> Self {
let mut new = Self::new();
if self.len > 0 {
unsafe {
core::ptr::copy_nonoverlapping(
self.data.as_ptr(),
new.data.as_mut_ptr(),
self.len as usize,
);
}
new.len = self.len;
}
new
}
}
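// `FixedBuf` holds only inline bytes plus a length, so these impls merely
// restate what the compiler would auto-derive.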
unsafe impl<const N: usize> Send for FixedBuf<N> {}
unsafe impl<const N: usize> Sync for FixedBuf<N> {}
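/// A heap-allocated byte buffer aligned to `max(align_of::<T>(), 8)`.
/// `T` only sets the alignment; no values of `T` are stored as such.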
#[cfg(feature = "alloc")]
pub struct AlignedBuf<T> {
ptr: NonNull<u8>,
len: u32,
cap: u32,
_type: PhantomData<T>,
}
#[cfg(feature = "alloc")]
impl<T> AlignedBuf<T> {
const BUF_ALIGN: usize = if align_of::<T>() >= 8 { align_of::<T>() } else { 8 };
#[must_use]
pub const fn new() -> Self {
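// Like `Vec`, an empty buffer starts with a dangling-but-aligned pointer
// and defers allocation until the first `grow`.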
let ptr = unsafe { NonNull::new_unchecked(core::ptr::without_provenance_mut(Self::BUF_ALIGN)) };
Self { ptr, len: 0, cap: 0, _type: PhantomData }
}
#[must_use]
pub fn with_capacity(capacity: u32) -> Self {
let mut buf = Self::new();
if capacity > 0 {
buf.reserve(capacity);
}
buf
}
fn grow(&mut self, new_cap: u32) {
debug_assert!(new_cap > self.cap);
let align = Self::BUF_ALIGN;
let new_size = new_cap as usize;
let ptr = if self.cap == 0 {
let layout = alloc::alloc::Layout::from_size_align(new_size, align).expect("invalid layout");
let p = unsafe { alloc::alloc::alloc(layout) };
if p.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
p
} else {
let old_layout =
alloc::alloc::Layout::from_size_align(self.cap as usize, align).expect("invalid layout");
let p = unsafe { alloc::alloc::realloc(self.ptr.as_ptr(), old_layout, new_size) };
if p.is_null() {
alloc::alloc::handle_alloc_error(
alloc::alloc::Layout::from_size_align(new_size, align).expect("invalid layout"),
);
}
p
};
unsafe {
core::ptr::write_bytes(ptr.add(self.cap as usize), 0, (new_cap - self.cap) as usize);
let _ = ptr.expose_provenance();
self.ptr = NonNull::new_unchecked(ptr);
}
self.cap = new_cap;
}
}
#[cfg(feature = "alloc")]
unsafe impl<T> Buf for AlignedBuf<T> {
const ALIGN: usize = Self::BUF_ALIGN;
fn empty() -> Self {
Self::new()
}
fn as_ptr(&self) -> *const u8 {
self.ptr.as_ptr()
}
fn as_mut_ptr(&mut self) -> *mut u8 {
self.ptr.as_ptr()
}
fn as_bytes(&self) -> &[u8] {
if self.len == 0 {
return &[];
}
unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.len as usize) }
}
fn len(&self) -> u32 {
self.len
}
fn capacity(&self) -> u32 {
self.cap
}
fn resize(&mut self, new_len: u32, fill: u8) {
if new_len > self.len {
self.reserve(new_len - self.len);
unsafe {
core::ptr::write_bytes(
self.ptr.as_ptr().add(self.len as usize),
fill,
(new_len - self.len) as usize,
);
}
}
self.len = new_len;
}
fn reserve(&mut self, additional: u32) {
let required = self.len.checked_add(additional).expect("capacity overflow");
if required <= self.cap {
return;
}
let new_cap = required.max(self.cap.saturating_mul(2)).max(64);
self.grow(new_cap);
}
fn extend_from_slice(&mut self, data: &[u8]) {
let n = data.len() as u32;
self.reserve(n);
unsafe {
core::ptr::copy_nonoverlapping(
data.as_ptr(),
self.ptr.as_ptr().add(self.len as usize),
data.len(),
);
}
self.len += n;
}
}
#[cfg(feature = "alloc")]
impl<T> Clone for AlignedBuf<T> {
fn clone(&self) -> Self {
if self.cap == 0 {
return Self::new();
}
let align = Self::BUF_ALIGN;
let layout =
alloc::alloc::Layout::from_size_align(self.cap as usize, align).expect("invalid layout");
let ptr = unsafe {
let p = alloc::alloc::alloc(layout);
if p.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
core::ptr::copy_nonoverlapping(self.ptr.as_ptr(), p, self.len as usize);
let _ = p.expose_provenance();
NonNull::new_unchecked(p)
};
Self { ptr, len: self.len, cap: self.cap, _type: PhantomData }
}
}
#[cfg(feature = "alloc")]
impl<T> Default for AlignedBuf<T> {
fn default() -> Self {
Self::new()
}
}
#[cfg(feature = "alloc")]
impl<T> Drop for AlignedBuf<T> {
fn drop(&mut self) {
if self.cap == 0 {
return;
}
let align = Self::BUF_ALIGN;
unsafe {
let layout = alloc::alloc::Layout::from_size_align_unchecked(self.cap as usize, align);
alloc::alloc::dealloc(self.ptr.as_ptr(), layout);
}
}
}
#[cfg(feature = "alloc")]
unsafe impl<T> Send for AlignedBuf<T> {}
#[cfg(feature = "alloc")]
unsafe impl<T> Sync for AlignedBuf<T> {}
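/// Writes `val` at position `at` by copying its raw bytes into the buffer.
///
/// # Safety
///
/// `at` must denote a slot previously allocated for a `U` (e.g. via
/// [`Buf::alloc`]) so that the write lands on correctly aligned, reserved
/// space.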
pub unsafe fn write_flat<U: Flat>(buf: &mut impl Buf, at: Pos, val: U) {
let start = at.0 as usize;
let size = mem::size_of::<U>();
assert!(
start + size <= buf.len() as usize,
"write_flat out of bounds: {}..{} but len is {}",
start,
start + size,
buf.len()
);
unsafe {
let src = core::ptr::from_ref(&val).cast::<u8>();
core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), size);
}
mem::forget(val);
}
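/// Patches the 4-byte slot at `at` with the non-zero relative offset from
/// `at` to `target`.
///
/// # Safety
///
/// `at` must denote a slot reserved for a `NonZero<i32>` near offset.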
pub unsafe fn patch_near(buf: &mut impl Buf, at: Pos, target: Pos) {
let rel = i64::from(target.0) - i64::from(at.0);
let rel_i32: i32 = rel.try_into().expect("near offset overflow");
let nz = NonZero::new(rel_i32).expect("near offset must be non-zero (target == at)");
let start = at.0 as usize;
let size = mem::size_of::<NonZero<i32>>();
assert!(start + size <= buf.len() as usize, "patch_near out of bounds");
unsafe {
let src = core::ptr::from_ref(&nz).cast::<u8>();
core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), size);
}
}
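/// Patches a list header at `at`: a relative `i32` offset to the first
/// segment (zero for an empty list) followed by the `u32` element count.
///
/// # Safety
///
/// `at` must denote an 8-byte slot reserved for a list header.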
pub unsafe fn patch_list_header(buf: &mut impl Buf, at: Pos, target: Pos, len: u32) {
let off_pos = at.0 as usize;
let len_pos = off_pos + mem::size_of::<i32>();
assert!(len_pos + mem::size_of::<u32>() <= buf.len() as usize, "patch_list_header out of bounds");
let rel: i32 = if len == 0 {
0
} else {
let r = i64::from(target.0) - i64::from(at.0);
r.try_into().expect("list header offset overflow")
};
unsafe {
let buf_ptr = buf.as_mut_ptr();
core::ptr::copy_nonoverlapping(
core::ptr::from_ref(&rel).cast::<u8>(),
buf_ptr.add(off_pos),
mem::size_of::<i32>(),
);
core::ptr::copy_nonoverlapping(
core::ptr::from_ref(&len).cast::<u8>(),
buf_ptr.add(len_pos),
mem::size_of::<u32>(),
);
}
}
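/// Copies `len` bytes from `src` into the buffer starting at `at`.
///
/// # Safety
///
/// `src` must be valid for reads of `len` bytes and must not overlap the
/// buffer's storage.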
pub unsafe fn write_bytes(buf: &mut impl Buf, at: Pos, src: *const u8, len: usize) {
let start = at.0 as usize;
assert!(
start + len <= buf.len() as usize,
"write_bytes out of bounds: {}..{} but len is {}",
start,
start + len,
buf.len()
);
unsafe {
core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), len);
}
}
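/// Allocates an aligned `Segment<U>` header plus room for `count` values,
/// writes `count` into the header's length slot (immediately after the
/// leading `i32` next-offset), and returns the segment's position.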
pub fn alloc_segment<U: Flat>(buf: &mut impl Buf, count: u32) -> Pos {
buf.align_to(align_of::<Segment<U>>());
let pos = Pos(buf.len());
let values_size = count.checked_mul(size_of::<U>() as u32).expect("segment values overflow");
let total =
(size_of::<Segment<U>>() as u32).checked_add(values_size).expect("segment total size overflow");
buf.resize(buf.len().checked_add(total).expect("buffer length overflow"), 0);
let len_offset = pos.0 as usize + size_of::<i32>();
unsafe {
core::ptr::copy_nonoverlapping(
core::ptr::from_ref(&count).cast::<u8>(),
buf.as_mut_ptr().add(len_offset),
size_of::<u32>(),
);
}
pos
}
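/// Patches the leading `i32` `next` offset of the segment header at
/// `seg_pos` so that it points to `next_seg_pos`.
///
/// # Safety
///
/// `seg_pos` must denote a previously allocated segment header.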
pub unsafe fn patch_segment_next(buf: &mut impl Buf, seg_pos: Pos, next_seg_pos: Pos) {
let rel = i64::from(next_seg_pos.0) - i64::from(seg_pos.0);
let rel_i32: i32 = rel.try_into().expect("segment next offset overflow");
let start = seg_pos.0 as usize;
assert!(start + mem::size_of::<i32>() <= buf.len() as usize, "patch_segment_next out of bounds");
unsafe {
core::ptr::copy_nonoverlapping(
core::ptr::from_ref(&rel_i32).cast::<u8>(),
buf.as_mut_ptr().add(start),
mem::size_of::<i32>(),
);
}
}
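// Minimal smoke tests sketching the buffer invariants above. They avoid
// `Flat` types on purpose, so they depend only on items defined in this
// module.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn fixed_buf_append_align_and_shrink() {
        let mut buf = FixedBuf::<64>::new();
        assert!(buf.is_empty());

        buf.extend_from_slice(&[1, 2, 3]);
        assert_eq!(buf.as_bytes(), &[1, 2, 3]);

        // `align_to` zero-pads up to the next multiple of the alignment.
        buf.align_to(4);
        assert_eq!(buf.as_bytes(), &[1, 2, 3, 0]);

        // Shrinking only moves `len`; no bytes are rewritten.
        buf.resize(2, 0);
        assert_eq!(buf.as_bytes(), &[1, 2]);
        assert_eq!(buf.capacity(), 64);
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn aligned_buf_grows_on_demand() {
        let mut buf = AlignedBuf::<u64>::new();
        assert_eq!(buf.capacity(), 0);

        buf.extend_from_slice(&[0xAB; 100]);
        assert_eq!(buf.len(), 100);
        assert!(buf.capacity() >= 100);
        // The allocation honours the buffer alignment (here 8).
        assert_eq!(buf.as_ptr() as usize % 8, 0);
    }
}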