use core::mem::{align_of, size_of};
use std::alloc;
use std::alloc::Layout;
use std::ptr::NonNull;
#[cfg(feature = "singlethreaded")]
use std::cell::Cell;
#[cfg(not(feature = "singlethreaded"))]
use std::sync::atomic::{AtomicUsize, Ordering};
/// Writes `$val` into field `$member` of the struct behind the raw pointer
/// `$dest`, without dropping any previous (possibly uninitialized) contents.
///
/// Uses `addr_of_mut!` + raw `write` instead of `ptr::write(&mut …)`:
/// forming a `&mut` to a field of a freshly allocated, still-uninitialized
/// block (as `RcString::allocate` does) is undefined behavior, while taking
/// the field address as a raw pointer is sound as long as `$dest` is valid
/// and properly aligned.
macro_rules! POKE {
    ($dest:ident.$member:ident = $val:expr) => {
        core::ptr::addr_of_mut!((*$dest).$member).write($val);
    };
}
/// Heap header of a reference-counted string buffer.
///
/// The payload bytes live inline, immediately after this header, in the same
/// allocation: `data` is a zero-sized marker whose address is the start of
/// the payload (flexible-array-member pattern — see `from_bytes_unchecked`
/// and `as_str`). `repr(C)` pins the field order so the payload offset is
/// stable, and `align(32)` rounds the header so `RCSTRING_SIZE` is a fixed
/// 32-byte prefix that all layout arithmetic in the impl relies on.
#[derive(Debug)]
#[repr(C, align(32))]
pub(crate) struct RcString {
    // Strong reference count: atomic in the default build, a plain Cell when
    // the crate is compiled single-threaded.
    #[cfg(not(feature = "singlethreaded"))]
    refcount: AtomicUsize,
    #[cfg(feature = "singlethreaded")]
    refcount: Cell<usize>,
    // Number of initialized payload bytes; expected to be valid UTF-8, since
    // `as_str` exposes them via `from_utf8_unchecked`.
    len: usize,
    // Number of payload bytes available after the header (excludes the header).
    capacity: usize,
    // Zero-sized tail marker; `data.as_ptr()` is the payload base address.
    data: [u8; 0],
}
// Header size and alignment used in all Layout computations below: the three
// usize fields rounded up to the struct's 32-byte alignment. Every allocation
// is `RCSTRING_SIZE + capacity` bytes.
const RCSTRING_SIZE: usize = size_of::<RcString>();
const RCSTRING_ALIGN: usize = align_of::<RcString>();
impl RcString {
    /// Allocates one heap block holding an `RcString` header followed by
    /// `capacity` payload bytes, initialized with `len == 0` and a strong
    /// count of 1.
    ///
    /// Returns `None` on allocator failure; panics if `capacity + header`
    /// overflows `usize`.
    pub(crate) fn allocate(capacity: usize) -> Option<NonNull<Self>> {
        unsafe {
            // SAFETY: RCSTRING_ALIGN is a power of two and the size is
            // overflow-checked. NOTE(review): sizes above isize::MAX are not
            // rejected here — confirm callers cannot request capacities that
            // large (Layout::from_size_align would check this).
            let layout = Layout::from_size_align_unchecked(
                capacity
                    .checked_add(RCSTRING_SIZE)
                    .expect("Capacity overflow"),
                RCSTRING_ALIGN,
            );
            let rcstring = alloc::alloc(layout) as *mut Self;
            if rcstring.is_null() {
                return None;
            }
            // The block is uninitialized: each header field must be written
            // in place (POKE! is a raw write, never a drop-assign).
            #[cfg(not(feature = "singlethreaded"))]
            POKE!(rcstring.refcount = AtomicUsize::new(1usize));
            #[cfg(feature = "singlethreaded")]
            POKE!(rcstring.refcount = Cell::new(1usize));
            POKE!(rcstring.len = 0);
            // Derive capacity back from the layout so the two stay in sync if
            // the size computation above ever changes.
            POKE!(rcstring.capacity = layout.size() - RCSTRING_SIZE);
            Some(NonNull::new_unchecked(rcstring))
        }
    }
    /// Grows the allocation so that at least `reserve` bytes beyond the
    /// current `len` fit, letting `DEFAULT_ALLOCATION_STRATEGY` choose the
    /// actual new size.
    ///
    /// Must only be called while holding the sole strong reference (checked
    /// by `debug_assert`). On success the old pointer is invalidated and the
    /// possibly-moved new pointer is returned; on `None` (realloc failure)
    /// the original allocation is untouched and still valid. Panics on
    /// size-arithmetic overflow.
    pub(crate) fn grow(this: NonNull<Self>, reserve: usize) -> Option<NonNull<Self>> {
        unsafe {
            debug_assert_eq!(this.as_ref().strong_count(), 1);
            // Reconstruct the layout this block was allocated with.
            let layout = Layout::from_size_align_unchecked(
                this.as_ref().capacity + RCSTRING_SIZE,
                RCSTRING_ALIGN,
            );
            let min_new_size = (this.as_ref().len + RCSTRING_SIZE)
                .checked_add(reserve)
                .expect("Capacity overflow");
            let new_size = DEFAULT_ALLOCATION_STRATEGY
                .grow(layout.size(), min_new_size)
                .expect("Capacity overflow");
            // realloc preserves the header bytes (refcount, len); only
            // `capacity` needs refreshing afterwards.
            let rcstring = alloc::realloc(this.as_ptr() as *mut u8, layout, new_size) as *mut Self;
            if rcstring.is_null() {
                return None;
            }
            POKE!(rcstring.capacity = new_size - RCSTRING_SIZE);
            Some(NonNull::new_unchecked(rcstring))
        }
    }
    /// Shrinks the allocation toward `new_capacity`, never below the current
    /// `len`. Returns the pointer unchanged when the strategy-rounded size
    /// would not actually make the block smaller.
    ///
    /// Must only be called while holding the sole strong reference. On
    /// `None` (realloc failure) the original allocation is untouched.
    pub(crate) fn shrink(this: NonNull<Self>, new_capacity: usize) -> Option<NonNull<Self>> {
        unsafe {
            debug_assert_eq!(this.as_ref().strong_count(), 1);
            let layout = Layout::from_size_align_unchecked(
                this.as_ref().capacity + RCSTRING_SIZE,
                RCSTRING_ALIGN,
            );
            // Keep room for the current contents even if the caller asked
            // for less.
            let new_size = DEFAULT_ALLOCATION_STRATEGY
                .align((this.as_ref().len + RCSTRING_SIZE).max(new_capacity + RCSTRING_SIZE));
            if new_size < layout.size() {
                let rcstring =
                    alloc::realloc(this.as_ptr() as *mut u8, layout, new_size) as *mut Self;
                if rcstring.is_null() {
                    return None;
                }
                POKE!(rcstring.capacity = new_size - RCSTRING_SIZE);
                Some(NonNull::new_unchecked(rcstring))
            } else {
                Some(this)
            }
        }
    }
    /// Frees the allocation.
    ///
    /// # Safety
    ///
    /// `this` must have been produced by `allocate`/`grow`/`shrink`, its
    /// strong count must already be zero, and the pointer must not be used
    /// after this call.
    pub(crate) unsafe fn dealloc(this: NonNull<Self>) {
        debug_assert_eq!(this.as_ref().strong_count(), 0);
        let layout = Layout::from_size_align_unchecked(
            this.as_ref().capacity + RCSTRING_SIZE,
            RCSTRING_ALIGN,
        );
        alloc::dealloc(this.as_ptr() as *mut u8, layout);
    }
    /// Allocates a block with room for `source.len() + reserve` payload bytes
    /// and copies `source` into it.
    ///
    /// # Safety
    ///
    /// `source` must be valid UTF-8: the bytes are later exposed through
    /// `as_str`/`as_mut_str` via `from_utf8_unchecked` without validation.
    unsafe fn from_bytes_unchecked(source: &[u8], reserve: usize) -> Option<NonNull<Self>> {
        let mut rcstring = Self::allocate(
            source
                .len()
                .checked_add(reserve)
                .expect("Capacity overflow"),
        )?;
        std::ptr::copy_nonoverlapping(
            source.as_ptr(),
            rcstring.as_mut().data.as_mut_ptr(),
            source.len(),
        );
        rcstring.as_mut().len = source.len();
        Some(rcstring)
    }
    /// Safe constructor from `&str`: the `&str` type itself guarantees the
    /// UTF-8 invariant that `from_bytes_unchecked` requires.
    #[inline]
    pub(crate) fn from_str(source: &str, reserve: usize) -> Option<NonNull<Self>> {
        unsafe { Self::from_bytes_unchecked(source.as_bytes(), reserve) }
    }
    /// Appends one char, UTF-8 encoded.
    ///
    /// Panics if the encoded char does not fit in the spare capacity — the
    /// caller is responsible for calling `grow` first.
    #[inline]
    pub(crate) fn push(&mut self, ch: char) {
        let mut buf = [0u8; 4];
        let bytes = ch.encode_utf8(&mut buf).as_bytes();
        assert!(self.len + bytes.len() <= self.capacity);
        unsafe {
            std::ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                self.data.as_mut_ptr().add(self.len),
                bytes.len(),
            );
        }
        self.len += bytes.len();
    }
    /// Appends a string slice.
    ///
    /// Panics if `s` does not fit in the spare capacity — the caller is
    /// responsible for calling `grow` first.
    #[inline]
    pub(crate) fn push_str(&mut self, s: &str) {
        assert!(self.len + s.len() <= self.capacity);
        unsafe {
            std::ptr::copy_nonoverlapping(
                s.as_ptr(),
                self.data.as_mut_ptr().add(self.len),
                s.len(),
            );
        }
        self.len += s.len();
    }
    /// Views the initialized payload as `&str` (UTF-8 invariant upheld by the
    /// constructors and push methods above).
    #[inline]
    pub(crate) fn as_str(&self) -> &str {
        unsafe {
            std::str::from_utf8_unchecked(std::slice::from_raw_parts(self.data.as_ptr(), self.len))
        }
    }
    /// Mutable view of the initialized payload as `&mut str`.
    #[inline]
    pub(crate) fn as_mut_str(&mut self) -> &mut str {
        unsafe {
            std::str::from_utf8_unchecked_mut(std::slice::from_raw_parts_mut(
                self.data.as_mut_ptr(),
                self.len,
            ))
        }
    }
    /// Payload bytes still unused.
    #[inline]
    pub(crate) fn spare_capacity(&self) -> usize {
        self.capacity - self.len
    }
    /// Total payload capacity in bytes (header excluded).
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        self.capacity
    }
    /// Current strong count. Relaxed suffices: the value is only observed in
    /// debug assertions here, not used to make synchronization decisions.
    #[inline]
    #[cfg(not(feature = "singlethreaded"))]
    pub(crate) fn strong_count(&self) -> usize {
        self.refcount.load(Ordering::Relaxed)
    }
    /// Current strong count (single-threaded build).
    #[inline]
    #[cfg(feature = "singlethreaded")]
    pub(crate) fn strong_count(&self) -> usize {
        self.refcount.get()
    }
    /// Bumps the strong count. Relaxed suffices for increments, as in
    /// `std::sync::Arc::clone`: creating a new reference requires no
    /// synchronization with other operations.
    #[cfg(not(feature = "singlethreaded"))]
    pub(crate) fn increment_strong_count(&self) {
        self.refcount.fetch_add(1, Ordering::Relaxed);
    }
    /// Bumps the strong count (single-threaded build).
    #[cfg(feature = "singlethreaded")]
    pub(crate) fn increment_strong_count(&self) {
        self.refcount.set(self.refcount.get() + 1);
    }
    /// Drops one strong reference and returns the new count; 0 means the
    /// caller must `dealloc` (single-threaded build).
    #[cfg(feature = "singlethreaded")]
    pub(crate) fn decrement_strong_count(&self) -> usize {
        let count = self.refcount.get() - 1;
        self.refcount.set(count);
        count
    }
    /// Drops one strong reference and returns the new count; 0 means the
    /// caller must `dealloc`.
    ///
    /// Uses the `std::sync::Arc` drop protocol: the Release decrement
    /// publishes this thread's writes to the buffer, and when the count hits
    /// zero an Acquire fence synchronizes with every other thread's Release
    /// decrement before the caller frees the memory. (Previously the fence
    /// was missing, so the freeing thread could observe stale buffer
    /// contents.)
    #[cfg(not(feature = "singlethreaded"))]
    pub(crate) fn decrement_strong_count(&self) -> usize {
        let previous = self.refcount.fetch_sub(1, Ordering::Release);
        if previous == 1 {
            std::sync::atomic::fence(Ordering::Acquire);
        }
        previous - 1
    }
}
const KB: usize = 1024;
const MB: usize = KB * 1024;
const GB: usize = MB * 1024;
/// Policy for sizing string-buffer allocations: how raw sizes are rounded and
/// how much extra room a growing buffer reserves.
struct AllocationStrategy {
    /// Smallest size handed to the allocator and the rounding granularity.
    /// Must be a power of two: `align` masks with `min_allocation - 1`.
    pub min_allocation: usize,
    /// Allocations below this threshold double when growing.
    pub grow_half: usize,
    /// Between `grow_half` and this threshold allocations grow by 50%; at or
    /// above it they grow by the constant `grow_const / 2`.
    pub grow_const: usize,
}
/// Strategy used by `RcString::grow` and `RcString::shrink`.
const DEFAULT_ALLOCATION_STRATEGY: AllocationStrategy = AllocationStrategy {
    min_allocation: 32,
    grow_half: 8 * MB,
    grow_const: GB,
};
impl AllocationStrategy {
    /// Picks the next allocation size for a block currently `old_size` bytes
    /// large that must end up holding at least `minimum_needed` bytes.
    ///
    /// Growth is geometric (x2 up to `grow_half`, then x1.5 up to
    /// `grow_const`) and linear beyond that, keeping amortized append cost
    /// O(1) while capping waste on very large buffers. Returns `None` when
    /// the size arithmetic overflows `usize`.
    #[inline]
    pub fn grow(&self, old_size: usize, minimum_needed: usize) -> Option<usize> {
        Some(
            self.align(
                if old_size < self.min_allocation {
                    self.min_allocation
                } else if old_size < self.grow_half {
                    old_size.checked_mul(2)?
                } else if old_size < self.grow_const {
                    old_size.checked_add(old_size / 2)?
                } else {
                    old_size.checked_add(self.grow_const / 2)?
                }
                .max(minimum_needed),
            ),
        )
    }
    /// Rounds `size` up to the next multiple of `min_allocation` (0 stays 0).
    #[inline]
    pub fn align(&self, size: usize) -> usize {
        if size > 0 {
            // BUG FIX: the previous `size | (min_allocation - 1)` merely set
            // the low bits, yielding values one short of a multiple (e.g.
            // align(32) == 63), so "aligned" sizes were never multiples of
            // min_allocation. This form rounds up properly for power-of-two
            // granularities without risking `size + min - 1` overflow.
            ((size - 1) | (self.min_allocation - 1)) + 1
        } else {
            0
        }
    }
}