use core::mem::{align_of, size_of};
use std::alloc;
use std::alloc::Layout;
use std::ptr::{addr_of_mut, NonNull};
#[cfg(not(feature = "multithreaded"))]
use std::cell::Cell;
#[cfg(feature = "multithreaded")]
use std::sync::atomic::{fence, AtomicUsize, Ordering};
#[cfg(feature = "multithreaded")]
use threadcell::ThreadCell;
use crate::Error;
/// Reference-counted, heap-allocated string buffer.
///
/// The `NonNull<u8>` points at a single allocation that begins with an
/// `RcStringHeader` (refcount, len, capacity, and — with the
/// "multithreaded" feature — a push lock), immediately followed by the
/// UTF-8 string bytes. `Copy`/`Clone` duplicate only the pointer; the
/// strong count is managed explicitly through
/// `increment_strong_count` / `decrement_strong_count`.
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
pub(crate) struct RcString(NonNull<u8>);
// SAFETY: with the "multithreaded" feature the header uses atomics and a
// `ThreadCell`, making cross-thread sharing sound. NOTE(review): these
// impls are also compiled WITHOUT that feature, where the header holds
// plain `Cell`s — that is only sound if the crate guarantees
// single-threaded use in that configuration; confirm at crate level.
unsafe impl Send for RcString {}
unsafe impl Sync for RcString {}
impl RcString {
    /// Wraps the allocator's return value, diverting to the global
    /// allocation-error handler (which aborts) on null.
    #[inline]
    unsafe fn from_ptr(allocated: *mut u8, layout: Layout) -> Self {
        if allocated.is_null() {
            alloc::handle_alloc_error(layout);
        }
        Self(NonNull::new_unchecked(allocated))
    }

    /// Allocates a buffer with room for `capacity` string bytes after the
    /// header and initializes the header: refcount = 1, len = 0,
    /// capacity = data bytes actually available.
    ///
    /// # Panics
    /// Panics if `capacity + RCSTRING_SIZE` overflows `usize`.
    pub(crate) fn allocate(capacity: usize) -> Self {
        unsafe {
            // NOTE(review): `from_size_align_unchecked` skips Layout's
            // "size fits in isize" validation; confirm callers bound
            // `capacity` so the layout stays valid.
            let layout = Layout::from_size_align_unchecked(
                capacity
                    .checked_add(RCSTRING_SIZE)
                    .expect("Capacity overflow"),
                RCSTRING_ALIGN,
            );
            let allocated = Self::from_ptr(alloc::alloc(layout), layout);
            let rcstring = allocated.0.as_ptr() as *mut RcStringHeader;
            // Initialize each header field in place via `addr_of_mut!`,
            // never forming a reference to uninitialized memory.
            #[cfg(feature = "multithreaded")]
            addr_of_mut!((*rcstring).refcount).write(AtomicUsize::new(1usize));
            #[cfg(not(feature = "multithreaded"))]
            addr_of_mut!((*rcstring).refcount).write(Cell::new(1usize));
            #[cfg(feature = "multithreaded")]
            addr_of_mut!((*rcstring).len).write(AtomicUsize::new(0));
            #[cfg(not(feature = "multithreaded"))]
            addr_of_mut!((*rcstring).len).write(Cell::new(0));
            // Usable data bytes = allocation size minus the header.
            addr_of_mut!((*rcstring).capacity).write(layout.size() - RCSTRING_SIZE);
            // The push lock starts disowned; a thread claims it via
            // `acquire` before using the try_push* APIs.
            #[cfg(feature = "multithreaded")]
            addr_of_mut!((*rcstring).push_lock).write(ThreadCell::new_disowned(()));
            allocated
        }
    }

    /// Grows the allocation so that at least `reserve` additional bytes
    /// fit after the current contents, sized by
    /// `DEFAULT_ALLOCATION_STRATEGY`. Consumes and returns `self` because
    /// `realloc` may move the buffer. Requires unique ownership
    /// (debug-asserted).
    ///
    /// # Panics
    /// Panics on capacity overflow.
    pub(crate) fn grow(mut self, reserve: usize) -> Self {
        debug_assert_eq!(self.strong_count(), 1);
        let layout = self.current_layout();
        let min_new_size = (self.len_relaxed() + RCSTRING_SIZE)
            .checked_add(reserve)
            .expect("Capacity overflow");
        let new_size = DEFAULT_ALLOCATION_STRATEGY
            .grow(layout.size(), min_new_size)
            .expect("Capacity overflow");
        if new_size > layout.size() {
            // NOTE(review): on realloc failure `from_ptr` reports the OLD
            // layout to `handle_alloc_error`, not the requested
            // `new_size`; harmless (it still aborts) but imprecise.
            self = unsafe {
                Self::from_ptr(alloc::realloc(self.0.as_ptr(), layout, new_size), layout)
            };
            self.header_mut().capacity = new_size - RCSTRING_SIZE;
        }
        self
    }

    /// Shrinks the allocation toward `new_capacity` data bytes, never
    /// below the current length (the `.max` below guarantees it).
    /// Consumes and returns `self` because `realloc` may move the buffer.
    /// Requires unique ownership (debug-asserted).
    pub(crate) fn shrink(mut self, new_capacity: usize) -> Self {
        debug_assert_eq!(self.strong_count(), 1);
        let layout = self.current_layout();
        let new_size = DEFAULT_ALLOCATION_STRATEGY
            .align((self.len_relaxed() + RCSTRING_SIZE).max(new_capacity + RCSTRING_SIZE));
        if new_size < layout.size() {
            self = unsafe {
                Self::from_ptr(alloc::realloc(self.0.as_ptr(), layout, new_size), layout)
            };
            self.header_mut().capacity = new_size - RCSTRING_SIZE;
        }
        self
    }

    /// Allocates a buffer holding a copy of `source` plus `reserve` spare
    /// bytes, then publishes the length.
    ///
    /// # Safety
    /// `source` must be valid UTF-8; `as_str` later assumes it.
    unsafe fn from_bytes_unchecked(source: &[u8], reserve: usize) -> Self {
        let mut allocated = Self::allocate(
            source
                .len()
                .checked_add(reserve)
                .expect("Capacity overflow"),
        );
        std::ptr::copy_nonoverlapping(source.as_ptr(), allocated.data_mut(), source.len());
        allocated.len_set_release(source.len());
        allocated
    }

    /// Builds an `RcString` containing `source` with `reserve` spare
    /// bytes of capacity.
    #[inline]
    pub(crate) fn from_str(source: &str, reserve: usize) -> Self {
        // SAFETY: `&str` is guaranteed valid UTF-8.
        unsafe { Self::from_bytes_unchecked(source.as_bytes(), reserve) }
    }

    /// Frees the allocation.
    ///
    /// # Safety
    /// Must be called exactly once, after the strong count reached zero
    /// (debug-asserted); no copy of this handle may be used afterwards.
    #[mutants::skip]
    pub(crate) unsafe fn dealloc(self) {
        debug_assert_eq!(self.strong_count(), 0);
        let layout = self.current_layout();
        alloc::dealloc(self.0.as_ptr(), layout);
    }

    /// Reconstructs the layout the current allocation was made with
    /// (header plus `capacity` data bytes).
    #[inline]
    fn current_layout(&self) -> Layout {
        unsafe {
            Layout::from_size_align_unchecked(
                self.header().capacity + RCSTRING_SIZE,
                RCSTRING_ALIGN,
            )
        }
    }
}
impl RcString {
    /// Appends one char. `&mut self` guarantees exclusive access.
    ///
    /// # Panics
    /// Panics if the UTF-8 encoding of `ch` exceeds the spare capacity.
    #[inline]
    pub(crate) fn push(&mut self, ch: char) {
        let mut buf = [0u8; 4];
        let bytes = ch.encode_utf8(&mut buf).as_bytes();
        assert!(self.spare_capacity() >= bytes.len());
        // SAFETY: the assert above proves the destination range
        // [len, len + bytes.len()) lies within the allocation.
        unsafe {
            std::ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                self.data_mut().add(self.header().len_relaxed()),
                bytes.len(),
            );
        }
        // Publish the new length only after the bytes are in place.
        self.len_add_release(bytes.len());
    }
    /// Appends one char through a shared reference, without growing.
    ///
    /// Returns `Err(Error::Capacity)` when the encoded char does not fit,
    /// and (multithreaded builds) `Err(Error::ThreadOwnership)` when the
    /// calling thread does not own the push lock.
    ///
    /// # Safety
    /// The caller must ensure no other writer mutates the buffer
    /// concurrently; in multithreaded builds this is enforced by the
    /// push-lock ownership check below.
    pub(crate) unsafe fn try_push(&self, ch: char) -> Result<(), Error> {
        let mut buf = [0u8; 4];
        let bytes = ch.encode_utf8(&mut buf).as_bytes();
        #[cfg(feature = "multithreaded")]
        if !self.header().push_lock.is_owned() {
            return Err(Error::ThreadOwnership);
        }
        if self.spare_capacity() >= bytes.len() {
            // Write past the current end, then publish the new length
            // with a release store so readers see the bytes first.
            std::ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                (self.data() as *mut u8).add(self.header().len_relaxed()),
                bytes.len(),
            );
            self.len_add_release(bytes.len());
            Ok(())
        } else {
            Err(Error::Capacity)
        }
    }
    /// Appends a string slice. `&mut self` guarantees exclusive access.
    ///
    /// # Panics
    /// Panics if `s` exceeds the spare capacity.
    #[inline]
    pub(crate) fn push_str(&mut self, s: &str) {
        assert!(self.spare_capacity() >= s.len());
        // SAFETY: the assert above proves the destination range fits.
        unsafe {
            std::ptr::copy_nonoverlapping(
                s.as_ptr(),
                self.data_mut().add(self.header().len_relaxed()),
                s.len(),
            );
        }
        self.len_add_release(s.len());
    }
    /// Appends a string slice through a shared reference, without
    /// growing. Same contract and error cases as `try_push`.
    ///
    /// # Safety
    /// See `try_push`.
    pub(crate) unsafe fn try_push_str(&self, s: &str) -> Result<(), Error> {
        #[cfg(feature = "multithreaded")]
        if !self.header().push_lock.is_owned() {
            return Err(Error::ThreadOwnership);
        }
        if self.spare_capacity() >= s.len() {
            std::ptr::copy_nonoverlapping(
                s.as_ptr(),
                (self.data() as *mut u8).add(self.header().len_relaxed()),
                s.len(),
            );
            self.len_add_release(s.len());
            Ok(())
        } else {
            Err(Error::Capacity)
        }
    }
    /// Attempts to claim the push lock for the current thread; `true` on
    /// success.
    #[cfg(feature = "multithreaded")]
    #[inline]
    pub(crate) fn acquire(&self) -> bool {
        self.header().push_lock.try_acquire_once()
    }
    /// Releases the push lock held by the current thread.
    #[cfg(feature = "multithreaded")]
    #[inline]
    pub(crate) fn release(&self) {
        self.header().push_lock.release();
    }
}
impl RcString {
    /// Borrows the contents as `&str`.
    #[inline]
    pub(crate) fn as_str(&self) -> &str {
        // SAFETY: bytes [0, len) were written from valid UTF-8
        // (`from_str`, `push*`). The acquire length load pairs with the
        // release length updates, so the bytes are visible before the new
        // length is observed.
        unsafe {
            std::str::from_utf8_unchecked(std::slice::from_raw_parts(
                self.data(),
                self.len_acquire(),
            ))
        }
    }
    /// Mutably borrows the contents as `&mut str`.
    ///
    /// # Safety
    /// The caller must hold the only handle (debug-asserted) and must
    /// keep the bytes valid UTF-8.
    #[inline]
    pub(crate) unsafe fn as_mut_str(&mut self) -> &mut str {
        debug_assert_eq!(self.strong_count(), 1);
        std::str::from_utf8_unchecked_mut(std::slice::from_raw_parts_mut(
            self.data_mut(),
            self.len_acquire(),
        ))
    }
}
impl RcString {
    /// Shared view of the header at the front of the allocation.
    #[inline(always)]
    fn header(&self) -> &RcStringHeader {
        // SAFETY: `self.0` points at a live allocation whose first
        // RCSTRING_SIZE bytes are an initialized `RcStringHeader` (see
        // `allocate`). A typed pointer cast replaces the previous
        // `transmute` of a pointer into a reference (the pattern
        // clippy's `transmute_ptr_to_ref` lint rejects) with the
        // idiomatic, equally-sound form.
        unsafe { self.0.cast::<RcStringHeader>().as_ref() }
    }
    /// Exclusive view of the header; `&mut self` guarantees uniqueness.
    #[inline(always)]
    fn header_mut(&mut self) -> &mut RcStringHeader {
        // SAFETY: as in `header`, plus `&mut self` rules out aliasing
        // through this handle.
        unsafe { self.0.cast::<RcStringHeader>().as_mut() }
    }
    /// Pointer to the first string byte, just past the header.
    #[inline(always)]
    fn data(&self) -> *const u8 {
        // SAFETY: every allocation is at least RCSTRING_SIZE bytes.
        unsafe { self.0.as_ptr().add(RCSTRING_SIZE) }
    }
    /// Mutable pointer to the first string byte.
    #[inline(always)]
    fn data_mut(&mut self) -> *mut u8 {
        // SAFETY: every allocation is at least RCSTRING_SIZE bytes.
        unsafe { self.0.as_ptr().add(RCSTRING_SIZE) }
    }
    /// Bytes still available after the current contents.
    #[inline(always)]
    pub(crate) fn spare_capacity(&self) -> usize {
        self.header().spare_capacity()
    }
    /// Total data bytes the allocation can hold.
    #[inline(always)]
    pub(crate) fn capacity(&self) -> usize {
        self.header().capacity()
    }
    /// Number of live handles sharing the allocation.
    #[inline(always)]
    pub(crate) fn strong_count(&self) -> usize {
        self.header().strong_count()
    }
    /// Adds one strong reference.
    #[inline(always)]
    pub(crate) fn increment_strong_count(&self) {
        self.header().increment_strong_count();
    }
    /// Drops one strong reference; `true` means this was the last handle
    /// and the caller must `dealloc`.
    #[inline(always)]
    #[must_use = "Would leak when 'true' is ignored"]
    pub(crate) fn decrement_strong_count(&self) -> bool {
        self.header().decrement_strong_count()
    }
    /// Length without ordering guarantees (see header docs).
    #[inline(always)]
    #[mutants::skip]
    fn len_relaxed(&self) -> usize {
        self.header().len_relaxed()
    }
    /// Length with acquire ordering.
    #[inline(always)]
    fn len_acquire(&self) -> usize {
        self.header().len_acquire()
    }
    /// Adds `n` to the length with release ordering.
    #[inline(always)]
    fn len_add_release(&self, n: usize) {
        self.header().len_add_release(n);
    }
    /// Sets the length with release ordering.
    #[inline(always)]
    fn len_set_release(&self, n: usize) {
        self.header().len_set_release(n);
    }
}
/// Header stored at the front of every `RcString` allocation; the string
/// bytes follow immediately after. Multithreaded variant: atomic
/// refcount/len plus a `ThreadCell` push lock gating the `try_push*`
/// shared-reference append path.
#[cfg(feature = "multithreaded")]
#[derive(Debug)]
#[repr(C, align(32))]
pub(crate) struct RcStringHeader {
    /// Number of live `RcString` handles sharing this allocation.
    refcount: AtomicUsize,
    /// Initialized byte length of the string data.
    len: AtomicUsize,
    /// Data bytes available after the header.
    capacity: usize,
    /// Per-thread ownership token serializing `try_push*`.
    push_lock: ThreadCell<()>,
}
/// Single-threaded header variant: plain `Cell`s and no push lock.
#[cfg(not(feature = "multithreaded"))]
#[derive(Debug)]
#[repr(C, align(32))]
pub(crate) struct RcStringHeader {
    /// Number of live `RcString` handles sharing this allocation.
    refcount: Cell<usize>,
    /// Initialized byte length of the string data.
    len: Cell<usize>,
    /// Data bytes available after the header.
    capacity: usize,
}
// Header size/alignment; string data always begins at offset
// RCSTRING_SIZE within the allocation.
const RCSTRING_SIZE: usize = size_of::<RcStringHeader>();
const RCSTRING_ALIGN: usize = align_of::<RcStringHeader>();
impl RcStringHeader {
    /// Length with relaxed ordering — only meaningful when the caller
    /// already excludes concurrent writers (unique ownership or the push
    /// lock).
    #[cfg(feature = "multithreaded")]
    #[inline(always)]
    fn len_relaxed(&self) -> usize {
        self.len.load(Ordering::Relaxed)
    }
    /// Single-threaded counterpart of `len_relaxed`.
    #[doc(hidden)]
    #[cfg(not(feature = "multithreaded"))]
    #[mutants::skip]
    #[inline(always)]
    fn len_relaxed(&self) -> usize {
        self.len.get()
    }
    /// Length with acquire ordering; pairs with the release writes in
    /// `len_add_release`/`len_set_release`, so the string bytes written
    /// before a length update are visible to the loading thread.
    #[cfg(feature = "multithreaded")]
    #[inline(always)]
    fn len_acquire(&self) -> usize {
        self.len.load(Ordering::Acquire)
    }
    /// Single-threaded counterpart of `len_acquire`.
    #[doc(hidden)]
    #[inline(always)]
    #[cfg(not(feature = "multithreaded"))]
    #[mutants::skip]
    fn len_acquire(&self) -> usize {
        self.len.get()
    }
    /// Adds `n` to the length, releasing the preceding byte writes.
    #[inline(always)]
    fn len_add_release(&self, n: usize) {
        #[cfg(feature = "multithreaded")]
        self.len.fetch_add(n, Ordering::Release);
        #[cfg(not(feature = "multithreaded"))]
        self.len.set(self.len.get() + n);
    }
    /// Stores `n` as the length, releasing the preceding byte writes.
    #[inline(always)]
    fn len_set_release(&self, n: usize) {
        #[cfg(feature = "multithreaded")]
        self.len.store(n, Ordering::Release);
        #[cfg(not(feature = "multithreaded"))]
        self.len.set(n);
    }
    /// Bytes still available after the current contents.
    #[inline]
    pub(crate) fn spare_capacity(&self) -> usize {
        self.capacity - self.len_acquire()
    }
    /// Total data bytes this allocation can hold.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        self.capacity
    }
    /// Current number of handles. Relaxed: a momentarily stale value is
    /// acceptable for this observation.
    #[inline]
    #[cfg(feature = "multithreaded")]
    pub(crate) fn strong_count(&self) -> usize {
        self.refcount.load(Ordering::Relaxed)
    }
    /// Single-threaded counterpart of `strong_count`.
    #[inline]
    #[cfg(not(feature = "multithreaded"))]
    #[mutants::skip]
    pub(crate) fn strong_count(&self) -> usize {
        self.refcount.get()
    }
    /// Adds one strong reference. Relaxed suffices for increments (same
    /// reasoning as `std::sync::Arc`): a new handle can only be created
    /// from an existing one, so nothing needs publishing here.
    #[cfg(feature = "multithreaded")]
    pub(crate) fn increment_strong_count(&self) {
        self.refcount.fetch_add(1, Ordering::Relaxed);
    }
    /// Single-threaded counterpart of `increment_strong_count`.
    #[cfg(not(feature = "multithreaded"))]
    #[mutants::skip]
    pub(crate) fn increment_strong_count(&self) {
        self.refcount.set(self.refcount.get() + 1);
    }
    /// Drops one strong reference; returns `true` when this was the last
    /// handle and the caller must deallocate.
    #[cfg(not(feature = "multithreaded"))]
    #[mutants::skip]
    #[must_use = "Would leak when 'true' is ignored"]
    pub(crate) fn decrement_strong_count(&self) -> bool {
        let count = self.refcount.get();
        self.refcount.set(count - 1);
        count == 1
    }
    /// Drops one strong reference; returns `true` when this was the last
    /// handle. The release decrement orders every prior use of the data
    /// before the drop, and the acquire fence taken only by the final
    /// decrementer synchronizes with those releases before the caller
    /// frees the memory — the same pattern `std::sync::Arc::drop` uses.
    #[cfg(feature = "multithreaded")]
    #[must_use = "Would leak when 'true' is ignored"]
    pub(crate) fn decrement_strong_count(&self) -> bool {
        if self.refcount.fetch_sub(1, Ordering::Release) == 1 {
            fence(Ordering::Acquire);
            true
        } else {
            false
        }
    }
}
const KB: usize = 1024;
const MB: usize = KB * 1024;
const GB: usize = MB * 1024;

/// Policy describing how allocation sizes evolve as buffers grow.
struct AllocationStrategy {
    /// Smallest allocation ever proposed; also the rounding granule
    /// used by `align`.
    pub min_allocation: usize,
    /// Sizes below this grow by doubling.
    pub grow_half: usize,
    /// Sizes below this (but at least `grow_half`) grow by 50%; sizes at
    /// or above it grow by a fixed step of half this value.
    pub grow_const: usize,
}

const DEFAULT_ALLOCATION_STRATEGY: AllocationStrategy = AllocationStrategy {
    min_allocation: 32,
    grow_half: 8 * MB,
    grow_const: GB,
};

impl AllocationStrategy {
    /// Proposes the next total allocation size when growing from
    /// `old_size` while needing at least `minimum_needed` bytes.
    /// Returns `None` if any intermediate computation overflows.
    #[inline]
    pub fn grow(&self, old_size: usize, minimum_needed: usize) -> Option<usize> {
        let proposal = if old_size < self.min_allocation {
            // Too small to bother scaling: jump straight to the floor.
            self.min_allocation
        } else if old_size < self.grow_half {
            // Small buffers double.
            old_size.checked_mul(2)?
        } else if old_size < self.grow_const {
            // Medium buffers grow by 50%.
            old_size.checked_add(old_size / 2)?
        } else {
            // Huge buffers grow by a constant step.
            old_size.checked_add(self.grow_const / 2)?
        };
        // Whatever the policy suggests, never return less than the
        // caller's hard minimum.
        Some(self.align(proposal.max(minimum_needed)))
    }

    /// Rounds `size` up by saturating the low bits of the granule, i.e.
    /// to the next value of the form `k * min_allocation - 1` (0 stays
    /// 0). Note: deliberately a bit-fill, NOT multiple-of rounding.
    #[inline]
    pub fn align(&self, size: usize) -> usize {
        if size == 0 {
            0
        } else {
            size | (self.min_allocation - 1)
        }
    }
}