use core::mem::{align_of, size_of};
use std::alloc::{self, Layout};
use std::ptr::{addr_of_mut, NonNull};
#[cfg(not(feature = "multithreaded"))]
use std::cell::Cell;
#[cfg(feature = "multithreaded")]
use std::sync::atomic::{fence, AtomicUsize, Ordering};
#[cfg(feature = "multithreaded")]
use parking_lot::Mutex;
use crate::CapacityError;
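/// Heap header for a reference-counted string buffer.
///
/// The UTF-8 payload is stored inline, immediately after this header;
/// `data` is a zero-sized marker for where it begins:
///
/// ```text
/// [ refcount | len | capacity | push_lock ][ payload: `capacity` bytes ]
///  `--------- RCSTRING_SIZE bytes --------'
/// ```
///
/// `repr(C)` pins the field order and `align(32)` rounds the header up to
/// a fixed 32 bytes, so the payload offset is the same under both cfgs.
/// `push_lock` serializes concurrent appends in `try_push_locked` and
/// `try_push_str_locked`.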
#[cfg(feature = "multithreaded")]
#[derive(Debug)]
#[repr(C, align(32))]
pub(crate) struct RcString {
refcount: AtomicUsize,
len: AtomicUsize,
capacity: usize,
push_lock: Mutex<()>,
data: [u8; 0],
}
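/// Single-threaded variant of the header: plain `Cell` counters and no
/// push lock, but the same `repr(C, align(32))` layout, so both builds
/// agree on `RCSTRING_SIZE` and the payload offset.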
#[cfg(not(feature = "multithreaded"))]
#[derive(Debug)]
#[repr(C, align(32))]
pub(crate) struct RcString {
refcount: Cell<usize>,
len: Cell<usize>,
capacity: usize,
data: [u8; 0],
}
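// Header size and alignment. `align(32)` makes the size exactly 32 bytes
// under both cfgs; the payload always starts `RCSTRING_SIZE` bytes in.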
const RCSTRING_SIZE: usize = size_of::<RcString>();
const RCSTRING_ALIGN: usize = align_of::<RcString>();
impl RcString {
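/// Allocates a buffer with room for `capacity` payload bytes after the
/// header and initializes the header (refcount 1, length 0). Panics on
/// size overflow and aborts on allocation failure.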
pub(crate) fn allocate(capacity: usize) -> NonNull<Self> {
unsafe {
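// SAFETY: RCSTRING_ALIGN is a nonzero power of two and the checked_add
// rules out usize overflow. Sizes above isize::MAX would still violate
// from_size_align_unchecked's contract; such requests are assumed to be
// unreachable in practice.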
let layout = Layout::from_size_align_unchecked(
capacity
.checked_add(RCSTRING_SIZE)
.expect("Capacity overflow"),
RCSTRING_ALIGN,
);
let rcstring = alloc::alloc(layout) as *mut Self;
if rcstring.is_null() {
alloc::handle_alloc_error(layout);
}
#[cfg(feature = "multithreaded")]
addr_of_mut!((*rcstring).refcount).write(AtomicUsize::new(1usize));
#[cfg(not(feature = "multithreaded"))]
addr_of_mut!((*rcstring).refcount).write(Cell::new(1usize));
#[cfg(feature = "multithreaded")]
addr_of_mut!((*rcstring).len).write(AtomicUsize::new(0));
#[cfg(not(feature = "multithreaded"))]
addr_of_mut!((*rcstring).len).write(Cell::new(0));
addr_of_mut!((*rcstring).capacity).write(layout.size() - RCSTRING_SIZE);
#[cfg(feature = "multithreaded")]
addr_of_mut!((*rcstring).push_lock).write(Mutex::new(()));
NonNull::new_unchecked(rcstring)
}
}
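/// Reallocates so that at least `reserve` bytes fit after the current
/// length, with the new size chosen by DEFAULT_ALLOCATION_STRATEGY.
/// Must only be called while the buffer is uniquely owned (see the
/// debug_assert), since the allocation may move.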
pub(crate) fn grow(this: NonNull<Self>, reserve: usize) -> NonNull<Self> {
unsafe {
debug_assert_eq!(this.as_ref().strong_count(), 1);
let layout = Layout::from_size_align_unchecked(
this.as_ref().capacity + RCSTRING_SIZE,
RCSTRING_ALIGN,
);
let min_new_size = (this.as_ref().len_relaxed() + RCSTRING_SIZE)
.checked_add(reserve)
.expect("Capacity overflow");
let new_size = DEFAULT_ALLOCATION_STRATEGY
.grow(layout.size(), min_new_size)
.expect("Capacity overflow");
let rcstring = alloc::realloc(this.as_ptr() as *mut u8, layout, new_size) as *mut Self;
if rcstring.is_null() {
// Report the layout that actually failed to allocate, as `shrink` does.
let new_layout = Layout::from_size_align_unchecked(new_size, RCSTRING_ALIGN);
alloc::handle_alloc_error(new_layout);
}
addr_of_mut!((*rcstring).capacity).write(new_size - RCSTRING_SIZE);
NonNull::new_unchecked(rcstring)
}
}
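/// Shrinks the allocation toward `new_capacity` bytes of payload, never
/// below the current length, and returns the original pointer unchanged
/// when no smaller size would result. Requires unique ownership, like
/// `grow`.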
pub(crate) fn shrink(this: NonNull<Self>, new_capacity: usize) -> NonNull<Self> {
unsafe {
debug_assert_eq!(this.as_ref().strong_count(), 1);
let layout = Layout::from_size_align_unchecked(
this.as_ref().capacity + RCSTRING_SIZE,
RCSTRING_ALIGN,
);
let new_size = DEFAULT_ALLOCATION_STRATEGY.align(
(this.as_ref().len_relaxed() + RCSTRING_SIZE).max(new_capacity + RCSTRING_SIZE),
);
if new_size < layout.size() {
let rcstring =
alloc::realloc(this.as_ptr() as *mut u8, layout, new_size) as *mut Self;
if rcstring.is_null() {
let layout = Layout::from_size_align_unchecked(new_size, RCSTRING_ALIGN);
alloc::handle_alloc_error(layout);
}
addr_of_mut!((*rcstring).capacity).write(new_size - RCSTRING_SIZE);
NonNull::new_unchecked(rcstring)
} else {
this
}
}
}
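/// Frees the allocation.
///
/// # Safety
/// `this` must originate from `allocate`/`grow`/`shrink`, its refcount
/// must have reached zero, and it must not be used afterwards.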
#[mutants::skip]
pub(crate) unsafe fn dealloc(this: NonNull<Self>) {
debug_assert_eq!(this.as_ref().strong_count(), 0);
let layout = Layout::from_size_align_unchecked(
this.as_ref().capacity + RCSTRING_SIZE,
RCSTRING_ALIGN,
);
alloc::dealloc(this.as_ptr() as *mut u8, layout);
}
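/// Copies `source` into a fresh allocation with `reserve` spare bytes.
///
/// # Safety
/// `source` must be valid UTF-8, because the contents are later exposed
/// via `as_str` without validation.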
unsafe fn from_bytes_unchecked(source: &[u8], reserve: usize) -> NonNull<Self> {
let mut rcstring = Self::allocate(
source
.len()
.checked_add(reserve)
.expect("Capacity overflow"),
);
std::ptr::copy_nonoverlapping(
source.as_ptr(),
rcstring.as_mut().data.as_mut_ptr(),
source.len(),
);
rcstring.as_mut().len_set_release(source.len());
rcstring
}
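/// Allocates a new buffer containing `source` with `reserve` extra bytes
/// of capacity; the `&str` argument guarantees the UTF-8 invariant.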
#[inline]
pub(crate) fn from_str(source: &str, reserve: usize) -> NonNull<Self> {
unsafe { Self::from_bytes_unchecked(source.as_bytes(), reserve) }
}
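/// Appends `ch`, panicking if the spare capacity cannot hold its UTF-8
/// encoding; callers are expected to reserve space first.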
#[inline]
pub(crate) fn push(&mut self, ch: char) {
let mut buf = [0u8; 4];
let bytes = ch.encode_utf8(&mut buf).as_bytes();
assert!(self.spare_capacity() >= bytes.len());
unsafe {
std::ptr::copy_nonoverlapping(
bytes.as_ptr(),
self.data.as_mut_ptr().add(self.len_relaxed()),
bytes.len(),
);
}
self.len_add_release(bytes.len());
}
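/// Appends `ch` through a shared reference, holding `push_lock` on the
/// multithreaded build so concurrent pushes cannot interleave. Fails
/// with `CapacityError` instead of reallocating.
///
/// # Safety
/// The caller must ensure no mutable access (e.g. `push`, `as_mut_str`,
/// `grow`) to this buffer is concurrent with the call.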
pub(crate) unsafe fn try_push_locked(&self, ch: char) -> Result<(), CapacityError> {
let mut buf = [0u8; 4];
let bytes = ch.encode_utf8(&mut buf).as_bytes();
#[cfg(feature = "multithreaded")]
let _lock = self.push_lock.lock();
if self.spare_capacity() >= bytes.len() {
std::ptr::copy_nonoverlapping(
bytes.as_ptr(),
(self.data.as_ptr() as *mut u8).add(self.len_relaxed()),
bytes.len(),
);
self.len_add_release(bytes.len());
Ok(())
} else {
Err(CapacityError)
}
}
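/// Appends `s`, panicking if the spare capacity is insufficient.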
#[inline]
pub(crate) fn push_str(&mut self, s: &str) {
assert!(self.spare_capacity() >= s.len());
unsafe {
std::ptr::copy_nonoverlapping(
s.as_ptr(),
self.data.as_mut_ptr().add(self.len_relaxed()),
s.len(),
);
}
self.len_add_release(s.len());
}
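/// `push_str` through a shared reference; see `try_push_locked` for the
/// locking behavior and safety contract.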
pub(crate) unsafe fn try_push_str_locked(&self, s: &str) -> Result<(), CapacityError> {
#[cfg(feature = "multithreaded")]
let _lock = self.push_lock.lock();
if self.spare_capacity() >= s.len() {
std::ptr::copy_nonoverlapping(
s.as_ptr(),
(self.data.as_ptr() as *mut u8).add(self.len_relaxed()),
s.len(),
);
self.len_add_release(s.len());
Ok(())
} else {
Err(CapacityError)
}
}
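// Length accessors. Pushes publish new bytes with a Release write of
// `len`, and readers that dereference the payload load it with Acquire,
// so the copied bytes are visible before the new length is. Relaxed
// loads are reserved for paths already ordered by the push lock or by
// unique ownership. The single-threaded variants are plain Cell access.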
#[cfg(feature = "multithreaded")]
#[inline(always)]
fn len_relaxed(&self) -> usize {
self.len.load(Ordering::Relaxed)
}
#[cfg(not(feature = "multithreaded"))]
#[inline(always)]
fn len_relaxed(&self) -> usize {
self.len.get()
}
#[cfg(feature = "multithreaded")]
#[inline(always)]
fn len_acquire(&self) -> usize {
self.len.load(Ordering::Acquire)
}
#[cfg(not(feature = "multithreaded"))]
#[inline(always)]
fn len_acquire(&self) -> usize {
self.len.get()
}
#[inline(always)]
fn len_add_release(&self, n: usize) {
#[cfg(feature = "multithreaded")]
self.len.fetch_add(n, Ordering::Release);
#[cfg(not(feature = "multithreaded"))]
self.len.set(self.len.get() + n);
}
#[inline(always)]
fn len_set_release(&self, n: usize) {
#[cfg(feature = "multithreaded")]
self.len.store(n, Ordering::Release);
#[cfg(not(feature = "multithreaded"))]
self.len.set(n);
}
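/// Views the initialized payload as `&str`; the Acquire load of `len`
/// pairs with the Release stores in the push methods.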
#[inline]
pub(crate) fn as_str(&self) -> &str {
unsafe {
std::str::from_utf8_unchecked(std::slice::from_raw_parts(
self.data.as_ptr(),
self.len_acquire(),
))
}
}
#[inline]
pub(crate) fn as_mut_str(&mut self) -> &mut str {
unsafe {
std::str::from_utf8_unchecked_mut(std::slice::from_raw_parts_mut(
self.data.as_mut_ptr(),
self.len_acquire(),
))
}
}
#[inline]
pub(crate) fn spare_capacity(&self) -> usize {
self.capacity - self.len_acquire()
}
#[inline]
pub(crate) fn capacity(&self) -> usize {
self.capacity
}
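// Reference counting follows the std::sync::Arc protocol: Relaxed
// increments (the existing reference keeps the count above zero), and a
// Release decrement paired with an Acquire fence when the last reference
// drops, so all writes are visible before deallocation.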
#[inline]
#[cfg(feature = "multithreaded")]
#[mutants::skip]
pub(crate) fn strong_count(&self) -> usize {
self.refcount.load(Ordering::Relaxed)
}
#[inline]
#[cfg(not(feature = "multithreaded"))]
#[mutants::skip]
pub(crate) fn strong_count(&self) -> usize {
self.refcount.get()
}
#[cfg(feature = "multithreaded")]
#[mutants::skip]
pub(crate) fn increment_strong_count(&self) {
self.refcount.fetch_add(1, Ordering::Relaxed);
}
#[cfg(not(feature = "multithreaded"))]
#[mutants::skip]
pub(crate) fn increment_strong_count(&self) {
self.refcount.set(self.refcount.get() + 1);
}
#[cfg(not(feature = "multithreaded"))]
#[mutants::skip]
pub(crate) fn decrement_strong_count(&self) -> bool {
let count = self.refcount.get();
self.refcount.set(count - 1);
count == 1
}
#[cfg(feature = "multithreaded")]
#[mutants::skip]
pub(crate) fn decrement_strong_count(&self) -> bool {
if self.refcount.fetch_sub(1, Ordering::Release) == 1 {
fence(Ordering::Acquire);
true
} else {
false
}
}
}
#[test]
fn refcounting() {
let rcptr = RcString::from_str("foobar", 0);
unsafe {
let rcstr = rcptr.as_ref();
assert_eq!(rcstr.strong_count(), 1);
rcstr.increment_strong_count();
assert_eq!(rcstr.strong_count(), 2);
assert!(!rcstr.decrement_strong_count());
assert_eq!(rcstr.strong_count(), 1);
assert!(rcstr.decrement_strong_count());
RcString::dealloc(rcptr);
}
}
const KB: usize = 1024;
const MB: usize = KB * 1024;
const GB: usize = MB * 1024;
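/// Sizing policy for the backing allocation (header plus payload).
///
/// `grow` doubles sizes below `grow_half`, grows by 50% up to
/// `grow_const`, and adds a flat `grow_const / 2` beyond that;
/// `min_allocation` (assumed to be a power of two, see `align`) sets the
/// smallest request and the rounding granularity.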
struct AllocationStrategy {
pub min_allocation: usize,
pub grow_half: usize,
pub grow_const: usize,
}
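/// Default policy: at least 32 bytes, doubling up to 8 MB, 50% growth up
/// to 1 GB, then 512 MB at a time.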
const DEFAULT_ALLOCATION_STRATEGY: AllocationStrategy = AllocationStrategy {
min_allocation: 32,
grow_half: 8 * MB,
grow_const: GB,
};
impl AllocationStrategy {
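/// Computes the next allocation size for a buffer currently `old_size`
/// bytes large, never returning less than `minimum_needed`; `None`
/// signals arithmetic overflow.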
#[inline]
pub fn grow(&self, old_size: usize, minimum_needed: usize) -> Option<usize> {
Some(
self.align(
if old_size < self.min_allocation {
self.min_allocation
} else if old_size < self.grow_half {
old_size.checked_mul(2)?
} else if old_size < self.grow_const {
old_size.checked_add(old_size / 2)?
} else {
old_size.checked_add(self.grow_const / 2)?
}
.max(minimum_needed),
),
)
}
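/// Rounds `size` up by setting every bit below `min_allocation`, i.e. to
/// the next value one byte short of a multiple of `min_allocation`
/// (63, 95, 127, ... for the default of 32). Note this is a quantization
/// step, not a conventional round-up-to-multiple, and it relies on
/// `min_allocation` being a power of two.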
#[inline]
pub fn align(&self, size: usize) -> usize {
if size > 0 {
size | (self.min_allocation - 1)
} else {
0
}
}
}