#![allow(
// We follow libstd's lead and prefer to define both.
clippy::partialeq_ne_impl,
// This lint fires on the u8 <-> ThinInner pointer casts used throughout; alignment is guaranteed by the allocation layout.
clippy::cast_ptr_alignment,
// For macros
clippy::redundant_slicing,
)]
use core::alloc::Layout;
use core::mem::{align_of, size_of, MaybeUninit};
use core::ptr::NonNull;
#[cfg(not(all(loom, test)))]
pub(crate) use core::sync::atomic::{AtomicUsize, Ordering};
#[cfg(all(loom, test))]
pub(crate) use loom::sync::atomic::{AtomicUsize, Ordering};
#[cfg(feature = "substr")]
use crate::Substr;
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::string::String;
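/// A thread-safe, reference-counted string type.
///
/// Representation note: an `ArcStr` is a single non-null pointer to one
/// allocation that holds a small header (`ThinInner`) followed directly by the
/// string bytes, so the handle itself is only one machine word wide.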
#[repr(transparent)]
pub struct ArcStr(NonNull<ThinInner>);
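// SAFETY: the string data is never mutated after construction, and the
// reference count is updated only through atomic operations, so `ArcStr`
// can be shared and sent between threads.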
unsafe impl Sync for ArcStr {}
unsafe impl Send for ArcStr {}
impl ArcStr {
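/// Returns the empty `ArcStr`.
///
/// The empty string is backed by a static allocation, so no heap allocation is
/// performed. Illustrative doc-test (the `arcstr::` crate path is an
/// assumption about how this module is exposed):
///
/// ```
/// # use arcstr::ArcStr;
/// let s = ArcStr::new();
/// assert!(s.is_empty());
/// assert!(ArcStr::is_static(&s));
/// ```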
#[inline]
pub const fn new() -> Self {
EMPTY
}
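/// Fallible counterpart of `ArcStr::from(&str)`: returns `None` if the backing
/// allocation cannot be created (allocator failure, or a size that would
/// overflow the layout computation) instead of aborting or panicking.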
#[inline]
pub fn try_alloc(copy_from: &str) -> Option<Self> {
if let Ok(inner) = ThinInner::try_allocate(copy_from, false) {
Some(Self(inner))
} else {
None
}
}
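/// # Safety
///
/// The `initializer` callback must fully initialize all `n` bytes of the slice
/// it is given, and the initialized bytes must be valid UTF-8, because `ArcStr`
/// hands out `&str` views of the buffer without further checks. Returns `None`
/// on allocation failure or capacity overflow.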
#[inline]
pub unsafe fn try_init_with_unchecked<F>(n: usize, initializer: F) -> Option<Self>
where
F: FnOnce(&mut [MaybeUninit<u8>]),
{
if let Ok(inner) = ThinInner::try_allocate_with(n, false, AllocInit::Uninit, initializer) {
Some(Self(inner))
} else {
None
}
}
#[inline]
pub unsafe fn init_with_unchecked<F>(n: usize, initializer: F) -> Self
where
F: FnOnce(&mut [MaybeUninit<u8>]),
{
match ThinInner::try_allocate_with(n, false, AllocInit::Uninit, initializer) {
Ok(inner) => Self(inner),
Err(None) => panic!("capacity overflow"),
Err(Some(layout)) => alloc::alloc::handle_alloc_error(layout),
}
}
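/// Safe buffer-initialization constructor: the callback receives an `n`-byte,
/// zero-initialized `&mut [u8]`, and the result is validated as UTF-8
/// afterwards (the `Utf8Error` is returned if validation fails).
///
/// Illustrative doc-test (the `arcstr::` crate path is an assumption):
///
/// ```
/// # use arcstr::ArcStr;
/// let s = ArcStr::init_with(5, |buf| buf.copy_from_slice(b"hello")).unwrap();
/// assert_eq!(s, "hello");
/// ```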
#[inline]
pub fn init_with<F>(n: usize, initializer: F) -> Result<Self, core::str::Utf8Error>
where
F: FnOnce(&mut [u8]),
{
let mut failed = None::<core::str::Utf8Error>;
let wrapper = |zeroed_slice: &mut [MaybeUninit<u8>]| {
debug_assert_eq!(n, zeroed_slice.len());
let slice = unsafe {
core::slice::from_raw_parts_mut(zeroed_slice.as_mut_ptr().cast::<u8>(), n)
};
initializer(slice);
if let Err(e) = core::str::from_utf8(slice) {
failed = Some(e);
}
};
match unsafe { ThinInner::try_allocate_with(n, false, AllocInit::Zero, wrapper) } {
Ok(inner) => {
let this = Self(inner);
if let Some(e) = failed {
Err(e)
} else {
Ok(this)
}
}
Err(None) => panic!("capacity overflow"),
Err(Some(layout)) => alloc::alloc::handle_alloc_error(layout),
}
}
#[inline]
pub fn as_str(&self) -> &str {
self
}
#[inline]
pub fn len(&self) -> usize {
self.get_inner_len_flag().uint_part()
}
#[inline]
fn get_inner_len_flag(&self) -> PackedFlagUint {
unsafe { ThinInner::get_len_flag(self.0.as_ptr()) }
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline]
#[allow(clippy::inherent_to_string_shadow_display)]
pub fn to_string(&self) -> String {
#[cfg(not(feature = "std"))]
use alloc::borrow::ToOwned;
self.as_str().to_owned()
}
#[inline]
pub fn as_bytes(&self) -> &[u8] {
let len = self.len();
let p = self.0.as_ptr();
unsafe {
let data = p.cast::<u8>().add(OFFSET_DATA);
debug_assert_eq!(core::ptr::addr_of!((*p).data).cast::<u8>(), data);
core::slice::from_raw_parts(data, len)
}
}
#[inline]
pub fn into_raw(this: Self) -> NonNull<()> {
let p = this.0;
core::mem::forget(this);
p.cast()
}
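/// # Safety
///
/// `ptr` must have come from a previous call to `ArcStr::into_raw`, and each
/// such pointer must be passed to `from_raw` at most once: this call takes
/// back ownership of the reference that `into_raw` leaked.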
#[inline]
pub unsafe fn from_raw(ptr: NonNull<()>) -> Self {
Self(ptr.cast())
}
#[inline]
pub fn ptr_eq(lhs: &Self, rhs: &Self) -> bool {
core::ptr::eq(lhs.0.as_ptr(), rhs.0.as_ptr())
}
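/// Returns the current reference count, or `None` for strings that are static
/// (created from a literal, leaked, or saturated), since those are never
/// deallocated and their count is not meaningful.
///
/// Illustrative doc-test (the `arcstr::` crate path is an assumption); note
/// that the count is a snapshot and may be stale in concurrent code:
///
/// ```
/// # use arcstr::ArcStr;
/// let a = ArcStr::from("example");
/// assert_eq!(ArcStr::strong_count(&a), Some(1));
/// let b = a.clone();
/// assert_eq!(ArcStr::strong_count(&a), Some(2));
/// drop(b);
/// assert_eq!(ArcStr::strong_count(&a), Some(1));
/// ```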
#[inline]
pub fn strong_count(this: &Self) -> Option<usize> {
let cf = Self::load_count_flag(this, Ordering::Acquire)?;
if cf.flag_part() {
None
} else {
Some(cf.uint_part())
}
}
#[inline]
unsafe fn load_count_flag_raw(this: &Self, ord_if_needed: Ordering) -> PackedFlagUint {
PackedFlagUint::from_encoded((*this.0.as_ptr()).count_flag.load(ord_if_needed))
}
#[inline]
fn load_count_flag(this: &Self, ord_if_needed: Ordering) -> Option<PackedFlagUint> {
if Self::has_static_lenflag(this) {
None
} else {
let count_and_flag = PackedFlagUint::from_encoded(unsafe {
(*this.0.as_ptr()).count_flag.load(ord_if_needed)
});
Some(count_and_flag)
}
}
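/// Marks this string as static and returns a `&'static str` view of it. The
/// backing allocation is never freed afterwards (hence "leak"), and later
/// calls to `ArcStr::strong_count` on any handle to it return `None`.
///
/// Illustrative doc-test (the `arcstr::` crate path is an assumption):
///
/// ```
/// # use arcstr::ArcStr;
/// let s = ArcStr::from("leaked");
/// let forever: &'static str = s.leak();
/// assert_eq!(forever, "leaked");
/// assert!(ArcStr::is_static(&s));
/// ```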
#[inline]
pub fn leak(&self) -> &'static str {
if Self::has_static_lenflag(self) {
return unsafe { Self::to_static_unchecked(self) };
}
let is_static_count = unsafe {
Self::load_count_flag_raw(self, Ordering::Acquire)
};
if is_static_count.flag_part() {
return unsafe { Self::to_static_unchecked(self) };
}
unsafe { Self::become_static(self, is_static_count.uint_part() == 1) };
debug_assert!(Self::is_static(self));
unsafe { Self::to_static_unchecked(self) }
}
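// Promotes an allocation to "static": once the static bit is set, clone and
// drop stop touching the reference count and the allocation is never freed.
// With a unique owner both words can be rewritten non-atomically; otherwise
// only the atomic count word is updated, via `fetch_or`.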
unsafe fn become_static(this: &Self, is_unique: bool) {
if is_unique {
core::ptr::addr_of_mut!((*this.0.as_ptr()).count_flag).write(AtomicUsize::new(
PackedFlagUint::new_raw(true, 1).encoded_value(),
));
let lenp = core::ptr::addr_of_mut!((*this.0.as_ptr()).len_flag);
debug_assert!(!lenp.read().flag_part());
lenp.write(lenp.read().with_flag(true));
} else {
let flag_bit = PackedFlagUint::new_raw(true, 0).encoded_value();
let atomic_count_flag = &*core::ptr::addr_of!((*this.0.as_ptr()).count_flag);
atomic_count_flag.fetch_or(flag_bit, Ordering::Release);
}
}
#[inline]
unsafe fn to_static_unchecked(this: &Self) -> &'static str {
&*Self::str_ptr(this)
}
#[inline]
fn bytes_ptr(this: &Self) -> *const [u8] {
let len = this.get_inner_len_flag().uint_part();
unsafe {
let p: *const ThinInner = this.0.as_ptr();
let data = p.cast::<u8>().add(OFFSET_DATA);
debug_assert_eq!(core::ptr::addr_of!((*p).data).cast::<u8>(), data,);
core::ptr::slice_from_raw_parts(data, len)
}
}
#[inline]
fn str_ptr(this: &Self) -> *const str {
Self::bytes_ptr(this) as *const str
}
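// A string is treated as static if either flag is set. The length-word flag is
// written for literals and for uniquely-owned strings promoted by `leak`; the
// count-word flag additionally covers shared strings promoted by `leak` and
// strings whose reference count saturated in `clone`.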
#[inline]
pub fn is_static(this: &Self) -> bool {
this.get_inner_len_flag().flag_part()
|| unsafe { Self::load_count_flag_raw(this, Ordering::Relaxed).flag_part() }
}
#[inline]
fn has_static_lenflag(this: &Self) -> bool {
this.get_inner_len_flag().flag_part()
}
#[inline]
pub fn as_static(this: &Self) -> Option<&'static str> {
if Self::is_static(this) {
Some(unsafe { &*(this.as_str() as *const str) })
} else {
None
}
}
#[inline]
#[doc(hidden)]
pub const unsafe fn _private_new_from_static_data<B>(
ptr: &'static StaticArcStrInner<B>,
) -> Self {
Self(NonNull::new_unchecked(ptr as *const _ as *mut ThinInner))
}
#[cfg(feature = "substr")]
#[inline]
pub fn substr(&self, range: impl core::ops::RangeBounds<usize>) -> Substr {
Substr::from_parts(self, range)
}
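/// Builds a `Substr` from a `&str` that must point into this `ArcStr`'s
/// buffer. The range is recovered via pointer arithmetic; if `substr` is not
/// contained in `self`, this panics (see `out_of_range`). Use
/// `try_substr_from` for the non-panicking variant.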
#[cfg(feature = "substr")]
pub fn substr_from(&self, substr: &str) -> Substr {
if substr.is_empty() {
return Substr::new();
}
let self_start = self.as_ptr() as usize;
let self_end = self_start + self.len();
let substr_start = substr.as_ptr() as usize;
let substr_end = substr_start + substr.len();
if substr_start < self_start || substr_end > self_end {
out_of_range(self, &substr);
}
let index = substr_start - self_start;
let end = index + substr.len();
self.substr(index..end)
}
#[cfg(feature = "substr")]
pub fn try_substr_from(&self, substr: &str) -> Option<Substr> {
if substr.is_empty() {
return Some(Substr::new());
}
let self_start = self.as_ptr() as usize;
let self_end = self_start + self.len();
let substr_start = substr.as_ptr() as usize;
let substr_end = substr_start + substr.len();
if substr_start < self_start || substr_end > self_end {
return None;
}
let index = substr_start - self_start;
let end = index + substr.len();
debug_assert!(self.get(index..end).is_some());
Some(self.substr(index..end))
}
#[cfg(feature = "substr")]
pub fn try_substr_using(&self, f: impl FnOnce(&str) -> &str) -> Option<Substr> {
self.try_substr_from(f(self.as_str()))
}
#[cfg(feature = "substr")]
pub fn substr_using(&self, f: impl FnOnce(&str) -> &str) -> Substr {
self.substr_from(f(self.as_str()))
}
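/// Allocates a new string containing `source` repeated `n` times, returning
/// `None` if `source.len() * n` overflows. The bytes are copied directly into
/// the new buffer, so no intermediate `String` is built.
///
/// Illustrative doc-test (the `arcstr::` crate path is an assumption):
///
/// ```
/// # use arcstr::ArcStr;
/// assert_eq!(ArcStr::try_repeat("ab", 3).unwrap(), "ababab");
/// assert!(ArcStr::try_repeat("ab", usize::MAX).is_none());
/// ```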
pub fn try_repeat(source: &str, n: usize) -> Option<Self> {
if source.is_empty() || n == 0 {
return Some(Self::new());
}
let capacity = source.len().checked_mul(n)?;
let inner =
ThinInner::try_allocate_maybe_uninit(capacity, false, AllocInit::Uninit).ok()?;
unsafe {
let mut data_ptr = ThinInner::data_ptr(inner);
let data_end = data_ptr.add(capacity);
while data_ptr < data_end {
core::ptr::copy_nonoverlapping(source.as_ptr(), data_ptr, source.len());
data_ptr = data_ptr.add(source.len());
}
}
Some(Self(inner))
}
pub fn repeat(source: &str, n: usize) -> Self {
Self::try_repeat(source, n).expect("capacity overflow")
}
}
#[cold]
#[inline(never)]
#[cfg(feature = "substr")]
fn out_of_range(arc: &ArcStr, substr: &&str) -> ! {
let arc_start = arc.as_ptr();
let arc_end = arc_start.wrapping_add(arc.len());
let substr_start = substr.as_ptr();
let substr_end = substr_start.wrapping_add(substr.len());
panic!(
"ArcStr over ({:p}..{:p}) does not contain substr over ({:p}..{:p})",
arc_start, arc_end, substr_start, substr_end,
);
}
impl Clone for ArcStr {
#[inline]
fn clone(&self) -> Self {
if !Self::is_static(self) {
let n: PackedFlagUint = PackedFlagUint::from_encoded(unsafe {
let step = PackedFlagUint::FALSE_ONE.encoded_value();
(*self.0.as_ptr())
.count_flag
.fetch_add(step, Ordering::Relaxed)
});
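// If the previous count exceeded RC_MAX (roughly a quarter of the usize
// range), saturate by setting the static flag on the count word: the string
// is then intentionally leaked rather than letting the counter overflow and
// cause a premature free.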
if n.uint_part() > RC_MAX && !n.flag_part() {
let val = PackedFlagUint::new_raw(true, 0).encoded_value();
unsafe {
(*self.0.as_ptr())
.count_flag
.fetch_or(val, Ordering::Release)
};
}
}
Self(self.0)
}
}
const RC_MAX: usize = PackedFlagUint::UINT_PART_MAX / 2;
impl Drop for ArcStr {
#[inline]
fn drop(&mut self) {
if Self::is_static(self) {
return;
}
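// Standard Arc-style teardown: decrement with Release so earlier uses of the
// string happen-before destruction, and perform an Acquire load before freeing
// so the thread that drops the last reference observes all other decrements.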
unsafe {
let this = self.0.as_ptr();
let enc = PackedFlagUint::from_encoded(
(*this)
.count_flag
.fetch_sub(PackedFlagUint::FALSE_ONE.encoded_value(), Ordering::Release),
);
if enc == PackedFlagUint::FALSE_ONE {
let _ = (*this).count_flag.load(Ordering::Acquire);
ThinInner::destroy_cold(this)
}
}
}
}
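// Heap layout behind every dynamically allocated `ArcStr`: a packed
// length+flag word, an atomic count+flag word, and then the string bytes
// stored inline. `data` is a zero-sized marker for where the bytes begin; the
// real payload is allocated past the end of the struct.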
#[repr(C, align(8))]
struct ThinInner {
len_flag: PackedFlagUint,
count_flag: AtomicUsize,
data: [u8; 0],
}
const OFFSET_LENFLAGS: usize = 0;
const OFFSET_COUNTFLAGS: usize = size_of::<PackedFlagUint>();
const OFFSET_DATA: usize = OFFSET_COUNTFLAGS + size_of::<AtomicUsize>();
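// Layout-compatible twin of `ThinInner` with an inline, sized buffer, so a
// fully initialized instance can live in a `static` and be wrapped by
// `_private_new_from_static_data` (as the `literal!` macro does) without any
// allocation. `STATIC_COUNT_VALUE` below is the count word such statics use:
// flag set, count 1.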
#[repr(C, align(8))]
#[doc(hidden)]
pub struct StaticArcStrInner<Buf> {
pub len_flag: usize,
pub count_flag: usize,
pub data: Buf,
}
impl<Buf> StaticArcStrInner<Buf> {
#[doc(hidden)]
pub const STATIC_COUNT_VALUE: usize = PackedFlagUint::new_raw(true, 1).encoded_value();
#[doc(hidden)]
#[inline]
pub const fn encode_len(v: usize) -> Option<usize> {
match PackedFlagUint::new(true, v) {
Some(v) => Some(v.encoded_value()),
None => None,
}
}
}
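// Compile-time layout checks: the type punning between `ThinInner` and
// `StaticArcStrInner`, and the hand-computed field offsets above, rely on
// these sizes and alignments holding on the target platform.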
const _: [(); size_of::<StaticArcStrInner<[u8; 0]>>()] = [(); 2 * size_of::<usize>()];
const _: [(); align_of::<StaticArcStrInner<[u8; 0]>>()] = [(); 8];
const _: [(); size_of::<StaticArcStrInner<[u8; 2 * size_of::<usize>()]>>()] =
[(); 4 * size_of::<usize>()];
const _: [(); align_of::<StaticArcStrInner<[u8; 2 * size_of::<usize>()]>>()] = [(); 8];
const _: [(); size_of::<ThinInner>()] = [(); 2 * size_of::<usize>()];
const _: [(); align_of::<ThinInner>()] = [(); 8];
const _: [(); align_of::<AtomicUsize>()] = [(); align_of::<usize>()];
const _: [(); align_of::<AtomicUsize>()] = [(); size_of::<usize>()];
const _: [(); size_of::<AtomicUsize>()] = [(); size_of::<usize>()];
const _: [(); align_of::<PackedFlagUint>()] = [(); align_of::<usize>()];
const _: [(); size_of::<PackedFlagUint>()] = [(); size_of::<usize>()];
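// A `usize` with a boolean packed into bit 0 and the integer value shifted
// into the remaining bits. For example, `new_raw(true, 5)` encodes as
// `0b1011` (5 << 1 | 1), so `uint_part()` is 5 and `flag_part()` is true.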
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
struct PackedFlagUint(usize);
impl PackedFlagUint {
const UINT_PART_MAX: usize = (1 << (usize::BITS - 1)) - 1;
const FALSE_ONE: Self = Self::new_raw(false, 1);
#[inline]
const fn new(flag_part: bool, uint_part: usize) -> Option<Self> {
if uint_part > Self::UINT_PART_MAX {
None
} else {
Some(Self::new_raw(flag_part, uint_part))
}
}
#[inline(always)]
const fn new_raw(flag_part: bool, uint_part: usize) -> Self {
Self(flag_part as usize | (uint_part << 1))
}
#[inline(always)]
const fn uint_part(self) -> usize {
self.0 >> 1
}
#[inline(always)]
const fn flag_part(self) -> bool {
(self.0 & 1) != 0
}
#[inline(always)]
const fn from_encoded(v: usize) -> Self {
Self(v)
}
#[inline(always)]
const fn encoded_value(self) -> usize {
self.0
}
#[inline(always)]
#[must_use]
const fn with_flag(self, v: bool) -> Self {
Self(v as usize | self.0)
}
}
const EMPTY: ArcStr = literal!("");
impl ThinInner {
#[inline]
fn allocate(data: &str, initially_static: bool) -> NonNull<Self> {
match Self::try_allocate(data, initially_static) {
Ok(v) => v,
Err(None) => alloc_overflow(),
Err(Some(layout)) => alloc::alloc::handle_alloc_error(layout),
}
}
#[inline]
fn data_ptr(this: NonNull<Self>) -> *mut u8 {
unsafe { this.as_ptr().cast::<u8>().add(OFFSET_DATA) }
}
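// Allocates room for the header plus `capacity` bytes of string data in a
// single allocation. `Err(None)` means the requested size overflows what a
// `Layout` can describe; `Err(Some(layout))` means the allocator itself failed
// for that layout.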
fn try_allocate_maybe_uninit(
capacity: usize,
initially_static: bool,
init_how: AllocInit,
) -> Result<NonNull<Self>, Option<Layout>> {
const ALIGN: usize = align_of::<ThinInner>();
debug_assert_ne!(capacity, 0);
if capacity >= (isize::MAX as usize) - (OFFSET_DATA + ALIGN) {
return Err(None);
}
debug_assert!(Layout::from_size_align(capacity + OFFSET_DATA, ALIGN).is_ok());
let layout = unsafe { Layout::from_size_align_unchecked(capacity + OFFSET_DATA, ALIGN) };
let ptr = match init_how {
AllocInit::Uninit => unsafe { alloc::alloc::alloc(layout) as *mut ThinInner },
AllocInit::Zero => unsafe { alloc::alloc::alloc_zeroed(layout) as *mut ThinInner },
};
if ptr.is_null() {
return Err(Some(layout));
}
debug_assert!(PackedFlagUint::new(initially_static, capacity).is_some());
let len_flag = PackedFlagUint::new_raw(initially_static, capacity);
debug_assert_eq!(len_flag.uint_part(), capacity);
debug_assert_eq!(len_flag.flag_part(), initially_static);
unsafe {
core::ptr::addr_of_mut!((*ptr).len_flag).write(len_flag);
let initial_count_flag = PackedFlagUint::new_raw(initially_static, 1);
let count_flag: AtomicUsize = AtomicUsize::new(initial_count_flag.encoded_value());
core::ptr::addr_of_mut!((*ptr).count_flag).write(count_flag);
debug_assert_eq!(
(ptr as *const u8).wrapping_add(OFFSET_DATA),
(*ptr).data.as_ptr(),
);
Ok(NonNull::new_unchecked(ptr))
}
}
#[inline]
fn try_allocate(data: &str, initially_static: bool) -> Result<NonNull<Self>, Option<Layout>> {
unsafe {
Self::try_allocate_with(
data.len(),
initially_static,
AllocInit::Uninit,
|uninit_slice| {
debug_assert_eq!(uninit_slice.len(), data.len());
core::ptr::copy_nonoverlapping(
data.as_ptr(),
uninit_slice.as_mut_ptr().cast::<u8>(),
data.len(),
)
},
)
}
}
#[inline]
unsafe fn try_allocate_with(
len: usize,
initially_static: bool,
init_style: AllocInit,
initializer: impl FnOnce(&mut [core::mem::MaybeUninit<u8>]),
) -> Result<NonNull<Self>, Option<Layout>> {
let this = Self::try_allocate_maybe_uninit(len, initially_static, init_style)?;
initializer(core::slice::from_raw_parts_mut(
Self::data_ptr(this).cast::<MaybeUninit<u8>>(),
len,
));
Ok(this)
}
#[inline]
unsafe fn get_len_flag(p: *const ThinInner) -> PackedFlagUint {
debug_assert_eq!(OFFSET_LENFLAGS, 0);
*p.cast()
}
#[cold]
unsafe fn destroy_cold(p: *mut ThinInner) {
let lf = Self::get_len_flag(p);
let (is_static, len) = (lf.flag_part(), lf.uint_part());
debug_assert!(!is_static);
let layout = {
let size = len + OFFSET_DATA;
let align = align_of::<ThinInner>();
Layout::from_size_align_unchecked(size, align)
};
alloc::alloc::dealloc(p as *mut _, layout);
}
}
#[derive(Clone, Copy, PartialEq)]
enum AllocInit {
Uninit,
Zero,
}
#[inline(never)]
#[cold]
fn alloc_overflow() -> ! {
panic!("overflow during Layout computation")
}
impl From<&str> for ArcStr {
#[inline]
fn from(s: &str) -> Self {
if s.is_empty() {
Self::new()
} else {
Self(ThinInner::allocate(s, false))
}
}
}
impl core::ops::Deref for ArcStr {
type Target = str;
#[inline]
fn deref(&self) -> &str {
unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }
}
}
impl Default for ArcStr {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl From<String> for ArcStr {
#[inline]
fn from(v: String) -> Self {
v.as_str().into()
}
}
impl From<&mut str> for ArcStr {
#[inline]
fn from(s: &mut str) -> Self {
let s: &str = s;
Self::from(s)
}
}
impl From<Box<str>> for ArcStr {
#[inline]
fn from(s: Box<str>) -> Self {
Self::from(&s[..])
}
}
impl From<ArcStr> for Box<str> {
#[inline]
fn from(s: ArcStr) -> Self {
s.as_str().into()
}
}
impl From<ArcStr> for alloc::rc::Rc<str> {
#[inline]
fn from(s: ArcStr) -> Self {
s.as_str().into()
}
}
impl From<ArcStr> for alloc::sync::Arc<str> {
#[inline]
fn from(s: ArcStr) -> Self {
s.as_str().into()
}
}
impl From<alloc::rc::Rc<str>> for ArcStr {
#[inline]
fn from(s: alloc::rc::Rc<str>) -> Self {
Self::from(&*s)
}
}
impl From<alloc::sync::Arc<str>> for ArcStr {
#[inline]
fn from(s: alloc::sync::Arc<str>) -> Self {
Self::from(&*s)
}
}
impl<'a> From<Cow<'a, str>> for ArcStr {
#[inline]
fn from(s: Cow<'a, str>) -> Self {
Self::from(&*s)
}
}
impl<'a> From<&'a ArcStr> for Cow<'a, str> {
#[inline]
fn from(s: &'a ArcStr) -> Self {
Cow::Borrowed(s)
}
}
impl<'a> From<ArcStr> for Cow<'a, str> {
#[inline]
fn from(s: ArcStr) -> Self {
if let Some(st) = ArcStr::as_static(&s) {
Cow::Borrowed(st)
} else {
Cow::Owned(s.to_string())
}
}
}
impl From<&String> for ArcStr {
#[inline]
fn from(s: &String) -> Self {
Self::from(s.as_str())
}
}
impl From<&ArcStr> for ArcStr {
#[inline]
fn from(s: &ArcStr) -> Self {
s.clone()
}
}
impl core::fmt::Debug for ArcStr {
#[inline]
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Debug::fmt(self.as_str(), f)
}
}
impl core::fmt::Display for ArcStr {
#[inline]
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Display::fmt(self.as_str(), f)
}
}
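// Equality takes a pointer-identity fast path (clones share one allocation)
// before falling back to comparing the bytes.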
impl PartialEq for ArcStr {
#[inline]
fn eq(&self, o: &Self) -> bool {
ArcStr::ptr_eq(self, o) || PartialEq::eq(self.as_str(), o.as_str())
}
#[inline]
fn ne(&self, o: &Self) -> bool {
!ArcStr::ptr_eq(self, o) && PartialEq::ne(self.as_str(), o.as_str())
}
}
impl Eq for ArcStr {}
macro_rules! impl_peq {
(@one $a:ty, $b:ty) => {
#[allow(clippy::extra_unused_lifetimes)]
impl<'a> PartialEq<$b> for $a {
#[inline]
fn eq(&self, s: &$b) -> bool {
PartialEq::eq(&self[..], &s[..])
}
#[inline]
fn ne(&self, s: &$b) -> bool {
PartialEq::ne(&self[..], &s[..])
}
}
};
($(($a:ty, $b:ty),)+) => {$(
impl_peq!(@one $a, $b);
impl_peq!(@one $b, $a);
)+};
}
impl_peq! {
(ArcStr, str),
(ArcStr, &'a str),
(ArcStr, String),
(ArcStr, Cow<'a, str>),
(ArcStr, Box<str>),
(ArcStr, alloc::sync::Arc<str>),
(ArcStr, alloc::rc::Rc<str>),
(ArcStr, alloc::sync::Arc<String>),
(ArcStr, alloc::rc::Rc<String>),
}
impl PartialOrd for ArcStr {
#[inline]
fn partial_cmp(&self, s: &Self) -> Option<core::cmp::Ordering> {
Some(self.as_str().cmp(s.as_str()))
}
}
impl Ord for ArcStr {
#[inline]
fn cmp(&self, s: &Self) -> core::cmp::Ordering {
self.as_str().cmp(s.as_str())
}
}
impl core::hash::Hash for ArcStr {
#[inline]
fn hash<H: core::hash::Hasher>(&self, h: &mut H) {
self.as_str().hash(h)
}
}
macro_rules! impl_index {
($($IdxT:ty,)*) => {$(
impl core::ops::Index<$IdxT> for ArcStr {
type Output = str;
#[inline]
fn index(&self, i: $IdxT) -> &Self::Output {
&self.as_str()[i]
}
}
)*};
}
impl_index! {
core::ops::RangeFull,
core::ops::Range<usize>,
core::ops::RangeFrom<usize>,
core::ops::RangeTo<usize>,
core::ops::RangeInclusive<usize>,
core::ops::RangeToInclusive<usize>,
}
impl AsRef<str> for ArcStr {
#[inline]
fn as_ref(&self) -> &str {
self
}
}
impl AsRef<[u8]> for ArcStr {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl core::borrow::Borrow<str> for ArcStr {
#[inline]
fn borrow(&self) -> &str {
self
}
}
impl core::str::FromStr for ArcStr {
type Err = core::convert::Infallible;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self::from(s))
}
}
#[cfg(test)]
#[cfg(not(msrv))]
mod test {
use super::*;
fn sasi_layout_check<Buf>() {
assert!(align_of::<StaticArcStrInner<Buf>>() >= 8);
assert_eq!(
core::mem::offset_of!(StaticArcStrInner<Buf>, count_flag),
OFFSET_COUNTFLAGS
);
assert_eq!(
core::mem::offset_of!(StaticArcStrInner<Buf>, len_flag),
OFFSET_LENFLAGS
);
assert_eq!(
core::mem::offset_of!(StaticArcStrInner<Buf>, data),
OFFSET_DATA
);
assert_eq!(
core::mem::offset_of!(ThinInner, count_flag),
core::mem::offset_of!(StaticArcStrInner::<Buf>, count_flag),
);
assert_eq!(
core::mem::offset_of!(ThinInner, len_flag),
core::mem::offset_of!(StaticArcStrInner::<Buf>, len_flag),
);
assert_eq!(
core::mem::offset_of!(ThinInner, data),
core::mem::offset_of!(StaticArcStrInner::<Buf>, data),
);
}
#[test]
fn verify_type_pun_offsets_sasi_big_bufs() {
assert_eq!(
core::mem::offset_of!(ThinInner, count_flag),
OFFSET_COUNTFLAGS,
);
assert_eq!(core::mem::offset_of!(ThinInner, len_flag), OFFSET_LENFLAGS);
assert_eq!(core::mem::offset_of!(ThinInner, data), OFFSET_DATA);
assert!(align_of::<ThinInner>() >= 8);
sasi_layout_check::<[u8; 0]>();
sasi_layout_check::<[u8; 1]>();
sasi_layout_check::<[u8; 2]>();
sasi_layout_check::<[u8; 3]>();
sasi_layout_check::<[u8; 4]>();
sasi_layout_check::<[u8; 5]>();
sasi_layout_check::<[u8; 15]>();
sasi_layout_check::<[u8; 16]>();
sasi_layout_check::<[u8; 64]>();
sasi_layout_check::<[u8; 128]>();
sasi_layout_check::<[u8; 1024]>();
sasi_layout_check::<[u8; 4095]>();
sasi_layout_check::<[u8; 4096]>();
}
}
#[cfg(all(test, loom))]
mod loomtest {
use super::ArcStr;
use loom::sync::Arc;
use loom::thread;
#[test]
fn cloning_threads() {
loom::model(|| {
let a = ArcStr::from("abcdefgh");
let addr = a.as_ptr() as usize;
let a1 = Arc::new(a);
let a2 = a1.clone();
let t1 = thread::spawn(move || {
let b: ArcStr = (*a1).clone();
assert_eq!(b.as_ptr() as usize, addr);
});
let t2 = thread::spawn(move || {
let b: ArcStr = (*a2).clone();
assert_eq!(b.as_ptr() as usize, addr);
});
t1.join().unwrap();
t2.join().unwrap();
});
}
#[test]
fn drop_timing() {
loom::model(|| {
let a1 = alloc::vec![
ArcStr::from("s1"),
ArcStr::from("s2"),
ArcStr::from("s3"),
ArcStr::from("s4"),
];
let a2 = a1.clone();
let t1 = thread::spawn(move || {
let mut a1 = a1;
while let Some(s) = a1.pop() {
assert!(s.starts_with("s"));
}
});
let t2 = thread::spawn(move || {
let mut a2 = a2;
while let Some(s) = a2.pop() {
assert!(s.starts_with("s"));
}
});
t1.join().unwrap();
t2.join().unwrap();
});
}
#[test]
fn leak_drop() {
loom::model(|| {
let a1 = ArcStr::from("foo");
let a2 = a1.clone();
let t1 = thread::spawn(move || {
drop(a1);
});
let t2 = thread::spawn(move || a2.leak());
t1.join().unwrap();
let leaked: &'static str = t2.join().unwrap();
assert_eq!(leaked, "foo");
});
}
}