#![allow(
incomplete_features, // For `specialization`
internal_features, // for `unsized_fn_params`
)]
#![feature(
allocator_api,
array_windows,
forget_unsized,
int_roundings,
ptr_metadata,
// We avoid specializing based on subtyping,
// so barring compiler bugs, our usage should be sound.
specialization,
try_reserve_kind,
type_alias_impl_trait,
unsize,
unsized_fn_params,
)]
#![no_std]
mod helper;
mod inner;
mod marker;
#[doc(hidden)]
pub extern crate alloc;
use alloc::{alloc::handle_alloc_error, collections::TryReserveErrorKind};
use core::{
self, cmp,
fmt::{self, Debug, Formatter},
hash::Hash,
iter::FusedIterator,
marker::Unsize,
mem,
ops::{Index, IndexMut},
};
use emplacable::{Emplacable, EmplacableFn, Emplacer, unsize};
use inner::{Align, Size, UnsizedVecImpl, UnsizedVecProvider};
/// The error type returned by this crate's `try_reserve*` methods.
///
/// Wraps [`TryReserveErrorKind`] from `alloc`, which distinguishes capacity
/// overflow from allocator failure (see `unwrap_try_reserve_result`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TryReserveError {
    // The underlying reason the reservation failed.
    kind: TryReserveErrorKind,
}
/// Converts a raw `usize` alignment into the impl-specific `Align` type,
/// panicking if `align` is not a power of two.
#[track_caller]
#[inline]
fn to_align<T: ?Sized>(align: usize) -> AlignTypeFor<T> {
    // Panic path kept out-of-line and cold so the happy path stays small.
    #[cold]
    #[inline(never)]
    fn invalid_align(align: usize) -> ! {
        panic!("align {align} is not a power of 2")
    }
    match AlignTypeFor::<T>::new(align) {
        Some(valid) => valid,
        None => invalid_align(align),
    }
}
/// Unwraps the result of a fallible reservation, translating failure into
/// the conventional responses: panic on capacity overflow, and
/// `handle_alloc_error` on allocator failure.
#[track_caller]
#[inline]
fn unwrap_try_reserve_result<T>(result: Result<T, TryReserveError>) -> T {
    // Error handling is cold and out-of-line to keep callers lean.
    #[cold]
    #[inline(never)]
    fn handle_err(e: TryReserveError) -> ! {
        match e.kind {
            TryReserveErrorKind::CapacityOverflow => panic!("Capacity overflowed `isize::MAX`"),
            TryReserveErrorKind::AllocError { layout, .. } => handle_alloc_error(layout),
        }
    }
    result.unwrap_or_else(|e| handle_err(e))
}
impl From<::alloc::collections::TryReserveError> for TryReserveError {
    /// Wraps `alloc`'s reservation error in this crate's error type.
    fn from(value: ::alloc::collections::TryReserveError) -> Self {
        Self { kind: value.kind() }
    }
}
// Shorthand for the alignment type chosen by `T`'s `UnsizedVecProvider` impl.
type AlignTypeFor<T> = <<T as UnsizedVecImpl>::Impl as UnsizedVecProvider<T>>::Align;
// Shorthand for the size type chosen by `T`'s `UnsizedVecProvider` impl.
type SizeTypeFor<T> = <<T as UnsizedVecImpl>::Impl as UnsizedVecProvider<T>>::Size;
/// A contiguous growable vector type that can hold unsized values
/// (`T: ?Sized`).
///
/// The storage strategy is chosen per-`T` through the specialization-based
/// `UnsizedVecImpl` trait; this type is a transparent wrapper over it.
#[repr(transparent)]
pub struct UnsizedVec<T>
where
    T: ?Sized,
{
    // Delegation target; every operation forwards to this provider.
    inner: <T as UnsizedVecImpl>::Impl,
}
impl<T: ?Sized> UnsizedVec<T> {
    /// Creates a new, empty `UnsizedVec<T>`; does not allocate.
    #[must_use]
    #[inline]
    pub const fn new() -> UnsizedVec<T> {
        UnsizedVec {
            // Provider constant for an empty vec (pointer-align variant).
            inner: UnsizedVecProvider::NEW_ALIGN_PTR,
        }
    }

    /// Creates an empty vec with room for at least `capacity` elements.
    #[must_use]
    #[inline]
    pub fn with_capacity(capacity: usize) -> UnsizedVec<T> {
        let mut vec = UnsizedVec::new();
        vec.reserve_exact(capacity);
        vec
    }

    /// Creates an empty vec with room for at least `capacity` elements
    /// occupying at least `byte_capacity` bytes in total.
    #[must_use]
    #[inline]
    pub fn with_capacity_bytes(capacity: usize, byte_capacity: usize) -> UnsizedVec<T> {
        let mut vec = UnsizedVec::new();
        vec.reserve_exact_capacity_bytes(capacity, byte_capacity);
        vec
    }

    /// Like [`with_capacity_bytes`](Self::with_capacity_bytes), but the
    /// reserved storage is aligned to at least `align`.
    ///
    /// # Panics
    ///
    /// Panics if `align` is not a power of two, or on allocation failure.
    #[must_use]
    #[inline]
    pub fn with_capacity_bytes_align(
        capacity: usize,
        byte_capacity: usize,
        align: usize,
    ) -> UnsizedVec<T> {
        let mut vec = UnsizedVec {
            // Start from the align-1 constant (unlike `new`), so the
            // requested `align` alone determines the reservation below.
            inner: UnsizedVecProvider::NEW_ALIGN_1,
        };
        vec.reserve_exact_capacity_bytes_align(capacity, byte_capacity, align);
        vec
    }

    /// Returns how many elements the vec can hold without reallocating.
    #[must_use]
    #[inline]
    pub fn capacity(&self) -> usize {
        self.inner.capacity()
    }

    /// Returns how many bytes of element storage are allocated.
    #[must_use]
    #[inline]
    pub fn byte_capacity(&self) -> usize {
        self.inner.byte_capacity()
    }

    /// Returns the alignment of the current allocation.
    #[must_use]
    #[inline]
    pub fn align(&self) -> usize {
        self.inner.align()
    }

    /// Reserves space for at least `additional` more elements.
    ///
    /// # Panics
    ///
    /// Panics on capacity overflow or allocation failure.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        unwrap_try_reserve_result(self.try_reserve(additional));
    }

    /// Reserves space for at least `additional` more elements totalling at
    /// least `additional_bytes` more bytes.
    ///
    /// # Panics
    ///
    /// Panics on capacity overflow or allocation failure.
    #[inline]
    pub fn reserve_capacity_bytes(&mut self, additional: usize, additional_bytes: usize) {
        unwrap_try_reserve_result(self.try_reserve_capacity_bytes(additional, additional_bytes));
    }

    /// Like [`reserve_capacity_bytes`](Self::reserve_capacity_bytes), but
    /// also ensures the allocation is aligned to at least `align`.
    ///
    /// # Panics
    ///
    /// Panics if `align` is not a power of two, on capacity overflow, or on
    /// allocation failure.
    #[inline]
    pub fn reserve_capacity_bytes_align(
        &mut self,
        additional: usize,
        additional_bytes: usize,
        align: usize,
    ) {
        unwrap_try_reserve_result(self.try_reserve_capacity_bytes_align(
            additional,
            additional_bytes,
            align,
        ));
    }

    /// Reserves space for exactly `additional` more elements, with no
    /// amortized over-allocation.
    ///
    /// # Panics
    ///
    /// Panics on capacity overflow or allocation failure.
    #[inline]
    pub fn reserve_exact(&mut self, additional: usize) {
        // Routed through the `_capacity_bytes` variant with 0 extra bytes.
        unwrap_try_reserve_result(self.try_reserve_exact_capacity_bytes(additional, 0));
    }

    /// Reserves exactly `additional` more elements and exactly
    /// `additional_bytes` more bytes.
    ///
    /// # Panics
    ///
    /// Panics on capacity overflow or allocation failure.
    #[inline]
    pub fn reserve_exact_capacity_bytes(&mut self, additional: usize, additional_bytes: usize) {
        unwrap_try_reserve_result(
            self.try_reserve_exact_capacity_bytes(additional, additional_bytes),
        );
    }

    /// Like [`reserve_exact_capacity_bytes`](Self::reserve_exact_capacity_bytes),
    /// but also ensures alignment of at least `align`.
    ///
    /// # Panics
    ///
    /// Panics if `align` is not a power of two, on capacity overflow, or on
    /// allocation failure.
    #[inline]
    pub fn reserve_exact_capacity_bytes_align(
        &mut self,
        additional: usize,
        additional_bytes: usize,
        align: usize,
    ) {
        unwrap_try_reserve_result(self.try_reserve_exact_capacity_bytes_align(
            additional,
            additional_bytes,
            align,
        ));
    }

    /// Fallibly reserves space for at least `additional` more elements.
    ///
    /// # Errors
    ///
    /// Returns an error on capacity overflow or allocation failure.
    #[inline]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.inner.try_reserve(additional)
    }

    /// Fallibly reserves `additional` elements and `additional_bytes` bytes.
    #[inline]
    pub fn try_reserve_capacity_bytes(
        &mut self,
        additional: usize,
        additional_bytes: usize,
    ) -> Result<(), TryReserveError> {
        // Align 1 places no extra alignment requirement.
        self.try_reserve_capacity_bytes_align(additional, additional_bytes, 1)
    }

    /// Fallibly reserves `additional` elements, `additional_bytes` bytes,
    /// and an alignment of at least `align`.
    ///
    /// Over-allocates bytes when possible (growing by at least the current
    /// byte capacity, i.e. roughly doubling) to amortize repeated pushes,
    /// falling back to the exact request if the larger allocation fails.
    ///
    /// # Panics
    ///
    /// Panics if `align` is not a power of two.
    #[inline]
    pub fn try_reserve_capacity_bytes_align(
        &mut self,
        additional: usize,
        additional_bytes: usize,
        align: usize,
    ) -> Result<(), TryReserveError> {
        self.try_reserve(additional)?;
        debug_assert!(self.capacity() >= self.len() + additional);
        let align = to_align::<T>(align);
        let byte_cap = self.byte_capacity();
        // Bytes still needed beyond the currently unused byte capacity.
        let needed_bytes = additional_bytes.saturating_sub(self.unused_byte_cap());
        // Optimistic request: at least double the current byte capacity.
        let optimist_bytes = if needed_bytes > 0 {
            cmp::max(needed_bytes, byte_cap)
        } else {
            0
        };
        if optimist_bytes > needed_bytes {
            let result = self
                .inner
                .try_reserve_additional_bytes_align(optimist_bytes, align);
            if result.is_ok() {
                return result;
            }
        }
        // Exact fallback; also applies the alignment request when
        // `needed_bytes` is 0 but `align` grew.
        let result = self
            .inner
            .try_reserve_additional_bytes_align(needed_bytes, align);
        // NOTE(review): this assert runs even when `result` is `Err`; on an
        // allocation failure it could fire in debug builds — confirm intended.
        debug_assert!(self.byte_capacity() >= self.byte_len() + additional_bytes);
        result
    }

    /// Fallibly reserves exactly `additional` more elements.
    #[inline]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.inner.try_reserve_exact(additional)
    }

    /// Fallibly reserves exactly `additional` elements and exactly
    /// `additional_bytes` bytes.
    #[inline]
    pub fn try_reserve_exact_capacity_bytes(
        &mut self,
        additional: usize,
        additional_bytes: usize,
    ) -> Result<(), TryReserveError> {
        self.try_reserve_exact_capacity_bytes_align(additional, additional_bytes, 1)
    }

    /// Fallibly reserves exactly `additional` elements, exactly
    /// `additional_bytes` bytes, and an alignment of at least `align`.
    ///
    /// # Panics
    ///
    /// Panics if `align` is not a power of two.
    #[inline]
    pub fn try_reserve_exact_capacity_bytes_align(
        &mut self,
        additional: usize,
        additional_bytes: usize,
        align: usize,
    ) -> Result<(), TryReserveError> {
        // NOTE(review): this uses the non-exact `try_reserve` for the
        // element count, unlike `try_reserve_exact` above — confirm whether
        // `try_reserve_exact` was intended here.
        self.inner.try_reserve(additional)?;
        let align = to_align::<T>(align);
        self.inner
            .try_reserve_additional_bytes_align(additional_bytes, align)
    }

    /// Shrinks capacity, byte capacity, and alignment as much as possible.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.inner
            .shrink_capacity_bytes_align_to(0, 0, to_align::<T>(1));
    }

    /// Shrinks the element capacity with a lower bound of `min_capacity`,
    /// leaving byte capacity and alignment untouched.
    #[inline]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.inner.shrink_capacity_bytes_align_to(
            min_capacity,
            // `usize::MAX` bytes and the maximal power-of-two alignment act
            // as "don't shrink" floors for the other two dimensions.
            usize::MAX,
            to_align::<T>(1 << (usize::BITS - 1)),
        );
    }

    /// Shrinks element and byte capacities with the given lower bounds,
    /// leaving alignment untouched.
    #[inline]
    pub fn shrink_capacity_bytes_to(&mut self, min_capacity: usize, min_byte_capacity: usize) {
        self.inner.shrink_capacity_bytes_align_to(
            min_capacity,
            min_byte_capacity,
            // Maximal power-of-two alignment floor: alignment is not shrunk.
            to_align::<T>(1 << (usize::BITS - 1)),
        );
    }

    /// Shrinks element capacity, byte capacity, and alignment, each bounded
    /// below by the corresponding `min_*` argument.
    ///
    /// # Panics
    ///
    /// Panics if `min_align` is not a power of two.
    #[inline]
    pub fn shrink_capacity_bytes_align_to(
        &mut self,
        min_capacity: usize,
        min_byte_capacity: usize,
        min_align: usize,
    ) {
        self.inner.shrink_capacity_bytes_align_to(
            min_capacity,
            min_byte_capacity,
            to_align::<T>(min_align),
        );
    }

    /// Inserts `value` (which may be unsized, via `unsized_fn_params`) at
    /// position `index`, shifting all later elements.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`, or on allocation failure.
    #[inline]
    pub fn insert(&mut self, index: usize, value: T) {
        // Cold out-of-line panic path, mirroring `Vec::insert`.
        #[track_caller]
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("insertion index (is {index}) should be <= len (is {len})");
        }
        if index <= self.len() {
            let size_of_val = SizeTypeFor::<T>::of_val(&value);
            // Make room for one element of this size and alignment first.
            self.reserve_capacity_bytes_align(1, size_of_val.get(), mem::align_of_val(&value));
            // SAFETY: `index <= len` was checked above and capacity for
            // `value` was just reserved.
            unsafe { self.inner.insert_unchecked(index, value, size_of_val) }
        } else {
            assert_failed(index, self.len())
        }
    }

    /// Coerces a sized `value` to `T` and inserts it at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`, or on allocation failure.
    #[inline]
    pub fn insert_unsize<S>(&mut self, index: usize, value: S)
    where
        S: Unsize<T>,
    {
        self.insert(index, unsize!(value, (S) -> T));
    }

    /// Inserts a value constructed in place from an [`Emplacable`] at
    /// position `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index > len`, or on allocation failure.
    #[inline]
    pub fn insert_with(&mut self, index: usize, value: Emplacable<T, impl EmplacableFn<T>>) {
        // Cold out-of-line panic path, mirroring `Vec::insert`.
        #[track_caller]
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("insertion index (is {index}) should be <= len (is {len})");
        }
        if index <= self.len() {
            // SAFETY: `index <= len` was checked above.
            unsafe { self.inner.insert_with_unchecked(index, value) }
        } else {
            assert_failed(index, self.len())
        }
    }

    /// Removes the element at `index`, returning it as an [`Emplacable`]
    /// that lets the caller decide where the value ends up. The removal is
    /// deferred until the returned emplacable is consumed.
    ///
    /// # Panics
    ///
    /// Panics if `index >= len`.
    #[inline]
    pub fn remove_into(&mut self, index: usize) -> Emplacable<T, impl EmplacableFn<T> + '_> {
        // Cold out-of-line panic path, mirroring `Vec::remove`.
        #[track_caller]
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("removal index (is {index}) should be < len (is {len})");
        }
        if index < self.len() {
            let closure = move |emplacer: &mut Emplacer<'_, T>| {
                // SAFETY: `index < len` was checked above.
                unsafe { self.inner.remove_into_unchecked(index, emplacer) };
            };
            // SAFETY: the closure forwards to the provider's removal
            // routine, which emplaces exactly one value.
            unsafe { Emplacable::from_fn(closure) }
        } else {
            assert_failed(index, self.len())
        }
    }

    /// Appends `value` (which may be unsized) to the back of the vec.
    ///
    /// # Panics
    ///
    /// Panics on capacity overflow or allocation failure.
    #[inline]
    pub fn push(&mut self, value: T) {
        let size_of_val = SizeTypeFor::<T>::of_val(&value);
        // Make room for one element of this size and alignment first.
        self.reserve_capacity_bytes_align(1, size_of_val.get(), mem::align_of_val(&value));
        // SAFETY: capacity for `value` was just reserved.
        unsafe { self.inner.push_unchecked(value, size_of_val) }
    }

    /// Coerces a sized `value` to `T` and appends it.
    #[inline]
    pub fn push_unsize<S: Unsize<T>>(&mut self, value: S) {
        self.push(unsize!(value, (S) -> T));
    }

    /// Appends a value constructed in place from an [`Emplacable`].
    #[inline]
    pub fn push_with(&mut self, value: Emplacable<T, impl EmplacableFn<T>>) {
        self.inner.push_with(value);
    }

    /// Removes the last element, if any, returning it as an
    /// [`Emplacable`]; the removal happens when that emplacable is consumed.
    #[inline]
    pub fn pop_into(&mut self) -> Option<Emplacable<T, impl EmplacableFn<T> + '_>> {
        if !self.is_empty() {
            let closure = move |emplacer: &mut Emplacer<'_, T>| {
                // SAFETY: the vec was non-empty when this was created, and
                // the exclusive borrow prevents intervening mutation.
                unsafe { self.inner.pop_into_unchecked(emplacer) }
            };
            // SAFETY: the closure forwards to the provider's pop routine,
            // which emplaces exactly one value.
            Some(unsafe { Emplacable::from_fn(closure) })
        } else {
            None
        }
    }

    /// Returns the number of elements in the vec.
    #[must_use]
    #[inline]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns the total size in bytes of the stored elements.
    #[must_use]
    #[inline]
    pub fn byte_len(&self) -> usize {
        self.inner.byte_len()
    }

    /// Returns `true` if the vec contains no elements.
    #[must_use]
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a reference to the element at `index`, or `None` if out of
    /// bounds.
    #[must_use]
    #[inline]
    pub fn get(&self, index: usize) -> Option<&T> {
        // SAFETY: the closure only runs when `index < self.len()`.
        (index < self.len()).then(|| unsafe { self.get_unchecked(index) })
    }

    /// Returns a mutable reference to the element at `index`, or `None` if
    /// out of bounds.
    #[must_use]
    #[inline]
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        // SAFETY: the closure only runs when `index < self.len()`.
        (index < self.len()).then(|| unsafe { self.get_unchecked_mut(index) })
    }

    /// Returns a reference to the element at `index` without bounds checking.
    ///
    /// # Safety
    ///
    /// `index` must be less than `self.len()`.
    #[must_use]
    #[inline]
    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
        // SAFETY: caller guarantees `index < len`; the pointer borrows from
        // `self`, so the resulting reference lifetime is valid.
        unsafe { self.inner.get_unchecked_raw(index).as_ref() }
    }

    /// Returns a mutable reference to the element at `index` without bounds
    /// checking.
    ///
    /// # Safety
    ///
    /// `index` must be less than `self.len()`.
    #[must_use]
    #[inline]
    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
        // SAFETY: caller guarantees `index < len`; the exclusive borrow of
        // `self` makes the mutable reference sound.
        unsafe { self.inner.get_unchecked_raw(index).as_mut() }
    }

    /// Returns an iterator over shared references to the elements.
    #[must_use]
    #[inline]
    pub fn iter(&self) -> UnsizedIter<'_, T> {
        UnsizedIter {
            inner: self.inner.iter(),
        }
    }

    /// Returns an iterator over mutable references to the elements.
    #[must_use]
    #[inline]
    pub fn iter_mut(&mut self) -> UnsizedIterMut<'_, T> {
        UnsizedIterMut {
            inner: self.inner.iter_mut(),
        }
    }

    /// Converts a vec of sized elements into a vec of their unsized form
    /// `U` by converting the underlying storage.
    #[must_use]
    #[inline]
    pub fn unsize<U>(self) -> UnsizedVec<U>
    where
        T: Sized + Unsize<U>,
        U: ?Sized,
    {
        UnsizedVec {
            inner: <U as UnsizedVecImpl>::Impl::from_sized(self.inner),
        }
    }

    /// Bytes of allocated element storage not currently in use.
    #[must_use]
    #[inline]
    fn unused_byte_cap(&self) -> usize {
        // SAFETY: relies on the provider invariant
        // `byte_len <= byte_capacity`, so the subtraction cannot underflow.
        unsafe { self.byte_capacity().unchecked_sub(self.byte_len()) }
    }
}
impl<T> Default for UnsizedVec<T>
where
T: ?Sized,
{
#[inline]
fn default() -> Self {
Self::new()
}
}
/// Immutable iterator over the elements of an [`UnsizedVec`], yielding
/// `&T`. Created by [`UnsizedVec::iter`].
#[repr(transparent)]
pub struct UnsizedIter<'a, T>
where
    T: ?Sized + 'a,
{
    // Provider-specific iterator this wrapper forwards to.
    inner: <<T as UnsizedVecImpl>::Impl as UnsizedVecProvider<T>>::Iter<'a>,
}
/// Mutable iterator over the elements of an [`UnsizedVec`], yielding
/// `&mut T`. Created by [`UnsizedVec::iter_mut`].
#[repr(transparent)]
pub struct UnsizedIterMut<'a, T>
where
    T: ?Sized + 'a,
{
    // Provider-specific iterator this wrapper forwards to.
    inner: <<T as UnsizedVecImpl>::Impl as UnsizedVecProvider<T>>::IterMut<'a>,
}
impl<T> From<::alloc::vec::Vec<T>> for UnsizedVec<T> {
    /// Converts a `Vec<T>` into an `UnsizedVec<T>`; for sized `T` the
    /// provider is the `Vec` itself, so no elements move.
    #[inline]
    fn from(value: ::alloc::vec::Vec<T>) -> Self {
        Self { inner: value }
    }
}
impl<T> From<UnsizedVec<T>> for ::alloc::vec::Vec<T> {
    /// Recovers the underlying `Vec<T>` (the provider for sized `T`)
    /// without moving any elements.
    #[inline]
    fn from(value: UnsizedVec<T>) -> Self {
        value.inner
    }
}
impl<T> Index<usize> for UnsizedVec<T>
where
    T: ?Sized,
{
    type Output = T;

    /// Returns a reference to the element at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index >= self.len()`.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match self.get(index) {
            Some(elem) => elem,
            None => panic!("index out of range"),
        }
    }
}
impl<T> IndexMut<usize> for UnsizedVec<T>
where
    T: ?Sized,
{
    /// Returns a mutable reference to the element at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index >= self.len()`.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match self.get_mut(index) {
            Some(elem) => elem,
            None => panic!("index out of range"),
        }
    }
}
impl<'a, T> From<core::slice::Iter<'a, T>> for UnsizedIter<'a, T>
where
T: 'a,
{
#[inline]
fn from(value: core::slice::Iter<'a, T>) -> Self {
UnsizedIter { inner: value }
}
}
impl<'a, T> From<UnsizedIter<'a, T>> for core::slice::Iter<'a, T>
where
    T: 'a,
{
    /// Unwraps back to the slice iterator (the provider iterator for
    /// sized `T`).
    #[inline]
    fn from(value: UnsizedIter<'a, T>) -> Self {
        value.inner
    }
}
// Generates the forwarding iterator impls for `UnsizedIter` (shared refs)
// and `UnsizedIterMut` (mutable refs, selected by the optional `mut` token).
// Every method delegates to the wrapped provider iterator so that any
// specialized implementations (counts, `nth`, internal iteration) are kept.
macro_rules! iter_ref {
    ($iter_ty:ident $($muta:ident)?) => {
        impl<'a, T> Iterator for $iter_ty<'a, T>
        where
            T: ?Sized + 'a,
        {
            // `&'a T`, or `&'a mut T` when the `mut` token is supplied.
            type Item = &'a $($muta)? T;
            #[inline]
            fn next(&mut self) -> Option<Self::Item> {
                self.inner.next()
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.inner.size_hint()
            }
            #[inline]
            fn count(self) -> usize {
                self.inner.count()
            }
            #[inline]
            fn nth(&mut self, n: usize) -> Option<Self::Item> {
                self.inner.nth(n)
            }
            #[inline]
            fn last(self) -> Option<Self::Item> {
                self.inner.last()
            }
            #[inline]
            fn for_each<F>(self, f: F)
            where
                F: FnMut(Self::Item),
            {
                self.inner.for_each(f);
            }
            #[inline]
            fn all<F>(&mut self, f: F) -> bool
            where
                F: FnMut(Self::Item) -> bool,
            {
                self.inner.all(f)
            }
            #[inline]
            fn any<F>(&mut self, f: F) -> bool
            where
                F: FnMut(Self::Item) -> bool,
            {
                self.inner.any(f)
            }
            #[inline]
            fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
            where
                P: FnMut(&Self::Item) -> bool,
            {
                self.inner.find(predicate)
            }
            #[inline]
            fn find_map<B, F>(&mut self, f: F) -> Option<B>
            where
                F: FnMut(Self::Item) -> Option<B>,
            {
                self.inner.find_map(f)
            }
            #[inline]
            fn position<P>(&mut self, predicate: P) -> Option<usize>
            where
                P: FnMut(Self::Item) -> bool,
            {
                self.inner.position(predicate)
            }
        }
        impl<'a, T> DoubleEndedIterator for $iter_ty<'a, T>
        where
            T: ?Sized + 'a,
        {
            #[inline]
            fn next_back(&mut self) -> Option<Self::Item> {
                self.inner.next_back()
            }
            #[inline]
            fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
                self.inner.nth_back(n)
            }
        }
        // Marker traits: the provider iterator is required to uphold both.
        impl<'a, T> ExactSizeIterator for $iter_ty<'a, T>
        where
            T: ?Sized + 'a,
        {}
        impl<'a, T> FusedIterator for $iter_ty<'a, T>
        where
            T: ?Sized + 'a,
        {}
    }
}
// Instantiate the forwarding iterator impls for the shared (`&T`) and
// mutable (`&mut T`) wrappers.
iter_ref!(UnsizedIter);
iter_ref!(UnsizedIterMut mut);
impl<'a, T> IntoIterator for &'a UnsizedVec<T>
where
T: ?Sized + 'a,
{
type Item = &'a T;
type IntoIter = UnsizedIter<'a, T>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T> IntoIterator for &'a mut UnsizedVec<T>
where
T: ?Sized + 'a,
{
type Item = &'a mut T;
type IntoIter = UnsizedIterMut<'a, T>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<T, F> FromIterator<Emplacable<T, F>> for UnsizedVec<T>
where
    T: ?Sized,
    F: EmplacableFn<T>,
{
    /// Collects emplacers into a fresh vector by delegating to `extend`.
    #[inline]
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Emplacable<T, F>>,
    {
        let mut out = UnsizedVec::new();
        out.extend(iter);
        out
    }
}
impl<T, F> Extend<Emplacable<T, F>> for UnsizedVec<T>
where
    T: ?Sized,
    F: EmplacableFn<T>,
{
    /// Appends every emplacer yielded by `iter`, reserving element slots up
    /// front from the iterator's lower size bound.
    #[inline]
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Emplacable<T, F>>,
    {
        let iter = iter.into_iter();
        // Byte requirements are unknown ahead of time, so only element
        // capacity is pre-reserved here.
        self.reserve_exact(iter.size_hint().0);
        for emplacable in iter {
            self.push_with(emplacable);
        }
    }
}
impl<T> Debug for UnsizedVec<T>
where
    T: ?Sized + Debug,
{
    /// Formats like a slice, e.g. `[a, b, c]`.
    #[inline]
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self).finish()
    }
}
impl<T> Clone for UnsizedVec<T>
where
    T: Clone,
{
    /// Clones every element into a new vector pre-sized to this one's
    /// element, byte, and alignment capacities.
    #[inline]
    fn clone(&self) -> Self {
        let mut cloned = UnsizedVec::with_capacity_bytes_align(
            self.capacity(),
            self.byte_capacity(),
            self.align(),
        );
        for elem in self.iter() {
            cloned.push(elem.clone());
        }
        cloned
    }
}
impl<T, U> PartialEq<UnsizedVec<U>> for UnsizedVec<T>
where
    T: ?Sized + PartialEq<U>,
    U: ?Sized,
{
    /// Two vectors are equal when they have the same length and every pair
    /// of corresponding elements compares equal.
    #[inline]
    fn eq(&self, other: &UnsizedVec<U>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        self.iter().zip(other).all(|(lhs, rhs)| lhs == rhs)
    }
}
// `Eq` lifts from the element type: equality here is element-wise.
impl<T> Eq for UnsizedVec<T> where T: ?Sized + Eq {}
impl<T, U> PartialOrd<UnsizedVec<U>> for UnsizedVec<T>
where
    T: ?Sized + PartialOrd<U>,
    U: ?Sized,
{
    /// Lexicographic comparison: the first non-`Equal` (or incomparable)
    /// element pair decides; otherwise lengths are compared.
    fn partial_cmp(&self, other: &UnsizedVec<U>) -> Option<cmp::Ordering> {
        self.iter()
            .zip(other)
            .find_map(|(lhs, rhs)| match lhs.partial_cmp(rhs) {
                Some(cmp::Ordering::Equal) => None,
                decisive => Some(decisive),
            })
            .unwrap_or_else(|| self.len().partial_cmp(&other.len()))
    }
}
impl<T> Ord for UnsizedVec<T>
where
    T: ?Sized + Ord,
{
    /// Lexicographic ordering, with length as the tie-breaker.
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.iter()
            .zip(other)
            .map(|(lhs, rhs)| lhs.cmp(rhs))
            .find(|&ord| ord != cmp::Ordering::Equal)
            .unwrap_or_else(|| self.len().cmp(&other.len()))
    }
}
impl<T> Hash for UnsizedVec<T>
where
    T: ?Sized + Hash,
{
    /// Hashes each element in order.
    ///
    /// NOTE(review): unlike `[T]`'s `Hash`, the length is not folded in —
    /// confirm prefix-collision behavior is acceptable.
    #[inline]
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.iter().for_each(|elem| elem.hash(state));
    }
}
#[cfg(feature = "serde")]
use serde::{Serialize, ser::SerializeSeq};
#[cfg(feature = "serde")]
impl<T> Serialize for UnsizedVec<T>
where
    T: ?Sized + Serialize,
{
    /// Serializes as a sequence of the contained elements.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for elem in self.iter() {
            seq.serialize_element(elem)?;
        }
        seq.end()
    }
}
/// Implementation detail of the [`unsized_vec!`] macro: dispatches a value
/// to either `push` (for `T` itself) or `push_with` (for `Emplacable`s).
#[doc(hidden)]
pub trait PushToUnsizedVec<U: ?Sized> {
    /// Moves `self` into `vec`; `self` may be unsized (`unsized_fn_params`).
    fn push_to_unsized_vec(self, vec: &mut UnsizedVec<U>);
}
// Direct values (possibly unsized) are pushed by value.
impl<T: ?Sized> PushToUnsizedVec<T> for T {
    #[inline]
    fn push_to_unsized_vec(self, vec: &mut UnsizedVec<T>) {
        vec.push(self);
    }
}
// Emplacers are pushed via in-place construction.
impl<T: ?Sized, F: EmplacableFn<T>> PushToUnsizedVec<T> for Emplacable<T, F> {
    #[inline]
    fn push_to_unsized_vec(self, vec: &mut UnsizedVec<T>) {
        vec.push_with(self);
    }
}
/// Creates an [`UnsizedVec`] containing the given elements.
///
/// Each element may be a value of the element type or an `Emplacable`;
/// dispatch happens through the hidden `PushToUnsizedVec` trait.
#[macro_export]
macro_rules! unsized_vec {
    () => (
        $crate::UnsizedVec::new()
    );
    ($($x:expr),+ $(,)?) => (
        {
            let mut ret = $crate::UnsizedVec::new();
            $($crate::PushToUnsizedVec::push_to_unsized_vec($x, &mut ret);)+
            ret
        }
    );
}
/// Creates an [`UnsizedVec`] by unsizing each given sized value into the
/// vector's element type (via `push_unsize`).
#[macro_export]
macro_rules! unsize_vec {
    () => (
        $crate::UnsizedVec::new()
    );
    ($($x:expr),+ $(,)?) => (
        {
            let mut ret = $crate::UnsizedVec::new();
            $(ret.push_unsize($x);)+
            ret
        }
    );
}