use crate::{
len::{LengthType, Usize},
mem::{
alloc::{alloc_buffer, realloc_buffer, DOHAE, NOHAE},
errors::ReservationError,
SpareMemoryPolicy, Uninitialized,
},
};
use core::{
marker::PhantomData,
mem,
ops::{Bound, RangeBounds},
ptr, slice,
};
mod buffer;
use buffer::SetLenOnDrop;
mod retain;
use retain::RetainGuard;
/// A vector-like container that stores up to `C` elements inline and spills
/// to a heap allocation when it grows beyond that.
///
/// `L` is the integer type used to track length/capacity; `SM` is the policy
/// applied to spare (unused) memory.
///
/// Invariant used throughout this file: when `size_of::<T>() == 0` or
/// `capacity <= C`, `capacity` holds the *length* and the elements live in
/// local storage; otherwise `capacity` is the heap capacity and the length
/// lives next to the heap buffer (`buf.heap_len()`).
pub struct SmallVec<T, const C: usize, L = Usize, SM = Uninitialized>
where
    L: LengthType,
    SM: SpareMemoryPolicy<T>,
{
    // Local element storage, or the (ptr, len) heap header once spilled.
    buf: buffer::Buffer<T, C, L, SM>,
    // Length while local / ZST; heap capacity after spilling (see invariant).
    capacity: L,
    phantom: PhantomData<T>,
}
impl<T, const C: usize, L, SM> SmallVec<T, C, L, SM>
where
L: LengthType,
SM: SpareMemoryPolicy<T>,
{
/// Returns `true` when the elements currently live in a heap allocation
/// (never the case for zero-sized `T`).
#[inline]
pub fn is_heap(&self) -> bool {
    if mem::size_of::<T>() == 0 {
        false
    } else {
        self.capacity.as_usize() > C
    }
}
#[inline]
pub fn is_local(&self) -> bool {
(mem::size_of::<T>() == 0) || (self.capacity.as_usize() <= C)
}
/// Creates a new, empty `SmallVec` using the inline storage.
#[inline]
pub fn new() -> Self {
    Self {
        capacity: L::new(0),
        buf: buffer::Buffer::new(),
        phantom: PhantomData,
    }
}
/// Creates an empty `SmallVec` with room for at least `capacity` elements
/// reserved up front. Panics if the reservation fails.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
    let mut sv = Self::new();
    sv.reserve_exact(capacity);
    sv
}
/// Returns the total number of elements the vector can hold without
/// reallocating — at least `C`; for zero-sized `T` the only bound is the
/// length type's maximum.
#[inline]
pub fn capacity(&self) -> usize {
    if mem::size_of::<T>() == 0 {
        return L::MAX;
    }
    self.capacity.as_usize().max(C)
}
/// Returns how many more elements fit without reallocating.
#[inline]
pub fn spare_capacity(&self) -> usize {
    if mem::size_of::<T>() == 0 {
        // ZST: `capacity` holds the length; the limit is the length type.
        return L::MAX - self.capacity.as_usize();
    }
    let cap = self.capacity.as_usize();
    if cap <= C {
        // Local mode: `cap` is the length, real capacity is `C`.
        C - cap
    } else {
        // Heap mode: the length lives in the heap header.
        cap - self.buf.heap_len().as_usize()
    }
}
/// Returns the unused capacity as a mutable slice of possibly-uninitialized
/// elements, located immediately after the initialized elements.
#[inline]
pub fn spare_capacity_mut(&mut self) -> &mut [mem::MaybeUninit<T>] {
    if mem::size_of::<T>() == 0 {
        // ZST: `capacity` holds the length; any aligned pointer works for
        // ZST slices, and the spare room runs to the length-type maximum.
        unsafe {
            slice::from_raw_parts_mut(
                self.buf.local_mut_ptr().cast(),
                L::MAX - self.capacity.as_usize(),
            )
        }
    } else {
        let cap = self.capacity.as_usize();
        unsafe {
            let (spare_size, p) = if cap <= C {
                // Local mode: `cap` is the current length; spare runs to `C`.
                (C - cap, self.buf.local_mut_ptr().add(cap))
            } else {
                // Heap mode: the length is stored next to the heap buffer.
                let (len, p) = self.buf.heap_len_mut_p();
                let len = len.as_usize();
                (cap - len, p.add(len))
            };
            slice::from_raw_parts_mut(p.cast(), spare_size)
        }
    }
}
/// Returns the initialized elements and the spare capacity as two adjacent
/// mutable slices.
#[inline]
pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [mem::MaybeUninit<T>]) {
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() == 0 {
        // ZST: `cap` is the length; spare room runs to the length-type max.
        let p = self.buf.local_mut_ptr();
        unsafe {
            (
                slice::from_raw_parts_mut(p, cap),
                slice::from_raw_parts_mut(p.add(cap).cast(), L::MAX - cap),
            )
        }
    } else if cap <= C {
        // Local mode: `cap` is the length; the real capacity is `C`.
        let p = self.buf.local_mut_ptr();
        unsafe {
            (
                slice::from_raw_parts_mut(p, cap),
                slice::from_raw_parts_mut(p.add(cap).cast(), C - cap),
            )
        }
    } else {
        // Heap mode: length in the heap header, capacity in `self.capacity`.
        let (len, p) = self.buf.heap_len_mut_p();
        let len = len.as_usize();
        unsafe {
            (
                slice::from_raw_parts_mut(p, len),
                slice::from_raw_parts_mut(p.add(len).cast(), cap - len),
            )
        }
    }
}
/// Returns `true` when at least one more element fits without reallocating.
#[inline]
pub fn has_spare_capacity(&self) -> bool {
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() == 0 {
        // ZST: bounded only by the length type.
        cap < L::MAX
    } else if cap <= C {
        // Local mode: `cap` is the length, real capacity is `C`.
        cap < C
    } else {
        // Heap mode.
        self.buf.heap_len().as_usize() < cap
    }
}
/// Returns the number of initialized elements.
#[inline]
pub fn len(&self) -> usize {
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() != 0 && cap > C {
        // Heap mode: the length lives next to the heap buffer.
        self.buf.heap_len().as_usize()
    } else {
        // Local / ZST mode: `capacity` doubles as the length.
        cap
    }
}
/// Returns `true` when the vector holds no elements.
#[inline]
pub fn is_empty(&self) -> bool {
    0 == self.len()
}
/// Returns `true` when no more elements fit without reallocating.
#[inline]
pub fn is_full(&self) -> bool {
    match self.capacity.as_usize() {
        // ZST: full only at the length type's maximum.
        cap if mem::size_of::<T>() == 0 => cap == L::MAX,
        // Local mode: full when the length reaches `C`.
        cap if cap <= C => cap == C,
        // Heap mode: full when the stored length reaches the capacity.
        cap => cap == self.buf.heap_len().as_usize(),
    }
}
/// Forces the length of the vector to `len` without initializing or
/// dropping elements.
///
/// # Safety
///
/// The first `len` elements must be initialized, and `len` must not exceed
/// the current capacity (`C` in local mode, the heap capacity otherwise,
/// `L::MAX` for zero-sized `T`).
#[inline]
pub unsafe fn set_len(&mut self, len: usize) {
    if mem::size_of::<T>() == 0 {
        // ZST: the length lives in `capacity`.
        debug_assert!(len <= L::MAX);
        self.capacity.set(len);
    } else {
        let cap = self.capacity.as_usize();
        if cap <= C {
            // Local mode: `capacity` doubles as the length.
            debug_assert!(len <= C);
            self.capacity.set(len);
        } else {
            // Heap mode: the length is stored next to the heap buffer.
            debug_assert!(len <= cap);
            self.buf.set_heap_len(L::new(len))
        }
    }
}
/// Returns the initialized elements as a shared slice.
#[inline]
pub fn as_slice(&self) -> &[T] {
    let (length, data) = self.len_p();
    unsafe { slice::from_raw_parts(data, length.as_usize()) }
}
/// Returns the initialized elements as a mutable slice.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [T] {
    let (length, data) = self.len_mut_p();
    unsafe { slice::from_raw_parts_mut(data, length.as_usize()) }
}
/// Returns a raw read pointer to the element storage (local or heap).
#[inline]
pub fn as_ptr(&self) -> *const T {
    if self.is_heap() {
        self.buf.heap_ptr()
    } else {
        self.buf.local_ptr()
    }
}
/// Returns a raw write pointer to the element storage (local or heap).
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
    if self.is_heap() {
        self.buf.heap_mut_ptr()
    } else {
        self.buf.local_mut_ptr()
    }
}
/// Returns an iterator over shared references to the elements.
#[inline]
pub fn iter(&self) -> slice::Iter<'_, T> {
    let elements: &[T] = self.as_slice();
    elements.iter()
}
/// Returns an iterator over mutable references to the elements.
#[inline]
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
    let elements: &mut [T] = self.as_mut_slice();
    elements.iter_mut()
}
/// Returns the current length together with a read pointer to the storage.
#[inline]
fn len_p(&self) -> (L, *const T) {
    let cap = self.capacity;
    if mem::size_of::<T>() != 0 && cap.as_usize() > C {
        // Heap mode: both values come from the heap header.
        self.buf.heap_len_p()
    } else {
        // Local / ZST mode: `capacity` doubles as the length.
        (cap, self.buf.local_ptr())
    }
}
/// Returns the current length together with a write pointer to the storage.
#[inline]
fn len_mut_p(&mut self) -> (L, *mut T) {
    let cap = self.capacity;
    if mem::size_of::<T>() != 0 && cap.as_usize() > C {
        // Heap mode: both values come from the heap header.
        self.buf.heap_len_mut_p()
    } else {
        // Local / ZST mode: `capacity` doubles as the length.
        (cap, self.buf.local_mut_ptr())
    }
}
/// Reserves room for at least `additional` more elements, growing with an
/// amortized (power-of-two) strategy. Panics if the reservation fails.
#[inline]
pub fn reserve(&mut self, additional: usize) {
    let _ = self
        .try_reserve_impl::<DOHAE>(additional)
        .expect("smallvec reserve failed");
}
/// Fallible version of [`SmallVec::reserve`].
#[inline]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), ReservationError> {
    match self.try_reserve_impl::<NOHAE>(additional) {
        Ok(_) => Ok(()),
        Err(e) => Err(e),
    }
}
/// Shared implementation of `reserve`/`try_reserve`: grows to the next
/// power of two (or the length-type maximum) that fits `len + additional`.
#[inline(never)]
fn try_reserve_impl<const HAE: bool>(
    &mut self,
    additional: usize,
) -> Result<(&mut L, *mut T), ReservationError> {
    self.reserve_core::<_, HAE>(additional, |len, extra| {
        match len.checked_add_usize(extra) {
            Some(required) => Ok(required.next_power_of_two_or_max()),
            None => Err(ReservationError::CapacityOverflow),
        }
    })
}
/// Reserves room for exactly `additional` more elements. Panics if the
/// reservation fails.
#[inline]
pub fn reserve_exact(&mut self, additional: usize) {
    let _ = self
        .try_reserve_exact_impl::<DOHAE>(additional)
        .expect("smallvec reserve_exact failed");
}
/// Fallible version of [`SmallVec::reserve_exact`].
#[inline]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), ReservationError> {
    match self.try_reserve_exact_impl::<NOHAE>(additional) {
        Ok(_) => Ok(()),
        Err(e) => Err(e),
    }
}
/// Shared implementation of `reserve_exact`/`try_reserve_exact`: grows to
/// exactly `len + additional`.
#[inline(never)]
fn try_reserve_exact_impl<const HAE: bool>(
    &mut self,
    additional: usize,
) -> Result<(&mut L, *mut T), ReservationError> {
    self.reserve_core::<_, HAE>(additional, |len, extra| {
        match len.checked_add_usize(extra) {
            Some(required) => Ok(required),
            None => Err(ReservationError::CapacityOverflow),
        }
    })
}
/// Shared core of the `reserve*` family.
///
/// `nc` computes the new capacity from the current length and `additional`
/// (exact for `reserve_exact`, next-power-of-two for `reserve`); `HAE`
/// selects the handle-alloc-error behavior of the allocation helpers.
///
/// On success returns the mutable length slot and a pointer to the element
/// storage, so callers can append without re-deriving the storage mode.
#[inline]
fn reserve_core<F, const HAE: bool>(
    &mut self,
    additional: usize,
    nc: F,
) -> Result<(&mut L, *mut T), ReservationError>
where
    F: FnOnce(L, usize) -> Result<L, ReservationError>,
{
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() == 0 {
        // ZST: nothing is ever allocated; only the length type limits us.
        let len = cap;
        let cap = L::MAX;
        if cap - len >= additional {
            Ok((&mut self.capacity, self.buf.local_mut_ptr()))
        } else {
            Err(ReservationError::CapacityOverflow)
        }
    } else if cap <= C {
        // Local mode: `cap` is the length; the real capacity is `C`.
        let len = cap;
        let cap = C;
        if cap - len >= additional {
            return Ok((&mut self.capacity, self.buf.local_mut_ptr()));
        }
        // Not enough local room: spill to a fresh heap buffer.
        let new_cap = nc(self.capacity, additional)?;
        debug_assert!(new_cap > C);
        debug_assert!(new_cap > cap);
        let p = unsafe {
            // With a no-op spare-memory policy only the `len` initialized
            // elements need copying; otherwise the whole local area (`cap`
            // slots) is copied, the new buffer's tail is initialized per the
            // policy, and the vacated local storage is re-initialized too.
            let prefix = if SM::NOOP { len } else { cap };
            let tmp = alloc_buffer::<T, HAE>(new_cap.as_usize())?;
            ptr::copy_nonoverlapping(self.buf.local_ptr(), tmp, prefix);
            if !SM::NOOP {
                SM::init(tmp.add(cap), new_cap.as_usize() - cap);
                SM::init(self.buf.local_mut_ptr(), len)
            }
            tmp
        };
        // Switch to heap mode: buffer stores (ptr, len); `capacity` becomes
        // the heap capacity.
        self.buf.set_heap(p, self.capacity);
        self.capacity = new_cap;
        Ok(self.buf.heap_mut_len_mut_p())
    } else {
        // Heap mode: grow the existing allocation if needed.
        let len = self.buf.heap_len();
        if cap - len.as_usize() >= additional {
            return Ok(self.buf.heap_mut_len_mut_p());
        }
        let new_cap = nc(len, additional)?;
        debug_assert!(new_cap > cap);
        let p = unsafe {
            let tmp = realloc_buffer::<T, SM, HAE>(
                self.buf.heap_mut_ptr(),
                len.as_usize(),
                cap,
                new_cap.as_usize(),
            )?;
            // Initialize the newly added region per the spare-memory policy
            // (the len..cap region is presumably handled by realloc_buffer —
            // see mem::alloc).
            SM::init(tmp.add(cap), new_cap.as_usize() - cap);
            tmp
        };
        self.buf.set_heap_ptr(p);
        self.capacity = new_cap;
        Ok(self.buf.heap_mut_len_mut_p())
    }
}
/// Appends `e` to the back of the vector. Panics if growing fails.
#[inline]
pub fn push(&mut self, e: T) {
    let outcome = self.try_push_impl::<DOHAE>(e);
    outcome.expect("smallvec push failed")
}
#[inline]
pub fn try_push(&mut self, e: T) -> Result<(), ReservationError> {
self.try_push_impl::<NOHAE>(e)
}
/// Shared implementation of `push`/`try_push`.
#[inline]
#[allow(clippy::comparison_chain)]
fn try_push_impl<const HAE: bool>(&mut self, e: T) -> Result<(), ReservationError> {
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() == 0 {
        // ZST: just bump the length (held in `capacity`); the write is
        // address-wise trivial but still consumes `e`.
        if cap < L::MAX {
            self.capacity.add_assign(1);
            unsafe { self.buf.local_mut_ptr().write(e) };
        } else {
            return Err(ReservationError::CapacityOverflow);
        }
        Ok(())
    } else {
        let len;
        let p;
        if cap < C {
            // Local mode with spare room: `cap` is the length.
            p = self.buf.local_mut_ptr();
            len = cap;
            self.capacity.add_assign(1);
        } else if cap > C {
            // Heap mode: grow first when full.
            len = self.buf.heap_len().as_usize();
            if len == cap {
                self.try_reserve_impl::<HAE>(1)?;
            }
            // Read the pointer only after reserving: the buffer may move.
            p = self.buf.heap_mut_ptr();
            self.buf.heap_len_add_assign(1);
        } else {
            // Local and exactly full (cap == C): spill to the heap first.
            self.try_reserve_impl::<HAE>(1)?;
            p = self.buf.heap_mut_ptr();
            len = self.buf.heap_len().as_usize();
            self.buf.heap_len_add_assign(1);
        }
        unsafe {
            p.add(len).write(e);
        }
        Ok(())
    }
}
/// Removes and returns the last element, or `None` when empty.
#[inline]
#[allow(clippy::comparison_chain)]
pub fn pop(&mut self) -> Option<T> {
    if mem::size_of::<T>() == 0 {
        // ZST: the length lives in `capacity`; reading a ZST is trivial.
        let len = self.capacity.as_usize();
        if len > 0 {
            self.capacity.sub_assign(1);
            unsafe { Some(self.buf.local_ptr().read()) }
        } else {
            None
        }
    } else {
        let cap = self.capacity.as_usize();
        if cap <= C {
            // Local mode: `cap` is the length.
            if cap > 0 {
                let new_len = cap - 1;
                self.capacity = L::new(new_len);
                unsafe {
                    let p = self.buf.local_mut_ptr().add(new_len);
                    let e = p.read();
                    // Apply the spare-memory policy to the vacated slot.
                    SM::init(p, 1);
                    Some(e)
                }
            } else {
                None
            }
        } else {
            // Heap mode: the length lives in the heap header.
            let len = self.buf.heap_len().as_usize();
            if len > 0 {
                let new_len = len - 1;
                self.buf.set_heap_len(L::new(new_len));
                unsafe {
                    let p = self.buf.heap_mut_ptr().add(new_len);
                    let e = p.read();
                    // Apply the spare-memory policy to the vacated slot.
                    SM::init(p, 1);
                    Some(e)
                }
            } else {
                None
            }
        }
    }
}
/// Shortens the vector to `len` elements, dropping the tail in place.
/// No-op when `len >= self.len()`.
#[inline]
pub fn truncate(&mut self, len: usize) {
    let my_len = self.len();
    if len < my_len {
        unsafe {
            // Shrink the length before dropping, so the dropped tail is
            // never observable as live elements.
            self.set_len(len);
            let s = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), my_len - len);
            ptr::drop_in_place(s);
            // Apply the spare-memory policy to the vacated tail.
            SM::init(s.as_mut_ptr(), s.len());
        }
    }
}
/// Removes and drops all elements; the capacity is left unchanged.
#[inline]
pub fn clear(&mut self) {
    self.truncate(0);
}
/// Builds a `SmallVec` from an iterator, returning an error instead of
/// panicking when a reservation cannot be satisfied.
#[inline]
pub fn try_from_iter<I>(iter: I) -> Result<Self, ReservationError>
where
    I: IntoIterator<Item = T>,
{
    Self::try_from_iter_impl::<I, NOHAE>(iter)
}
/// Shared implementation of iterator-based construction.
#[inline]
fn try_from_iter_impl<I, const HAE: bool>(iter: I) -> Result<Self, ReservationError>
where
    I: IntoIterator<Item = T>,
{
    let mut sv = Self::new();
    sv.try_extend_impl::<I, HAE>(iter)?;
    Ok(sv)
}
/// Shared implementation of fallible extension from an iterator.
///
/// Uses `SetLenOnDrop` so the elements written so far are kept (and the
/// stored length stays consistent) even if the iterator panics — see the
/// buffer module.
#[inline]
fn try_extend_impl<I, const HAE: bool>(&mut self, iter: I) -> Result<(), ReservationError>
where
    I: IntoIterator<Item = T>,
{
    let it = iter.into_iter();
    // Best-effort up-front reservation from the iterator's size hint.
    let (min, max) = it.size_hint();
    let cap = max.unwrap_or(min);
    let len = self.len();
    let mut g = SetLenOnDrop::new(self, len);
    g.sv.try_reserve_impl::<HAE>(cap)?;
    let mut cap = g.sv.capacity();
    let mut p = unsafe { g.sv.as_mut_ptr().add(len) };
    for e in it {
        unsafe {
            if g.len >= cap {
                // Out of room: publish the current length, grow, and
                // re-derive the write pointer (the buffer may have moved).
                g.sv.set_len(g.len);
                let (_, tmp_p) = g.sv.try_reserve_impl::<HAE>(1)?;
                p = tmp_p.add(g.len);
                cap = g.sv.capacity();
            }
            p.write(e);
            p = p.add(1);
            g.len += 1;
        }
    }
    drop(g);
    Ok(())
}
/// Inserts `value` at `index`, shifting later elements right. Panics on an
/// invalid index or a failed reservation.
#[inline]
pub fn insert(&mut self, index: usize, value: T) {
    let outcome = self.try_insert_impl::<DOHAE>(index, value);
    outcome.expect("smallvec insert failed")
}
#[inline]
pub fn try_insert(&mut self, index: usize, value: T) -> Result<(), InsertError> {
self.try_insert_impl::<NOHAE>(index, value)
}
/// Shared implementation of `insert`/`try_insert`: inserts `value` at
/// `index`, shifting later elements one slot to the right.
#[inline]
#[allow(clippy::comparison_chain)]
fn try_insert_impl<const HAE: bool>(
    &mut self,
    index: usize,
    value: T,
) -> Result<(), InsertError> {
    let cap = self.capacity.as_usize();
    if mem::size_of::<T>() == 0 {
        // ZST: no shifting needed — validate the index and bump the length.
        if index > cap {
            Err(InsertError::InvalidIndex)
        } else if cap < L::MAX {
            self.capacity.add_assign(1);
            unsafe { self.buf.local_mut_ptr().write(value) };
            Ok(())
        } else {
            Err(InsertError::ReservationError(
                ReservationError::CapacityOverflow,
            ))
        }
    } else {
        let len;
        let p;
        if cap < C {
            // Local mode with spare room: `cap` is the length.
            p = self.buf.local_mut_ptr();
            len = cap;
            if index > len {
                return Err(InsertError::InvalidIndex);
            }
            self.capacity.add_assign(1);
        } else if cap > C {
            // Heap mode: validate, grow when full, then re-read the pointer
            // (the buffer may have moved).
            len = self.buf.heap_len().as_usize();
            if index > len {
                return Err(InsertError::InvalidIndex);
            }
            if len == cap {
                self.try_reserve_impl::<HAE>(1)
                    .map_err(InsertError::ReservationError)?;
            }
            p = self.buf.heap_mut_ptr();
            self.buf.heap_len_add_assign(1);
        } else {
            // Local and exactly full (cap == C): spill to the heap first.
            if index > cap {
                return Err(InsertError::InvalidIndex);
            }
            self.try_reserve_impl::<HAE>(1)
                .map_err(InsertError::ReservationError)?;
            p = self.buf.heap_mut_ptr();
            len = self.buf.heap_len().as_usize();
            self.buf.heap_len_add_assign(1);
        }
        unsafe {
            // Shift the tail one slot right, then write the new element.
            let p = p.add(index);
            ptr::copy(p, p.add(1), len - index);
            p.write(value);
        }
        Ok(())
    }
}
/// Removes and returns the element at `index`, shifting later elements
/// left. Panics if `index` is out of bounds.
#[inline]
pub fn remove(&mut self, index: usize) -> T {
    match self.try_remove(index) {
        Some(value) => value,
        None => panic!("smallvec remove: index out of bounds"),
    }
}
/// Removes and returns the element at `index`, shifting later elements
/// left; returns `None` when `index` is out of bounds.
#[inline]
pub fn try_remove(&mut self, index: usize) -> Option<T> {
    if mem::size_of::<T>() == 0 {
        // ZST: all elements are identical — just shrink the length.
        let len = self.capacity.as_usize();
        if index < len {
            self.capacity.sub_assign(1);
            unsafe { Some(self.buf.local_ptr().read()) }
        } else {
            None
        }
    } else {
        let len;
        let p;
        let cap = self.capacity.as_usize();
        if cap <= C {
            // Local mode: `cap` is the length.
            if index < cap {
                self.capacity.sub_assign(1);
                p = self.buf.local_mut_ptr();
                len = cap;
            } else {
                return None;
            }
        } else {
            // Heap mode: the length lives in the heap header.
            len = self.buf.heap_len().as_usize();
            if index < len {
                self.buf.set_heap_len(L::new(len - 1));
                p = self.buf.heap_mut_ptr();
            } else {
                return None;
            }
        }
        unsafe {
            let p = p.add(index);
            let value = p.read();
            // Close the gap, then apply the spare-memory policy to the
            // vacated last slot.
            let to_copy = len - index - 1;
            ptr::copy(p.add(1), p, to_copy);
            SM::init(p.add(to_copy), 1);
            Some(value)
        }
    }
}
/// Removes the element at `index` in O(1) by moving the last element into
/// its place (order is not preserved). Panics if `index` is out of bounds.
#[inline]
pub fn swap_remove(&mut self, index: usize) -> T {
    match self.try_swap_remove(index) {
        Some(value) => value,
        None => panic!("smallvec swap_remove: index out of bounds"),
    }
}
/// Removes and returns the element at `index`, replacing it with the last
/// element in O(1) (order is not preserved); returns `None` when `index`
/// is out of bounds.
#[inline]
pub fn try_swap_remove(&mut self, index: usize) -> Option<T> {
    if mem::size_of::<T>() == 0 {
        // ZST: all elements are identical — just shrink the length.
        let len = self.capacity.as_usize();
        if index < len {
            self.capacity.sub_assign(1);
            unsafe { Some(self.buf.local_ptr().read()) }
        } else {
            None
        }
    } else {
        let len;
        let p;
        let cap = self.capacity.as_usize();
        if cap <= C {
            // Local mode: `cap` is the length.
            if index < cap {
                self.capacity.sub_assign(1);
                p = self.buf.local_mut_ptr();
                len = cap;
            } else {
                return None;
            }
        } else {
            // Heap mode: the length lives in the heap header.
            len = self.buf.heap_len().as_usize();
            if index < len {
                self.buf.set_heap_len(L::new(len - 1));
                p = self.buf.heap_mut_ptr();
            } else {
                return None;
            }
        }
        unsafe {
            // Read the victim, move the (old) last element into its slot
            // (a same-slot copy when index was last), then apply the
            // spare-memory policy to the vacated last slot.
            let p_last = p.add(len - 1);
            let p = p.add(index);
            let value = p.read();
            ptr::copy(p_last, p, 1);
            SM::init(p_last, 1);
            Some(value)
        }
    }
}
/// Retains only the elements for which `f` returns `true`, preserving the
/// order of the retained elements.
#[inline]
pub fn retain<F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> bool,
{
    self.retain_mut(|e| f(&*e))
}
/// Retains only the elements for which `f` returns `true`, passing mutable
/// references; the order of retained elements is preserved.
///
/// The stored length is zeroed up front and the bookkeeping lives in a
/// `RetainGuard`, so that a panicking predicate cannot expose moved-out or
/// dropped elements (the guard presumably repairs the length on drop — see
/// the retain module).
#[inline]
pub fn retain_mut<F>(&mut self, mut f: F)
where
    F: FnMut(&mut T) -> bool,
{
    let mut g = RetainGuard {
        sv: self,
        len: 0,
        deleted: 0,
        processed: 0,
    };
    let cap = g.sv.capacity.as_usize();
    let (len, p) = if mem::size_of::<T>() == 0 || cap <= C {
        // Local / ZST mode: the length is `cap`; zero it for the duration.
        g.sv.capacity.set(0);
        (cap, g.sv.buf.local_mut_ptr())
    } else {
        // Heap mode: read then zero the stored length.
        let (l, p) = g.sv.buf.heap_len_mut_p();
        g.sv.buf.set_heap_len(L::new(0));
        (l.as_usize(), p)
    };
    g.len = len;
    unsafe {
        // Phase 1: scan the prefix that stays in place; stop at the first
        // deleted element (no copying is needed before that point).
        while g.processed < len {
            let item_mut_ref = &mut *p.add(g.processed);
            if !f(item_mut_ref) {
                g.processed += 1;
                g.deleted += 1;
                ptr::drop_in_place(item_mut_ref);
                break;
            }
            g.processed += 1;
        }
        // Phase 2: after the first deletion every kept element is shifted
        // left by the number of deletions so far.
        while g.processed < len {
            let item_mut_ref = &mut *p.add(g.processed);
            if !f(item_mut_ref) {
                g.processed += 1;
                g.deleted += 1;
                ptr::drop_in_place(item_mut_ref);
            } else {
                ptr::copy_nonoverlapping(
                    item_mut_ref as *const _,
                    p.add(g.processed - g.deleted),
                    1,
                );
                g.processed += 1;
            }
        }
    }
}
/// Resizes to `new_len`, filling any new slots with values produced by
/// `f`. Panics if growing fails.
#[inline]
pub fn resize_with<F>(&mut self, new_len: usize, f: F)
where
    F: FnMut() -> T,
{
    let outcome = self.try_resize_with_impl::<F, DOHAE>(new_len, f);
    outcome.expect("smallvec resize_with failed")
}
#[inline]
pub fn try_resize_with<F>(&mut self, new_len: usize, f: F) -> Result<(), ReservationError>
where
F: FnMut() -> T,
{
self.try_resize_with_impl::<F, NOHAE>(new_len, f)
}
/// Shared implementation of `resize_with`/`try_resize_with`.
///
/// Shrinks via `truncate`; grows by reserving (when needed) and filling the
/// new tail with values produced by `f`. `SetLenOnDrop` commits the length
/// even if `f` panics part-way, keeping the elements written so far.
#[inline]
#[allow(clippy::comparison_chain)]
fn try_resize_with_impl<F, const HAE: bool>(
    &mut self,
    new_len: usize,
    mut f: F,
) -> Result<(), ReservationError>
where
    F: FnMut() -> T,
{
    let p;
    let len;
    let cap = self.capacity.as_usize();
    // The guard starts unarmed: no length fix-up on the early returns.
    let mut g = SetLenOnDrop::unarmed(self, 0);
    if mem::size_of::<T>() == 0 || cap <= C {
        // Local / ZST mode: `cap` is the current length.
        len = cap;
        if new_len < len {
            g.sv.truncate(new_len);
            return Ok(());
        } else if new_len > len {
            if mem::size_of::<T>() != 0 && new_len > C {
                // Growing past the local capacity: move to the heap.
                g.sv.try_reserve_impl::<HAE>(new_len - len)?;
                p = g.sv.buf.heap_mut_ptr();
            } else {
                // ZST lengths are bounded only by the length type.
                if mem::size_of::<T>() == 0 && new_len > L::MAX {
                    return Err(ReservationError::CapacityOverflow);
                }
                p = g.sv.buf.local_mut_ptr();
            }
        } else {
            return Ok(());
        }
    } else {
        // Heap mode.
        len = g.sv.buf.heap_len().as_usize();
        if new_len < len {
            g.sv.truncate(new_len);
            return Ok(());
        } else if new_len > len {
            if new_len > cap {
                g.sv.try_reserve_impl::<HAE>(new_len - len)?;
            }
            p = g.sv.buf.heap_mut_ptr();
        } else {
            return Ok(());
        }
    }
    // Arm the guard and append `new_len - len` freshly produced elements.
    g.len = len;
    g.armed = true;
    unsafe {
        let mut p = p.add(len);
        while g.len < new_len {
            p.write(f());
            p = p.add(1);
            g.len += 1;
        }
    }
    Ok(())
}
/// Removes the given range from the vector and returns an iterator over
/// the removed elements; panics on an invalid or out-of-bounds range.
///
/// The length is pre-truncated to `start`; the returned `Drain` records
/// the tail position and length so the tail can be moved back when the
/// iterator is dropped (see the drain module).
#[inline]
pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, L, SM, C>
where
    R: RangeBounds<usize>,
{
    // Resolve the exclusive end bound first so it can be validated against
    // the current length.
    let end = match range.end_bound() {
        Bound::Included(e) => e
            .checked_add(1)
            .unwrap_or_else(|| panic!("end bound overflows")),
        Bound::Excluded(e) => *e,
        Bound::Unbounded => self.len(),
    };
    let len = self.len();
    if end > len {
        panic!("invalid end bound");
    }
    let start = match range.start_bound() {
        Bound::Included(s) => *s,
        Bound::Excluded(s) => s
            .checked_add(1)
            .unwrap_or_else(|| panic!("start bound overflows")),
        Bound::Unbounded => 0,
    };
    if start > end {
        panic!("invalid range");
    }
    unsafe {
        let (iter, tail, tail_len) = if start < end {
            // Hide the drained range and the tail from the vector; `Drain`
            // restores the tail on drop.
            self.set_len(start);
            (
                slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start).iter(),
                L::new(end),
                L::new(len - end),
            )
        } else {
            // Empty range: `L::MAX` appears to act as a "nothing to
            // restore" sentinel — see the drain module.
            ([].iter(), L::new(L::MAX), L::new(L::MAX))
        };
        Drain {
            sv: ptr::NonNull::new_unchecked(self),
            iter,
            tail,
            tail_len,
        }
    }
}
}
impl<T, const C: usize, L, SM> SmallVec<T, C, L, SM>
where
T: Clone,
L: LengthType,
SM: SpareMemoryPolicy<T>,
{
/// Resizes to `new_len`, filling any new slots with clones of `value`.
/// Panics if growing fails.
#[inline]
pub fn resize(&mut self, new_len: usize, value: T) {
    let outcome = self.try_resize_impl::<DOHAE>(new_len, value);
    outcome.expect("smallvec resize failed")
}
#[inline]
pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), ReservationError> {
self.try_resize_impl::<NOHAE>(new_len, value)
}
/// Shared implementation of `resize`/`try_resize`.
///
/// Shrinks via `truncate`; grows by reserving (when needed) and filling the
/// new tail with clones of `value` — the final slot receives `value` itself,
/// saving one clone. `SetLenOnDrop` commits the length even if a clone
/// panics part-way.
#[inline]
#[allow(clippy::comparison_chain)]
fn try_resize_impl<const HAE: bool>(
    &mut self,
    new_len: usize,
    value: T,
) -> Result<(), ReservationError> {
    let p;
    let len;
    let cap = self.capacity.as_usize();
    // The guard starts unarmed: no length fix-up on the early returns.
    let mut g = SetLenOnDrop::unarmed(self, 0);
    if mem::size_of::<T>() == 0 || cap <= C {
        // Local / ZST mode: `cap` is the current length.
        len = cap;
        if new_len < len {
            g.sv.truncate(new_len);
            return Ok(());
        } else if new_len > len {
            if mem::size_of::<T>() != 0 && new_len > C {
                // Growing past the local capacity: move to the heap.
                g.sv.try_reserve_impl::<HAE>(new_len - len)?;
                p = g.sv.buf.heap_mut_ptr();
            } else {
                // ZST lengths are bounded only by the length type.
                if mem::size_of::<T>() == 0 && new_len > L::MAX {
                    return Err(ReservationError::CapacityOverflow);
                }
                p = g.sv.buf.local_mut_ptr();
            }
        } else {
            return Ok(());
        }
    } else {
        // Heap mode.
        len = g.sv.buf.heap_len().as_usize();
        if new_len < len {
            g.sv.truncate(new_len);
            return Ok(());
        } else if new_len > len {
            if new_len > cap {
                g.sv.try_reserve_impl::<HAE>(new_len - len)?;
            }
            p = g.sv.buf.heap_mut_ptr();
        } else {
            return Ok(());
        }
    }
    g.len = len;
    g.armed = true;
    unsafe {
        let mut p = p.add(len);
        let to_add = new_len - len;
        // Clone into all but the last new slot…
        for _ in 1..to_add {
            p.write(value.clone());
            p = p.add(1);
            g.len += 1;
        }
        // …then move `value` itself into the final slot.
        if to_add > 0 {
            p.write(value);
            g.len += 1;
        }
    }
    Ok(())
}
}
impl<T, const C: usize, L, SM> SmallVec<T, C, L, SM>
where
T: Copy,
L: LengthType,
SM: SpareMemoryPolicy<T>,
{
/// Appends a bitwise copy of every element of `s`. Panics if growing fails.
#[inline]
pub fn copy_from_slice(&mut self, s: &[T]) {
    let outcome = self.try_copy_from_slice_impl::<DOHAE>(s);
    outcome.expect("smallvec copy_from_slice failed")
}
#[inline]
pub fn try_copy_from_slice(&mut self, s: &[T]) -> Result<(), ReservationError> {
self.try_copy_from_slice_impl::<NOHAE>(s)
}
/// Shared implementation of `copy_from_slice`/`try_copy_from_slice`:
/// bit-copies all of `s` onto the end of the vector (`T: Copy`).
#[inline]
fn try_copy_from_slice_impl<const HAE: bool>(
    &mut self,
    s: &[T],
) -> Result<(), ReservationError> {
    if mem::size_of::<T>() == 0 {
        // ZST: nothing to copy — just grow the length, checking that it
        // still fits the length type.
        if let Some(c) = self.capacity.checked_add_usize(s.len()) {
            self.capacity = c;
            Ok(())
        } else {
            Err(ReservationError::CapacityOverflow)
        }
    } else {
        let cur_len;
        let cap = self.capacity.as_usize();
        // Obtain the (length slot, data pointer) pair, reserving first when
        // the spare capacity cannot hold `s`.
        let (len, p) = if cap <= C {
            cur_len = cap;
            let cap = C;
            if cap - cur_len >= s.len() {
                (&mut self.capacity, self.buf.local_mut_ptr())
            } else {
                self.try_reserve_impl::<HAE>(s.len())?
            }
        } else {
            cur_len = self.buf.heap_len().as_usize();
            if cap - cur_len >= s.len() {
                self.buf.heap_mut_len_mut_p()
            } else {
                self.try_reserve_impl::<HAE>(s.len())?
            }
        };
        unsafe { ptr::copy_nonoverlapping(s.as_ptr(), p.add(cur_len), s.len()) };
        len.add_assign(s.len());
        Ok(())
    }
}
}
/// Clones every element of `s` into consecutive slots starting at `p`,
/// bumping `len` after each write so that, if a `clone` panics, only the
/// elements written so far are counted.
///
/// # Safety
///
/// `p` must be valid for writes of at least `s.len()` elements.
#[inline]
unsafe fn clone_from_slice_unchecked<T, L>(s: &[T], len: &mut L, mut p: *mut T)
where
    T: Clone,
    L: LengthType,
{
    for e in s {
        p.write(e.clone());
        p = p.add(1);
        len.add_assign(1);
    }
}
pub mod errors;
use errors::*;
mod drain;
pub use drain::*;
mod macros;
mod traits;
#[cfg(all(test, feature = "std"))]
mod test_smallvec;