allocator_api/liballoc/raw_vec.rs
1use core::cmp;
2use core::marker::PhantomData;
3use core::mem;
4use core::ops::Drop;
5use core::ptr::{self, NonNull};
6use core::slice;
7
8use crate::alloc::{Alloc, Layout, LayoutExt, handle_alloc_error};
9#[cfg(feature = "std")]
10use crate::alloc::Global;
11use crate::collections::CollectionAllocErr;
12use crate::collections::CollectionAllocErr::*;
13use crate::boxed::Box;
14
15/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
16/// a buffer of memory on the heap without having to worry about all the corner cases
17/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
18/// In particular:
19///
20/// * Produces NonNull::dangling() on zero-sized types
21/// * Produces NonNull::dangling() on zero-length allocations
22/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
23/// * Guards against 32-bit systems allocating more than isize::MAX bytes
24/// * Guards against overflowing your length
25/// * Aborts on OOM or calls handle_alloc_error as applicable
26/// * Avoids freeing NonNull::dangling()
27/// * Contains a ptr::NonNull and thus endows the user with all related benefits
28///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
30/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
31/// to handle the actual things *stored* inside of a RawVec.
32///
33/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use the capacity-growing logic to catch the overflows in your length
35/// that might occur with zero-sized types.
36///
37/// However this means that you need to be careful when round-tripping this type
38/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
39/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
40/// field. This allows zero-sized types to not be special-cased by consumers of
41/// this type.
#[allow(missing_debug_implementations)]
global_alloc! {
    // NOTE(review): `global_alloc!` presumably generates a second definition
    // with a defaulted `Global` allocator when the "std" feature is enabled
    // (see the `#[cfg(feature = "std")]` impls below) — confirm in the macro.
    pub struct RawVec<T, A: Alloc> {
        ptr: NonNull<T>,        // start of the allocation; dangling when nothing is allocated
        marker: PhantomData<T>, // we logically own `T`s (for drop check / variance)
        cap: usize,             // capacity in *elements*; `!0` for zero-sized `T`
        a: A,                   // the allocator that produced (and will free) `ptr`
    }
}
51
impl<T, A: Alloc> RawVec<T, A> {
    /// Like `new` but parameterized over the choice of allocator for
    /// the returned RawVec.
    ///
    /// Does not allocate: the pointer starts as `NonNull::dangling()`.
    pub fn new_in(a: A) -> Self {
        // FIXME(mark-i-m): use this line when `if`s are allowed in `const`
        //let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };

        // NonNull::dangling() doubles as "unallocated" and "zero-sized allocation"
        RawVec {
            ptr: NonNull::dangling(),
            marker: PhantomData,
            // `!0` is `usize::MAX`: zero-sized types start (and stay) at
            // "maximum capacity" so consumers' length-growth logic overflows
            // before any allocation is ever attempted.
            // FIXME(mark-i-m): use `cap` when ifs are allowed in const
            cap: [0, !0][(mem::size_of::<T>() == 0) as usize],
            a,
        }
    }

    /// Like `with_capacity` but parameterized over the choice of
    /// allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_in(cap: usize, a: A) -> Self {
        RawVec::allocate_in(cap, false, a)
    }

    /// Like `with_capacity_zeroed` but parameterized over the choice
    /// of allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
        RawVec::allocate_in(cap, true, a)
    }

    /// Shared implementation of `with_capacity_in`/`with_capacity_zeroed_in`:
    /// allocates room for `cap` elements (zero-initialized when `zeroed`)
    /// using allocator `a`.
    ///
    /// Panics with "capacity overflow" if the byte size overflows or exceeds
    /// the `alloc_guard` limit, and calls `handle_alloc_error` if the
    /// allocator reports failure.
    fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // `cap * elem_size` must not overflow `usize`, and must also pass
            // the `isize::MAX` guard on small-pointer platforms.
            let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                NonNull::<T>::dangling()
            } else {
                let align = mem::align_of::<T>();
                let layout = Layout::from_size_align(alloc_size, align).unwrap();
                let result = if zeroed {
                    a.alloc_zeroed(layout)
                } else {
                    a.alloc(layout)
                };
                match result {
                    Ok(ptr) => ptr.cast(),
                    Err(_) => handle_alloc_error(layout),
                }
            };

            RawVec {
                ptr: ptr.into(),
                marker: PhantomData,
                cap,
                a,
            }
        }
    }
}
116
#[cfg(feature = "std")]
impl<T> RawVec<T, Global> {
    /// Creates the biggest possible RawVec (on the system heap)
    /// without allocating. If T has positive size, then this makes a
    /// RawVec with capacity 0. If T has 0 size, then it makes a
    /// RawVec with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    pub fn new() -> Self {
        Self::new_in(Global::default())
    }

    /// Creates a RawVec (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; cap]`. This is
    /// equivalent to calling RawVec::new when `cap` is 0 or T is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a RawVec with the requested capacity!
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    #[inline]
    pub fn with_capacity(cap: usize) -> Self {
        Self::with_capacity_in(cap, Global::default())
    }

    /// Like `with_capacity` but guarantees the buffer is zeroed.
    #[inline]
    pub fn with_capacity_zeroed(cap: usize) -> Self {
        Self::with_capacity_zeroed_in(cap, Global::default())
    }
}
154
155
156impl<T, A: Alloc> RawVec<T, A> {
157 /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
158 ///
159 /// # Undefined Behavior
160 ///
161 /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
162 /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
163 /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
164 pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
165 RawVec {
166 ptr: NonNull::new_unchecked(ptr),
167 marker: PhantomData,
168 cap,
169 a,
170 }
171 }
172}
173
#[cfg(feature = "std")]
impl<T> RawVec<T, Global> {
    /// Reconstitutes a RawVec from a pointer and a capacity.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must have been allocated on the system heap with exactly the
    /// given capacity. The capacity cannot exceed `isize::MAX` (only a
    /// concern on 32-bit systems). If the ptr and capacity come from a
    /// RawVec, then this is guaranteed.
    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
        Self::from_raw_parts_in(ptr, cap, Global)
    }
}
187
impl<T, A: Alloc> RawVec<T, A> {
    /// Converts a `Box<[T], A>` into a `RawVec<T, A>`.
    ///
    /// The resulting RawVec's capacity is the box's slice length, so the
    /// round trip through `into_box` is lossless.
    pub fn from_box(mut slice: Box<[T], A>) -> Self {
        unsafe {
            // Move the allocator out of the box with a bitwise read;
            // `mem::forget(slice)` below keeps the box's destructor from
            // running, so the buffer (and `a`) are not freed/dropped twice.
            let a = ptr::read(&slice.a);
            let result = RawVec::from_raw_parts_in(slice.as_mut_ptr(), slice.len(), a);
            mem::forget(slice);
            result
        }
    }
}
199
200impl<T, A: Alloc> RawVec<T, A> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// NonNull::dangling() if `cap = 0` or T is zero-sized. In the former case, you must
    /// be careful.
    ///
    /// The pointer is never null, but it is only valid to dereference within
    /// the allocated capacity.
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }
207
208 /// Gets the capacity of the allocation.
209 ///
210 /// This will always be `usize::MAX` if `T` is zero-sized.
211 #[inline(always)]
212 pub fn cap(&self) -> usize {
213 if mem::size_of::<T>() == 0 {
214 !0
215 } else {
216 self.cap
217 }
218 }
219
    /// Returns a shared reference to the allocator backing this RawVec.
    pub fn alloc(&self) -> &A {
        &self.a
    }

    /// Returns a mutable reference to the allocator backing this RawVec.
    pub fn alloc_mut(&mut self) -> &mut A {
        &mut self.a
    }
229
    /// Returns the `Layout` of the current allocation, or `None` when nothing
    /// has been allocated yet (`cap == 0`).
    ///
    /// Callers dealing with zero-sized `T` must not rely on this: for ZSTs
    /// `cap` is `!0`, yet the computed size is 0 (all callers assert
    /// `elem_size != 0` before using the result).
    fn current_layout(&self) -> Option<Layout> {
        if self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                Some(Layout::from_size_align_unchecked(size, align))
            }
        }
    }
243
    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal for when pushing elements one-at-a-time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use] extern crate allocator_api;
    /// # test_using_global! {
    /// use allocator_api::RawVec;
    /// # use std::ptr;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().add(self.len), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vec = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vec.push(1);
    /// # }
    /// # }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let (new_cap, uniq) = match self.current_layout() {
                Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be too large as to "not be
                    // satisfiable", so `Layout::from_size_align` will always
                    // return `Some`.
                    //
                    // tl;dr; we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
                    let new_cap = 2 * self.cap;
                    let new_size = new_cap * elem_size;
                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
                    let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(),
                                                 cur,
                                                 new_size);
                    match ptr_res {
                        Ok(ptr) => (new_cap, ptr.cast().into()),
                        Err(_) => handle_alloc_error(
                            Layout::from_size_align_unchecked(new_size, cur.align())
                        ),
                    }
                }
                None => {
                    // skip to 4 because tiny Vec's are dumb; but not if that
                    // would cause overflow
                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                    match self.a.alloc_array::<T>(new_cap) {
                        Ok(ptr) => (new_cap, ptr.into()),
                        Err(_) => handle_alloc_error(<Layout as LayoutExt>::array::<T>(new_cap).unwrap()),
                    }
                }
            };
            // Commit the new buffer only after the allocation succeeded; on
            // failure we diverged via `handle_alloc_error` above.
            self.ptr = uniq;
            self.cap = new_cap;
        }
    }
342
    /// Attempts to double the size of the type's backing allocation in place. This is common
    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// Returns `true` if the reallocation attempt has succeeded.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false, // nothing to double
            };

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX
            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
            // this can't overflow.
            //
            // Similarly like with `double` above we can go straight to
            // `Layout::from_size_align_unchecked` as we know this won't
            // overflow and the alignment is sufficiently small.
            let new_cap = 2 * self.cap;
            let new_size = new_cap * elem_size;
            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) {
                Ok(_) => {
                    // The block was extended in place, so the pointer is
                    // unchanged; record the capacity we asked for.
                    self.cap = new_cap;
                    true
                }
                Err(_) => {
                    false
                }
            }
        }
    }
391
    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
        -> Result<(), CollectionAllocErr> {

        // `Fallible`: allocation failure is surfaced as `Err(AllocErr)`.
        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
    }
398
    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        // `Infallible` routes allocation failure through `handle_alloc_error`
        // inside `reserve_internal`, so `AllocErr` can never reach this match.
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }
426
    /// Calculates the buffer's new capacity given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity in elements (not bytes): the larger of double
    /// the current capacity and the required capacity. Errors with
    /// `CapacityOverflow` if `used_cap + needed_extra_cap` overflows `usize`.
    fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize)
        -> Result<usize, CollectionAllocErr> {

        // Nothing we can really do about these checks :(
        let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        Ok(cmp::max(double_cap, required_cap))
    }
440
    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
        -> Result<(), CollectionAllocErr> {
        // `Fallible`: allocation failure is surfaced as `Err(AllocErr)`.
        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
    }
446
    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use] extern crate allocator_api;
    /// # test_using_global! {
    /// use allocator_api::RawVec;
    /// # use std::ptr;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// # }
    /// ```
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        // `Infallible` routes allocation failure through `handle_alloc_error`
        // inside `reserve_internal`, so `AllocErr` can never reach this match.
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }
    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behaviour
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// Returns `true` if the reallocation attempt has succeeded.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity. If the current `cap` is 0, we can't
            // reallocate in place.
            // Wrapping in case they give a bad `used_cap`
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }

            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)
                .unwrap_or_else(|_| capacity_overflow());

            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
            // (regardless of whether `self.cap - used_cap` wrapped).
            // Therefore we can safely call grow_in_place.

            let new_layout = LayoutExt::repeat(&Layout::new::<T>(), new_cap).unwrap().0;
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(
                NonNull::from(self.ptr).cast(), old_layout, new_layout.size(),
            ) {
                Ok(_) => {
                    // In-place growth succeeded: pointer unchanged, only the
                    // capacity needs updating.
                    self.cap = new_cap;
                    true
                }
                Err(_) => {
                    false
                }
            }
        }
    }
565
    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use ptr::write to avoid an
            // erroneous attempt to drop the contents, and we use
            // ptr::read to sidestep condition against destructuring
            // types that implement Drop.

            unsafe {
                let a = ptr::read(&self.a as *const A);
                self.dealloc_buffer();
                // Overwrite `*self` wholesale; `new_in` starts from a dangling
                // pointer so the freed buffer is never touched again.
                ptr::write(self, RawVec::new_in(a));
            }
        } else if self.cap != amount {
            unsafe {
                // We know here that our `amount` is greater than zero. This
                // implies, via the assert above, that capacity is also greater
                // than zero, which means that we've got a current layout that
                // "fits"
                //
                // We also know that `self.cap` is greater than `amount`, and
                // consequently we don't need runtime checks for creating either
                // layout
                let old_size = elem_size * self.cap;
                let new_size = elem_size * amount;
                let align = mem::align_of::<T>();
                let old_layout = Layout::from_size_align_unchecked(old_size, align);
                match self.a.realloc(NonNull::from(self.ptr).cast(),
                                     old_layout,
                                     new_size) {
                    Ok(p) => self.ptr = p.cast().into(),
                    Err(_) => handle_alloc_error(
                        Layout::from_size_align_unchecked(new_size, align)
                    ),
                }
            }
            self.cap = amount;
        }
    }
626}
627
/// How `reserve_internal` reports allocation failure: returned to the caller
/// (`Fallible`) or escalated via `handle_alloc_error` (`Infallible`).
enum Fallibility {
    Fallible,
    Infallible,
}

use Fallibility::*;

/// How `reserve_internal` grows: exactly the requested capacity (`Exact`) or
/// with doubling slack for amortized O(1) pushes (`Amortized`).
enum ReserveStrategy {
    Exact,
    Amortized,
}

use ReserveStrategy::*;
641
impl<T, A: Alloc> RawVec<T, A> {
    /// Shared implementation backing `reserve`, `reserve_exact`, `try_reserve`,
    /// and `try_reserve_exact`.
    ///
    /// Grows the buffer so it can hold `used_cap + needed_extra_cap` elements,
    /// choosing the new capacity per `strategy` and reporting allocation
    /// failure per `fallibility`. Returns `Ok(())` without reallocating when
    /// the current capacity already suffices.
    fn reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            use crate::alloc::AllocErr;

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return Ok(());
            }

            // Nothing we can really do about these checks :(
            let new_cap = match strategy {
                Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?,
                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
            };
            let new_layout = <Layout as LayoutExt>::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size())
                }
                None => self.a.alloc(new_layout),
            };

            // For the infallible entry points an allocation failure must
            // diverge via the global error hook rather than bubble up.
            match (&res, fallibility) {
                (Err(AllocErr), Infallible) => handle_alloc_error(new_layout),
                _ => {}
            }

            // For `Fallible` callers the `?` converts `AllocErr` into the
            // returned `CollectionAllocErr`.
            self.ptr = res?.cast().into();
            self.cap = new_cap;

            Ok(())
        }
    }
}
694
impl<T, A: Alloc> RawVec<T, A> {
    /// Converts the entire buffer into `Box<[T], A>`.
    ///
    /// While it is not *strictly* Undefined Behavior to call
    /// this procedure while some of the RawVec is uninitialized,
    /// it certainly makes it trivial to trigger it.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (see description of type for details)
    pub unsafe fn into_box(self) -> Box<[T], A> {
        // NOTE: not calling `cap()` here, actually using the real `cap` field!
        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
        // Move the allocator out bitwise; `mem::forget(self)` below prevents
        // RawVec's destructor from freeing the buffer the box now owns.
        let a = ptr::read(&self.a);
        let output: Box<[T], A> = Box::from_raw_in(slice, a);
        mem::forget(self);
        output
    }
}
713
impl<T, A: Alloc> RawVec<T, A> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    ///
    /// Safe to call when nothing was ever allocated: ZSTs and `cap == 0`
    /// buffers (dangling pointers) are skipped, never handed to the allocator.
    pub unsafe fn dealloc_buffer(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 {
            // `current_layout()` is `None` exactly when `cap == 0`, i.e. when
            // the pointer is still `NonNull::dangling()`.
            if let Some(layout) = self.current_layout() {
                self.a.dealloc(NonNull::from(self.ptr).cast(), layout);
            }
        }
    }
}
725
impl<T, A: Alloc> Drop for RawVec<T, A> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        // Safe here: the buffer is owned by `self` and never used after drop.
        unsafe { self.dealloc_buffer(); }
    }
}
732
733
734
735// We need to guarantee the following:
736// * We don't ever allocate `> isize::MAX` byte-size objects
737// * We don't overflow `usize::MAX` and actually allocate too little
738//
739// On 64-bit we just need to check for overflow since trying to allocate
740// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
741// an extra guard for this in case we're running on a platform which can use
742// all 4GB in user-space. e.g., PAE or x32
743
744#[inline]
745fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
746 if mem::size_of::<usize>() < 8 && alloc_size > core::isize::MAX as usize {
747 Err(CapacityOverflow)
748 } else {
749 Ok(())
750 }
751}
752
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
// Diverges (`-> !`), so callers can use it in `unwrap_or_else` positions.
fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}
759
#[cfg(all(test, feature = "std"))]
mod tests {
    use super::*;
    // Re-exports the crate's alloc items under the `allocator_api` name
    // (presumably so doc-test-style macros resolve inside this crate — TODO
    // confirm against the macro definitions).
    mod allocator_api {
        pub use crate::alloc::*;
    }

    #[test]
    fn allocator_param() {
        use crate::alloc::AllocErr;

        // Writing a test of integration between third-party
        // allocators and RawVec is a little tricky because the RawVec
        // API does not expose fallible allocation methods, so we
        // cannot check what happens when allocator is exhausted
        // (beyond detecting a panic).
        //
        // Instead, this just checks that the RawVec methods do at
        // least go through the Allocator API when it reserves
        // storage.

        // A dumb allocator that consumes a fixed amount of fuel
        // before allocation attempts start failing.
        struct BoundedAlloc { fuel: usize }
        unsafe impl Alloc for BoundedAlloc {
            unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
                let size = layout.size();
                if size > self.fuel {
                    return Err(AllocErr);
                }
                // Delegate the actual allocation; only charge fuel on success.
                match Global.alloc(layout) {
                    ok @ Ok(_) => { self.fuel -= size; ok }
                    err @ Err(_) => err,
                }
            }
            unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
                Global.dealloc(ptr, layout)
            }
        }

        let a = BoundedAlloc { fuel: 500 };
        let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
        assert_eq!(v.a.fuel, 450);
        v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
        assert_eq!(v.a.fuel, 250);
    }

    #[test]
    fn reserve_does_not_overallocate() {
        {
            let mut v: RawVec<u32> = RawVec::new();
            // First `reserve` allocates like `reserve_exact`
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test grow
            // factor is 2, so new capacity is 24, however, grow factor
            // of 1.5 is OK too. Hence `>= 18` in assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }
}