dyn_stack/
alloc.rs

// copied from libcore/liballoc

use core::alloc::Layout;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::{fmt, ptr};

extern crate alloc;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;

#[cfg(any(feature = "std", feature = "core-error"))]
impl crate::Error for AllocError {}

impl fmt::Display for AllocError {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.write_str("memory allocation failed")
	}
}

/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
/// data described via [`Layout`][].
///
/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers, because an
/// allocator like `MyAllocator([u8; N])` cannot be moved without updating the pointers to the
/// allocated memory.
///
/// Unlike [`alloc::alloc::GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an
/// underlying allocator does not support this (like jemalloc) or returns a null pointer (such as
/// `libc::malloc`), this must be caught by the implementation.
///
/// ### Currently allocated memory
///
/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
/// means that:
///
/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`],
///   or [`shrink`], and
///
/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
///   directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
///   [`shrink`] in a call that returned `Ok`. If `grow` or `shrink` returned `Err`, the passed
///   pointer remains valid.
///
/// [`allocate`]: Allocator::allocate
/// [`grow`]: Allocator::grow
/// [`shrink`]: Allocator::shrink
/// [`deallocate`]: Allocator::deallocate
///
/// ### Memory fitting
///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or, equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
///
/// * The block must be allocated with the same alignment as [`layout.align()`], and
///
/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
///   - `min` is the size of the layout most recently used to allocate the block, and
///   - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
///
/// [`layout.align()`]: Layout::align
/// [`layout.size()`]: Layout::size
///
/// # Safety
///
/// * Memory blocks returned from an allocator that are [*currently allocated*] must point to valid
///   memory and retain their validity while they are [*currently allocated*] and within the
///   borrow-checker lifetime of the allocator type itself.
///
/// * Any pointer to a memory block which is [*currently allocated*] may be passed to any other
///   method of the allocator.
///
/// [*currently allocated*]: #currently-allocated-memory
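///
/// # Examples
///
/// A minimal sketch of a forwarding allocator implemented on a ZST, assuming the `alloc` feature
/// (which provides [`Global`]) and that this module is exposed as `dyn_stack::alloc`:
///
/// ```
/// use dyn_stack::alloc::{AllocError, Allocator, Global};
/// use core::alloc::Layout;
/// use core::ptr::NonNull;
///
/// // A zero-sized wrapper that forwards every request to `Global`.
/// struct Forwarding;
///
/// unsafe impl Allocator for Forwarding {
///     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
///         Global.allocate(layout)
///     }
///
///     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
///         Global.deallocate(ptr, layout)
///     }
/// }
///
/// let layout = Layout::new::<u64>();
/// let block = Forwarding.allocate(layout).unwrap();
/// // SAFETY: `block` is currently allocated via `Forwarding` and `layout` fits it.
/// unsafe { Forwarding.deallocate(block.cast(), layout) };
/// ```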
pub unsafe trait Allocator {
	/// Attempts to allocate a block of memory.
	///
	/// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
	/// of `layout`.
	///
	/// The returned block may have a larger size than specified by `layout.size()`, and may or may
	/// not have its contents initialized.
	///
	/// The returned block of memory remains valid as long as it is [*currently allocated*] and
	/// within the borrow-checker lifetime of the allocator type itself.
	///
	/// # Errors
	///
	/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
	/// allocator's size or alignment constraints.
	///
	/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
	/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
	/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
	///
	/// Clients wishing to abort computation in response to an allocation error are encouraged to
	/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
	///
	/// [*currently allocated*]: #currently-allocated-memory
	/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
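	///
	/// # Examples
	///
	/// A usage sketch, assuming the `alloc` feature (which provides [`Global`]) and that this
	/// module is exposed as `dyn_stack::alloc`:
	///
	/// ```
	/// use dyn_stack::alloc::{Allocator, Global};
	/// use core::alloc::Layout;
	///
	/// let layout = Layout::new::<u64>();
	/// let block = Global.allocate(layout).unwrap();
	///
	/// // The block is valid for at least `layout.size()` bytes.
	/// unsafe { block.cast::<u64>().as_ptr().write(42) };
	///
	/// // SAFETY: `block` is currently allocated via `Global` and `layout` fits it.
	/// unsafe { Global.deallocate(block.cast(), layout) };
	/// ```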
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;

	/// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
	///
	/// # Errors
	///
	/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
	/// allocator's size or alignment constraints.
	///
	/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
	/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
	/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
	///
	/// Clients wishing to abort computation in response to an allocation error are encouraged to
	/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
	///
	/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let ptr = self.allocate(layout)?;
		// SAFETY: `allocate` returns a memory block valid for `ptr.len()` bytes
		unsafe { (ptr.as_ptr() as *mut u8).write_bytes(0, ptr.len()) }
		Ok(ptr)
	}

	/// Deallocates the memory referenced by `ptr`.
	///
	/// # Safety
	///
	/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
	/// * `layout` must [*fit*] that block of memory.
	///
	/// [*currently allocated*]: #currently-allocated-memory
	/// [*fit*]: #memory-fitting
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);

	/// Attempts to extend the memory block.
	///
	/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
	/// allocated memory. The pointer is suitable for holding data described by `new_layout`. To
	/// accomplish this, the allocator may extend the allocation referenced by `ptr` to fit the new
	/// layout.
	///
	/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
	/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if
	/// the allocation was grown in-place. The newly returned pointer is the only valid pointer
	/// for accessing this memory now.
	///
	/// If this method returns `Err`, then ownership of the memory block has not been transferred to
	/// this allocator, and the contents of the memory block are unaltered.
	///
	/// # Safety
	///
	/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
	/// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit
	///   it).
	/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
	///
	/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
	///
	/// [*currently allocated*]: #currently-allocated-memory
	/// [*fit*]: #memory-fitting
	///
	/// # Errors
	///
	/// Returns `Err` if the new layout does not meet the allocator's size and alignment
	/// constraints, or if growing otherwise fails.
	///
	/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
	/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
	/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
	///
	/// Clients wishing to abort computation in response to an allocation error are encouraged to
	/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
	///
	/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
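	///
	/// # Examples
	///
	/// A usage sketch, assuming the `alloc` feature (which provides [`Global`]) and that this
	/// module is exposed as `dyn_stack::alloc`:
	///
	/// ```
	/// use dyn_stack::alloc::{Allocator, Global};
	/// use core::alloc::Layout;
	///
	/// let old_layout = Layout::array::<u8>(16).unwrap();
	/// let new_layout = Layout::array::<u8>(32).unwrap();
	///
	/// let block = Global.allocate(old_layout).unwrap();
	/// // SAFETY: `block` is currently allocated and `new_layout` is no smaller than `old_layout`.
	/// let block = unsafe { Global.grow(block.cast(), old_layout, new_layout).unwrap() };
	/// // SAFETY: after a successful `grow`, `new_layout` fits the block.
	/// unsafe { Global.deallocate(block.cast(), new_layout) };
	/// ```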
	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() >= old_layout.size(),
			"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
		);

		let new_ptr = self.allocate(new_layout)?;

		// SAFETY: because `new_layout.size()` must be greater than or equal to
		// `old_layout.size()`, both the old and new memory allocation are valid for reads and
		// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
		// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
		// safe. The safety contract for `dealloc` must be upheld by the caller.
		unsafe {
			ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size());
			self.deallocate(ptr, old_layout);
		}

		Ok(new_ptr)
	}

	/// Behaves like `grow`, but also ensures that the new contents are set to zero before being
	/// returned.
	///
	/// The memory block will contain the following contents after a successful call to
	/// `grow_zeroed`:
	///   * Bytes `0..old_layout.size()` are preserved from the original allocation.
	///   * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on the
	///     allocator implementation. `old_size` refers to the size of the memory block prior to the
	///     `grow_zeroed` call, which may be larger than the size that was originally requested when
	///     it was allocated.
	///   * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory block
	///     returned by the `grow_zeroed` call.
	///
	/// # Safety
	///
	/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
	/// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit
	///   it).
	/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
	///
	/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
	///
	/// [*currently allocated*]: #currently-allocated-memory
	/// [*fit*]: #memory-fitting
	///
	/// # Errors
	///
	/// Returns `Err` if the new layout does not meet the allocator's size and alignment
	/// constraints, or if growing otherwise fails.
	///
	/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
	/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
	/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
	///
	/// Clients wishing to abort computation in response to an allocation error are encouraged to
	/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
	///
	/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
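	///
	/// # Examples
	///
	/// A usage sketch, assuming the `alloc` feature (which provides [`Global`]) and that this
	/// module is exposed as `dyn_stack::alloc`:
	///
	/// ```
	/// use dyn_stack::alloc::{Allocator, Global};
	/// use core::alloc::Layout;
	///
	/// let old_layout = Layout::array::<u8>(4).unwrap();
	/// let new_layout = Layout::array::<u8>(8).unwrap();
	///
	/// let block = Global.allocate(old_layout).unwrap();
	/// // SAFETY: the block is valid for `old_layout.size()` bytes.
	/// unsafe { block.cast::<u8>().as_ptr().write_bytes(0xAB, old_layout.size()) };
	///
	/// // SAFETY: `block` is currently allocated and `new_layout` is no smaller than `old_layout`.
	/// let block = unsafe { Global.grow_zeroed(block.cast(), old_layout, new_layout).unwrap() };
	///
	/// // Bytes `0..old_layout.size()` are preserved from the original allocation.
	/// let head = unsafe { core::slice::from_raw_parts(block.cast::<u8>().as_ptr(), 4) };
	/// assert_eq!(head, &[0xAB; 4]);
	///
	/// // SAFETY: after a successful `grow_zeroed`, `new_layout` fits the block.
	/// unsafe { Global.deallocate(block.cast(), new_layout) };
	/// ```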
	unsafe fn grow_zeroed(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() >= old_layout.size(),
			"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
		);

		let new_ptr = self.allocate_zeroed(new_layout)?;

		// SAFETY: because `new_layout.size()` must be greater than or equal to
		// `old_layout.size()`, both the old and new memory allocation are valid for reads and
		// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
		// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
		// safe. The safety contract for `dealloc` must be upheld by the caller.
		unsafe {
			ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size());
			self.deallocate(ptr, old_layout);
		}

		Ok(new_ptr)
	}

	/// Attempts to shrink the memory block.
	///
	/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
	/// allocated memory. The pointer is suitable for holding data described by `new_layout`. To
	/// accomplish this, the allocator may shrink the allocation referenced by `ptr` to fit the new
	/// layout.
	///
	/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
	/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if
	/// the allocation was shrunk in-place. The newly returned pointer is the only valid pointer
	/// for accessing this memory now.
	///
	/// If this method returns `Err`, then ownership of the memory block has not been transferred to
	/// this allocator, and the contents of the memory block are unaltered.
	///
	/// # Safety
	///
	/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
	/// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit
	///   it).
	/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
	///
	/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
	///
	/// [*currently allocated*]: #currently-allocated-memory
	/// [*fit*]: #memory-fitting
	///
	/// # Errors
	///
	/// Returns `Err` if the new layout does not meet the allocator's size and alignment
	/// constraints, or if shrinking otherwise fails.
	///
	/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
	/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
	/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
	///
	/// Clients wishing to abort computation in response to an allocation error are encouraged to
	/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
	///
	/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
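	///
	/// # Examples
	///
	/// A usage sketch, assuming the `alloc` feature (which provides [`Global`]) and that this
	/// module is exposed as `dyn_stack::alloc`:
	///
	/// ```
	/// use dyn_stack::alloc::{Allocator, Global};
	/// use core::alloc::Layout;
	///
	/// let old_layout = Layout::array::<u8>(32).unwrap();
	/// let new_layout = Layout::array::<u8>(16).unwrap();
	///
	/// let block = Global.allocate(old_layout).unwrap();
	/// // SAFETY: `block` is currently allocated and `new_layout` is no larger than `old_layout`.
	/// let block = unsafe { Global.shrink(block.cast(), old_layout, new_layout).unwrap() };
	/// // SAFETY: after a successful `shrink`, `new_layout` fits the block.
	/// unsafe { Global.deallocate(block.cast(), new_layout) };
	/// ```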
	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() <= old_layout.size(),
			"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
		);

		let new_ptr = self.allocate(new_layout)?;

		// SAFETY: because `new_layout.size()` must be lower than or equal to
		// `old_layout.size()`, both the old and new memory allocation are valid for reads and
		// writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
		// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
		// safe. The safety contract for `dealloc` must be upheld by the caller.
		unsafe {
			ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, new_layout.size());
			self.deallocate(ptr, old_layout);
		}

		Ok(new_ptr)
	}

	/// Creates a "by reference" adapter for this instance of `Allocator`.
	///
	/// The returned adapter also implements `Allocator` and will simply borrow this.
	#[inline(always)]
	fn by_ref(&self) -> &Self
	where
		Self: Sized,
	{
		self
	}
}

unsafe impl<T: ?Sized + Allocator> Allocator for &T {
	#[inline(always)]
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate(layout)
	}

	#[inline(always)]
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		(**self).deallocate(ptr, layout)
	}

	#[inline(always)]
	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate_zeroed(layout)
	}

	#[inline(always)]
	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn grow_zeroed(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow_zeroed(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).shrink(ptr, old_layout, new_layout)
	}
}

unsafe impl<T: ?Sized + Allocator> Allocator for &mut T {
	#[inline(always)]
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate(layout)
	}

	#[inline(always)]
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		(**self).deallocate(ptr, layout)
	}

	#[inline(always)]
	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate_zeroed(layout)
	}

	#[inline(always)]
	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn grow_zeroed(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow_zeroed(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).shrink(ptr, old_layout, new_layout)
	}
}

#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
unsafe impl<T: ?Sized + Allocator> Allocator for alloc::boxed::Box<T> {
	#[inline(always)]
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate(layout)
	}

	#[inline(always)]
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		(**self).deallocate(ptr, layout)
	}

	#[inline(always)]
	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate_zeroed(layout)
	}

	#[inline(always)]
	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn grow_zeroed(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow_zeroed(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).shrink(ptr, old_layout, new_layout)
	}
}

#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
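/// An allocator that forwards to the registered global allocator (`alloc::alloc::alloc` and
/// friends), additionally supporting zero-sized allocations by handing out dangling, suitably
/// aligned pointers.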
pub struct Global;

#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
unsafe impl Allocator for Global {
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let ptr = if layout.size() == 0 {
			// zero-sized allocations don't touch the heap: hand out a dangling pointer with the
			// requested alignment
			core::ptr::null_mut::<u8>().wrapping_add(layout.align())
		} else {
			unsafe { alloc::alloc::alloc(layout) }
		};

		if ptr.is_null() {
			Err(AllocError)
		} else {
			Ok(unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size())) })
		}
	}

	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let ptr = if layout.size() == 0 {
			// zero-sized allocations don't touch the heap: hand out a dangling pointer with the
			// requested alignment
			core::ptr::null_mut::<u8>().wrapping_add(layout.align())
		} else {
			unsafe { alloc::alloc::alloc_zeroed(layout) }
		};

		if ptr.is_null() {
			Err(AllocError)
		} else {
			Ok(unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size())) })
		}
	}

	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		// zero-sized blocks were never actually allocated
		if layout.size() != 0 {
			alloc::alloc::dealloc(ptr.as_ptr(), layout);
		}
	}

	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		core::debug_assert!(
			new_layout.size() >= old_layout.size(),
			"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
		);

		if old_layout.size() == 0 {
			// the old block is a dangling zero-sized "allocation" that never touched the heap,
			// so it must not be passed to `realloc`; growing is just a fresh allocation
			self.allocate(new_layout)
		} else if old_layout.align() == new_layout.align() {
			// `new_layout.size()` is nonzero here, since the caller guarantees it is at least
			// `old_layout.size()`
			let ptr = alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size());
			if ptr.is_null() {
				Err(AllocError)
			} else {
				Ok(unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, new_layout.size())) })
			}
		} else {
			let new_ptr = self.allocate(new_layout)?;

			// SAFETY: because `new_layout.size()` must be greater than or equal to
			// `old_layout.size()`, both the old and new memory allocation are valid for reads and
			// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
			// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
			// safe. The safety contract for `dealloc` must be upheld by the caller.
			unsafe {
				ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size());
				self.deallocate(ptr, old_layout);
			}

			Ok(new_ptr)
		}
	}

	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		core::debug_assert!(
			new_layout.size() <= old_layout.size(),
			"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
		);

		if old_layout.align() == new_layout.align() {
			let ptr = if new_layout.size() == 0 {
				// free the old block (a no-op if it was itself zero-sized) and hand out a
				// dangling pointer with the requested alignment, since zero-sized blocks don't
				// use the heap
				self.deallocate(ptr, old_layout);
				core::ptr::null_mut::<u8>().wrapping_add(new_layout.align())
			} else {
				alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size())
			};

			if ptr.is_null() {
				Err(AllocError)
			} else {
				Ok(unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, new_layout.size())) })
			}
		} else {
			let new_ptr = self.allocate(new_layout)?;

			// SAFETY: because `new_layout.size()` must be lower than or equal to
			// `old_layout.size()`, both the old and new memory allocation are valid for reads and
			// writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
			// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
			// safe. The safety contract for `dealloc` must be upheld by the caller.
			unsafe {
				ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, new_layout.size());
				self.deallocate(ptr, old_layout);
			}

			Ok(new_ptr)
		}
	}
}

/// Type-erased function table for an [`Allocator`] implementation, used by [`DynAlloc`]. The
/// `*const ()` argument points to the inline storage holding the erased allocator value.
#[derive(Copy, Clone, Debug)]
pub(crate) struct VTable {
	pub allocate: unsafe fn(*const (), Layout) -> Result<NonNull<[u8]>, AllocError>,
	pub allocate_zeroed: unsafe fn(*const (), Layout) -> Result<NonNull<[u8]>, AllocError>,
	pub deallocate: unsafe fn(*const (), ptr: NonNull<u8>, Layout),
	pub grow: unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,
	pub grow_zeroed: unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,
	pub shrink: unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,

	/// `None` for allocators wrapped via [`DynAlloc::try_new_unclone`].
	pub clone: Option<unsafe fn(*mut (), *const ())>,
	pub drop: unsafe fn(*mut ()),
}

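/// Type-erased [`Allocator`] whose backing allocator value is stored inline in a single
/// pointer-sized, pointer-aligned slot.
///
/// A minimal usage sketch, assuming the `alloc` feature (which provides [`Global`]) and that this
/// module is exposed as `dyn_stack::alloc`:
///
/// ```
/// use dyn_stack::alloc::{Allocator, DynAlloc, Global};
/// use core::alloc::Layout;
///
/// let alloc = DynAlloc::from_ref(&Global);
///
/// let layout = Layout::new::<[u32; 4]>();
/// let block = alloc.allocate(layout).unwrap();
/// // SAFETY: `block` is currently allocated via `alloc` and `layout` fits it.
/// unsafe { alloc.deallocate(block.cast(), layout) };
/// ```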
pub struct DynAlloc<'a> {
	pub(crate) alloc: UnsafeCell<MaybeUninit<*const ()>>,
	pub(crate) vtable: &'static VTable,
	__marker: PhantomData<&'a ()>,
}

// SAFETY: every constructor of `DynAlloc` requires the erased allocator to be `Send`.
unsafe impl Send for DynAlloc<'_> {}

unsafe impl Allocator for DynAlloc<'_> {
	#[inline]
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		unsafe { (self.vtable.allocate)(core::ptr::addr_of!(self.alloc) as *const (), layout) }
	}

	#[inline]
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		unsafe { (self.vtable.deallocate)(core::ptr::addr_of!(self.alloc) as *const (), ptr, layout) }
	}

	#[inline]
	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		unsafe { (self.vtable.allocate_zeroed)(core::ptr::addr_of!(self.alloc) as *const (), layout) }
	}

	#[inline]
	unsafe fn grow(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		unsafe { (self.vtable.grow)(core::ptr::addr_of!(self.alloc) as *const (), ptr, old_layout, new_layout) }
	}

	#[inline]
	unsafe fn grow_zeroed(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		unsafe { (self.vtable.grow_zeroed)(core::ptr::addr_of!(self.alloc) as *const (), ptr, old_layout, new_layout) }
	}

	#[inline]
	unsafe fn shrink(&self, ptr: NonNull<u8>, old_layout: Layout, new_layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		unsafe { (self.vtable.shrink)(core::ptr::addr_of!(self.alloc) as *const (), ptr, old_layout, new_layout) }
	}
}

impl Drop for DynAlloc<'_> {
	#[inline]
	fn drop(&mut self) {
		unsafe { (self.vtable.drop)(core::ptr::addr_of_mut!(self.alloc) as *mut ()) }
	}
}

impl Clone for DynAlloc<'_> {
	/// # Panics
	///
	/// Panics if the erased allocator is not cloneable, i.e. if it was wrapped with
	/// [`DynAlloc::try_new_unclone`]. Use [`DynAlloc::cloneable`] to check beforehand.
	#[inline]
	fn clone(&self) -> Self {
		let mut alloc = UnsafeCell::new(MaybeUninit::uninit());
		unsafe {
			self.vtable.clone.unwrap()(core::ptr::addr_of_mut!(alloc) as *mut (), core::ptr::addr_of!(self.alloc) as *const ());
		}

		Self {
			alloc,
			vtable: self.vtable,
			__marker: PhantomData,
		}
	}
}

impl<'a> DynAlloc<'a> {
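	/// Attempts to wrap `alloc` in a type-erased, non-cloneable [`DynAlloc`].
	///
	/// Returns the allocator back as `Err` if its size or alignment exceeds that of a pointer,
	/// since the value must be stored inline.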
	#[inline]
	pub fn try_new_unclone<A: 'a + Allocator + Send>(alloc: A) -> Result<Self, A> {
		if core::mem::size_of::<A>() <= core::mem::size_of::<*const ()>() && core::mem::align_of::<A>() <= core::mem::align_of::<*const ()>() {
			trait AllocUnclone: Allocator + Send {
				// SAFETY: each entry is the corresponding method of `Self`, with the `&Self`
				// receiver reinterpreted as a pointer to the inline storage.
				const VTABLE: &'static VTable = &unsafe {
					VTable {
						allocate: core::mem::transmute(Self::allocate as fn(&Self, _) -> _),
						allocate_zeroed: core::mem::transmute(Self::allocate_zeroed as fn(&Self, _) -> _),
						deallocate: core::mem::transmute(Self::deallocate as unsafe fn(&Self, _, _) -> _),
						grow: core::mem::transmute(Self::grow as unsafe fn(&Self, _, _, _) -> _),
						grow_zeroed: core::mem::transmute(Self::grow_zeroed as unsafe fn(&Self, _, _, _) -> _),
						shrink: core::mem::transmute(Self::shrink as unsafe fn(&Self, _, _, _) -> _),

						clone: None,
						drop: core::mem::transmute(core::ptr::drop_in_place::<Self> as unsafe fn(_) -> _),
					}
				};
			}
			impl<A: Allocator + Send> AllocUnclone for A {}

			Ok(Self {
				// move the allocator value into the pointer-sized inline storage
				alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) },
				vtable: <A as AllocUnclone>::VTABLE,
				__marker: PhantomData,
			})
		} else {
			Err(alloc)
		}
	}

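	/// Attempts to wrap `alloc` in a type-erased, cloneable [`DynAlloc`].
	///
	/// Returns the allocator back as `Err` if its size or alignment exceeds that of a pointer,
	/// since the value must be stored inline.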
	#[inline]
	pub fn try_new_clone<A: 'a + Clone + Allocator + Send>(alloc: A) -> Result<Self, A> {
		if core::mem::size_of::<A>() <= core::mem::size_of::<*const ()>() && core::mem::align_of::<A>() <= core::mem::align_of::<*const ()>() {
			trait AllocClone: Allocator + Send + Clone {
				// SAFETY: each entry is the corresponding method of `Self`, with the `&Self`
				// receiver reinterpreted as a pointer to the inline storage.
				const VTABLE: &'static VTable = &unsafe {
					VTable {
						allocate: core::mem::transmute(Self::allocate as fn(_, _) -> _),
						allocate_zeroed: core::mem::transmute(Self::allocate_zeroed as fn(_, _) -> _),
						deallocate: core::mem::transmute(Self::deallocate as unsafe fn(_, _, _) -> _),
						grow: core::mem::transmute(Self::grow as unsafe fn(_, _, _, _) -> _),
						grow_zeroed: core::mem::transmute(Self::grow_zeroed as unsafe fn(_, _, _, _) -> _),
						shrink: core::mem::transmute(Self::shrink as unsafe fn(_, _, _, _) -> _),

						clone: Some(|dst: *mut (), src: *const ()| (dst as *mut Self).write((*(src as *const Self)).clone())),
						drop: core::mem::transmute(core::ptr::drop_in_place::<Self> as unsafe fn(_) -> _),
					}
				};
			}
			impl<A: Allocator + Send + Clone> AllocClone for A {}

			Ok(Self {
				// move the allocator value into the pointer-sized inline storage
				alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) },
				vtable: <A as AllocClone>::VTABLE,
				__marker: PhantomData,
			})
		} else {
			Err(alloc)
		}
	}

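	/// Wraps a shared reference to an allocator in a cloneable [`DynAlloc`].
	///
	/// This cannot fail, since `&A` is always pointer-sized and pointer-aligned.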
	#[inline]
	pub fn from_ref<A: Allocator + Sync>(alloc: &'a A) -> Self {
		match Self::try_new_clone(alloc) {
			Ok(me) => me,
			// a reference always fits in the inline storage
			Err(_) => unreachable!(),
		}
	}

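	/// Wraps an exclusive reference to an allocator in a non-cloneable [`DynAlloc`].
	///
	/// This cannot fail, since `&mut A` is always pointer-sized and pointer-aligned.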
	#[inline]
	pub fn from_mut<A: Allocator + Send>(alloc: &'a mut A) -> Self {
		match Self::try_new_unclone(alloc) {
			Ok(me) => me,
			// a reference always fits in the inline storage
			Err(_) => unreachable!(),
		}
	}

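	/// Reborrows `self`, returning a non-cloneable [`DynAlloc`] with a shorter lifetime.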
	#[inline]
	pub fn by_mut(&mut self) -> DynAlloc<'_> {
		DynAlloc::from_mut(self)
	}

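	/// Returns `true` if the erased allocator supports cloning, i.e. if `self` was constructed
	/// through [`DynAlloc::try_new_clone`] or [`DynAlloc::from_ref`].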
	#[inline]
	pub fn cloneable(&self) -> bool {
		self.vtable.clone.is_some()
	}
}