stalloc/lib.rs
#![no_std]
#![deny(missing_docs)]
#![cfg_attr(feature = "allocator-api", feature(allocator_api))]
#![warn(clippy::nursery, clippy::pedantic)]

//! Stalloc (Stack + alloc) is a fast first-fit memory allocator. From my benchmarking,
//! it can be over 3x as fast as the default OS allocator! This is because all memory
//! is allocated from the stack, which allows it to avoid all OS overhead. Since it
//! doesn't rely on the OS (aside from `SyncStalloc`), this library is `no_std` compatible.
//!
//! ```
//! use stalloc::SyncStalloc;
//!
//! // Create a global allocator with 1000 blocks, each 4 bytes in length.
//! #[global_allocator]
//! static GLOBAL: SyncStalloc<1000, 4> = SyncStalloc::new();
//!
//! fn main() {
//!     // All of these allocations are being handled by the global `SyncStalloc` instance.
//!     let s1 = String::from("Hello");
//!     let s2 = String::from("world");
//!     let msg = format!("{s1}, {s2}!");
//!
//!     assert!(!GLOBAL.is_oom());
//!     println!("Allocator state: {GLOBAL:?}");
//! }
//! ```
//!
//! To avoid the risk of OOM, you can "chain" your allocator to the system allocator, using it as a fallback.
//! ```
//! use stalloc::{AllocChain, SyncStalloc};
//! use std::alloc::System;
//!
//! #[global_allocator]
//! static GLOBAL: AllocChain<SyncStalloc<1000, 8>, System> = SyncStalloc::new().chain(&System);
//! ```
//!
//! # Feature flags
//! - `std` (on by default) — used in the implementation of `SyncStalloc`
//! - `allocator-api` (requires nightly)
//! - `allocator-api2` (pulls in the `allocator-api2` crate)
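//!
//! With the `allocator-api` feature on a nightly compiler, a `Stalloc` can also back
//! individual collections rather than the whole program. A minimal sketch (assuming the
//! consuming crate itself enables `allocator_api`; not compiled here):
//! ```ignore
//! #![feature(allocator_api)]
//! use stalloc::Stalloc;
//!
//! let alloc = Stalloc::<64, 8>::new();
//! // The `Allocator` impl is on `&Stalloc`, so collections take a reference.
//! let mut v: Vec<u32, &Stalloc<64, 8>> = Vec::new_in(&alloc);
//! v.extend([1, 2, 3]);
//! assert_eq!(v.len(), 3);
//! ```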

use core::cell::UnsafeCell;
use core::fmt::{self, Debug, Formatter};
use core::hint::assert_unchecked;
use core::mem::MaybeUninit;
use core::ptr::NonNull;

mod align;
pub use align::*;
mod unsafestalloc;
pub use unsafestalloc::*;
mod chain;
pub use chain::*;

mod alloc;
#[allow(clippy::wildcard_imports)]
use alloc::*;

#[cfg(feature = "std")]
mod syncstalloc;
#[cfg(feature = "std")]
pub use syncstalloc::*;

#[cfg(test)]
#[cfg(feature = "allocator-api")]
mod tests;

#[derive(Clone, Copy)]
#[repr(C)]
struct Header {
    next: u16,
    length: u16,
}

#[derive(Clone, Copy)]
#[repr(C)]
union Block<const B: usize>
where
    Align<B>: Alignment,
{
    header: Header,
    bytes: [MaybeUninit<u8>; B],
    _align: Align<B>,
}
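
// A free chunk keeps its `Header` in the first 4 bytes of its first block; the
// `B >= 4` assertion in `new()` guarantees that it fits. Blocks that belong to a
// live allocation hold raw bytes instead, so headers cost nothing while allocated.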

/// This function is always safe to call, as `ptr` is not dereferenced.
fn header_in_block<const B: usize>(ptr: *mut Block<B>) -> *mut Header
where
    Align<B>: Alignment,
{
    unsafe { &raw mut (*ptr).header }
}

/// Converts from `usize` to `u16` assuming that no truncation occurs.
/// Safety precondition: `val` must be less than or equal to `0xffff`.
#[allow(clippy::cast_possible_truncation)]
const unsafe fn as_u16(val: usize) -> u16 {
    unsafe {
        assert_unchecked(val <= 0xffff);
    }

    val as u16
}

// The `base` Header has a unique meaning here. Because `base.length` is useless (always 0),
// we use it as a special flag to check whether `data` is completely filled. Every call to
// `allocate()` and related functions must verify that `base.length != OOM_MARKER`.
const OOM_MARKER: u16 = u16::MAX;

/// A fast first-fit memory allocator.
///
/// When you create an instance of this allocator, you pass in a value for `L` and `B`.
/// `L` is the number of blocks, and `B` is the size of each block in bytes. The total size of this type
/// comes out to `L * B + 4` bytes, of which `L * B` can be used (4 bytes are needed to hold some metadata).
/// `B` must be a power of two between 4 and 2^29, and `L` must be a number in the range `1..65536`.
///
/// `B` represents the smallest unit of memory that the allocator can manage. If `B == 16`, then asking
/// for 17 bytes will give you a 32-byte allocation (the amount is rounded up).
/// The alignment of the allocator is always equal to `B`. For maximum efficiency, it is recommended
/// to set `B` equal to the alignment of the type you expect to store the most of. For example, if you're
/// storing a lot of `u64`s, you should set `B == 8`.
///
/// Note that `Stalloc` cannot be used as a global allocator because it is not thread-safe. To switch out
/// the global allocator, use `SyncStalloc` or `UnsafeStalloc`, which can be used concurrently.
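///
/// The metadata overhead can be checked directly. A small illustrative sketch (with
/// `B == 4`, the 4-byte metadata header adds no tail padding):
/// ```
/// use stalloc::Stalloc;
///
/// // 1000 blocks of 4 bytes each, plus 4 bytes of metadata.
/// assert_eq!(size_of::<Stalloc<1000, 4>>(), 1000 * 4 + 4);
/// ```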
#[repr(C)]
pub struct Stalloc<const L: usize, const B: usize>
where
    Align<B>: Alignment,
{
    data: UnsafeCell<[Block<B>; L]>,
    base: UnsafeCell<Header>,
}

impl<const L: usize, const B: usize> Stalloc<L, B>
where
    Align<B>: Alignment,
{
    /// Initializes a new empty `Stalloc` instance.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<200, 8>::new();
    /// ```
    #[must_use]
    #[inline]
    pub const fn new() -> Self {
        const {
            assert!(L >= 1 && L <= 0xffff, "block count must be in 1..65536");
            assert!(B >= 4, "block size must be at least 4 bytes");
        }

        let mut blocks = [Block {
            bytes: const { [MaybeUninit::uninit(); B] },
        }; L];

        // Write the first header. SAFETY: we have already checked that `L <= 0xffff`.
        blocks[0].header = Header {
            next: 0,
            length: unsafe { as_u16(L) },
        };

        Self {
            base: UnsafeCell::new(Header { next: 0, length: 0 }),
            data: UnsafeCell::new(blocks),
        }
    }

    /// Checks if the allocator is completely out of memory.
    /// If this is false, then you are guaranteed to be able to allocate
    /// a layout with a size and alignment of `B` bytes.
    /// This runs in O(1).
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<200, 8>::new();
    /// assert!(!alloc.is_oom());
    /// let ptr = unsafe { alloc.allocate_blocks(200, 1).unwrap() };
    /// assert!(alloc.is_oom());
    /// ```
    pub const fn is_oom(&self) -> bool {
        unsafe { *self.base.get() }.length == OOM_MARKER
    }

    /// Checks if the allocator is empty.
    /// If this is true, then you are guaranteed to be able to allocate
    /// a layout with a size of `B * L` bytes and an alignment of `B` bytes.
    /// If this is false, then such an allocation is guaranteed to fail.
    /// This runs in O(1).
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<60, 4>::new();
    /// assert!(alloc.is_empty());
    ///
    /// let ptr = unsafe { alloc.allocate_blocks(60, 1).unwrap() };
    /// assert!(!alloc.is_empty());
    ///
    /// unsafe { alloc.deallocate_blocks(ptr, 60) };
    /// assert!(alloc.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        // The allocator is empty only if the first free chunk starts at block 0 and
        // spans all `L` blocks. Checking `next == 0` alone is not enough: a freed
        // chunk at index 0 can coexist with allocations later in the buffer.
        !self.is_oom()
            && unsafe {
                (*self.base.get()).next == 0 && usize::from((*self.header_at(0)).length) == L
            }
    }

    /// Resets the allocator to an empty state, freeing all of its memory.
    ///
    /// # Safety
    ///
    /// Calling this function immediately invalidates all pointers into the allocator. Calling
    /// `deallocate_blocks()` with an invalidated pointer will result in the free list being corrupted.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<60, 4>::new();
    ///
    /// let ptr1 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap();
    /// let ptr2 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap();
    /// let ptr3 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap();
    ///
    /// unsafe { alloc.clear() }; // invalidate all allocated pointers
    ///
    /// assert!(alloc.is_empty());
    /// ```
    pub unsafe fn clear(&self) {
        unsafe {
            (*self.base.get()).next = 0;
            (*self.base.get()).length = 0;
            (*self.header_at(0)).next = 0;
            (*self.header_at(0)).length = as_u16(L);
        }
    }

    /// Tries to allocate `size` blocks. If the allocation succeeds, a pointer is returned. This function
    /// never allocates more than necessary. Note that `align` is measured in units of `B`.
    ///
    /// # Safety
    ///
    /// `size` must be nonzero, and `align` must be a power of 2 in the range `1..=2^29 / B`.
    ///
    /// # Errors
    ///
    /// Will return `AllocError` if the allocation was unsuccessful, in which case this function was a no-op.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// const BLOCK_SIZE: usize = 4;
    /// let alloc = Stalloc::<10, BLOCK_SIZE>::new();
    ///
    /// let ptr = unsafe { alloc.allocate_blocks(10, 1) }.unwrap();
    /// unsafe { ptr.write_bytes(42, 10 * BLOCK_SIZE) };
    ///
    /// assert!(alloc.is_oom());
    /// ```
    pub unsafe fn allocate_blocks(
        &self,
        size: usize,
        align: usize,
    ) -> Result<NonNull<u8>, AllocError> {
        // Assert unsafe preconditions.
        unsafe {
            assert_unchecked(size >= 1 && align.is_power_of_two() && align <= 2usize.pow(29) / B);
        }

        if self.is_oom() {
            return Err(AllocError);
        }

        // Loop through the free list, and find the first header whose length satisfies the layout.
        unsafe {
            // `prev` and `curr` are pointers that run through the free list.
            let base = self.base.get();
            let mut prev = base;
            let mut curr = self.header_at((*base).next.into());

            loop {
                let curr_idx = usize::from((*prev).next);
                let next_idx = (*curr).next.into();

                // Check if the current free chunk satisfies the layout.
                let curr_chunk_len = (*curr).length.into();

                // If the alignment is more than 1, there might be spare blocks in front.
                // If it is extremely large, there might have to be more spare blocks than are available.
                let spare_front = (curr.addr() / B).wrapping_neg() % align;

                if spare_front + size <= curr_chunk_len {
                    let avail_blocks = curr_chunk_len - spare_front;
                    let avail_blocks_ptr = self.block_at(curr_idx + spare_front);
                    let spare_back = avail_blocks - size;

                    // If there are spare blocks in the back, add them to the free list.
                    if spare_back > 0 {
                        let spare_back_idx = curr_idx + spare_front + size;
                        let spare_back_ptr = self.header_at(spare_back_idx);
                        (*spare_back_ptr).next = as_u16(next_idx);
                        (*spare_back_ptr).length = as_u16(spare_back);

                        if spare_front > 0 {
                            (*curr).next = as_u16(spare_back_idx);
                            (*curr).length = as_u16(spare_front);
                        } else {
                            (*prev).next = as_u16(spare_back_idx);
                        }
                    } else if spare_front > 0 {
                        // Only the front of the chunk stays free: it keeps its place
                        // in the free list, so just shrink its length.
                        (*curr).length = as_u16(spare_front);
                    } else {
                        (*prev).next = as_u16(next_idx);
                        // If this was the last free chunk, set the OOM marker.
                        if next_idx == 0 {
                            (*base).length = OOM_MARKER;
                        }
                    }

                    return Ok(NonNull::new_unchecked(avail_blocks_ptr.cast()));
                }

                // If `next_idx` is 0, we have walked the whole free list without finding anything.
                if next_idx == 0 {
                    return Err(AllocError);
                }

                prev = curr;
                curr = self.header_at(next_idx);
            }
        }
    }

    /// Deallocates a pointer. This function always succeeds.
    ///
    /// # Safety
    ///
    /// `ptr` must point to an allocation, and `size` must be the number of blocks
    /// in the allocation. That is, `size` is always in `1..=L`.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<100, 16>::new();
    ///
    /// let ptr = unsafe { alloc.allocate_blocks(100, 1) }.unwrap();
    /// assert!(alloc.is_oom());
    ///
    /// unsafe { alloc.deallocate_blocks(ptr, 100) };
    /// assert!(alloc.is_empty());
    /// ```
    pub unsafe fn deallocate_blocks(&self, ptr: NonNull<u8>, size: usize) {
        // Assert unsafe precondition.
        unsafe {
            assert_unchecked(size >= 1 && size <= L);
        }

        let freed_ptr = header_in_block(ptr.as_ptr().cast());
        let freed_idx = self.index_of(freed_ptr);
        let base = self.base.get();
        let before = self.header_before(freed_idx);

        unsafe {
            let prev_next = (*before).next.into();
            (*freed_ptr).next = as_u16(prev_next);
            (*freed_ptr).length = as_u16(size);

            // Try to merge with the next free block.
            if freed_idx + size == prev_next {
                let header_to_merge = self.header_at(prev_next);
                (*freed_ptr).next = (*header_to_merge).next;
                (*freed_ptr).length += (*header_to_merge).length;
            }

            // Try to merge with the previous free block.
            if before.eq(&base) {
                (*base).next = as_u16(freed_idx);
                (*base).length = 0;
            } else if self.index_of(before) + usize::from((*before).length) == freed_idx {
                (*before).next = (*freed_ptr).next;
                (*before).length += (*freed_ptr).length;
            } else {
                // No merge is possible.
                (*before).next = as_u16(freed_idx);
            }
        }
    }

    /// Shrinks the allocation. This function always succeeds and never reallocates.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a valid allocation of `old_size` blocks, and `new_size` must be in `1..old_size`.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<100, 16>::new();
    ///
    /// let ptr = unsafe { alloc.allocate_blocks(100, 1) }.unwrap();
    /// assert!(alloc.is_oom());
    ///
    /// // shrink the allocation from 100 to 90 blocks
    /// unsafe { alloc.shrink_in_place(ptr, 100, 90) };
    /// assert!(!alloc.is_oom());
    /// ```
    pub unsafe fn shrink_in_place(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) {
        // Assert unsafe preconditions.
        unsafe {
            assert_unchecked(new_size > 0 && new_size < old_size);
        }

        let curr_block: *mut Block<B> = ptr.as_ptr().cast();
        let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;

        // A new chunk will be created in the gap.
        let new_idx = curr_idx + new_size;
        let spare_blocks = old_size - new_size;

        unsafe {
            let prev_free_chunk = self.header_before(curr_idx);
            let next_free_idx = (*prev_free_chunk).next.into(); // possibly zero
            let new_chunk = header_in_block(curr_block.add(new_size));

            (*prev_free_chunk).next = as_u16(new_idx);

            // Check if we can merge the new chunk with a free chunk immediately after it.
            if new_idx + spare_blocks == next_free_idx {
                let next_free_chunk = self.header_at(next_free_idx);
                (*new_chunk).next = (*next_free_chunk).next;
                (*new_chunk).length = as_u16(spare_blocks) + (*next_free_chunk).length;
            } else {
                (*new_chunk).next = as_u16(next_free_idx);
                (*new_chunk).length = as_u16(spare_blocks);
            }

            // We are definitely no longer OOM.
            (*self.base.get()).length = 0;
        }
    }

    /// Tries to grow the current allocation in-place. If that isn't possible, this function is a no-op.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`.
    ///
    /// # Errors
    ///
    /// Will return `AllocError` if the grow was unsuccessful, in which case this function was a no-op.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc = Stalloc::<100, 16>::new();
    ///
    /// let ptr = unsafe { alloc.allocate_blocks(25, 1) }.unwrap();
    /// assert!(!alloc.is_oom());
    ///
    /// // grow the allocation from 25 to 100 blocks
    /// unsafe { alloc.grow_in_place(ptr, 25, 100) }.unwrap();
    /// assert!(alloc.is_oom());
    /// ```
    pub unsafe fn grow_in_place(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
    ) -> Result<(), AllocError> {
        // Assert unsafe preconditions.
        unsafe {
            assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size);
        }

        let curr_block: *mut Block<B> = ptr.as_ptr().cast();
        let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;
        let prev_free_chunk = self.header_before(curr_idx);

        unsafe {
            let next_free_idx = (*prev_free_chunk).next.into();

            // The next free chunk must be directly adjacent to the current allocation.
            if curr_idx + old_size != next_free_idx {
                return Err(AllocError);
            }

            let next_free_chunk = self.header_at(next_free_idx);
            let room_to_grow = (*next_free_chunk).length.into();

            // There must be enough room to grow.
            let needed_blocks = new_size - old_size;
            if needed_blocks > room_to_grow {
                return Err(AllocError);
            }

            // Check if there would be any blocks left over after growing into the next chunk.
            let blocks_left_over = room_to_grow - needed_blocks;

            if blocks_left_over > 0 {
                let new_chunk_idx = next_free_idx + needed_blocks;
                let new_chunk_head = self.header_at(new_chunk_idx);

                // Insert the new chunk into the free list.
                (*prev_free_chunk).next = as_u16(new_chunk_idx);
                (*new_chunk_head).next = (*next_free_chunk).next;
                (*new_chunk_head).length = as_u16(blocks_left_over);
            } else {
                // The free chunk is completely consumed.
                (*prev_free_chunk).next = (*next_free_chunk).next;

                // If `prev_free_chunk` is the base pointer and we just set its `next` to 0, we are OOM.
                let base = self.base.get();
                if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 {
                    (*base).length = OOM_MARKER;
                }
            }

            Ok(())
        }
    }

    /// Tries to grow the current allocation in-place. If that isn't possible, the allocation grows
    /// by as much as it can, and the new length of the allocation is returned. The new length is
    /// guaranteed to be in the range `old_size..=new_size`.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`.
    ///
    /// # Examples
    /// ```
    /// use stalloc::Stalloc;
    ///
    /// let alloc1 = Stalloc::<7, 4>::new();
    /// unsafe {
    ///     let ptr = alloc1.allocate_blocks(3, 1).unwrap(); // allocate 3 blocks
    ///     let new_size = alloc1.grow_up_to(ptr, 3, 9999); // try to grow to a ridiculous amount
    ///     assert_eq!(new_size, 7); // can only grow up to 7
    /// }
    ///
    /// let alloc2 = Stalloc::<21, 16>::new();
    /// unsafe {
    ///     let ptr = alloc2.allocate_blocks(9, 1).unwrap(); // allocate 9 blocks
    ///     let new_size = alloc2.grow_up_to(ptr, 9, 21);
    ///     assert_eq!(new_size, 21); // grow was successful
    /// }
    /// ```
    pub unsafe fn grow_up_to(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) -> usize {
        // Assert unsafe preconditions.
        unsafe {
            assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size);
        }

        let curr_block: *mut Block<B> = ptr.as_ptr().cast();
        let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;
        let prev_free_chunk = self.header_before(curr_idx);

        unsafe {
            let next_free_idx = (*prev_free_chunk).next.into();

            // The next free chunk must be directly adjacent to the current allocation.
            if curr_idx + old_size != next_free_idx {
                return old_size;
            }

            let next_free_chunk = self.header_at(next_free_idx);
            let room_to_grow = (*next_free_chunk).length.into();

            // If there isn't enough room to grow fully, grow as much as possible.
            let needed_blocks = (new_size - old_size).min(room_to_grow);

            // Check if there would be any blocks left over after growing into the next chunk.
            let blocks_left_over = room_to_grow - needed_blocks;

            if blocks_left_over > 0 {
                let new_chunk_idx = next_free_idx + needed_blocks;
                let new_chunk_head = self.header_at(new_chunk_idx);

                // Insert the new chunk into the free list.
                (*prev_free_chunk).next = as_u16(new_chunk_idx);
                (*new_chunk_head).next = (*next_free_chunk).next;
                (*new_chunk_head).length = as_u16(blocks_left_over);
            } else {
                // The free chunk is completely consumed.
                (*prev_free_chunk).next = (*next_free_chunk).next;

                // If `prev_free_chunk` is the base pointer and we just set its `next` to 0, we are OOM.
                let base = self.base.get();
                if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 {
                    (*base).length = OOM_MARKER;
                }
            }

            old_size + needed_blocks
        }
    }
}

// Internal functions.
impl<const L: usize, const B: usize> Stalloc<L, B>
where
    Align<B>: Alignment,
{
    /// Gets the index of a header pointer into `data`. This function is always safe
    /// to call, but the result may not be meaningful.
    /// Even if the header is not at the start of the block (compiler's choice),
    /// dividing by `B` rounds down and produces the correct result.
    fn index_of(&self, ptr: *mut Header) -> usize {
        (ptr.addr() - self.data.get().addr()) / B
    }

    /// Safety precondition: `idx` must be in `0..L`.
    const unsafe fn block_at(&self, idx: usize) -> *mut Block<B> {
        let root: *mut Block<B> = self.data.get().cast();
        unsafe { root.add(idx) }
    }

    /// Safety precondition: `idx` must be in `0..L`.
    unsafe fn header_at(&self, idx: usize) -> *mut Header {
        header_in_block(unsafe { self.block_at(idx) })
    }

    /// This function is always safe to call. If `idx` is very large,
    /// the returned value will simply be the last header in the free list.
    /// Note: this function may return a pointer to `base`.
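    ///
    /// This lookup works because the free list is kept sorted by block index:
    /// `base -> h1 -> h2 -> ... -> 0`, where each chunk header's `next` is either 0
    /// (end of list) or a strictly larger index than its own.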
    fn header_before(&self, idx: usize) -> *mut Header {
        let mut ptr = self.base.get();

        unsafe {
            if (*ptr).length == OOM_MARKER || usize::from((*ptr).next) >= idx {
                return ptr;
            }

            loop {
                ptr = self.header_at((*ptr).next.into());
                let next_idx = usize::from((*ptr).next);
                if next_idx == 0 || next_idx >= idx {
                    return ptr;
                }
            }
        }
    }
}

impl<const L: usize, const B: usize> Debug for Stalloc<L, B>
where
    Align<B>: Alignment,
{
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "Stallocator with {L} blocks of {B} bytes each")?;

        let mut ptr = self.base.get();
        if unsafe { (*ptr).length } == OOM_MARKER {
            return write!(f, "\n\tNo free blocks (OOM)");
        }

        loop {
            unsafe {
                let idx = (*ptr).next.into();
                ptr = self.header_at(idx);

                let length = (*ptr).length;
                if length == 1 {
                    write!(f, "\n\tindex {idx}: {length} free block")?;
                } else {
                    write!(f, "\n\tindex {idx}: {length} free blocks")?;
                }

                if (*ptr).next == 0 {
                    return Ok(());
                }
            }
        }
    }
}

impl<const L: usize, const B: usize> Default for Stalloc<L, B>
where
    Align<B>: Alignment,
{
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
unsafe impl<const L: usize, const B: usize> Allocator for &Stalloc<L, B>
where
    Align<B>: Alignment,
{
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // We can only allocate memory in units of `B`, so round up.
        let size = layout.size().div_ceil(B);
        let align = layout.align().div_ceil(B);

        // If `size` is zero, give away a dangling pointer.
        if size == 0 {
            let addr = layout.align().try_into().unwrap();
            let dangling = NonNull::without_provenance(addr);
            return Ok(NonNull::slice_from_raw_parts(dangling, 0));
        }

        // SAFETY: We have made sure that `size` and `align` are valid.
        unsafe { self.allocate_blocks(size, align) }
            .map(|p| NonNull::slice_from_raw_parts(p, size * B))
    }

    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let ptr = self.allocate(layout)?;

        // We intentionally shorten the length of the allocated pointer and hence write fewer zeros.
        let ptr = NonNull::slice_from_raw_parts(ptr.cast(), layout.size());

        // SAFETY: We are filling in the entire allocated range with zeros.
        unsafe { ptr.cast::<u8>().write_bytes(0, ptr.len()) }
        Ok(ptr)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        let size = layout.size().div_ceil(B);

        if size == 0 {
            return;
        }

        // SAFETY: We just made sure that size != 0. Everything else is upheld by the caller.
        unsafe { self.deallocate_blocks(ptr, size) };
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let old_size = old_layout.size().div_ceil(B);
        let new_size = new_layout.size().div_ceil(B);
        let align = new_layout.align().div_ceil(B);

        // If the size hasn't changed, do nothing.
        if new_size == old_size {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_size * B));
        }

        // If the old size was 0, the pointer was dangling, so just allocate.
        if old_size == 0 {
            // SAFETY: we know that `new_size` is non-zero, because we just made sure
            // that `new_size != old_size`, and we know that `align` has a valid value.
            return unsafe {
                self.allocate_blocks(new_size, align)
                    .map(|p| NonNull::slice_from_raw_parts(p, new_size * B))
            };
        }

        unsafe {
            // Try to grow in place.
            // SAFETY: `ptr` and `old_size` are upheld by the caller. As for `new_size`,
            // we have already made sure that `old_size != new_size`, and the fact that
            // `new_size >= old_size` is upheld by the caller.
            if self.grow_in_place(ptr, old_size, new_size).is_ok() {
                Ok(NonNull::slice_from_raw_parts(ptr, new_size * B))
            } else {
                // Otherwise just reallocate and copy.
                // SAFETY: We have made sure that `new_size > 0` and that `align` is valid.
                let new = self.allocate_blocks(new_size, align)?;

                // SAFETY: We are copying all the necessary bytes from `ptr` into `new`.
                // `ptr` and `new` both point to an allocation of at least `old_layout.size()` bytes.
                ptr.copy_to_nonoverlapping(new, old_layout.size());

                // SAFETY: We already made sure that `old_size > 0`.
                self.deallocate_blocks(ptr, old_size);

                Ok(NonNull::slice_from_raw_parts(new, new_size * B))
            }
        }
    }

    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            // SAFETY: Upheld by the caller.
            let new_ptr = self.grow(ptr, old_layout, new_layout)?;
            let count = new_ptr.len() - old_layout.size();

            // SAFETY: We are filling in the extra capacity with zeros.
            new_ptr
                .cast::<u8>()
                .add(old_layout.size())
                .write_bytes(0, count);

            Ok(new_ptr)
        }
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let old_size = old_layout.size().div_ceil(B);
        let new_size = new_layout.size().div_ceil(B);

        // Check if the new size is zero, in which case we can just return a dangling pointer.
        if new_size == 0 {
            unsafe {
                // SAFETY: If `old_size` isn't zero, we need to free it. The caller
                // upholds that `ptr` and `old_size` are valid.
                if old_size != 0 {
                    self.deallocate_blocks(ptr, old_size);
                }

                let addr = new_layout.align().try_into().unwrap();
                let dangling = NonNull::without_provenance(addr);
                return Ok(NonNull::slice_from_raw_parts(dangling, 0));
            }
        }

        // We have to reallocate only if the alignment isn't good enough anymore.
        if !ptr.as_ptr().addr().is_multiple_of(new_layout.align()) {
            // Since the address of `ptr` must be a multiple of `B` (upheld by the caller),
            // entering this branch means that `new_layout.align() > B`.
            let align = new_layout.align() / B;

            unsafe {
                // SAFETY: We just made sure that `new_size > 0`, and `align` is always valid.
                let new = self.allocate_blocks(new_size, align)?;

                // SAFETY: We are copying all the bytes that must be retained from `ptr` into `new`.
                // `new` points to an allocation of at least `new_layout.size()` bytes, and `ptr`
                // points to an allocation of at least that many bytes (the caller upholds that
                // `new_layout.size() <= old_layout.size()`).
                ptr.copy_to_nonoverlapping(new, new_layout.size());

                // SAFETY: We already made sure that `old_size > 0`.
                self.deallocate_blocks(ptr, old_size);

                return Ok(NonNull::slice_from_raw_parts(new, new_size * B));
            }
        }

        // Check if the size hasn't changed.
        if old_size == new_size {
            return Ok(NonNull::slice_from_raw_parts(ptr, old_size * B));
        }

        // SAFETY: We just made sure that `new_size > 0` and `old_size > new_size`,
        // and `ptr` and `old_size` are valid (upheld by the caller).
        unsafe {
            self.shrink_in_place(ptr, old_size, new_size);
        }

        Ok(NonNull::slice_from_raw_parts(ptr, new_size * B))
    }
}

/// SAFETY: Since zero-sized allocations unconditionally succeed, it can be assumed
/// that a zero-sized allocation was allocated by this `Stalloc`. Otherwise, check whether
/// the pointer is contained within this `Stalloc`'s buffer.
unsafe impl<const L: usize, const B: usize> ChainableAlloc for Stalloc<L, B>
where
    Align<B>: Alignment,
{
    #[inline]
    fn claims(&self, ptr: *mut u8, layout: Layout) -> bool {
        let base_addr = self.data.get().addr();
        layout.size() == 0 || (ptr.addr() >= base_addr && ptr.addr() < base_addr + B * L)
    }
}

// Note: `chain` could become part of `ChainableAlloc` once const trait methods are stabilized.
impl<const L: usize, const B: usize> Stalloc<L, B>
where
    Align<B>: Alignment,
{
    /// Creates a new `AllocChain` containing this allocator and `next`.
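    ///
    /// # Examples
    /// A sketch mirroring the crate-level example, but with a plain `Stalloc`:
    /// ```
    /// use stalloc::Stalloc;
    /// use std::alloc::System;
    ///
    /// // Allocations that don't fit in the `Stalloc` fall through to `System`.
    /// let alloc = Stalloc::<100, 8>::new().chain(&System);
    /// ```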
    pub const fn chain<T>(self, next: &T) -> AllocChain<'_, Self, T>
    where
        Self: Sized,
    {
        AllocChain::new(self, next)
    }
}