lite_alloc/single_threaded/
bump_freelist.rs

1use crate::{PAGE_SIZE, grow_memory};
2use core::{
3    alloc::{GlobalAlloc, Layout},
4    ptr::{self, null_mut},
5};
6
/// # Safety
///
/// Allocators in this module are designed for **single-threaded** environments.
/// `Sync` is implemented only to satisfy the bound that `GlobalAlloc` /
/// `#[global_allocator]` require; there is no actual synchronization — all
/// allocator state lives in unsynchronized `static mut`s.
///
/// Using this allocator from more than one thread is Undefined Behavior (UB).
/// Use it only in single-threaded environments (e.g. WASM or single-threaded
/// embedded targets).
unsafe impl Sync for BumpFreeListAllocator {}
19
/// Minimal Bump Pointer + Unordered Free List Allocator.
///
/// A zero-sized handle: all allocator state lives in the module-level statics
/// (`FREE_LIST`, `HEAP_TOP`, `HEAP_END`).
///
/// # Features
/// - **Extreme Size**: Removes binning and merging logic to minimize code size.
/// - **Fast Startup**: No initialization overhead; memory is grown lazily on
///   the first allocation.
/// - **Fragmentation**: Freed blocks are never split or merged, so long-running
///   processes will eventually OOM. Only suitable for short-lived tasks.
pub struct BumpFreeListAllocator;
34
35impl BumpFreeListAllocator {
36    pub const fn new() -> Self {
37        Self
38    }
39}
40
// Free-list node, written in-place over the first bytes of a freed block.
// It must store its own size because all freed blocks share one mixed list —
// there are no size-class bins from which the size could be inferred.
struct Node {
    // Next free block, or null at the end of the list.
    next: *mut Node,
    // Usable size of this block in bytes (rounded to a multiple of 16 by
    // `alloc`/`dealloc`, and at least 16 so the header always fits).
    size: usize,
}
47
// --------------------------------------------------------------------------
// Global State
//
// Kept in unsynchronized `static mut`s — sound only because this allocator is
// restricted to single-threaded use (see the `Sync` impl's safety note).
// --------------------------------------------------------------------------

// Head of the single unordered free list (LIFO: `dealloc` pushes at the head).
static mut FREE_LIST: *mut Node = null_mut();

// Bump pointer state: the next free address (`HEAP_TOP`) and the end of the
// currently grown heap (`HEAP_END`). Both start at 0; the first allocation
// grows memory and initializes them.
static mut HEAP_TOP: usize = 0;
static mut HEAP_END: usize = 0;
61
62unsafe impl GlobalAlloc for BumpFreeListAllocator {
63    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
64        // 1. Unify alignment to 16 bytes.
65        // This simplifies all pointer calculations and adapts to Wasm SIMD.
66        // 1. 统一对齐到 16 字节
67        // 这简化了所有指针计算,并且适配 Wasm SIMD
68        let align_req = layout.align().max(16);
69        let size = layout.size().max(16);
70
71        // Ensure size is also a multiple of 16 for easier management
72        // 确保 size 也是 16 的倍数,方便后续管理
73        let size = (size + 15) & !15;
74
75        // 2. Try to allocate from the free list (First Fit).
76        // Iterate through the list to find the first block that is large enough.
77        // Note: This is an O(N) operation. However, in short-lived applications, the list is usually short.
78        // 2. 尝试从空闲链表分配 (First Fit)
79        // 遍历链表找到第一个足够大的块。
80        // 注意:这是 O(N) 操作。但在短生命周期应用中,链表通常很短。
81        unsafe {
82            let mut prev = ptr::addr_of_mut!(FREE_LIST);
83            let mut curr = *prev;
84
85            while !curr.is_null() {
86                if (*curr).size >= size {
87                    // Found a suitable block: remove from list
88                    // 找到合适的块:从链表中移除
89                    *prev = (*curr).next;
90                    return curr as *mut u8;
91                }
92                // Move to next node
93                // 移动到下一个节点
94                prev = ptr::addr_of_mut!((*curr).next);
95                curr = *prev;
96            }
97        }
98
99        // 3. No suitable block in the free list -> Use Bump Pointer allocation
100        // 3. 链表中没有合适的块 -> 使用 Bump Pointer 分配
101        // self.bump_alloc is unsafe
102        unsafe { self.bump_alloc(size, align_req) }
103    }
104
105    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
106        // 1. Calculate size (must be consistent with calculation in alloc)
107        // 1. 计算大小 (必须与 alloc 中的计算方式一致)
108        let size = layout.size().max(16);
109        let size = (size + 15) & !15;
110
111        // 2. Insert into free list at head (O(1)).
112        // No merging, simply thread it through.
113        // 2. 头插法插入空闲链表 (O(1))
114        // 不进行合并,直接通过
115        unsafe {
116            let node = ptr as *mut Node;
117            (*node).size = size;
118            (*node).next = FREE_LIST;
119            FREE_LIST = node;
120        }
121    }
122
123    #[cfg(feature = "realloc")]
124    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
125        // Optimization: Check if at heap top, if so, extend in place
126        // 优化:检查是否在堆顶,如果是则原地扩容
127        let old_size = (layout.size().max(16) + 15) & !15;
128        let req_new_size = (new_size.max(16) + 15) & !15;
129
130        // Accessing mutable statics requires unsafe block
131        let heap_top = unsafe { HEAP_TOP };
132
133        if ptr as usize + old_size == heap_top {
134            let diff = req_new_size.saturating_sub(old_size);
135            if diff == 0 {
136                return ptr;
137            }
138
139            // Try to extend heap top
140            // 尝试扩容堆顶
141            unsafe {
142                if HEAP_TOP + diff <= HEAP_END {
143                    HEAP_TOP += diff;
144                    return ptr;
145                }
146
147                // Request more pages
148                // 申请更多页面
149                let pages_needed =
150                    ((HEAP_TOP + diff - HEAP_END + PAGE_SIZE - 1) / PAGE_SIZE).max(1);
151                if grow_memory(pages_needed) != usize::MAX {
152                    HEAP_END += pages_needed * PAGE_SIZE;
153                    HEAP_TOP += diff;
154                    return ptr;
155                }
156            }
157        }
158
159        // Default fallback
160        // 默认回退
161        unsafe {
162            let new_ptr = self.alloc(Layout::from_size_align_unchecked(new_size, layout.align()));
163            if !new_ptr.is_null() {
164                ptr::copy_nonoverlapping(ptr, new_ptr, layout.size());
165                self.dealloc(ptr, layout);
166            }
167            new_ptr
168        }
169    }
170}
171
impl BumpFreeListAllocator {
    /// Bump-allocates `size` bytes at `align` from the heap top, growing
    /// linear memory on demand. Returns null if `grow_memory` fails.
    ///
    /// Callers (`alloc`) have already normalized `size` to a multiple of 16
    /// and `align` to a power of two >= 16.
    unsafe fn bump_alloc(&self, size: usize, align: usize) -> *mut u8 {
        unsafe {
            let mut ptr = HEAP_TOP;
            // Round up to the requested alignment (align is a power of two).
            ptr = (ptr + align - 1) & !(align - 1);

            // `ptr < HEAP_TOP` catches wrap-around from the align-up above.
            if ptr + size > HEAP_END || ptr < HEAP_TOP {
                let bytes_needed = (ptr + size).saturating_sub(HEAP_END);
                let pages_needed = ((bytes_needed + PAGE_SIZE - 1) / PAGE_SIZE).max(1);

                // `grow_memory` returns the previous size in pages, or
                // usize::MAX on failure (see the checks below).
                let prev_page = grow_memory(pages_needed);
                if prev_page == usize::MAX {
                    return null_mut();
                }

                if HEAP_END == 0 {
                    // First growth: anchor the heap at the start of the newly
                    // granted (page-aligned) region and re-align the pointer.
                    let memory_start = prev_page * PAGE_SIZE;
                    ptr = memory_start;
                    ptr = (ptr + align - 1) & !(align - 1);
                    HEAP_END = memory_start + pages_needed * PAGE_SIZE;
                } else {
                    // Subsequent growth: just extend the end marker.
                    // NOTE(review): assumes grow_memory always extends the
                    // same contiguous region (true for wasm memory.grow) —
                    // confirm for any non-wasm backend.
                    HEAP_END += pages_needed * PAGE_SIZE;
                }
            }

            HEAP_TOP = ptr + size;
            ptr as *mut u8
        }
    }

    /// Testing only: reset the internal state (free list + bump pointers).
    ///
    /// # Safety
    /// Undefined behavior if any allocation made by this allocator is still
    /// live when called.
    pub unsafe fn reset() {
        unsafe {
            FREE_LIST = null_mut();
            HEAP_TOP = 0;
            HEAP_END = 0;
        }
    }
}
217
#[cfg(test)]
mod tests {
    use super::*;
    use crate::reset_heap;
    use core::alloc::Layout;
    use std::sync::{Mutex, MutexGuard};

    // Serializes all tests: the allocator keeps its state in global statics.
    static TEST_MUTEX: Mutex<()> = Mutex::new(());

    /// Test harness: holds the global lock for the test's lifetime and resets
    /// the allocator state (plus the memory mock) on creation and on drop.
    struct SafeAllocator {
        inner: BumpFreeListAllocator,
        _guard: MutexGuard<'static, ()>,
    }

    impl SafeAllocator {
        fn new() -> Self {
            let guard = TEST_MUTEX.lock().unwrap();
            unsafe {
                BumpFreeListAllocator::reset(); // clear freelist + bump state
                reset_heap(); // reset the memory mock
            }
            Self {
                inner: BumpFreeListAllocator::new(),
                _guard: guard,
            }
        }

        fn alloc(&self, layout: Layout) -> *mut u8 {
            unsafe { self.inner.alloc(layout) }
        }

        fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            unsafe { self.inner.dealloc(ptr, layout) }
        }
    }

    impl Drop for SafeAllocator {
        fn drop(&mut self) {
            unsafe {
                BumpFreeListAllocator::reset();
                reset_heap();
            }
        }
    }

    #[test]
    fn test_basic_allocation() {
        let alloc = SafeAllocator::new();
        let layout = Layout::new::<u64>();

        let p = alloc.alloc(layout);
        assert!(!p.is_null());

        // Round-trip a value through the returned memory.
        unsafe {
            *p.cast::<u64>() = 42;
            assert_eq!(*p.cast::<u64>(), 42);
        }

        alloc.dealloc(p, layout);
    }

    #[test]
    fn test_multiple_allocations() {
        let alloc = SafeAllocator::new();
        let layout = Layout::new::<u32>();

        let first = alloc.alloc(layout);
        let second = alloc.alloc(layout);

        assert!(!first.is_null());
        assert!(!second.is_null());
        assert_ne!(first, second);

        // Every request is rounded up to 16 bytes, so consecutive bump
        // allocations must be at least 16 bytes apart.
        let gap = (second as usize).wrapping_sub(first as usize);
        assert!(gap >= 16);
    }

    #[test]
    fn test_memory_grow() {
        let alloc = SafeAllocator::new();
        // PAGE_SIZE is 65536; two 40 KiB blocks exceed one page and force growth.
        let layout = Layout::from_size_align(40 * 1024, 16).unwrap();

        let a = alloc.alloc(layout);
        let b = alloc.alloc(layout);

        assert!(!a.is_null());
        assert!(!b.is_null());
        assert_ne!(a, b);

        // Touch every byte of both blocks to catch overlap / unmapped memory.
        unsafe {
            a.write_bytes(1, layout.size());
            b.write_bytes(2, layout.size());
        }
    }

    #[test]
    fn test_freelist_reuse() {
        let alloc = SafeAllocator::new();
        let layout = Layout::from_size_align(128, 16).unwrap();

        let first = alloc.alloc(layout);
        alloc.dealloc(first, layout);

        // The freelist is LIFO, so the block just freed is handed right back.
        let second = alloc.alloc(layout);
        assert_eq!(first, second);
    }

    #[test]
    fn test_realloc_in_place() {
        let alloc = SafeAllocator::new();
        let layout = Layout::from_size_align(32, 16).unwrap();

        let p = alloc.alloc(layout);
        assert!(!p.is_null());
        unsafe {
            p.write_bytes(0xAA, layout.size());
        }

        // Grow the topmost block; the custom realloc should extend in place.
        let new_size = 64;
        let new_layout = Layout::from_size_align(new_size, 16).unwrap();
        let grown = unsafe { alloc.inner.realloc(p, layout, new_size) };

        #[cfg(feature = "realloc")]
        assert_eq!(p, grown); // extended in place
        #[cfg(not(feature = "realloc"))]
        assert_ne!(p, grown); // default realloc allocates before freeing, so it moves

        unsafe {
            assert_eq!(*grown.cast::<u8>(), 0xAA); // old bytes preserved (front)
            assert_eq!(*grown.add(31).cast::<u8>(), 0xAA); // old bytes preserved (back)
            grown.add(32).write_bytes(0xBB, new_size - layout.size()); // fill new tail
            assert_eq!(*grown.add(32).cast::<u8>(), 0xBB);
        }
        alloc.dealloc(grown, new_layout);
    }

    #[test]
    fn test_realloc_not_in_place() {
        let alloc = SafeAllocator::new();

        let layout_a = Layout::from_size_align(32, 16).unwrap();
        let a = alloc.alloc(layout_a); // starts at the heap top...
        assert!(!a.is_null());

        let layout_b = Layout::from_size_align(32, 16).unwrap();
        let b = alloc.alloc(layout_b); // ...until this one displaces it
        assert!(!b.is_null());
        assert_ne!(a, b);

        unsafe {
            a.write_bytes(0xAA, layout_a.size());
        }

        // `a` is no longer the topmost block, so growing it must relocate.
        let new_size = 64;
        let new_layout = Layout::from_size_align(new_size, 16).unwrap();
        let moved = unsafe { alloc.inner.realloc(a, layout_a, new_size) };

        assert!(!moved.is_null());
        assert_ne!(a, moved); // must not be in place
        unsafe {
            assert_eq!(*moved.cast::<u8>(), 0xAA); // content preserved (front)
            assert_eq!(*moved.add(31).cast::<u8>(), 0xAA); // content preserved (back)
        }

        alloc.dealloc(moved, new_layout);
        alloc.dealloc(b, layout_b);
    }

    #[test]
    fn test_realloc_shrink() {
        let alloc = SafeAllocator::new();
        let layout = Layout::from_size_align(64, 16).unwrap();

        let p = alloc.alloc(layout);
        assert!(!p.is_null());
        unsafe {
            p.write_bytes(0xCC, layout.size());
        }

        // Shrink the topmost block.
        let new_size = 32;
        let new_layout = Layout::from_size_align(new_size, 16).unwrap();
        let shrunk = unsafe { alloc.inner.realloc(p, layout, new_size) };

        #[cfg(feature = "realloc")]
        assert_eq!(p, shrunk); // shrinking at the heap top stays in place

        #[cfg(not(feature = "realloc"))]
        assert_ne!(p, shrunk);

        unsafe {
            assert_eq!(*shrunk.cast::<u8>(), 0xCC); // content preserved (front)
            assert_eq!(*shrunk.add(31).cast::<u8>(), 0xCC); // content preserved (back)
        }
        alloc.dealloc(shrunk, new_layout);
    }
}