// dynamo_memory/arena.rs
1// SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2// SPDX-License-Identifier: Apache-2.0
3
4//! # Arena Allocator
5//!
6//! This module provides an arena allocator for generally heap-like allocations.
7//! An [`ArenaAllocator`] can be created by taking ownership of a [`MemoryDescription`] instance.
8//!
//! The [`ArenaAllocator`] allocates contiguous memory regions using the [`offset_allocator`] crate,
10//! which builds on [Sebastian Aaltonen's ArenaAllocator](https://github.com/sebbbi/ArenaAllocator)
11
12use crate::StorageKind;
13
14use super::{MemoryDescription, StorageError};
15use offset_allocator::{Allocation, Allocator};
16use std::{
17    any::Any,
18    sync::{Arc, Mutex},
19};
20
/// Errors specific to arena allocation.
#[derive(Debug, thiserror::Error)]
pub enum ArenaError {
    /// The requested page size was not a power of two.
    #[error("Page size must be a power of 2")]
    PageSizeNotAligned,

    /// The underlying [`offset_allocator`] could not satisfy the request
    /// (e.g. the arena is exhausted or too fragmented for a contiguous run).
    #[error("Allocation failed")]
    AllocationFailed,

    /// The page count exceeded `u32::MAX`, which the underlying allocator
    /// cannot represent.
    #[error("Failed to convert pages to u32")]
    PagesNotConvertible,

    /// An error propagated from the storage layer.
    #[error("Storage error: {0}")]
    StorageError(#[from] StorageError),
}
36
/// Arena allocator backed by an instance of a [`MemoryDescription`] object.
///
/// This struct wraps an [`Allocator`] from the [`offset_allocator`] crate,
/// and provides methods for allocating memory from the storage.
///
/// The allocator is thread-safe, and the storage is shared between the allocator and the buffers.
#[derive(Clone)]
pub struct ArenaAllocator<S: MemoryDescription> {
    /// Backing storage; shared with every [`ArenaBuffer`] handed out.
    storage: Arc<S>,
    /// Page-granular offset allocator; the mutex makes allocate/free thread-safe.
    allocator: Arc<Mutex<Allocator>>,
    /// Allocation granularity in bytes; always a power of two (checked in `new`).
    page_size: u64,
}
49
50impl<S: MemoryDescription> std::fmt::Debug for ArenaAllocator<S> {
51    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
52        write!(
53            f,
54            "ArenaAllocator {{ storage: {:?}, page_size: {} }}",
55            self.storage, self.page_size
56        )
57    }
58}
59
/// A buffer allocated from an [`ArenaAllocator`].
///
/// This struct wraps an [`Allocation`] from the [`offset_allocator`] crate,
/// and provides methods for interacting with the allocated memory.
///
/// The buffer is backed by a [`MemoryDescription`] object, and the allocation is freed when the buffer is dropped.
pub struct ArenaBuffer<S: MemoryDescription> {
    /// Byte offset of this buffer from the start of the backing storage.
    offset: usize,
    /// Absolute address (storage base address + `offset`).
    address: usize,
    /// Size the caller asked for; may be smaller than the page-rounded span.
    requested_size: usize,
    /// Shared backing storage; keeps the storage alive while the buffer exists.
    storage: Arc<S>,
    /// Handle used to return the pages to the allocator on drop.
    allocation: Allocation,
    /// Shared allocator so `Drop` can free the allocation.
    allocator: Arc<Mutex<Allocator>>,
}
74
75impl<S: MemoryDescription> ArenaAllocator<S> {
76    /// Create a new [`ArenaAllocator`] from a [`MemoryDescription`] object and a page size.
77    ///
78    /// The page size must be a power of two.
79    ///
80    /// The allocator will divide the storage into pages and allocations will consist of a set of contiguous
81    /// pages whose aggregate size is greater than or equal to the requested size.
82    ///
83    /// The allocator is thread-safe, and the storage is shared between the allocator and the buffers.
84    pub fn new(storage: S, page_size: usize) -> std::result::Result<Self, ArenaError> {
85        let storage = Arc::new(storage);
86
87        if !page_size.is_power_of_two() {
88            return Err(ArenaError::PageSizeNotAligned);
89        }
90
91        // divide storage into pages,
92        // round down such that all pages are fully and any remaining bytes are discarded
93        let pages = storage.size() / page_size;
94
95        let allocator = Allocator::new(
96            pages
97                .try_into()
98                .map_err(|_| ArenaError::PagesNotConvertible)?,
99        );
100
101        let allocator = Arc::new(Mutex::new(allocator));
102
103        Ok(Self {
104            storage,
105            allocator,
106            page_size: page_size as u64,
107        })
108    }
109
110    /// Allocate a new [`ArenaBuffer`] from the allocator.
111    pub fn allocate(&self, size: usize) -> std::result::Result<ArenaBuffer<S>, ArenaError> {
112        let size = size as u64;
113        let pages = size.div_ceil(self.page_size);
114
115        let allocation = self
116            .allocator
117            .lock()
118            .unwrap()
119            .allocate(pages.try_into().map_err(|_| ArenaError::AllocationFailed)?)
120            .ok_or(ArenaError::AllocationFailed)?;
121
122        let offset = allocation.offset as u64 * self.page_size;
123        let address = self.storage.addr() + offset as usize;
124
125        debug_assert!(address + size as usize <= self.storage.addr() + self.storage.size());
126
127        Ok(ArenaBuffer {
128            offset: offset as usize,
129            address,
130            requested_size: size as usize,
131            allocation,
132            storage: self.storage.clone(),
133            allocator: self.allocator.clone(),
134        })
135    }
136}
137
138impl<S: MemoryDescription> std::fmt::Debug for ArenaBuffer<S> {
139    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
140        write!(
141            f,
142            "ArenaBuffer {{ addr: {}, size: {}, kind: {:?}, allocator: {:p} }}",
143            self.address,
144            self.requested_size,
145            self.storage.storage_kind(),
146            Arc::as_ptr(&self.storage)
147        )
148    }
149}
150
151impl<S: MemoryDescription + 'static> MemoryDescription for ArenaBuffer<S> {
152    fn addr(&self) -> usize {
153        self.address
154    }
155    fn size(&self) -> usize {
156        self.requested_size
157    }
158    fn storage_kind(&self) -> StorageKind {
159        self.storage.storage_kind()
160    }
161    fn as_any(&self) -> &dyn Any {
162        self
163    }
164    fn nixl_descriptor(&self) -> Option<NixlDescriptor> {
165        if let Some(mut descriptor) = self.storage.nixl_descriptor() {
166            descriptor.addr = self.addr() as u64;
167            descriptor.size = self.size();
168            Some(descriptor)
169        } else {
170            None
171        }
172    }
173}
174
175// NIXL integration helpers
176use super::nixl::{NixlCompatible, NixlDescriptor, RegisteredView};
177
impl<S> ArenaBuffer<S>
where
    S: MemoryDescription + NixlCompatible,
{
    /// Create a NIXL descriptor for this buffer with the correct offset and size.
    ///
    /// This can be used when the base storage implements NixlCompatible to create
    /// a descriptor that points to just this buffer's region.
    ///
    /// NOTE(review): this inherent method shadows the
    /// `MemoryDescription::nixl_descriptor` trait method of the same name;
    /// inherent methods win method resolution, so `buffer.nixl_descriptor()`
    /// calls this one when `S: NixlCompatible`. Consider renaming one of them.
    pub fn nixl_descriptor(&self) -> Option<NixlDescriptor> {
        let (base_ptr, _base_size, mem_type, device_id) = self.storage.nixl_params();

        // Calculate the offset pointer.
        // SAFETY: `self.offset` was derived by the arena allocator from a page
        // index inside the backing storage, and `allocate` debug-asserts the
        // buffer fits within the storage region, so `base_ptr + offset` stays
        // within the same allocated object.
        let buffer_ptr = unsafe { base_ptr.add(self.offset) };

        Some(NixlDescriptor {
            addr: buffer_ptr as u64,
            size: self.requested_size,
            mem_type,
            device_id,
        })
    }
}
200
201impl<S> ArenaBuffer<S>
202where
203    S: MemoryDescription + RegisteredView,
204{
205    /// Get the agent name from registered storage.
206    ///
207    /// This is a convenience method when using ArenaAllocator with NixlRegistered<T> storage.
208    pub fn agent_name(&self) -> &str {
209        self.storage.agent_name()
210    }
211
212    /// Get a NIXL descriptor that includes registration information.
213    pub fn registered_descriptor(&self) -> NixlDescriptor {
214        let base_descriptor = self.storage.descriptor();
215
216        // Create a new descriptor with adjusted address and size for this buffer
217        NixlDescriptor {
218            addr: base_descriptor.addr + self.offset as u64,
219            size: self.requested_size,
220            mem_type: base_descriptor.mem_type,
221            device_id: base_descriptor.device_id,
222        }
223    }
224}
225
226impl<S: MemoryDescription> Drop for ArenaBuffer<S> {
227    fn drop(&mut self) {
228        self.allocator.lock().unwrap().free(self.allocation);
229    }
230}
231
#[cfg(test)]
mod tests {
    use super::*;
    use crate::SystemStorage;

    // A 10-page arena of 4 KiB pages is large enough to exercise rounding,
    // exhaustion, and reuse without allocating much real memory.
    const PAGE_SIZE: usize = 4096;
    const PAGE_COUNT: usize = 10;
    const TOTAL_STORAGE_SIZE: usize = PAGE_SIZE * PAGE_COUNT;

    /// Builds an allocator over a fresh system-memory region of
    /// `TOTAL_STORAGE_SIZE` bytes with `PAGE_SIZE`-byte pages.
    fn create_allocator() -> ArenaAllocator<SystemStorage> {
        let storage = SystemStorage::new(TOTAL_STORAGE_SIZE).unwrap();
        ArenaAllocator::new(storage, PAGE_SIZE).unwrap()
    }

    #[test]
    /// Tests successful creation of an `ArenaAllocator` with valid page size.
    /// Verifies that `ArenaAllocator::new` returns `Ok`.
    fn test_arena_allocator_new_success() {
        let storage = SystemStorage::new(TOTAL_STORAGE_SIZE).unwrap();
        let allocator_result = ArenaAllocator::new(storage, PAGE_SIZE);
        assert!(allocator_result.is_ok());
    }

    #[test]
    /// Tests `ArenaAllocator` creation with an invalid page size (not a power of 2).
    /// Verifies that `ArenaAllocator::new` returns an `ArenaError::PageSizeNotAligned` error.
    fn test_arena_allocator_new_invalid_page_size() {
        let storage = SystemStorage::new(TOTAL_STORAGE_SIZE).unwrap();
        let allocator_result = ArenaAllocator::new(storage, PAGE_SIZE + 1);
        assert!(allocator_result.is_err());
        assert!(matches!(
            allocator_result,
            Err(ArenaError::PageSizeNotAligned)
        ));
    }

    #[test]
    /// Tests allocation of a single buffer that is a multiple of the page size.
    /// Verifies that the allocation is successful, the buffer has the correct size,
    /// and its address is the start of the storage area (as it's the first allocation).
    fn test_allocate_single_buffer() {
        let allocator = create_allocator();
        let buffer_size = PAGE_SIZE * 2;
        let buffer_result = allocator.allocate(buffer_size);
        assert!(buffer_result.is_ok());
        let buffer = buffer_result.unwrap();
        assert_eq!(buffer.size(), buffer_size);
        assert_eq!(buffer.addr(), allocator.storage.addr()); // First allocation starts at addr
    }

    #[test]
    /// Tests allocation of multiple buffers of varying sizes (multiples of page size).
    /// Verifies that allocations are successful, buffers have correct sizes, and their
    /// addresses are correctly offset from each other based on previous allocations.
    fn test_allocate_multiple_buffers() {
        let allocator = create_allocator();
        let buffer_size1 = PAGE_SIZE * 2;
        let buffer1_result = allocator.allocate(buffer_size1);
        assert!(buffer1_result.is_ok());
        let buffer1 = buffer1_result.unwrap();
        assert_eq!(buffer1.size(), buffer_size1);
        assert_eq!(buffer1.addr(), allocator.storage.addr());

        let buffer_size2 = PAGE_SIZE * 3;
        let buffer2_result = allocator.allocate(buffer_size2);
        assert!(buffer2_result.is_ok());
        let buffer2 = buffer2_result.unwrap();
        assert_eq!(buffer2.size(), buffer_size2);
        assert_eq!(buffer2.addr(), allocator.storage.addr() + buffer_size1);
    }

    #[test]
    /// Tests allocation of a single buffer that consumes the entire storage space.
    /// Verifies that the allocation is successful and the buffer has the correct size.
    fn test_allocate_exact_size() {
        let allocator = create_allocator();
        let buffer_size = TOTAL_STORAGE_SIZE;
        let buffer_result = allocator.allocate(buffer_size);
        assert!(buffer_result.is_ok());
        let buffer = buffer_result.unwrap();
        assert_eq!(buffer.size(), buffer_size);
    }

    #[test]
    /// Tests an attempt to allocate a buffer larger than the total available storage.
    /// Verifies that the allocation fails with `ArenaError::AllocationFailed`.
    fn test_allocate_too_large() {
        let allocator = create_allocator();
        let buffer_size = TOTAL_STORAGE_SIZE + PAGE_SIZE;
        let buffer_result = allocator.allocate(buffer_size);
        assert!(buffer_result.is_err());
        assert!(matches!(buffer_result, Err(ArenaError::AllocationFailed)));
    }

    #[test]
    /// Tests the `Drop` implementation of `ArenaBuffer` for freeing allocated pages.
    /// It allocates a buffer, lets it go out of scope (triggering `drop`), and then
    /// attempts to reallocate a buffer of the same size. This second allocation should
    /// succeed and reuse the initially allocated space, starting at the storage address.
    fn test_buffer_drop_and_reallocate() {
        let allocator = create_allocator();
        // we can not allocate two buffers of `buffer_size` as it will exceed the total storage size
        // if the memory is properly returned, then we should be able to reallocate the same size buffer
        let buffer_size = PAGE_SIZE * 6;

        {
            let buffer1 = allocator.allocate(buffer_size).unwrap();
            assert_eq!(buffer1.size(), buffer_size);
            assert_eq!(buffer1.addr(), allocator.storage.addr());
        } // buffer1 is dropped here, freeing its pages

        // Try to allocate a new buffer of the same size, it should succeed and reuse the space
        let buffer2_result = allocator.allocate(buffer_size);
        assert!(buffer2_result.is_ok());
        let buffer2 = buffer2_result.unwrap();
        assert_eq!(buffer2.size(), buffer_size);
        assert_eq!(buffer2.addr(), allocator.storage.addr()); // Should be at the start again
    }

    #[test]
    /// Tests filling the arena with two buffers that together consume all available pages
    /// and then attempting one more small allocation, which should fail.
    /// Verifies that after the allocator is full, `ArenaError::AllocationFailed` is returned.
    fn test_allocate_fill_and_fail() {
        let allocator = create_allocator();
        let buffer_size_half = TOTAL_STORAGE_SIZE / 2; // Each takes 5 pages

        let buffer1 = allocator.allocate(buffer_size_half).unwrap();
        assert_eq!(buffer1.size(), buffer_size_half);

        let buffer2 = allocator.allocate(buffer_size_half).unwrap();
        assert_eq!(buffer2.size(), buffer_size_half);
        assert_eq!(buffer2.addr(), allocator.storage.addr() + buffer_size_half);

        // Now try to allocate one more page, should fail
        let buffer3_result = allocator.allocate(PAGE_SIZE);
        assert!(buffer3_result.is_err());
        assert!(matches!(buffer3_result, Err(ArenaError::AllocationFailed)));
    }

    #[test]
    /// Tests allocation of a single byte.
    /// Verifies that the allocation is successful and the buffer reports its size as 1.
    /// The actual page consumption is tested behaviorally in exhaustion tests.
    fn test_allocate_non_page_aligned_single_byte() {
        let allocator = create_allocator();
        let buffer = allocator.allocate(1).unwrap();
        assert_eq!(buffer.size(), 1);
        // Internal page allocation is behaviorally tested by exhaustion tests
    }

    #[test]
    /// Tests allocation of a size that is one byte less than a full page.
    /// Verifies that the allocation is successful and the buffer reports the correct size.
    /// The actual page consumption is tested behaviorally in exhaustion tests.
    fn test_allocate_non_page_aligned_almost_full_page() {
        let allocator = create_allocator();
        let buffer = allocator.allocate(PAGE_SIZE - 1).unwrap();
        assert_eq!(buffer.size(), PAGE_SIZE - 1);
    }

    #[test]
    /// Tests allocation of a size that is one byte more than a full page.
    /// Verifies that the allocation is successful and the buffer reports the correct size.
    /// This will consume two pages, which is tested behaviorally in exhaustion tests.
    fn test_allocate_non_page_aligned_just_over_one_page() {
        let allocator = create_allocator();
        let buffer = allocator.allocate(PAGE_SIZE + 1).unwrap();
        assert_eq!(buffer.size(), PAGE_SIZE + 1);
    }

    #[test]
    /// Tests a specific scenario of non-page-aligned allocations leading to arena exhaustion.
    /// Allocates `(PAGE_COUNT / 2 * PAGE_SIZE) + 1` bytes. This requires `(PAGE_COUNT / 2) + 1` pages.
    /// The first allocation should succeed. The second allocation of the same size should fail
    /// because not enough pages remain, verifying the page rounding and consumption logic.
    fn test_allocate_half_plus_one_byte_twice_exhausts_arena() {
        let allocator = create_allocator();
        let allocation_size = (PAGE_COUNT / 2 * PAGE_SIZE) + 1;
        // This allocation will require (PAGE_COUNT / 2) + 1 pages.
        // For PAGE_COUNT = 10, this is 5 * PAGE_SIZE + 1 bytes, requiring 6 pages.

        let buffer1_result = allocator.allocate(allocation_size);
        assert!(buffer1_result.is_ok(), "First allocation should succeed");
        let buffer1 = buffer1_result.unwrap();
        assert_eq!(buffer1.size(), allocation_size);
        let pages_for_first_alloc = (allocation_size as u64).div_ceil(allocator.page_size);
        assert_eq!(pages_for_first_alloc, (PAGE_COUNT / 2 + 1) as u64);

        // Second allocation of the same size should fail because we don't have enough pages left.
        // Remaining pages = PAGE_COUNT - pages_for_first_alloc
        // For PAGE_COUNT = 10, remaining = 10 - 6 = 4 pages.
        // We need (PAGE_COUNT / 2 + 1) = 6 pages.
        let buffer2_result = allocator.allocate(allocation_size);
        assert!(
            buffer2_result.is_err(),
            "Second allocation should fail due to insufficient pages"
        );
        assert!(matches!(buffer2_result, Err(ArenaError::AllocationFailed)));
    }

    #[test]
    /// Tests filling the arena with multiple non-page-aligned allocations that each consume more
    /// than one page due to rounding (specifically, `PAGE_SIZE + 1` bytes, consuming 2 pages each).
    /// After filling the arena based on this consumption, it verifies that a subsequent small
    /// allocation fails with `ArenaError::AllocationFailed`.
    fn test_fill_with_non_aligned_and_fail() {
        let allocator = create_allocator();
        // This test verifies that multiple small allocations, each consuming slightly more than one page
        // (thus taking two pages from the underlying offset_allocator), correctly fill the arena.
        // Let's allocate (PAGE_SIZE + 1) multiple times. Each will take 2 pages.

        let single_alloc_size = PAGE_SIZE + 1; // Will take 2 pages
        let num_possible_allocs = PAGE_COUNT / 2; // e.g., 10 / 2 = 5 such allocations

        // Keep the buffers alive so their pages stay allocated until the end of the test.
        let mut allocated_buffers = Vec::with_capacity(num_possible_allocs);

        for i in 0..num_possible_allocs {
            let buffer_result = allocator.allocate(single_alloc_size);
            assert!(buffer_result.is_ok(), "Allocation {} should succeed", i + 1);
            let buffer = buffer_result.unwrap();
            assert_eq!(buffer.size(), single_alloc_size);
            allocated_buffers.push(buffer);
        }

        // At this point, all pages should be consumed (num_possible_allocs * 2 pages)
        // So, allocating even 1 byte should fail.
        let final_alloc_result = allocator.allocate(1);
        assert!(
            final_alloc_result.is_err(),
            "Final allocation of 1 byte should fail as arena is full"
        );
        assert!(matches!(
            final_alloc_result,
            Err(ArenaError::AllocationFailed)
        ));
    }
}