// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::alloc::Layout;
use std::cell::Cell;
use std::mem::ManuallyDrop;
use std::ptr::NonNull;

use bit_set::BitSet;
use bit_vec::BitVec;

use super::alloc;
use super::alloc_layout;

/// In debug mode we use a signature to ensure that raw pointers are pointing to the correct
/// shape of arena object.
#[cfg(debug_assertions)]
const SIGNATURE: usize = 0x1234567812345678;

/// A very-`unsafe` arena for raw pointers that falls back to raw allocation when full. This
/// should be used with great care, and ideally you should only be using the higher-level arenas
/// built on top of this.
///
/// # Safety
///
/// Items placed into the RawArena are dropped, but there is no check to ensure that an allocated
/// item is valid before dropping it. Use `recycle_without_drop` to return an item to the arena
/// without dropping it.
///
/// # Example
///
/// ```rust
/// # use deno_core::arena::RawArena;
/// // Create a RawArena with a capacity of 10 elements
/// let arena = RawArena::<usize>::with_capacity(10);
///
/// // Allocate elements in the arena
/// unsafe {
///   let mut elements = Vec::new();
///   for i in 0..10 {
///     let mut element_ptr = arena.allocate();
///     *element_ptr.as_mut() = i * 2;
///     elements.push(element_ptr);
///   }
///
///   // Recycle elements back into the arena
///   for &element_ptr in elements.iter() {
///     arena.recycle(element_ptr);
///   }
/// }
/// ```
pub struct RawArena<T> {
  #[cfg(debug_assertions)]
  signature: usize,
  /// Base pointer of the arena's backing allocation.
  alloc: NonNull<RawArenaEntry<T>>,
  /// One entry past the end of the backing allocation.
  past_alloc_end: NonNull<RawArenaEntry<T>>,
  /// High-water mark: every entry below this has been handed out at least once.
  max: Cell<NonNull<RawArenaEntry<T>>>,
  /// Head of the next-free list (equal to `max` when that list is empty).
  next: Cell<NonNull<RawArenaEntry<T>>>,
  /// Number of live entries currently allocated from the arena itself.
  allocated: Cell<usize>,
  /// Total number of entries in the backing allocation.
  capacity: usize,
}

/// The [`RawArena`] is [`Send`], but not [`Sync`].
unsafe impl<T> Send for RawArena<T> {}

static_assertions::assert_impl_one!(RawArena<()>: Send);
static_assertions::assert_not_impl_any!(RawArena<()>: Sync);

union RawArenaEntry<T> {
  /// If this is a vacant entry, points to the next entry.
  next: NonNull<RawArenaEntry<T>>,
  /// If this is a valid entry, contains the raw data.
  value: ManuallyDrop<T>,
}

impl<T> RawArenaEntry<T> {
  #[inline(always)]
  unsafe fn next(
    entry: NonNull<RawArenaEntry<T>>,
  ) -> NonNull<RawArenaEntry<T>> {
    (*(entry.as_ptr())).next
  }

  #[inline(always)]
  unsafe fn drop(entry: NonNull<RawArenaEntry<T>>) {
    std::ptr::drop_in_place(
      std::ptr::addr_of_mut!((*entry.as_ptr()).value) as *mut T
    );
  }
}

impl<T> RawArena<T> {
  /// Returns the constant overhead per allocation to assist with making allocations
  /// page-aligned.
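  ///
  /// # Example
  ///
  /// A minimal sketch of sizing an arena to roughly fill one page, assuming a 4 KiB page
  /// (the exact per-entry size depends on `T` and the platform):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// // 4 KiB is an assumed page size, used for illustration only.
  /// let page = 4096;
  /// let per_entry = RawArena::<u64>::allocation_size();
  /// assert_eq!(RawArena::<u64>::overhead(), per_entry - std::mem::size_of::<u64>());
  /// let capacity = page / per_entry;
  /// let arena = RawArena::<u64>::with_capacity(capacity);
  /// assert_eq!(arena.remaining(), capacity);
  /// ```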
  pub const fn overhead() -> usize {
    Self::allocation_size() - std::mem::size_of::<T>()
  }

  /// Returns the size of each allocation.
  pub const fn allocation_size() -> usize {
    std::mem::size_of::<RawArenaEntry<T>>()
  }

  /// Allocate an arena whose bookkeeping is fully initialized, but whose backing memory is not
  /// zeroed; we use the high-water mark to keep track of what we've initialized so far.
  ///
  /// This is safe, because dropping the [`RawArena`] without doing anything to
  /// it is safe.
  pub fn with_capacity(capacity: usize) -> Self {
    let alloc = alloc_layout(Self::layout(capacity));
    Self {
      #[cfg(debug_assertions)]
      signature: SIGNATURE,
      alloc,
      past_alloc_end: unsafe {
        NonNull::new_unchecked(alloc.as_ptr().add(capacity))
      },
      max: alloc.into(),
      next: Cell::new(alloc),
      allocated: Default::default(),
      capacity,
    }
  }

  // TODO(mmastrac): const when https://github.com/rust-lang/rust/issues/67521 is fixed
  fn layout(capacity: usize) -> Layout {
    match Layout::array::<RawArenaEntry<T>>(capacity) {
      Ok(l) => l,
      _ => panic!("Zero-sized objects are not supported"),
    }
  }

  /// Helper method to transmute internal pointers.
  ///
  /// # Safety
  ///
  /// For internal use.
  #[inline(always)]
  unsafe fn entry_to_data(entry: NonNull<RawArenaEntry<T>>) -> NonNull<T> {
    // Transmute the union
    entry.cast()
  }

  /// Helper method to transmute internal pointers.
  ///
  /// # Safety
  ///
  /// For internal use.
  #[inline(always)]
  unsafe fn data_to_entry(data: NonNull<T>) -> NonNull<RawArenaEntry<T>> {
    // Transmute the union
    data.cast()
  }

  /// Gets the next free entry, allocating if necessary. This is `O(1)` if we have free space in
  /// the arena, `O(?)` if we need to allocate from the allocator (where `?` is defined by the
  /// system allocator).
  ///
  /// # Safety
  ///
  /// As the memory area is considered uninitialized and you must be careful to fully and validly
  /// initialize the underlying data, this method is marked as unsafe.
  ///
  /// This pointer will be invalidated when we drop the `RawArena`, so the allocator API is `unsafe`
  /// as there are no lifetimes here.
  ///
  /// **IMPORTANT:** Ensure all allocated entries are fully initialized before dropping the
  /// `RawArena`, or use `recycle_without_drop` to return them to the arena without running
  /// their destructors. Dropping the `RawArena` drops every item still allocated within it,
  /// without validating that those items were ever initialized.
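  ///
  /// # Example
  ///
  /// A minimal sketch of the intended allocate/initialize/recycle cycle (the element type and
  /// values are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<String>::with_capacity(4);
  /// unsafe {
  ///   let ptr = arena.allocate();
  ///   // The slot is uninitialized, so initialize it with `write` rather than assignment.
  ///   std::ptr::write(ptr.as_ptr(), String::from("hello"));
  ///   assert_eq!(*ptr.as_ref(), "hello");
  ///   arena.recycle(ptr);
  /// }
  /// ```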
  pub unsafe fn allocate(&self) -> NonNull<T> {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.signature, SIGNATURE);
    let next = self.next.get();
    let max = self.max.get();

    // Check whether we have reached the high-water mark and need to extend it. The high-water
    // mark allows us to leave the allocation uninitialized, and assume that the remaining part
    // of the next-free list is a trivial linked list where each node points to the one after it.
    if max == next {
      // Are we out of room?
      if max == self.past_alloc_end {
        // We filled the RawArena, so fall back to the system allocator
        return Self::entry_to_data(alloc());
      }

      // Nope, we can extend by one
      let next = NonNull::new_unchecked(self.max.get().as_ptr().add(1));
      self.next.set(next);
      self.max.set(next);
    } else {
      // We haven't passed the high-water mark, so walk the internal next-free list
      // for our next allocation
      self.next.set(RawArenaEntry::next(next));
    }

    // Update accounting
    self.allocated.set(self.allocated.get() + 1);
    Self::entry_to_data(next)
  }

  /// Gets the next free entry, returning null if full. This is `O(1)`.
  ///
  /// # Safety
  ///
  /// As the memory area is considered uninitialized and you must be careful to fully and validly
  /// initialize the underlying data, this method is marked as unsafe.
  ///
  /// This pointer will be invalidated when we drop the `RawArena`, so the allocator API is `unsafe`
  /// as there are no lifetimes here.
  ///
  /// **IMPORTANT:** Ensure all allocated entries are fully initialized before dropping the
  /// `RawArena`, or use `recycle_without_drop` to return them to the arena without running
  /// their destructors. Dropping the `RawArena` drops every item still allocated within it,
  /// without validating that those items were ever initialized.
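  ///
  /// # Example
  ///
  /// A minimal sketch of the full-arena case (capacity and element type are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<usize>::with_capacity(1);
  /// unsafe {
  ///   let only = arena.allocate_if_space().expect("arena has room");
  ///   std::ptr::write(only.as_ptr(), 42);
  ///   // The arena is now full, so we get `None` rather than a system-allocator fallback.
  ///   assert!(arena.allocate_if_space().is_none());
  ///   arena.recycle(only);
  /// }
  /// ```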
  pub unsafe fn allocate_if_space(&self) -> Option<NonNull<T>> {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.signature, SIGNATURE);
    let next = self.next.get();
    let max = self.max.get();

    // Check whether we have reached the high-water mark and need to extend it. The high-water
    // mark allows us to leave the allocation uninitialized, and assume that the remaining part
    // of the next-free list is a trivial linked list where each node points to the one after it.
    if max == next {
      // Are we out of room?
      if max == self.past_alloc_end {
        // We filled the RawArena, so return None
        return None;
      }

      // Nope, we can extend by one
      let next = NonNull::new_unchecked(self.max.get().as_ptr().add(1));
      self.next.set(next);
      self.max.set(next);
    } else {
      // We haven't passed the high-water mark, so walk the internal next-free list
      // for our next allocation
      self.next.set(RawArenaEntry::next(next));
    }

    // Update accounting
    self.allocated.set(self.allocated.get() + 1);
    Some(Self::entry_to_data(next))
  }

  /// Returns the remaining capacity of this [`RawArena`] that can be provided without allocation.
  pub fn remaining(&self) -> usize {
    self.capacity - self.allocated.get()
  }

  /// Returns the number of entries currently allocated from this [`RawArena`]'s internal
  /// storage (fallback allocations made through the system allocator are not counted).
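  ///
  /// # Example
  ///
  /// A small sketch of how the two counters move (element type and value are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<u32>::with_capacity(2);
  /// assert_eq!((arena.remaining(), arena.allocated()), (2, 0));
  /// unsafe {
  ///   let ptr = arena.allocate();
  ///   std::ptr::write(ptr.as_ptr(), 7);
  ///   assert_eq!((arena.remaining(), arena.allocated()), (1, 1));
  ///   arena.recycle(ptr);
  /// }
  /// assert_eq!((arena.remaining(), arena.allocated()), (2, 0));
  /// ```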
  pub fn allocated(&self) -> usize {
    self.allocated.get()
  }

  /// Clear all internally-allocated entries, resetting the arena to its original state. Any
  /// non-vacant entries are dropped.
  ///
  /// This operation must walk the vacant list and is worst-case `O(n)`, where `n` is the largest
  /// size of this arena since the last clear operation.
  ///
  /// # Safety
  ///
  /// Does not clear system-allocator entries. Pointers previously [`allocate`](Self::allocate)d may still be in use.
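  ///
  /// # Example
  ///
  /// A minimal sketch of clearing an arena that still holds an initialized entry (the element
  /// type and value are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<String>::with_capacity(4);
  /// unsafe {
  ///   let ptr = arena.allocate();
  ///   std::ptr::write(ptr.as_ptr(), String::from("dropped by clear"));
  ///   // Drops the still-allocated entry and resets the arena to empty.
  ///   arena.clear_allocated();
  /// }
  /// assert_eq!(arena.remaining(), 4);
  /// ```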
  pub unsafe fn clear_allocated(&self) {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.signature, SIGNATURE);

    // We need to drop the allocated pointers, but we don't know which ones they are. We only
    // know the vacant slots.
    if self.allocated.get() > 0 {
      unsafe {
        // How many entries are we possibly using?
        let max = self.max.get();

        // Compute the vacant set by walking the `next` pointers
        let count = max.as_ptr().offset_from(self.alloc.as_ptr()) as usize;
        let mut vacant = BitVec::with_capacity(count);
        vacant.grow(count, false);

        let mut next = self.next.get();
        while next != max {
          let i = next.as_ptr().offset_from(self.alloc.as_ptr()) as usize;
          vacant.set(i, true);
          next = RawArenaEntry::next(next);
        }

        // Flip the bit vector so it now marks the occupied (non-vacant) entries
        vacant.negate();

        // Iterate over the occupied entries and drop them in place
        for alloc in BitSet::from_bit_vec(vacant).into_iter() {
          let entry = self.alloc.as_ptr().add(alloc);
          std::ptr::drop_in_place(
            std::ptr::addr_of_mut!((*entry).value) as *mut T
          );
        }
      }
    }

    self.max.set(self.alloc);
    self.next.set(self.alloc);
    self.allocated.set(0);
  }

  /// Recycle a used item, returning it to the next-free list. Drops the associated item
  /// in place before recycling.
  ///
  /// # Safety
  ///
  /// We assume this pointer is either internal to the arena (in which case we return it
  /// to the arena), or allocated via [`std::alloc::alloc`] in [`allocate`](Self::allocate).
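  ///
  /// # Example
  ///
  /// A minimal sketch showing the return value: `true` once the last in-arena item has been
  /// recycled (element type and values are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<u8>::with_capacity(2);
  /// unsafe {
  ///   let a = arena.allocate();
  ///   std::ptr::write(a.as_ptr(), 1);
  ///   let b = arena.allocate();
  ///   std::ptr::write(b.as_ptr(), 2);
  ///   assert!(!arena.recycle(a)); // `b` is still live
  ///   assert!(arena.recycle(b)); // last live item recycled; the arena is empty again
  /// }
  /// ```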
  pub unsafe fn recycle(&self, data: NonNull<T>) -> bool {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.signature, SIGNATURE);
    let mut entry = Self::data_to_entry(data);
    let mut emptied = false;
    RawArenaEntry::drop(entry);
    if entry >= self.alloc && entry < self.past_alloc_end {
      let next = self.next.get();
      let count = self.allocated.get() - 1;
      emptied = count == 0;
      self.allocated.set(count);
      entry.as_mut().next = next;
      self.next.set(entry);
    } else {
      std::alloc::dealloc(
        entry.as_ptr() as _,
        Layout::new::<RawArenaEntry<T>>(),
      );
    }
    emptied
  }

  /// Recycle a used item, returning it to the next-free list.
  ///
  /// # Safety
  ///
  /// We assume this pointer is either internal to the arena (in which case we return it
  /// to the arena), or allocated via [`std::alloc::alloc`] in [`allocate`](Self::allocate).
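  ///
  /// # Example
  ///
  /// A minimal sketch of moving a value out before returning its slot, so no destructor runs
  /// inside the arena (element type and value are illustrative):
  ///
  /// ```rust
  /// # use deno_core::arena::RawArena;
  /// let arena = RawArena::<String>::with_capacity(2);
  /// unsafe {
  ///   let ptr = arena.allocate();
  ///   std::ptr::write(ptr.as_ptr(), String::from("moved out"));
  ///   // Read the value out first, then recycle the slot without running its destructor.
  ///   let value = std::ptr::read(ptr.as_ptr());
  ///   arena.recycle_without_drop(ptr);
  ///   assert_eq!(value, "moved out");
  /// }
  /// ```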
  pub unsafe fn recycle_without_drop(&self, data: NonNull<T>) -> bool {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.signature, SIGNATURE);
    let mut entry = Self::data_to_entry(data);
    let mut emptied = false;
    if entry >= self.alloc && entry < self.past_alloc_end {
      let next = self.next.get();
      let count = self.allocated.get() - 1;
      emptied = count == 0;
      self.allocated.set(count);
      entry.as_mut().next = next;
      self.next.set(entry);
    } else {
      std::alloc::dealloc(
        entry.as_ptr() as _,
        Layout::new::<RawArenaEntry<T>>(),
      );
    }
    emptied
  }
}

impl<T> Drop for RawArena<T> {
  /// Drop the arena. All pointers are invalidated at this point, except for those
  /// allocated outside of the arena.
  ///
  /// The allocation APIs are unsafe because we don't track lifetimes here.
  fn drop(&mut self) {
    unsafe { self.clear_allocated() };

    #[cfg(debug_assertions)]
    {
      debug_assert_eq!(self.signature, SIGNATURE);
      self.signature = 0;
    }
    unsafe {
      std::alloc::dealloc(self.alloc.as_ptr() as _, Self::layout(self.capacity))
    }
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[must_use = "If you don't use this, it'll leak!"]
  unsafe fn allocate(arena: &RawArena<usize>, i: usize) -> NonNull<usize> {
    let mut new = arena.allocate();
    *new.as_mut() = i;
    new
  }

  #[test]
  fn test_add_remove_many() {
    let arena = RawArena::<usize>::with_capacity(1024);
    unsafe {
      for i in 0..2000 {
        let v = allocate(&arena, i);
        assert_eq!(arena.remaining(), 1023);
        assert_eq!(*v.as_ref(), i);
        arena.recycle(v);
        assert_eq!(arena.remaining(), 1024);
      }
    }
  }

  #[test]
  fn test_add_clear_many() {
    let arena = RawArena::<usize>::with_capacity(1024);
    unsafe {
      for i in 0..2000 {
        _ = allocate(&arena, i);
        assert_eq!(arena.remaining(), 1023);
        arena.clear_allocated();
        assert_eq!(arena.remaining(), 1024);
      }
    }
  }

  #[test]
  fn test_add_remove_many_separate() {
    let arena = RawArena::<usize>::with_capacity(1024);
    unsafe {
      let mut nodes = vec![];
      // This will spill over into memory allocations
      for i in 0..2000 {
        nodes.push(allocate(&arena, i));
      }
      assert_eq!(arena.remaining(), 0);
      for i in (0..2000).rev() {
        let node = nodes.pop().unwrap();
        assert_eq!(*node.as_ref(), i);
        arena.recycle(node);
      }
      assert_eq!(arena.remaining(), 1024);
    }
  }

  #[test]
  fn test_droppable() {
    // Make sure we correctly drop all the items in this arena if they are droppable
    let arena = RawArena::<_>::with_capacity(16);
    unsafe {
      let mut nodes = vec![];
      // This will spill over into memory allocations
      for i in 0..20 {
        let node = arena.allocate();
        std::ptr::write(
          node.as_ptr(),
          Box::new(std::future::ready(format!("iteration {i}"))),
        );
        nodes.push(node);
      }
      assert_eq!(arena.remaining(), 0);
      for node in nodes {
        arena.recycle(node);
      }
      assert_eq!(arena.remaining(), 16);
    }
  }

  #[test]
  fn test_no_drop() {
    let arena = RawArena::<String>::with_capacity(16);
    unsafe {
      arena.recycle_without_drop(arena.allocate());
      arena.clear_allocated();
    }
  }

  #[test]
  fn test_drops() {
    let arena = RawArena::<_>::with_capacity(16);
    unsafe {
      for i in 0..2 {
        let ptr = arena.allocate();
        std::ptr::write(ptr.as_ptr(), format!("iteration {i}"));
      }
      // Leave a space in the internal allocations
      let ptr = arena.allocate();
      std::ptr::write(ptr.as_ptr(), "deleted".to_owned());
      arena.recycle(ptr);
      arena.clear_allocated();
    }
  }

  #[test]
  fn test_drops_full() {
    #[allow(dead_code)]
    struct Droppable(String);

    let arena = RawArena::<_>::with_capacity(16);
    unsafe {
      for i in 0..2 {
        let ptr = arena.allocate();
        std::ptr::write(ptr.as_ptr(), Droppable(format!("iteration {i}")));
      }
      arena.clear_allocated();
    }
  }
}