//! `nearest/buf.rs` — backing storage for regions: the [`Buf`] trait plus the
//! stack-backed `FixedBuf` and heap-backed `AlignedBuf` implementations.
1#[cfg(feature = "alloc")]
2use core::{marker::PhantomData, ptr::NonNull};
3use core::{mem, mem::MaybeUninit, num::NonZero};
4
5use crate::{Flat, emitter::Pos, list::Segment};
6
7/// Backing storage for a [`Region`](crate::Region).
8///
9/// # Safety
10///
11/// Implementors must guarantee:
12/// - `as_ptr()` / `as_mut_ptr()` return pointers valid for `len()` bytes
13/// - Buffer base is aligned to at least `ALIGN` bytes
14/// - `resize` zero-fills new bytes `[old_len..new_len)` and preserves alignment
15pub unsafe trait Buf: Sized {
16  /// Buffer alignment guarantee.
17  const ALIGN: usize;
18
19  /// Create an empty buffer (len = 0).
20  fn empty() -> Self;
21
22  /// Raw pointer to the buffer start.
23  fn as_ptr(&self) -> *const u8;
24
25  /// Mutable raw pointer to the buffer start.
26  fn as_mut_ptr(&mut self) -> *mut u8;
27
28  /// View the buffer contents as a byte slice.
29  fn as_bytes(&self) -> &[u8];
30
31  /// Current byte length.
32  fn len(&self) -> u32;
33
34  /// Returns `true` if the buffer contains no bytes.
35  fn is_empty(&self) -> bool {
36    self.len() == 0
37  }
38
39  /// Current capacity in bytes.
40  fn capacity(&self) -> u32;
41
42  /// Grow or shrink the buffer, filling new bytes with `fill`.
43  fn resize(&mut self, new_len: u32, fill: u8);
44
45  /// Ensure at least `additional` bytes of spare capacity.
46  fn reserve(&mut self, additional: u32);
47
48  /// Append bytes from a slice.
49  fn extend_from_slice(&mut self, data: &[u8]);
50
51  /// Pad `len` up to the next multiple of `align`.
52  fn align_to(&mut self, align: usize) {
53    let rem = (self.len() as usize) % align;
54    if rem != 0 {
55      let pad = (align - rem) as u32;
56      self.resize(self.len() + pad, 0);
57    }
58  }
59
60  /// Align + allocate space for one `U`, return its position.
61  ///
62  /// # Compile-time invariant
63  ///
64  /// `align_of::<U>() <= ALIGN` — the buffer base is aligned to `ALIGN`
65  /// (at least 8), so any position that is a multiple of `align_of::<U>()` yields
66  /// a correctly aligned absolute address.
67  fn alloc<U: Flat>(&mut self) -> Pos {
68    const {
69      assert!(align_of::<U>() <= Self::ALIGN, "allocated type alignment exceeds buffer alignment");
70    }
71    self.align_to(align_of::<U>());
72    let pos = Pos(self.len());
73    let size = size_of::<U>() as u32;
74    self.resize(self.len() + size, 0);
75    pos
76  }
77
78  /// Expose provenance so [`Near::get`](crate::Near::get) can recover it
79  /// via `with_exposed_provenance`.
80  fn expose_provenance(&self) {
81    let _ = self.as_ptr().expose_provenance();
82  }
83}
84
85// ---------------------------------------------------------------------------
86// FixedBuf
87// ---------------------------------------------------------------------------
88
89/// Stack-backed fixed-capacity buffer with 8-byte alignment.
90///
91/// `FixedBuf<N>` provides `N` bytes of inline storage with no heap allocation,
92/// enabling `nearest` regions in `no_std` environments or on the stack.
93///
94/// # Panics
95///
96/// [`reserve`](Buf::reserve) and [`resize`](Buf::resize) panic if the
97/// requested size exceeds `N`.
#[repr(C, align(8))]
pub struct FixedBuf<const N: usize> {
  // Inline storage; only `data[..len]` is initialized (by resize/extend_from_slice).
  data: [MaybeUninit<u8>; N],
  // Count of initialized bytes; kept <= N by the resize/reserve assertions.
  len: u32,
}
103
impl<const N: usize> FixedBuf<N> {
  /// Create an empty fixed buffer.
  ///
  /// This is `const`, enabling `static` regions.
  #[must_use]
  pub const fn new() -> Self {
    // `MaybeUninit` lets the inline storage start uninitialized; bytes become
    // initialized lazily via `resize`/`extend_from_slice` before being read.
    Self { data: [MaybeUninit::uninit(); N], len: 0 }
  }
}
113
impl<const N: usize> Default for FixedBuf<N> {
  /// Equivalent to [`FixedBuf::new`]: an empty buffer.
  fn default() -> Self {
    Self::new()
  }
}
119
120// SAFETY: FixedBuf is repr(C, align(8)), so the data pointer is 8-byte aligned.
121// resize zero-fills new bytes. as_ptr/as_mut_ptr return pointers to self.data.
122unsafe impl<const N: usize> Buf for FixedBuf<N> {
123  const ALIGN: usize = 8;
124
125  fn empty() -> Self {
126    Self::new()
127  }
128
129  fn as_ptr(&self) -> *const u8 {
130    self.data.as_ptr().cast()
131  }
132
133  fn as_mut_ptr(&mut self) -> *mut u8 {
134    self.data.as_mut_ptr().cast()
135  }
136
137  fn as_bytes(&self) -> &[u8] {
138    if self.len == 0 {
139      return &[];
140    }
141    // SAFETY: data[..len] has been initialized by resize/extend_from_slice.
142    unsafe { core::slice::from_raw_parts(self.data.as_ptr().cast(), self.len as usize) }
143  }
144
145  fn len(&self) -> u32 {
146    self.len
147  }
148
149  fn capacity(&self) -> u32 {
150    N as u32
151  }
152
153  fn resize(&mut self, new_len: u32, fill: u8) {
154    assert!(new_len as usize <= N, "FixedBuf capacity exceeded: requested {new_len}, capacity {N}");
155    if new_len > self.len {
156      // SAFETY: new_len <= N, so data[len..new_len] is within bounds.
157      unsafe {
158        core::ptr::write_bytes(
159          self.data.as_mut_ptr().add(self.len as usize).cast::<u8>(),
160          fill,
161          (new_len - self.len) as usize,
162        );
163      }
164    }
165    self.len = new_len;
166  }
167
168  fn reserve(&mut self, additional: u32) {
169    let required = self.len.checked_add(additional).expect("capacity overflow");
170    assert!(required as usize <= N, "FixedBuf capacity exceeded: need {required}, capacity {N}");
171  }
172
173  fn extend_from_slice(&mut self, data: &[u8]) {
174    let n = data.len() as u32;
175    self.reserve(n);
176    // SAFETY: reserve checked capacity. data[len..len+n] is within bounds.
177    unsafe {
178      core::ptr::copy_nonoverlapping(
179        data.as_ptr(),
180        self.data.as_mut_ptr().add(self.len as usize).cast(),
181        data.len(),
182      );
183    }
184    self.len += n;
185  }
186}
187
188impl<const N: usize> Clone for FixedBuf<N> {
189  fn clone(&self) -> Self {
190    let mut new = Self::new();
191    if self.len > 0 {
192      // SAFETY: data[..len] is initialized. The new buffer has capacity N >= len.
193      unsafe {
194        core::ptr::copy_nonoverlapping(
195          self.data.as_ptr(),
196          new.data.as_mut_ptr(),
197          self.len as usize,
198        );
199      }
200      new.len = self.len;
201    }
202    new
203  }
204}
205
// SAFETY: FixedBuf contains only [MaybeUninit<u8>; N] and u32 — plain bytes
// owned by value, with no interior mutability or shared mutable state.
unsafe impl<const N: usize> Send for FixedBuf<N> {}

// SAFETY: &FixedBuf only provides &[u8] access (no interior mutability),
// so shared references across threads are harmless.
unsafe impl<const N: usize> Sync for FixedBuf<N> {}
211
212// ---------------------------------------------------------------------------
213// AlignedBuf (alloc feature)
214// ---------------------------------------------------------------------------
215
216/// Growable byte buffer with base pointer aligned to `max(align_of::<T>(), 8)`.
217///
218/// Uses `u32` for len/cap (max ~4 GiB, matches `Pos(u32)`).
219///
220/// The buffer base is always at least 8-byte aligned, which guarantees correct
221/// alignment for all standard primitive types (up to `i64`/`u64`). If the root
222/// type `T` has stricter alignment, the buffer uses that instead.
223///
224/// # Soundness
225///
226/// **Allocation**: Uses `alloc::{alloc, realloc, dealloc}` with a layout
227/// whose alignment is `BUF_ALIGN`. New regions are zero-filled. Allocation
228/// failure calls [`handle_alloc_error`](alloc::alloc::handle_alloc_error)
229/// (never returns null silently).
230///
231/// **Provenance exposure**: After every `grow` or `clone`, the new pointer's
232/// provenance is exposed via `expose_provenance`.
233/// This allows [`Near::get`](crate::Near::get) and session operations to
234/// recover the full allocation's provenance using
235/// [`with_exposed_provenance`](core::ptr::with_exposed_provenance) when
236/// following self-relative offsets.
237///
238/// **`Send`/`Sync`**: The buffer is exclusively owned (no aliased mutable
239/// pointers). `&AlignedBuf` only provides `&[u8]` access. Both impls are
240/// sound.
#[cfg(feature = "alloc")]
pub struct AlignedBuf<T> {
  // Allocation base; dangling (address BUF_ALIGN, no provenance) while cap == 0.
  ptr: NonNull<u8>,
  // Initialized prefix length in bytes (len <= cap).
  len: u32,
  // Allocated size in bytes; 0 means "never allocated".
  cap: u32,
  // `T` only selects BUF_ALIGN; no `T` values are ever stored here.
  _type: PhantomData<T>,
}
248
#[cfg(feature = "alloc")]
impl<T> AlignedBuf<T> {
  /// Base alignment for the allocation: `align_of::<T>()`, but at least 8.
  const BUF_ALIGN: usize = if align_of::<T>() >= 8 { align_of::<T>() } else { 8 };

  /// Create an empty buffer with a dangling aligned pointer.
  #[must_use]
  pub const fn new() -> Self {
    // Non-null, properly aligned, never dereferenced when cap == 0.
    // `without_provenance_mut` avoids an integer-to-pointer cast that would
    // confuse Miri's provenance tracking.
    // SAFETY: `BUF_ALIGN` is a power-of-two > 0, so `without_provenance_mut`
    // returns a non-null, well-aligned dangling pointer.
    let ptr = unsafe { NonNull::new_unchecked(core::ptr::without_provenance_mut(Self::BUF_ALIGN)) };
    Self { ptr, len: 0, cap: 0, _type: PhantomData }
  }

  /// Create an empty buffer pre-allocated to hold at least `capacity` bytes.
  #[must_use]
  pub fn with_capacity(capacity: u32) -> Self {
    let mut buf = Self::new();
    if capacity > 0 {
      buf.reserve(capacity);
    }
    buf
  }

  /// Reallocate to `new_cap`, preserving existing data.
  ///
  /// # Panics
  ///
  /// Panics if `new_cap <= cap`: the allocation paths below are only sound
  /// for a strictly growing capacity.
  fn grow(&mut self, new_cap: u32) {
    // Promoted from debug_assert: the first-allocation path below would pass
    // a zero-sized layout to `alloc` (undefined behavior) if new_cap == 0, so
    // this invariant must hold in release builds too, not just under debug.
    assert!(new_cap > self.cap, "grow requires a strictly larger capacity");
    let align = Self::BUF_ALIGN;
    let new_size = new_cap as usize;

    let ptr = if self.cap == 0 {
      // First allocation — cannot realloc a dangling pointer.
      let layout = alloc::alloc::Layout::from_size_align(new_size, align).expect("invalid layout");
      // SAFETY: `layout` has non-zero size (`new_cap > 0` by the assert above).
      let p = unsafe { alloc::alloc::alloc(layout) };
      if p.is_null() {
        alloc::alloc::handle_alloc_error(layout);
      }
      p
    } else {
      let old_layout =
        alloc::alloc::Layout::from_size_align(self.cap as usize, align).expect("invalid layout");
      // SAFETY: `self.ptr` was allocated with `old_layout`. `new_size > old_size`
      // (asserted above). The layout has non-zero size.
      let p = unsafe { alloc::alloc::realloc(self.ptr.as_ptr(), old_layout, new_size) };
      if p.is_null() {
        alloc::alloc::handle_alloc_error(
          alloc::alloc::Layout::from_size_align(new_size, align).expect("invalid layout"),
        );
      }
      p
    };

    // SAFETY: `ptr` is non-null (checked above), valid for `new_cap` bytes.
    // Zero-fill the new region `[cap..new_cap)`. Expose provenance so that
    // `Near::get` can recover it via `with_exposed_provenance`.
    unsafe {
      core::ptr::write_bytes(ptr.add(self.cap as usize), 0, (new_cap - self.cap) as usize);
      let _ = ptr.expose_provenance();
      self.ptr = NonNull::new_unchecked(ptr);
    }
    self.cap = new_cap;
  }
}
315
#[cfg(feature = "alloc")]
// SAFETY: AlignedBuf's BUF_ALIGN >= 8. Buffer base is properly aligned via Layout.
// resize zero-fills new bytes. as_ptr/as_mut_ptr return the allocation pointer.
unsafe impl<T> Buf for AlignedBuf<T> {
  const ALIGN: usize = Self::BUF_ALIGN;

  fn empty() -> Self {
    Self::new()
  }

  fn as_ptr(&self) -> *const u8 {
    self.ptr.as_ptr()
  }

  fn as_mut_ptr(&mut self) -> *mut u8 {
    self.ptr.as_ptr()
  }

  fn as_bytes(&self) -> &[u8] {
    if self.len == 0 {
      return &[];
    }
    // SAFETY: When `len > 0`, the buffer was allocated via `grow()` which
    // guarantees `ptr` is valid for `cap >= len` bytes, all initialized.
    unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.len as usize) }
  }

  fn len(&self) -> u32 {
    self.len
  }

  fn capacity(&self) -> u32 {
    self.cap
  }

  fn resize(&mut self, new_len: u32, fill: u8) {
    if new_len > self.len {
      self.reserve(new_len - self.len);
      // SAFETY: `reserve` ensures `cap >= new_len`, so writing
      // `[len..new_len)` is within the allocation.
      unsafe {
        core::ptr::write_bytes(
          self.ptr.as_ptr().add(self.len as usize),
          fill,
          (new_len - self.len) as usize,
        );
      }
    }
    self.len = new_len;
  }

  fn reserve(&mut self, additional: u32) {
    let required = self.len.checked_add(additional).expect("capacity overflow");
    if required <= self.cap {
      return;
    }
    // Amortized doubling, with a 64-byte floor for the first allocation.
    let new_cap = required.max(self.cap.saturating_mul(2)).max(64);
    self.grow(new_cap);
  }

  fn extend_from_slice(&mut self, data: &[u8]) {
    // `data.len() as u32` would silently truncate a slice longer than
    // u32::MAX while the copy below still writes the full slice length —
    // an out-of-bounds write. Fail loudly instead.
    let n = u32::try_from(data.len()).expect("slice length exceeds u32 range");
    self.reserve(n);
    // SAFETY: `reserve` ensures `cap >= len + n`. The source slice is valid
    // for `n` bytes, and the destination `[len..len+n)` does not overlap.
    unsafe {
      core::ptr::copy_nonoverlapping(
        data.as_ptr(),
        self.ptr.as_ptr().add(self.len as usize),
        data.len(),
      );
    }
    self.len += n;
  }
}
391
#[cfg(feature = "alloc")]
impl<T> Clone for AlignedBuf<T> {
  /// Duplicate the buffer with the same `len` and `cap`.
  ///
  /// Only `len` bytes are copied; bytes `[len..cap)` of the new allocation
  /// remain uninitialized. That is sound because `as_bytes` reads only
  /// `[..len]`, and `resize` fill-writes `[len..new_len)` before advancing
  /// `len`, so uninitialized bytes are never observed.
  fn clone(&self) -> Self {
    if self.cap == 0 {
      return Self::new();
    }
    let align = Self::BUF_ALIGN;
    let layout =
      alloc::alloc::Layout::from_size_align(self.cap as usize, align).expect("invalid layout");
    // SAFETY: `layout` has non-zero size (`cap > 0`). After allocation, we
    // copy `len` bytes from the source buffer, expose provenance for
    // `Near::get`, and wrap in `NonNull` (checked non-null above).
    let ptr = unsafe {
      let p = alloc::alloc::alloc(layout);
      if p.is_null() {
        alloc::alloc::handle_alloc_error(layout);
      }
      core::ptr::copy_nonoverlapping(self.ptr.as_ptr(), p, self.len as usize);
      let _ = p.expose_provenance();
      NonNull::new_unchecked(p)
    };
    Self { ptr, len: self.len, cap: self.cap, _type: PhantomData }
  }
}
416
#[cfg(feature = "alloc")]
impl<T> Default for AlignedBuf<T> {
  /// Equivalent to [`AlignedBuf::new`]: empty, nothing allocated.
  fn default() -> Self {
    Self::new()
  }
}
423
#[cfg(feature = "alloc")]
impl<T> Drop for AlignedBuf<T> {
  /// Free the allocation, if any. The contents are plain bytes, so no
  /// per-element drops are needed — only the deallocation itself.
  fn drop(&mut self) {
    if self.cap == 0 {
      return; // dangling pointer — never allocated
    }
    let align = Self::BUF_ALIGN;
    // SAFETY: `self.ptr` was allocated with this layout (`cap > 0`).
    // `from_size_align_unchecked` is safe because `align` is a power of two
    // and `cap` was previously accepted by the allocator.
    unsafe {
      let layout = alloc::alloc::Layout::from_size_align_unchecked(self.cap as usize, align);
      alloc::alloc::dealloc(self.ptr.as_ptr(), layout);
    }
  }
}
440
#[cfg(feature = "alloc")]
// SAFETY: AlignedBuf owns its allocation exclusively; no shared mutable state.
// No `T` values are stored (only `PhantomData<T>`), so `T: Send` is not required.
unsafe impl<T> Send for AlignedBuf<T> {}

#[cfg(feature = "alloc")]
// SAFETY: &AlignedBuf only provides &[u8] access; no interior mutability.
unsafe impl<T> Sync for AlignedBuf<T> {}
448
449// ---------------------------------------------------------------------------
450// Shared buffer operations (used by both Emitter and Region)
451// ---------------------------------------------------------------------------
452
453/// Write a [`Flat`] value at position `at`.
454///
455/// # Safety
456///
457/// `at` must have been allocated for `U`, ensuring correct alignment.
458pub unsafe fn write_flat<U: Flat>(buf: &mut impl Buf, at: Pos, val: U) {
459  let start = at.0 as usize;
460  let size = mem::size_of::<U>();
461  assert!(
462    start + size <= buf.len() as usize,
463    "write_flat out of bounds: {}..{} but len is {}",
464    start,
465    start + size,
466    buf.len()
467  );
468  // SAFETY: Bounds checked above. `at` was allocated for `U` (caller contract),
469  // ensuring correct alignment. `mem::forget` prevents double-drop.
470  unsafe {
471    let src = core::ptr::from_ref(&val).cast::<u8>();
472    core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), size);
473  }
474  mem::forget(val);
475}
476
477/// Patch a [`Near<U>`](crate::Near) at position `at` to point to `target`.
478///
479/// # Safety
480///
481/// `at` must point to a `Near<U>` field within a previously allocated value,
482/// and `target` must be a position allocated for `U`.
483pub unsafe fn patch_near(buf: &mut impl Buf, at: Pos, target: Pos) {
484  let rel = i64::from(target.0) - i64::from(at.0);
485  let rel_i32: i32 = rel.try_into().expect("near offset overflow");
486  let nz = NonZero::new(rel_i32).expect("near offset must be non-zero (target == at)");
487
488  let start = at.0 as usize;
489  let size = mem::size_of::<NonZero<i32>>();
490  assert!(start + size <= buf.len() as usize, "patch_near out of bounds");
491  // SAFETY: Bounds checked above. `at` points to the `Near<U>` field whose
492  // first 4 bytes hold a `NonZero<i32>` offset.
493  unsafe {
494    let src = core::ptr::from_ref(&nz).cast::<u8>();
495    core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), size);
496  }
497}
498
499/// Patch a [`NearList<U>`](crate::NearList) header at position `at`.
500///
501/// # Safety
502///
503/// `at` must point to a `NearList<U>` field within a previously allocated
504/// value, and `target` must be a position of a `Segment<U>` (or `Pos::ZERO`
505/// when `len == 0`).
506pub unsafe fn patch_list_header(buf: &mut impl Buf, at: Pos, target: Pos, len: u32) {
507  let off_pos = at.0 as usize;
508  let len_pos = off_pos + mem::size_of::<i32>();
509
510  assert!(len_pos + mem::size_of::<u32>() <= buf.len() as usize, "patch_list_header out of bounds");
511
512  let rel: i32 = if len == 0 {
513    0
514  } else {
515    let r = i64::from(target.0) - i64::from(at.0);
516    r.try_into().expect("list header offset overflow")
517  };
518
519  // SAFETY: Bounds checked above. The list header at `at` has layout
520  // `[i32 offset, u32 len]`, and both writes are within bounds.
521  unsafe {
522    let buf_ptr = buf.as_mut_ptr();
523    core::ptr::copy_nonoverlapping(
524      core::ptr::from_ref(&rel).cast::<u8>(),
525      buf_ptr.add(off_pos),
526      mem::size_of::<i32>(),
527    );
528    core::ptr::copy_nonoverlapping(
529      core::ptr::from_ref(&len).cast::<u8>(),
530      buf_ptr.add(len_pos),
531      mem::size_of::<u32>(),
532    );
533  }
534}
535
536/// Copy raw bytes to position `at`.
537///
538/// # Safety
539///
540/// `src` must be valid for reading `len` bytes. `at` must be a valid
541/// position with at least `len` bytes available.
542pub unsafe fn write_bytes(buf: &mut impl Buf, at: Pos, src: *const u8, len: usize) {
543  let start = at.0 as usize;
544  assert!(
545    start + len <= buf.len() as usize,
546    "write_bytes out of bounds: {}..{} but len is {}",
547    start,
548    start + len,
549    buf.len()
550  );
551  // SAFETY: Bounds checked above. `src` is valid for `len` bytes (caller
552  // contract). The destination does not overlap the source.
553  unsafe {
554    core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr().add(start), len);
555  }
556}
557
558/// Allocate a segment header plus `count` contiguous values of type `U`.
559///
560/// Returns the position of the segment header. The segment's `len` field
561/// is initialized to `count`; `next` is 0 (end of chain, from zero-fill).
562pub fn alloc_segment<U: Flat>(buf: &mut impl Buf, count: u32) -> Pos {
563  buf.align_to(align_of::<Segment<U>>());
564  let pos = Pos(buf.len());
565  let values_size = count.checked_mul(size_of::<U>() as u32).expect("segment values overflow");
566  let total =
567    (size_of::<Segment<U>>() as u32).checked_add(values_size).expect("segment total size overflow");
568  buf.resize(buf.len() + total, 0);
569  // Write segment len at offset 4 (next is already 0 from zero-fill).
570  let len_offset = pos.0 as usize + size_of::<i32>();
571  // SAFETY: `resize` just allocated `total` bytes starting at `pos`.
572  // The `len` field is at `pos + 4`, within the freshly allocated region.
573  unsafe {
574    core::ptr::copy_nonoverlapping(
575      core::ptr::from_ref(&count).cast::<u8>(),
576      buf.as_mut_ptr().add(len_offset),
577      size_of::<u32>(),
578    );
579  }
580  pos
581}
582
583/// Patch the `next` pointer of a segment at `seg_pos`.
584///
585/// # Safety
586///
587/// `seg_pos` must be a position of a previously allocated `Segment<T>`.
588pub unsafe fn patch_segment_next(buf: &mut impl Buf, seg_pos: Pos, next_seg_pos: Pos) {
589  let rel = i64::from(next_seg_pos.0) - i64::from(seg_pos.0);
590  let rel_i32: i32 = rel.try_into().expect("segment next offset overflow");
591  let start = seg_pos.0 as usize;
592  assert!(start + mem::size_of::<i32>() <= buf.len() as usize, "patch_segment_next out of bounds");
593  // SAFETY: Bounds checked above. The `next` field is at offset 0 of
594  // `Segment<T>`, and we write exactly `size_of::<i32>()` bytes.
595  unsafe {
596    core::ptr::copy_nonoverlapping(
597      core::ptr::from_ref(&rel_i32).cast::<u8>(),
598      buf.as_mut_ptr().add(start),
599      mem::size_of::<i32>(),
600    );
601  }
602}