oxideav_core/arena/mod.rs

//! Refcounted arena pool for decoder frame allocations.
//!
//! This module is the runtime half of the DoS-protection framework
//! described in [`crate::limits`]. It provides three types:
//!
//! - [`ArenaPool`] — a pool of reusable raw byte buffers (`Box<[u8]>`)
//!   that a decoder leases from. Pool size and per-buffer capacity are
//!   fixed at construction; together they bound peak RSS by
//!   construction (`max_arenas × cap_per_arena`).
//!
//! - [`Arena`] — a single buffer leased from the pool. Allocations are
//!   bump-pointer (no per-alloc bookkeeping, no fragmentation). When
//!   the `Arena` is dropped, its buffer is returned to the pool, *not*
//!   freed — this is what lets the pool reuse buffers instead of
//!   re-allocating one per frame. If an arena outlives its pool, the
//!   arena's buffer is simply freed when the arena is dropped.
//!
//! - [`Frame`] / [`FrameInner`] — a refcounted (`Rc<FrameInner>`)
//!   handle that holds an `Arena` plus per-plane offset/length pairs
//!   and a small [`FrameHeader`]. As long as any clone of a `Frame`
//!   exists, its arena (and therefore its buffer) stays out of the
//!   pool. The last `Drop` returns the buffer.
//!
//! ## Design choices for round 1
//!
//! - **Hand-rolled bump allocator** inside a `Box<[u8]>`. We
//!   deliberately do not depend on the `bumpalo` crate yet — the
//!   logic is twenty lines and avoids pulling in a dependency before
//!   profiling justifies it. The signature is intentionally compatible
//!   with what a `bumpalo`-backed implementation would look like, so
//!   swapping later is a contained refactor.
//!
//! - **`Rc` for `Frame`, not `Arc`.** This module targets the
//!   single-threaded decode path (decoder and consumer on the same
//!   thread). The bump-pointer cursor is `Cell<usize>` for the same
//!   reason (no atomics on the hot path). For the cross-thread decode
//!   path — where a decoder produces frames on one thread and a
//!   consumer reads them on another — see the sibling [`sync`] module,
//!   which mirrors this API 1:1 with `Arc<FrameInner>` / an atomic
//!   cursor so that `Frame: Send + Sync`.
//!
//! - **`Arena::alloc<T>` returns `&mut [T]` borrowed from the arena.**
//!   The borrow is bounded by the lifetime of the `&Arena` reference,
//!   not the lifetime of the arena itself; the arena's buffer is
//!   held inside an `UnsafeCell` so multiple calls to `alloc` against
//!   the same `&Arena` can each carve out non-overlapping sub-slices.
//!   This matches `bumpalo::Bump::alloc_slice_*` semantics.
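//!
//! ## Example (single-threaded path)
//!
//! A sketch of the lease / alloc / [`Frame`] roundtrip. The `use`
//! paths assume `oxideav_core` exposes `arena` and `format` as public
//! modules; `PixelFormat::Gray8` is the variant the unit tests below
//! use.
//!
//! ```
//! use oxideav_core::arena::{ArenaPool, FrameHeader, FrameInner};
//! use oxideav_core::format::PixelFormat;
//!
//! // 4 buffers of 1 MiB each: peak RSS is bounded at 4 MiB by construction.
//! let pool = ArenaPool::new(4, 1 << 20);
//!
//! // One arena per frame; the "decoder" fills a single 16x16 gray plane.
//! let arena = pool.lease().expect("pool has free slots");
//! let plane: &mut [u8] = arena.alloc::<u8>(16 * 16).expect("fits in the arena");
//! plane.fill(0x80);
//!
//! // Wrap it in a refcounted Frame; the buffer stays out of the pool
//! // until the last clone of `frame` is dropped.
//! let header = FrameHeader::new(16, 16, PixelFormat::Gray8, Some(0));
//! let frame = FrameInner::new(arena, &[(0, 16 * 16)], header).expect("valid plane range");
//! assert_eq!(frame.plane(0).unwrap().len(), 16 * 16);
//! ```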

pub mod sync;

use std::cell::{Cell, UnsafeCell};
use std::mem::{align_of, size_of};
use std::rc::Rc;
use std::sync::{Arc, Mutex, Weak};

use crate::error::{Error, Result};
use crate::format::PixelFormat;

/// Pool of reusable byte buffers for arena-backed frame allocations.
///
/// Construct one per decoder via [`ArenaPool::new`]. Lease an
/// [`Arena`] per frame via [`ArenaPool::lease`]; drop the arena (or
/// drop the last clone of a [`Frame`] holding it) to return its
/// buffer to the pool.
///
/// **Backpressure:** when all `max_arenas` slots are checked out, the
/// next [`ArenaPool::lease`] returns
/// [`Error::ResourceExhausted`]. A decoder that hits this should
/// surface the error to its caller rather than busy-loop; the
/// consuming side of the pipeline is expected to drop frames it no
/// longer needs, which returns their buffers to the pool.
///
/// `ArenaPool` is `Send + Sync` (the inner `Mutex` makes it safe to
/// share across threads even though [`Arena`] / [`Frame`] themselves
/// are `!Send` due to their `Rc`/`Cell` contents). This asymmetry is
/// intentional: multiple decoder threads can share a single pool while
/// each thread owns its own arenas. See also the sibling
/// [`sync::ArenaPool`], whose leases are themselves `Send + Sync`.
pub struct ArenaPool {
    inner: Mutex<PoolInner>,
    cap_per_arena: usize,
    max_arenas: usize,
    max_alloc_count_per_arena: u32,
}

struct PoolInner {
    /// Buffers currently sitting idle in the pool (ready to lease).
    idle: Vec<Box<[u8]>>,
    /// Total buffers ever allocated by this pool (idle + in-flight).
    /// Caps lazy growth at `max_arenas`.
    total_allocated: usize,
}

impl ArenaPool {
    /// Construct a new pool with `max_arenas` buffer slots, each of
    /// `cap_per_arena` bytes. Buffers are allocated lazily on first
    /// lease — a freshly constructed pool holds no memory.
    ///
    /// The per-arena allocation count defaults to a generous cap of
    /// 1,000,000; use [`ArenaPool::with_alloc_count_cap`] to tighten
    /// it.
    pub fn new(max_arenas: usize, cap_per_arena: usize) -> Arc<Self> {
        Self::with_alloc_count_cap(max_arenas, cap_per_arena, 1_000_000)
    }

    /// Like [`ArenaPool::new`] but lets the caller set the per-arena
    /// allocation-count cap. Useful when the caller is plumbing
    /// [`crate::DecoderLimits`] through.
    pub fn with_alloc_count_cap(
        max_arenas: usize,
        cap_per_arena: usize,
        max_alloc_count_per_arena: u32,
    ) -> Arc<Self> {
        Arc::new(Self {
            inner: Mutex::new(PoolInner {
                idle: Vec::with_capacity(max_arenas),
                total_allocated: 0,
            }),
            cap_per_arena,
            max_arenas,
            max_alloc_count_per_arena,
        })
    }

    /// Capacity of each arena buffer this pool hands out, in bytes.
    pub fn cap_per_arena(&self) -> usize {
        self.cap_per_arena
    }

    /// Maximum number of arenas that may be checked out at once.
    pub fn max_arenas(&self) -> usize {
        self.max_arenas
    }

    /// Lease one arena from the pool. Returns
    /// [`Error::ResourceExhausted`] if every arena slot is already
    /// checked out by an [`Arena`] (or a [`Frame`] holding one).
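    ///
    /// # Example
    ///
    /// A short sketch of the backpressure behaviour (crate paths as
    /// assumed in the module-level example):
    ///
    /// ```
    /// use oxideav_core::arena::ArenaPool;
    ///
    /// let pool = ArenaPool::new(1, 64);
    /// let first = pool.lease().expect("one slot free");
    /// // A second lease fails instead of allocating more memory.
    /// assert!(pool.lease().is_err());
    /// // Dropping the arena returns its buffer, so leasing works again.
    /// drop(first);
    /// assert!(pool.lease().is_ok());
    /// ```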
    pub fn lease(self: &Arc<Self>) -> Result<Arena> {
        let buffer = {
            let mut inner = self.inner.lock().expect("ArenaPool mutex poisoned");
            if let Some(buf) = inner.idle.pop() {
                buf
            } else if inner.total_allocated < self.max_arenas {
                inner.total_allocated += 1;
                vec![0u8; self.cap_per_arena].into_boxed_slice()
            } else {
                return Err(Error::resource_exhausted(format!(
                    "ArenaPool exhausted: all {} arenas checked out",
                    self.max_arenas
                )));
            }
        };

        Ok(Arena {
            buffer: UnsafeCell::new(buffer),
            cursor: Cell::new(0),
            alloc_count: Cell::new(0),
            cap: self.cap_per_arena,
            alloc_count_cap: self.max_alloc_count_per_arena,
            pool: Arc::downgrade(self),
        })
    }

    /// Return a buffer to the idle list. Called from `Arena`'s `Drop`
    /// impl; not part of the public API.
    fn release(&self, buffer: Box<[u8]>) {
        if let Ok(mut inner) = self.inner.lock() {
            inner.idle.push(buffer);
        }
        // If the lock is poisoned, drop the buffer normally — the
        // pool is in an unusable state already.
    }
}

/// One leased buffer from an [`ArenaPool`].
///
/// Allocations are bump-pointer: each call to [`Arena::alloc`] carves
/// out a fresh aligned slice from the head of the buffer. There is no
/// per-allocation header and no individual free — the entire arena
/// is reset (returned to the pool) only when the `Arena` is dropped.
///
/// `Arena` is `!Send + !Sync` because its bump cursor is a `Cell` and
/// its buffer is an `UnsafeCell` accessed without locks. This is
/// fine for the round-1 single-threaded decoder path; the sibling
/// [`sync`] module provides the cross-thread variant with an atomic
/// cursor.
pub struct Arena {
    /// Backing buffer leased from the pool. Wrapped in `UnsafeCell`
    /// so [`Arena::alloc`] can take `&self` and still return
    /// `&mut [T]` slices that borrow non-overlapping ranges of the
    /// same buffer.
    buffer: UnsafeCell<Box<[u8]>>,
    /// Bump cursor: the next free byte offset within `buffer`.
    cursor: Cell<usize>,
    /// Number of allocations performed so far.
    alloc_count: Cell<u32>,
    /// Cached byte cap (== `pool.cap_per_arena` at lease time).
    cap: usize,
    /// Cached allocation-count cap (== `pool.max_alloc_count_per_arena`
    /// at lease time).
    alloc_count_cap: u32,
    /// Weak handle back to the pool so `Drop` can return the buffer.
    pool: Weak<ArenaPool>,
}

impl Arena {
    /// Capacity of this arena in bytes.
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Bytes consumed by allocations so far.
    pub fn used(&self) -> usize {
        self.cursor.get()
    }

    /// Number of allocations performed so far.
    pub fn alloc_count(&self) -> u32 {
        self.alloc_count.get()
    }

    /// `true` once the per-arena allocation-count cap has been
    /// reached. Decoders that produce many small allocations should
    /// poll this and bail with [`Error::ResourceExhausted`] when it
    /// flips, instead of waiting for the next [`Arena::alloc`] call
    /// to fail.
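    ///
    /// A sketch of that polling pattern (crate paths as assumed in the
    /// module-level example; the loop stands in for a real decode step):
    ///
    /// ```
    /// use oxideav_core::arena::ArenaPool;
    ///
    /// let pool = ArenaPool::with_alloc_count_cap(1, 1024, 2);
    /// let arena = pool.lease().unwrap();
    /// for _ in 0..8 {
    ///     if arena.alloc_count_exceeded() {
    ///         break; // a real decoder would surface ResourceExhausted here
    ///     }
    ///     arena.alloc::<u8>(1).unwrap();
    /// }
    /// assert_eq!(arena.alloc_count(), 2);
    /// ```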
    pub fn alloc_count_exceeded(&self) -> bool {
        self.alloc_count.get() >= self.alloc_count_cap
    }

    /// Allocate `count` `T`s out of this arena. Returns a borrowed
    /// `&mut [T]` whose lifetime is bounded by the borrow of `self`.
    /// The slice is **not** initialised to any particular value: its
    /// bytes are whatever the buffer already held (zeroes for a
    /// freshly allocated buffer, stale data for one reused from the
    /// pool). **The caller is responsible for fully writing the
    /// returned slice before reading it.** This matches the "decoder
    /// fills it, then never re-reads unwritten bytes" pattern.
    ///
    /// Returns [`Error::ResourceExhausted`] if either the per-arena
    /// byte cap or the per-arena allocation-count cap would be
    /// exceeded.
    ///
    /// # Safety / contract
    ///
    /// `T` must be a "plain old data" type with no `Drop` glue and
    /// no invariants that need a constructor — typically `u8`, `i16`,
    /// `u32`, `f32`, etc. The arena does not run destructors on
    /// allocated values. The `T: Copy` bound rules out `Drop` glue,
    /// but it does not rule out types with invalid bit patterns
    /// (`bool`, `char`, enums), so stick to plain integer/float
    /// element types.
    ///
    /// **Aliasing model:** the bump cursor is monotonically
    /// non-decreasing, so successive `alloc` calls return slices
    /// covering disjoint regions of the arena's internal `UnsafeCell`.
    /// This is the standard arena-allocator pattern (cf.
    /// `bumpalo::Bump::alloc_slice_*`) and is the reason this method
    /// takes `&self` rather than `&mut self`.
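    ///
    /// # Example
    ///
    /// A sketch of carving two differently-typed slices out of one
    /// arena (crate paths as assumed in the module-level example):
    ///
    /// ```
    /// use oxideav_core::arena::ArenaPool;
    ///
    /// let pool = ArenaPool::new(1, 4096);
    /// let arena = pool.lease().unwrap();
    ///
    /// let luma: &mut [u8] = arena.alloc::<u8>(64 * 32).unwrap();
    /// luma.fill(16);
    /// let coeffs: &mut [i16] = arena.alloc::<i16>(64).unwrap();
    /// coeffs.fill(0);
    ///
    /// assert_eq!(arena.alloc_count(), 2);
    /// assert!(arena.used() >= 64 * 32 + 64 * 2);
    /// ```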
    #[allow(clippy::mut_from_ref)] // see "Aliasing model" doc above.
    pub fn alloc<T>(&self, count: usize) -> Result<&mut [T]>
    where
        T: Copy,
    {
        // Allocation-count cap.
        let next_count =
            self.alloc_count.get().checked_add(1).ok_or_else(|| {
                Error::resource_exhausted("Arena alloc_count overflow".to_string())
            })?;
        if next_count > self.alloc_count_cap {
            return Err(Error::resource_exhausted(format!(
                "Arena alloc-count cap of {} exceeded",
                self.alloc_count_cap
            )));
        }

        let elem_size = size_of::<T>();
        let elem_align = align_of::<T>();
        // Bytes requested.
        let bytes = elem_size
            .checked_mul(count)
            .ok_or_else(|| Error::resource_exhausted("Arena alloc size overflow".to_string()))?;

        // Align the bump cursor up to T's alignment. Alignment is
        // computed on the absolute address, not the offset: a
        // `Box<[u8]>` is only guaranteed 1-byte alignment, so an
        // aligned *offset* alone would not make the returned pointer
        // aligned for `T`.
        let cursor = self.cursor.get();
        // SAFETY: we only read the buffer's base pointer here; the
        // temporary borrow of the buffer ends with this statement.
        let base = unsafe { (*self.buffer.get()).as_mut_ptr() };
        let aligned = (base as usize)
            .checked_add(cursor)
            .and_then(|addr| align_up(addr, elem_align))
            .map(|addr| addr - base as usize)
            .ok_or_else(|| {
                Error::resource_exhausted("Arena cursor alignment overflow".to_string())
            })?;
        let new_cursor = aligned.checked_add(bytes).ok_or_else(|| {
            Error::resource_exhausted("Arena cursor advance overflow".to_string())
        })?;

        if new_cursor > self.cap {
            return Err(Error::resource_exhausted(format!(
                "Arena cap of {} bytes exceeded (would consume {} bytes)",
                self.cap, new_cursor
            )));
        }

        // SAFETY: `aligned..aligned + bytes` lies inside the buffer
        // (checked against `self.cap` above) and does not overlap any
        // previously handed-out range, because the cursor is
        // monotonically non-decreasing. `elem_ptr` satisfies `T`'s
        // alignment because the absolute address was aligned above.
        // The underlying bytes are initialised (the buffer is a
        // zero-filled, possibly reused, `Box<[u8]>`), and `T: Copy`
        // means there is nothing to drop before overwriting.
        let slice: &mut [T] = unsafe {
            let elem_ptr = base.add(aligned).cast::<T>();
            std::slice::from_raw_parts_mut(elem_ptr, count)
        };

        self.cursor.set(new_cursor);
        self.alloc_count.set(next_count);
        Ok(slice)
    }

    /// Reset the arena to empty without releasing its buffer to the
    /// pool. Useful for a decoder that wants to reuse the same arena
    /// across several intermediate stages of the same frame. Callers
    /// must ensure no slice previously returned from [`Arena::alloc`]
    /// is still in use — Rust's borrow checker enforces this, since
    /// `reset` takes `&mut self`.
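    ///
    /// A sketch of reusing one arena across two stages of the same
    /// frame (crate paths as assumed in the module-level example):
    ///
    /// ```
    /// use oxideav_core::arena::ArenaPool;
    ///
    /// let pool = ArenaPool::new(1, 128);
    /// let mut arena = pool.lease().unwrap();
    ///
    /// // Stage 1: scratch space for an intermediate computation.
    /// arena.alloc::<u8>(128).unwrap().fill(0);
    /// assert_eq!(arena.used(), 128);
    ///
    /// // Stage 2: reclaim the same bytes (no old slice may still be live).
    /// arena.reset();
    /// assert_eq!(arena.used(), 0);
    /// let _output: &mut [u8] = arena.alloc::<u8>(128).unwrap();
    /// ```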
    pub fn reset(&mut self) {
        self.cursor.set(0);
        self.alloc_count.set(0);
    }
}

impl Drop for Arena {
    fn drop(&mut self) {
        // Take the buffer out of the UnsafeCell. We have `&mut self`
        // here, so `get_mut` gives safe exclusive access.
        let buffer = std::mem::take(self.buffer.get_mut());
        if let Some(pool) = self.pool.upgrade() {
            pool.release(buffer);
        }
        // else: pool was dropped before us — buffer drops here.
    }
}

/// Round `n` up to the next multiple of `align`. `align` must be a
/// power of two. Returns `None` on overflow.
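/// For example, `align_up(5, 4) == Some(8)` and `align_up(8, 4) == Some(8)`.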
fn align_up(n: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two(), "alignment must be a power of two");
    let mask = align - 1;
    n.checked_add(mask).map(|m| m & !mask)
}

/// Per-frame metadata carried alongside an [`Arena`] inside a
/// [`Frame`]. Kept minimal in round 1; round 2 will extend it with
/// stride/colorspace/HDR fields as decoders need them.
///
/// `Copy` so it travels through the hot path with no allocation.
#[non_exhaustive]
#[derive(Copy, Clone, Debug)]
pub struct FrameHeader {
    pub width: u32,
    pub height: u32,
    pub pixel_format: PixelFormat,
    /// Presentation timestamp in stream time-base units. `None` when
    /// the codec did not surface one (e.g. a still image).
    pub presentation_timestamp: Option<i64>,
}

impl FrameHeader {
    /// Construct a header with all four round-1 fields set. Because
    /// the struct is `#[non_exhaustive]`, code outside this crate
    /// cannot use struct-literal (or functional-update) syntax and
    /// must construct headers through this function.
    pub fn new(
        width: u32,
        height: u32,
        pixel_format: PixelFormat,
        presentation_timestamp: Option<i64>,
    ) -> Self {
        Self {
            width,
            height,
            pixel_format,
            presentation_timestamp,
        }
    }
}

/// Maximum number of planes a [`FrameInner`] can describe in round 1.
/// Covers every real-world video pixel format (1 plane for packed
/// RGB/YUV 4:2:2, 3 planes for I420/YV12/I444, 4 planes for YUVA /
/// RGBA planar). Audio is handled by a separate sibling type in a
/// future round; this module is video-only for now.
pub const MAX_PLANES: usize = 4;

/// The owned body of a refcounted [`Frame`].
///
/// Holds an [`Arena`] (the bytes), a fixed-size table of
/// `(offset_in_arena, length_in_bytes)` pairs (one per plane), and a
/// [`FrameHeader`]. The `plane_count` field tracks how many entries of
/// `plane_offsets` are actually populated. Up to [`MAX_PLANES`] planes
/// are supported.
///
/// **Lifetime:** an `Arena` returns its buffer to the pool when
/// dropped. An `Rc<FrameInner>` keeps the arena alive through its
/// `arena` field, so as long as any clone of a [`Frame`] exists the
/// underlying buffer stays out of the pool.
pub struct FrameInner {
    arena: Arena,
    plane_offsets: [(usize, usize); MAX_PLANES],
    plane_count: u8,
    header: FrameHeader,
}

/// Refcounted handle to a decoded video frame. Construct via
/// [`FrameInner::new`]; clone freely (each clone bumps the refcount
/// by 1). The arena's buffer is returned to the pool when the last
/// clone is dropped.
///
/// `Frame` is `Rc<FrameInner>` (single-threaded decoder path). For the
/// cross-thread decode path — where the consumer runs on a different
/// thread from the decoder — use the sibling [`sync::Frame`] which is
/// `Arc<sync::FrameInner>` and is `Send + Sync`.
pub type Frame = Rc<FrameInner>;

impl FrameInner {
    /// Construct a `Frame` (refcounted `Rc<FrameInner>`) from an arena,
    /// a slice of `(offset, length)` plane descriptors, and a header.
    /// Returns [`Error::InvalidData`] if more than [`MAX_PLANES`]
    /// planes are supplied or if any plane range falls outside the
    /// arena's used region.
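    ///
    /// # Example
    ///
    /// A sketch of recording plane offsets with [`Arena::used`] while
    /// filling a two-plane layout (crate paths as assumed in the
    /// module-level example; `PixelFormat::Gray8` merely stands in for
    /// a real planar format):
    ///
    /// ```
    /// use oxideav_core::arena::{ArenaPool, FrameHeader, FrameInner};
    /// use oxideav_core::format::PixelFormat;
    ///
    /// let pool = ArenaPool::new(1, 1024);
    /// let arena = pool.lease().unwrap();
    ///
    /// let y_off = arena.used();
    /// arena.alloc::<u8>(8 * 8).unwrap().fill(16);
    /// let uv_off = arena.used();
    /// arena.alloc::<u8>(4 * 4).unwrap().fill(128);
    ///
    /// let header = FrameHeader::new(8, 8, PixelFormat::Gray8, None);
    /// let frame = FrameInner::new(arena, &[(y_off, 8 * 8), (uv_off, 4 * 4)], header).unwrap();
    /// assert_eq!(frame.plane_count(), 2);
    /// assert_eq!(frame.plane(1).unwrap().len(), 16);
    /// ```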
    pub fn new(arena: Arena, planes: &[(usize, usize)], header: FrameHeader) -> Result<Frame> {
        if planes.len() > MAX_PLANES {
            return Err(Error::invalid(format!(
                "FrameInner supports at most {} planes (got {})",
                MAX_PLANES,
                planes.len()
            )));
        }
        let used = arena.used();
        for (i, (off, len)) in planes.iter().enumerate() {
            let end = off
                .checked_add(*len)
                .ok_or_else(|| Error::invalid(format!("plane {i}: offset+len overflow")))?;
            if end > used {
                return Err(Error::invalid(format!(
                    "plane {i}: range {off}..{end} exceeds arena used={used}"
                )));
            }
        }
        let mut plane_offsets = [(0usize, 0usize); MAX_PLANES];
        for (i, p) in planes.iter().enumerate() {
            plane_offsets[i] = *p;
        }
        Ok(Rc::new(FrameInner {
            arena,
            plane_offsets,
            plane_count: planes.len() as u8,
            header,
        }))
    }

    /// Number of planes this frame holds.
    pub fn plane_count(&self) -> usize {
        self.plane_count as usize
    }

    /// Read-only access to plane `i`. Returns `None` if `i` is out of
    /// range.
    pub fn plane(&self, i: usize) -> Option<&[u8]> {
        if i >= self.plane_count as usize {
            return None;
        }
        let (off, len) = self.plane_offsets[i];
        // SAFETY: plane ranges were validated against `arena.used()`
        // at construction; the arena's buffer has not changed since.
        // We borrow with the lifetime of `&self`.
        let buf: &[u8] = unsafe {
            let buf_ref = &*self.arena.buffer.get();
            &(**buf_ref)[off..off + len]
        };
        Some(buf)
    }

    /// Frame header (width / height / pixel format / pts).
    pub fn header(&self) -> &FrameHeader {
        &self.header
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn small_pool(slots: usize, cap: usize) -> Arc<ArenaPool> {
        ArenaPool::new(slots, cap)
    }

    #[test]
    fn pool_lease_returns_err_when_exhausted() {
        let pool = small_pool(2, 1024);
        let a = pool.lease().expect("first lease");
        let b = pool.lease().expect("second lease");
        let third = pool.lease();
        assert!(matches!(third, Err(Error::ResourceExhausted(_))));
        // `a` and `b` are dropped here, after the failing lease; the
        // explicit drop also silences unused-variable warnings.
        drop((a, b));
    }

    #[test]
    fn arena_alloc_caps_at_size_limit() {
        let pool = small_pool(1, 64);
        let arena = pool.lease().unwrap();
        // 64 bytes capacity. Allocate 32 u8s — fits.
        let _: &mut [u8] = arena.alloc::<u8>(32).unwrap();
        // Allocate another 32 u8s — exactly fills.
        let _: &mut [u8] = arena.alloc::<u8>(32).unwrap();
        // Any further allocation fails.
        let third = arena.alloc::<u8>(1);
        assert!(matches!(third, Err(Error::ResourceExhausted(_))));
    }

    #[test]
    fn arena_alloc_count_cap_fires() {
        let pool = ArenaPool::with_alloc_count_cap(1, 1024, 3);
        let arena = pool.lease().unwrap();
        let _: &mut [u8] = arena.alloc::<u8>(1).unwrap();
        let _: &mut [u8] = arena.alloc::<u8>(1).unwrap();
        let _: &mut [u8] = arena.alloc::<u8>(1).unwrap();
        assert!(arena.alloc_count_exceeded());
        let fourth = arena.alloc::<u8>(1);
        assert!(matches!(fourth, Err(Error::ResourceExhausted(_))));
    }

    #[test]
    fn arena_returns_to_pool_on_drop() {
        let pool = small_pool(1, 256);
        {
            let arena = pool.lease().expect("first lease");
            // Sanity: arena is leased; further leases would fail.
            assert!(matches!(pool.lease(), Err(Error::ResourceExhausted(_))));
            drop(arena);
        }
        // Arena dropped — pool slot must be free again.
        let _again = pool.lease().expect("re-lease after drop");
    }

    #[test]
    fn arena_alignment_is_respected() {
        let pool = small_pool(1, 64);
        let arena = pool.lease().unwrap();
        // Allocate a single u8 to misalign the cursor.
        let _: &mut [u8] = arena.alloc::<u8>(1).unwrap();
        // Now allocate u32s; the returned slice must be 4-byte aligned.
        let s: &mut [u32] = arena.alloc::<u32>(4).unwrap();
        let addr = s.as_ptr() as usize;
        assert_eq!(addr % align_of::<u32>(), 0);
        assert_eq!(s.len(), 4);
    }

    fn build_simple_frame(pool: &Arc<ArenaPool>) -> Frame {
        let arena = pool.lease().unwrap();
        // Allocate 16 bytes for plane 0.
        let plane0: &mut [u8] = arena.alloc::<u8>(16).unwrap();
        for (i, b) in plane0.iter_mut().enumerate() {
            *b = i as u8;
        }
        // The slice borrowed from arena ends here.
        let header = FrameHeader::new(4, 4, PixelFormat::Gray8, Some(42));
        FrameInner::new(arena, &[(0, 16)], header).unwrap()
    }

    #[test]
    fn frame_refcount_keeps_arena_alive() {
        let pool = small_pool(1, 256);
        let frame = build_simple_frame(&pool);
        let clone = Rc::clone(&frame);
        drop(frame);
        // Clone is still valid; arena still leased.
        let plane = clone.plane(0).expect("plane 0");
        assert_eq!(plane.len(), 16);
        for (i, b) in plane.iter().enumerate() {
            assert_eq!(*b, i as u8);
        }
        assert_eq!(clone.header().width, 4);
        assert_eq!(clone.header().height, 4);
        assert_eq!(clone.header().presentation_timestamp, Some(42));
        // Pool still exhausted because clone holds the arena.
        assert!(matches!(pool.lease(), Err(Error::ResourceExhausted(_))));
    }

    #[test]
    fn last_drop_returns_arena_to_pool() {
        let pool = small_pool(1, 256);
        let frame = build_simple_frame(&pool);
        let clone = Rc::clone(&frame);
        drop(frame);
        drop(clone);
        // All clones gone — buffer must be back in the pool.
        let _again = pool.lease().expect("lease after last drop");
    }

    #[test]
    fn frame_rejects_too_many_planes() {
        let pool = small_pool(1, 256);
        let arena = pool.lease().unwrap();
        let header = FrameHeader::new(1, 1, PixelFormat::Gray8, None);
        let too_many = vec![(0usize, 0usize); MAX_PLANES + 1];
        let r = FrameInner::new(arena, &too_many, header);
        assert!(matches!(r, Err(Error::InvalidData(_))));
    }

    #[test]
    fn frame_rejects_plane_outside_arena() {
        let pool = small_pool(1, 64);
        let arena = pool.lease().unwrap();
        // arena.used() == 0; any non-empty plane is out of range.
        let header = FrameHeader::new(1, 1, PixelFormat::Gray8, None);
        let r = FrameInner::new(arena, &[(0, 16)], header);
        assert!(matches!(r, Err(Error::InvalidData(_))));
    }

    #[test]
    fn arena_outlives_pool_buffer_freed() {
        // Exotic: the arena outlives its pool. The buffer just frees
        // normally when the arena drops.
        let pool = small_pool(1, 64);
        let arena = pool.lease().unwrap();
        drop(pool);
        // Drop arena — must not panic. The Weak handle won't upgrade.
        drop(arena);
    }

    #[test]
    fn arena_reset_clears_allocations() {
        let pool = small_pool(1, 32);
        let mut arena = pool.lease().unwrap();
        let _: &mut [u8] = arena.alloc::<u8>(32).unwrap();
        // Cap reached.
        assert!(matches!(
            arena.alloc::<u8>(1),
            Err(Error::ResourceExhausted(_))
        ));
        arena.reset();
        // After reset we can allocate again.
        let _: &mut [u8] = arena.alloc::<u8>(32).unwrap();
    }
}
641}