// openjph_core/mem.rs
1//! Memory management utilities — port of `ojph_mem.h/cpp`.
2//!
3//! Provides aligned allocation, line buffers, and arena-style allocators that
4//! mirror the C++ OpenJPH memory model.
5
6use std::alloc::{self, Layout};
7use std::ops::{Deref, DerefMut, Index, IndexMut};
8use std::ptr::NonNull;
9
10use crate::arch::BYTE_ALIGNMENT;
11use crate::error::{OjphError, Result};
12
13// =========================================================================
14// AlignedVec<T>
15// =========================================================================
16
/// A heap-allocated, contiguously-stored buffer with a guaranteed minimum byte
/// alignment (defaults to [`BYTE_ALIGNMENT`] = 64, suitable for AVX-512).
///
/// This is the Rust equivalent of the C++ `ojph::mem_aligned_allocator` usage
/// pattern. It uses [`std::alloc::alloc`] / [`std::alloc::dealloc`] with a
/// custom [`Layout`] so the underlying pointer is always aligned.
pub struct AlignedVec<T> {
    // Start of the allocation; dangling (never dereferenced) while
    // `capacity == 0`.
    ptr: NonNull<T>,
    // Number of initialized elements exposed through `Deref`/`DerefMut`.
    len: usize,
    // Number of elements the allocation holds; used to rebuild the `Layout`
    // at deallocation time.
    capacity: usize,
    // Minimum byte alignment; always a power of two (asserted in
    // `with_alignment`).
    alignment: usize,
}
29
// SAFETY: `AlignedVec<T>` exclusively owns its allocation (the pointer is
// never shared), so it may cross threads whenever `T: Send`, and `&AlignedVec`
// only hands out `&T`, which is safe to share whenever `T: Sync`.
unsafe impl<T: Send> Send for AlignedVec<T> {}
unsafe impl<T: Sync> Sync for AlignedVec<T> {}
33
34impl<T> AlignedVec<T> {
35 /// Creates a new, empty `AlignedVec` with the default alignment.
36 pub fn new() -> Self {
37 Self {
38 ptr: NonNull::dangling(),
39 len: 0,
40 capacity: 0,
41 alignment: BYTE_ALIGNMENT as usize,
42 }
43 }
44
45 /// Creates a new `AlignedVec` with the specified alignment.
46 pub fn with_alignment(alignment: usize) -> Self {
47 assert!(
48 alignment.is_power_of_two(),
49 "alignment must be a power of two"
50 );
51 Self {
52 ptr: NonNull::dangling(),
53 len: 0,
54 capacity: 0,
55 alignment,
56 }
57 }
58
59 /// Allocates room for exactly `count` elements, zero-initialized.
60 ///
61 /// Any previous contents are dropped and deallocated.
62 pub fn resize(&mut self, count: usize) -> Result<()>
63 where
64 T: Default + Copy,
65 {
66 self.dealloc_inner();
67
68 if count == 0 {
69 return Ok(());
70 }
71
72 let layout = self.make_layout(count)?;
73 // SAFETY: layout has non-zero size and valid alignment.
74 let raw = unsafe { alloc::alloc_zeroed(layout) };
75 if raw.is_null() {
76 return Err(OjphError::AllocationFailed);
77 }
78 self.ptr = unsafe { NonNull::new_unchecked(raw.cast::<T>()) };
79 self.len = count;
80 self.capacity = count;
81 Ok(())
82 }
83
84 /// Returns the number of elements.
85 #[inline]
86 pub fn len(&self) -> usize {
87 self.len
88 }
89
90 /// Returns `true` when the buffer contains no elements.
91 #[inline]
92 pub fn is_empty(&self) -> bool {
93 self.len == 0
94 }
95
96 /// Returns a raw pointer to the first element.
97 #[inline]
98 pub fn as_ptr(&self) -> *const T {
99 self.ptr.as_ptr()
100 }
101
102 /// Returns a mutable raw pointer to the first element.
103 #[inline]
104 pub fn as_mut_ptr(&mut self) -> *mut T {
105 self.ptr.as_ptr()
106 }
107
108 // -- internal helpers --------------------------------------------------
109
110 fn make_layout(&self, count: usize) -> Result<Layout> {
111 let size = count
112 .checked_mul(std::mem::size_of::<T>())
113 .ok_or(OjphError::AllocationFailed)?;
114 let align = self.alignment.max(std::mem::align_of::<T>());
115 Layout::from_size_align(size, align).map_err(|_| OjphError::AllocationFailed)
116 }
117
118 fn dealloc_inner(&mut self) {
119 if self.capacity > 0 {
120 if let Ok(layout) = self.make_layout(self.capacity) {
121 // SAFETY: ptr was allocated with this layout.
122 unsafe { alloc::dealloc(self.ptr.as_ptr().cast::<u8>(), layout) };
123 }
124 self.len = 0;
125 self.capacity = 0;
126 self.ptr = NonNull::dangling();
127 }
128 }
129}
130
impl<T> Drop for AlignedVec<T> {
    fn drop(&mut self) {
        // Only `resize` (bounded by `T: Copy`) ever allocates, so elements
        // never need individual dropping — freeing the allocation suffices.
        self.dealloc_inner();
    }
}
136
137impl<T> Deref for AlignedVec<T> {
138 type Target = [T];
139 #[inline]
140 fn deref(&self) -> &[T] {
141 if self.len == 0 {
142 return &[];
143 }
144 // SAFETY: ptr is valid for `len` elements.
145 unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
146 }
147}
148
149impl<T> DerefMut for AlignedVec<T> {
150 #[inline]
151 fn deref_mut(&mut self) -> &mut [T] {
152 if self.len == 0 {
153 return &mut [];
154 }
155 // SAFETY: ptr is valid and uniquely owned.
156 unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
157 }
158}
159
impl<T> Index<usize> for AlignedVec<T> {
    type Output = T;
    /// Checked element access; panics when `idx >= len`.
    #[inline]
    fn index(&self, idx: usize) -> &T {
        // Route through the `Deref` slice view so bounds checking and the
        // panic message come from the standard slice implementation.
        &(**self)[idx]
    }
}
167
impl<T> IndexMut<usize> for AlignedVec<T> {
    /// Checked mutable element access; panics when `idx >= len`.
    #[inline]
    fn index_mut(&mut self, idx: usize) -> &mut T {
        // Delegate to the `DerefMut` slice view for standard bounds checks.
        &mut (**self)[idx]
    }
}
174
/// `Default` yields the same empty, default-aligned buffer as
/// [`AlignedVec::new`].
impl<T> Default for AlignedVec<T> {
    fn default() -> Self {
        Self::new()
    }
}
180
181// =========================================================================
182// LineBuf — port of `line_buf`
183// =========================================================================
184
/// Flag: buffer type is undefined / not yet allocated.
pub const LFT_UNDEFINED: u32 = 0x00;
/// Flag: buffer elements are 32 bits wide.
pub const LFT_32BIT: u32 = 0x04;
/// Flag: buffer elements are 64 bits wide.
pub const LFT_64BIT: u32 = 0x08;
/// Flag: buffer elements are integers (as opposed to float).
pub const LFT_INTEGER: u32 = 0x10;
/// Mask that isolates the element-size field (covers the bits used by
/// [`LFT_32BIT`] and [`LFT_64BIT`]; it excludes [`LFT_INTEGER`]).
pub const LFT_SIZE_MASK: u32 = 0x0F;
195
/// Discriminated pointer to the actual sample data in a [`LineBuf`].
///
/// Every variant holds a non-owning raw pointer into an arena-managed
/// buffer; this enum never allocates or frees storage itself.
#[derive(Debug, Clone, Copy)]
pub enum LineBufData {
    /// 32-bit signed integer samples.
    I32(*mut i32),
    /// 64-bit signed integer samples.
    I64(*mut i64),
    /// 32-bit floating-point samples.
    F32(*mut f32),
    /// No buffer allocated yet.
    None,
}
208
/// A single line (row) of sample data used throughout the wavelet and coding
/// pipeline — port of the C++ `line_buf`.
///
/// The pointer stored in [`data`](LineBuf::data) is *not* owned by this
/// struct; it borrows from an arena allocator.
#[derive(Debug)]
pub struct LineBuf {
    /// Number of samples (not bytes) in this line.
    pub size: usize,
    /// Extra samples prepended before the line (for filter padding).
    pub pre_size: u32,
    /// Combination of `LFT_*` flag constants describing the element type;
    /// should agree with the active [`LineBufData`] variant in `data`.
    pub flags: u32,
    /// Pointer into the backing buffer.
    pub data: LineBufData,
}
225
226impl LineBuf {
227 /// Creates a new, empty line buffer.
228 pub fn new() -> Self {
229 Self {
230 size: 0,
231 pre_size: 0,
232 flags: LFT_UNDEFINED,
233 data: LineBufData::None,
234 }
235 }
236}
237
/// `Default` yields the same empty line as [`LineBuf::new`].
impl Default for LineBuf {
    fn default() -> Self {
        Self::new()
    }
}
243
244// =========================================================================
245// LiftingBuf — lightweight wrapper for wavelet lifting steps
246// =========================================================================
247
/// A line buffer reference used during wavelet lifting.
pub struct LiftingBuf {
    /// Whether this buffer slot is currently active in a lifting step.
    pub active: bool,
    /// Index into an external line-buffer array, or `None` when detached
    /// (an index avoids the self-referential lifetime issues that a raw
    /// mutable reference would introduce).
    pub line_idx: Option<usize>,
}
256
257impl LiftingBuf {
258 /// Creates an inactive lifting buffer.
259 pub fn new() -> Self {
260 Self {
261 active: false,
262 line_idx: None,
263 }
264 }
265}
266
/// `Default` yields the same inactive slot as [`LiftingBuf::new`].
impl Default for LiftingBuf {
    fn default() -> Self {
        Self::new()
    }
}
272
273// =========================================================================
274// MemFixedAllocator — port of `mem_fixed_allocator`
275// =========================================================================
276
/// A two-phase bump allocator for fixed-size, aligned sub-regions.
///
/// # Usage
///
/// 1. **Pre-allocation phase** — call [`pre_alloc_data`](Self::pre_alloc_data)
///    repeatedly to accumulate the total size needed.
/// 2. Call [`finalize`](Self::finalize) once to allocate the backing buffer.
/// 3. **Allocation phase** — call [`alloc_data`](Self::alloc_data) to hand out
///    sub-slices from the backing buffer.
pub struct MemFixedAllocator {
    // Single backing buffer, allocated once in `finalize`.
    buf: Vec<u8>,
    /// Running total of bytes needed (phase 1) / bytes dispensed (phase 2).
    offset: usize,
    // Alignment granularity used to round each sub-region's size.
    alignment: usize,
}
292
293impl MemFixedAllocator {
294 /// Creates a new allocator with the default alignment.
295 pub fn new() -> Self {
296 Self {
297 buf: Vec::new(),
298 offset: 0,
299 alignment: BYTE_ALIGNMENT as usize,
300 }
301 }
302
303 /// Phase 1: registers that `size` bytes (with alignment) will be needed.
304 pub fn pre_alloc_data(&mut self, size: usize, _count: usize) {
305 let aligned = (size + self.alignment - 1) & !(self.alignment - 1);
306 self.offset += aligned;
307 }
308
309 /// Allocates the single backing buffer based on the accumulated size.
310 pub fn finalize(&mut self) -> Result<()> {
311 self.buf = vec![0u8; self.offset];
312 self.offset = 0;
313 Ok(())
314 }
315
316 /// Phase 2: hands out the next `size`-byte slice from the backing buffer.
317 ///
318 /// # Safety
319 ///
320 /// The returned pointer is valid for the lifetime of this allocator.
321 /// The caller must ensure `size` matches a prior `pre_alloc_data` call.
322 pub fn alloc_data(&mut self, size: usize) -> Result<*mut u8> {
323 let aligned = (size + self.alignment - 1) & !(self.alignment - 1);
324 if self.offset + aligned > self.buf.len() {
325 return Err(OjphError::AllocationFailed);
326 }
327 let ptr = self.buf[self.offset..].as_mut_ptr();
328 self.offset += aligned;
329 Ok(ptr)
330 }
331}
332
/// `Default` yields the same empty allocator as [`MemFixedAllocator::new`].
impl Default for MemFixedAllocator {
    fn default() -> Self {
        Self::new()
    }
}
338
339// =========================================================================
340// CodedLists — linked-list node for coded data
341// =========================================================================
342
/// A node in a singly-linked list of coded data buffers — port of
/// `coded_lists`.
pub struct CodedLists {
    /// Pointer to the next node, or `None` if this is the tail.
    pub next: Option<Box<CodedLists>>,
    /// Non-owning pointer to the start of the coded data in this node.
    pub buf: *mut u8,
    /// Number of valid coded bytes.
    pub buf_size: usize,
    /// Availability flag. NOTE(review): the previous comment read "True when
    /// the list is not ready for consumption", which contradicts the name
    /// `avail` — confirm the intended polarity against the C++ `coded_lists`.
    pub avail: bool,
}
355
356impl CodedLists {
357 /// Creates a new, empty coded-list node.
358 pub fn new() -> Self {
359 Self {
360 next: None,
361 buf: std::ptr::null_mut(),
362 buf_size: 0,
363 avail: false,
364 }
365 }
366}
367
/// `Default` yields the same empty node as [`CodedLists::new`].
impl Default for CodedLists {
    fn default() -> Self {
        Self::new()
    }
}
373
374// =========================================================================
375// MemElasticAllocator — port of `mem_elastic_allocator`
376// =========================================================================
377
/// Default chunk size for elastic allocation (256 KiB). Requests larger than
/// one chunk get a dedicated chunk of exactly the requested size.
const ELASTIC_CHUNK_SIZE: usize = 256 * 1024;
380
/// An arena-style allocator that grows by appending large chunks.
///
/// This is used for coded data buffers where the total size is not known
/// in advance.
pub struct MemElasticAllocator {
    // Append-only list of backing chunks; dispensed pointers point into
    // these, so individual chunks are never grown or shrunk.
    chunks: Vec<Vec<u8>>,
    // Size of each newly appended chunk (oversized requests get a bigger one).
    chunk_size: usize,
    // Bytes already dispensed from the last chunk in `chunks`.
    cur_offset: usize,
}
390
391impl MemElasticAllocator {
392 /// Creates a new elastic allocator with the default chunk size.
393 pub fn new() -> Self {
394 Self {
395 chunks: Vec::new(),
396 chunk_size: ELASTIC_CHUNK_SIZE,
397 cur_offset: 0,
398 }
399 }
400
401 /// Creates a new elastic allocator with a custom chunk size.
402 pub fn with_chunk_size(chunk_size: usize) -> Self {
403 Self {
404 chunks: Vec::new(),
405 chunk_size,
406 cur_offset: 0,
407 }
408 }
409
410 /// Allocates `size` bytes from the arena, returning a raw mutable pointer.
411 ///
412 /// A new chunk is appended if the current one does not have enough room.
413 pub fn alloc_data(&mut self, size: usize) -> Result<*mut u8> {
414 let need = size;
415 // Check if the current chunk has space.
416 if let Some(last) = self.chunks.last() {
417 if self.cur_offset + need <= last.len() {
418 let ptr = unsafe { last.as_ptr().add(self.cur_offset) as *mut u8 };
419 self.cur_offset += need;
420 return Ok(ptr);
421 }
422 }
423 // Need a new chunk.
424 let alloc_size = self.chunk_size.max(need);
425 let chunk = vec![0u8; alloc_size];
426 let ptr = chunk.as_ptr() as *mut u8;
427 self.chunks.push(chunk);
428 self.cur_offset = need;
429 Ok(ptr)
430 }
431
432 /// Releases all memory and resets the allocator.
433 pub fn reset(&mut self) {
434 self.chunks.clear();
435 self.cur_offset = 0;
436 }
437}
438
/// `Default` yields the same empty arena as [`MemElasticAllocator::new`].
impl Default for MemElasticAllocator {
    fn default() -> Self {
        Self::new()
    }
}
444
445// =========================================================================
446// Tests
447// =========================================================================
448
#[cfg(test)]
mod tests {
    use super::*;

    /// Resizing yields zeroed, writable, properly aligned storage.
    #[test]
    fn aligned_vec_basic() {
        let mut buf = AlignedVec::<i32>::new();
        buf.resize(128).unwrap();
        assert_eq!(buf.len(), 128);
        assert_eq!(buf[0], 0);
        buf[0] = 42;
        assert_eq!(buf[0], 42);
        // The backing pointer must honour the default alignment.
        let addr = buf.as_ptr() as usize;
        assert_eq!(addr % BYTE_ALIGNMENT as usize, 0);
    }

    /// Pre-allocating then dispensing the same sizes succeeds.
    #[test]
    fn fixed_allocator_round_trip() {
        let mut alloc = MemFixedAllocator::new();
        for &size in &[100usize, 200] {
            alloc.pre_alloc_data(size, 1);
        }
        alloc.finalize().unwrap();
        let first = alloc.alloc_data(100).unwrap();
        let second = alloc.alloc_data(200).unwrap();
        assert!(!first.is_null());
        assert!(!second.is_null());
    }

    /// A fresh arena hands out a valid pointer and can be reset.
    #[test]
    fn elastic_allocator_basic() {
        let mut arena = MemElasticAllocator::new();
        let ptr = arena.alloc_data(64).unwrap();
        assert!(!ptr.is_null());
        arena.reset();
    }
}