// zk_nalloc/lib.rs

//! nalloc: A ZK-Proof optimized memory allocator.
//!
//! This crate provides a high-performance, deterministic memory allocator
//! specifically designed for Zero-Knowledge proof systems. It is framework-agnostic
//! and works with any ZK system: Halo2, Plonky2, Risc0, SP1, Miden, Cairo, Arkworks, etc.
//!
//! # Features
//!
//! - **Arena-based allocation**: Pre-reserved memory pools for different workload types
//! - **Bump allocation**: O(1) allocation via atomic pointer increment
//! - **Security-first**: Volatile secure wiping for witness data
//! - **Cache-optimized**: 64-byte alignment for FFT/NTT SIMD operations
//! - **Cross-platform**: Linux, macOS, Windows, and Unix support
//! - **Zero ZK dependencies**: Pure memory primitive, no framework lock-in
//! - **Fallback support**: Gracefully falls back to system allocator when arena exhausted
//!
//! # Cargo Features
//!
//! - `fallback` (default): Fall back to system allocator when arena is exhausted
//! - `huge-pages`: Enable Linux 2MB/1GB huge page support
//! - `guard-pages`: Add guard pages at arena boundaries for overflow detection
//! - `mlock`: Lock witness memory to prevent swapping (security)
//!
//! # Usage
//!
//! As a global allocator:
//! ```rust,no_run
//! use zk_nalloc::NAlloc;
//!
//! #[global_allocator]
//! static ALLOC: NAlloc = NAlloc::new();
//!
//! fn main() {
//!     let data = vec![0u64; 1000];
//!     println!("Allocated {} elements", data.len());
//! }
//! ```
//!
//! Using specialized arenas directly:
//! ```rust
//! use zk_nalloc::NAlloc;
//!
//! let alloc = NAlloc::new();
//! let witness = alloc.witness();
//! let ptr = witness.alloc(1024, 8);
//! assert!(!ptr.is_null());
//!
//! // Securely wipe when done
//! unsafe { witness.secure_wipe(); }
//! ```

pub mod arena;
pub mod bump;
pub mod config;
pub mod platform;
pub mod polynomial;
pub mod witness;

pub use arena::{ArenaManager, ArenaStats};
pub use bump::BumpAlloc;
pub use config::*;
pub use platform::sys;
#[cfg(feature = "guard-pages")]
pub use platform::GuardedAlloc;
#[cfg(feature = "huge-pages")]
pub use platform::HugePageSize;
pub use platform::{AllocErrorKind, AllocFailed};
pub use polynomial::PolynomialArena;
pub use witness::WitnessArena;

use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr::{copy_nonoverlapping, null_mut};
use std::sync::atomic::{AtomicPtr, AtomicU8, Ordering};

/// Initialization state for NAlloc.
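///
/// State transitions are one-way: `Uninitialized → Initializing`, then
/// `Initialized` on success or `Fallback` on failure; neither terminal
/// state is ever left.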
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
enum InitState {
    /// Not yet initialized
    Uninitialized = 0,
    /// Currently being initialized by another thread
    Initializing = 1,
    /// Successfully initialized with arenas
    Initialized = 2,
    /// Failed to initialize, using system allocator fallback
    Fallback = 3,
}

/// The global ZK-optimized allocator.
///
/// `NAlloc` provides a drop-in replacement for the standard Rust global allocator,
/// with special optimizations for ZK-Proof workloads.
///
/// # Memory Strategy
///
/// - **Large allocations (>1MB)**: Routed to Polynomial Arena (FFT vectors)
/// - **Small allocations**: Routed to Scratch Arena (temporary buffers)
/// - **Witness data**: Use `NAlloc::witness()` for security-critical allocations
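///
/// A minimal routing sketch (the exact cutoff is `LARGE_ALLOC_THRESHOLD`,
/// re-exported from [`config`]):
///
/// ```rust,no_run
/// use zk_nalloc::NAlloc;
///
/// #[global_allocator]
/// static ALLOC: NAlloc = NAlloc::new();
///
/// fn main() {
///     // 2 MiB of coefficients: above the threshold, routed to the polynomial arena.
///     let coeffs = vec![0u64; 1 << 18];
///     // A 4 KiB buffer: below the threshold, routed to the scratch arena.
///     let buf = vec![0u8; 4096];
///     drop((coeffs, buf));
/// }
/// ```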
///
/// # Thread Safety
///
/// This allocator uses lock-free atomic operations for initialization and
/// allocation. It's safe to use from multiple threads concurrently.
///
/// # Fallback Behavior
///
/// If arena initialization fails (e.g., out of memory), NAlloc gracefully
/// falls back to the system allocator rather than panicking. This ensures
/// your application continues to function even under memory pressure.
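///
/// A quick way to observe which mode is active, using only the public
/// helpers defined below:
///
/// ```rust
/// use zk_nalloc::NAlloc;
///
/// let alloc = NAlloc::new();
/// let _ = alloc.stats(); // force lazy initialization
/// if alloc.is_fallback_mode() {
///     eprintln!("running on the system allocator");
/// } else {
///     assert!(alloc.is_initialized());
/// }
/// ```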
///
/// # Security: `static` Usage and Witness Wipe
///
/// When used as a `#[global_allocator]` static, **Rust does not run `Drop`
/// for statics**. The `impl Drop for NAlloc` therefore only fires for
/// non-static instances (e.g. `NAlloc::try_new()` in tests or scoped provers).
///
/// **For the `static` use-case you must wipe witness memory manually before
/// the prover exits:**
///
/// ```rust,no_run
/// use zk_nalloc::NAlloc;
///
/// #[global_allocator]
/// static ALLOC: NAlloc = NAlloc::new();
///
/// fn shutdown() {
///     // Must be called explicitly — Drop will NOT run for a static.
///     unsafe { ALLOC.witness().secure_wipe(); }
/// }
/// ```
///
/// Failure to do so leaves witness data in RAM until the OS reclaims the
/// pages, which may be observable by other processes on the same host.
#[must_use]
pub struct NAlloc {
    /// Pointer to the ArenaManager (null until initialized)
    arenas: AtomicPtr<ArenaManager>,
    /// Initialization state
    init_state: AtomicU8,
}

impl NAlloc {
    /// Create a new `NAlloc` instance.
    ///
    /// The arenas are lazily initialized on the first allocation.
    pub const fn new() -> Self {
        Self {
            arenas: AtomicPtr::new(null_mut()),
            init_state: AtomicU8::new(InitState::Uninitialized as u8),
        }
    }

    /// Try to create NAlloc and initialize arenas immediately.
    ///
    /// Returns an error if arena allocation fails, allowing the caller
    /// to handle the failure gracefully.
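    ///
    /// # Example
    ///
    /// A minimal sketch of fallible setup; if initialization fails, the
    /// caller can simply keep using the system allocator:
    ///
    /// ```rust
    /// use zk_nalloc::NAlloc;
    ///
    /// match NAlloc::try_new() {
    ///     Ok(alloc) => assert!(alloc.is_initialized()),
    ///     Err(_) => eprintln!("arena init failed; continuing on the system allocator"),
    /// }
    /// ```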
    pub fn try_new() -> Result<Self, AllocFailed> {
        let nalloc = Self::new();
        nalloc.try_init()?;
        Ok(nalloc)
    }

    /// Try to initialize arenas.
    ///
    /// Returns Ok if initialization succeeds or was already done.
    /// Returns Err if initialization fails.
    fn try_init(&self) -> Result<(), AllocFailed> {
        let state = self.init_state.load(Ordering::Acquire);

        match state {
            s if s == InitState::Initialized as u8 => Ok(()),
            s if s == InitState::Fallback as u8 => {
                Err(AllocFailed::with_kind(0, AllocErrorKind::OutOfMemory))
            }
            _ => {
                let ptr = self.init_arenas();
                if ptr.is_null() {
                    Err(AllocFailed::with_kind(0, AllocErrorKind::OutOfMemory))
                } else {
                    Ok(())
                }
            }
        }
    }

    /// Initialize the arenas if not already done.
    ///
    /// This uses a spin-lock pattern with atomic state to prevent
    /// recursive allocation issues and handle initialization failures gracefully.
    #[cold]
    #[inline(never)]
    fn init_arenas(&self) -> *mut ArenaManager {
        // Fast path: already initialized
        let state = self.init_state.load(Ordering::Acquire);
        if state == InitState::Initialized as u8 {
            return self.arenas.load(Ordering::Acquire);
        }
        if state == InitState::Fallback as u8 {
            return null_mut();
        }

        // Try to acquire initialization lock
        if self
            .init_state
            .compare_exchange(
                InitState::Uninitialized as u8,
                InitState::Initializing as u8,
                Ordering::AcqRel,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            // We won the race - initialize
            match ArenaManager::new() {
                Ok(manager) => {
                    // Use system allocator to avoid recursive allocation
                    let layout = Layout::new::<ArenaManager>();
                    let raw = unsafe { System.alloc(layout) as *mut ArenaManager };

                    if raw.is_null() {
                        // Failed to allocate manager struct - enter fallback mode
                        eprintln!("[nalloc] Warning: Failed to allocate ArenaManager struct, using system allocator");
                        self.init_state
                            .store(InitState::Fallback as u8, Ordering::Release);
                        return null_mut();
                    }

                    unsafe {
                        std::ptr::write(raw, manager);
                    }
                    self.arenas.store(raw, Ordering::Release);
                    self.init_state
                        .store(InitState::Initialized as u8, Ordering::Release);
                    return raw;
                }
                Err(e) => {
                    // Arena allocation failed - enter fallback mode
                    eprintln!(
                        "[nalloc] Warning: Arena initialization failed ({}), using system allocator",
                        e
                    );
                    self.init_state
                        .store(InitState::Fallback as u8, Ordering::Release);
                    return null_mut();
                }
            }
        }

        // Another thread is initializing - spin wait with timeout (Issue #2).
        // We mix hint::spin_loop() (PAUSE on x86, YIELD on AArch64) with periodic
        // thread::yield_now() so the OS scheduler can run the thread that is
        // actually performing the initialization. Without the yield, on
        // 2-CPU CI runners all waiting threads can starve the init thread.
        for i in 0..MAX_CAS_RETRIES {
            for _ in 0..SPIN_ITERATIONS {
                std::hint::spin_loop();
            }
            // Every 10 outer iterations hand control back to the OS scheduler
            // so the initializing thread gets CPU time.
            if i % 10 == 9 {
                std::thread::yield_now();
            }
            let state = self.init_state.load(Ordering::Acquire);

            match state {
                s if s == InitState::Initialized as u8 => {
                    return self.arenas.load(Ordering::Acquire);
                }
                s if s == InitState::Fallback as u8 => {
                    return null_mut();
                }
                _ => continue,
            }
        }

        // Issue #2: Timeout - initialization is stuck or taking too long
        // Fall back to system allocator rather than spinning forever
        #[cfg(debug_assertions)]
        eprintln!("[nalloc] Warning: Arena initialization timed out, using system allocator");
        null_mut()
    }

    /// Check if NAlloc is operating in fallback mode (using system allocator).
    #[must_use]
    #[inline]
    pub fn is_fallback_mode(&self) -> bool {
        self.init_state.load(Ordering::Relaxed) == InitState::Fallback as u8
    }

    /// Check if NAlloc is fully initialized with arenas.
    #[must_use]
    #[inline]
    pub fn is_initialized(&self) -> bool {
        self.init_state.load(Ordering::Relaxed) == InitState::Initialized as u8
    }

    #[inline(always)]
    fn get_arenas(&self) -> Option<&ArenaManager> {
        let state = self.init_state.load(Ordering::Acquire);

        if state == InitState::Initialized as u8 {
            let ptr = self.arenas.load(Ordering::Acquire);
            if !ptr.is_null() {
                return Some(unsafe { &*ptr });
            }
        }

        if state == InitState::Uninitialized as u8 || state == InitState::Initializing as u8 {
            let ptr = self.init_arenas();
            if !ptr.is_null() {
                return Some(unsafe { &*ptr });
            }
        }

        None
    }

    /// Access the witness arena directly.
    ///
    /// Use this for allocating sensitive private inputs that need
    /// zero-initialization and secure wiping.
    ///
    /// # Panics
    ///
    /// Panics if arena initialization failed. Use `try_witness()` for
    /// fallible access.
    ///
    /// # Example
    ///
    /// ```rust
    /// use zk_nalloc::NAlloc;
    ///
    /// let alloc = NAlloc::new();
    /// let witness = alloc.witness();
    /// let secret_ptr = witness.alloc(256, 8);
    /// assert!(!secret_ptr.is_null());
    ///
    /// // Securely wipe when done
    /// unsafe { witness.secure_wipe(); }
    /// ```
    #[inline]
    pub fn witness(&self) -> WitnessArena {
        self.try_witness()
            .expect("Arena initialization failed - use try_witness() for fallible access")
    }

    /// Try to access the witness arena.
    ///
    /// Returns `None` if arena initialization failed.
    #[must_use]
    #[inline]
    pub fn try_witness(&self) -> Option<WitnessArena> {
        self.get_arenas().map(|a| WitnessArena::new(a.witness()))
    }

    /// Access the polynomial arena directly.
    ///
    /// Use this for FFT/NTT-friendly polynomial coefficient vectors.
    /// Provides 64-byte alignment by default for SIMD operations.
    ///
    /// # Panics
    ///
    /// Panics if arena initialization failed. Use `try_polynomial()` for
    /// fallible access.
    ///
    /// # Example
    ///
    /// ```rust
    /// use zk_nalloc::NAlloc;
    ///
    /// let alloc = NAlloc::new();
    /// let poly = alloc.polynomial();
    /// let coeffs = poly.alloc_fft_friendly(1024); // 1K coefficients
    /// assert!(!coeffs.is_null());
    /// assert_eq!((coeffs as usize) % 64, 0); // 64-byte aligned
    /// ```
    #[inline]
    pub fn polynomial(&self) -> PolynomialArena {
        self.try_polynomial()
            .expect("Arena initialization failed - use try_polynomial() for fallible access")
    }

    /// Try to access the polynomial arena.
    ///
    /// Returns `None` if arena initialization failed.
    #[must_use]
    #[inline]
    pub fn try_polynomial(&self) -> Option<PolynomialArena> {
        self.get_arenas()
            .map(|a| PolynomialArena::new(a.polynomial()))
    }

    /// Access the scratch arena directly.
    ///
    /// Use this for temporary computation space.
    ///
    /// # Panics
    ///
    /// Panics if arena initialization failed. Use `try_scratch()` for
    /// fallible access.
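    ///
    /// # Example
    ///
    /// A small sketch; `BumpAlloc::alloc(size, align)` is the same call the
    /// global-allocator path uses internally:
    ///
    /// ```rust
    /// use zk_nalloc::NAlloc;
    ///
    /// let alloc = NAlloc::new();
    /// let scratch = alloc.scratch();
    /// let ptr = scratch.alloc(512, 16);
    /// assert!(!ptr.is_null());
    /// assert_eq!((ptr as usize) % 16, 0); // respects requested alignment
    /// ```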
    #[inline]
    pub fn scratch(&self) -> std::sync::Arc<BumpAlloc> {
        self.try_scratch()
            .expect("Arena initialization failed - use try_scratch() for fallible access")
    }

    /// Try to access the scratch arena.
    ///
    /// Returns `None` if arena initialization failed.
    #[must_use]
    #[inline]
    pub fn try_scratch(&self) -> Option<std::sync::Arc<BumpAlloc>> {
        self.get_arenas().map(|a| a.scratch())
    }

    /// Reset all arenas, freeing all allocated memory.
    ///
    /// The witness arena is securely wiped before reset.
    ///
    /// # Safety
    /// This will invalidate all previously allocated memory.
    ///
    /// # Note
    /// Does nothing if operating in fallback mode.
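    ///
    /// # Example
    ///
    /// A sketch of a per-proof lifecycle; after `reset_all`, every pointer
    /// previously handed out by the arenas is dangling:
    ///
    /// ```rust,no_run
    /// use zk_nalloc::NAlloc;
    ///
    /// let alloc = NAlloc::new();
    /// let ptr = alloc.scratch().alloc(64, 8);
    /// assert!(!ptr.is_null());
    /// // ... run one proving round ...
    /// unsafe { alloc.reset_all() }; // `ptr` must not be used after this
    /// ```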
    pub unsafe fn reset_all(&self) {
        if let Some(arenas) = self.get_arenas() {
            arenas.reset_all();
        }
    }

    /// Get statistics about arena usage.
    ///
    /// Returns `None` if operating in fallback mode.
    ///
    /// Useful for monitoring memory consumption and tuning arena sizes.
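    ///
    /// # Example
    ///
    /// A minimal monitoring sketch using the per-arena fields of
    /// [`ArenaStats`]:
    ///
    /// ```rust
    /// use zk_nalloc::NAlloc;
    ///
    /// let alloc = NAlloc::new();
    /// if let Some(stats) = alloc.stats() {
    ///     println!(
    ///         "scratch {}/{} B, polynomial {}/{} B",
    ///         stats.scratch_used, stats.scratch_capacity,
    ///         stats.polynomial_used, stats.polynomial_capacity,
    ///     );
    /// }
    /// ```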
    #[must_use]
    pub fn stats(&self) -> Option<ArenaStats> {
        self.get_arenas().map(|a| a.stats())
    }

    /// Get statistics, returning default stats if in fallback mode.
    #[must_use]
    pub fn stats_or_default(&self) -> ArenaStats {
        self.stats().unwrap_or(ArenaStats {
            witness_used: 0,
            witness_capacity: 0,
            polynomial_used: 0,
            polynomial_capacity: 0,
            scratch_used: 0,
            scratch_capacity: 0,
            #[cfg(feature = "fallback")]
            witness_fallback_bytes: 0,
            #[cfg(feature = "fallback")]
            polynomial_fallback_bytes: 0,
            #[cfg(feature = "fallback")]
            scratch_fallback_bytes: 0,
        })
    }
}

impl Default for NAlloc {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for NAlloc {
    fn drop(&mut self) {
        // Only clean up if we successfully initialized arenas.
        // Fallback mode never allocated an ArenaManager on the heap.
        if *self.init_state.get_mut() == InitState::Initialized as u8 {
            let ptr = *self.arenas.get_mut();
            if !ptr.is_null() {
                unsafe {
                    // Run ArenaManager's own Drop (securely wipes witness, unmaps arenas).
                    std::ptr::drop_in_place(ptr);
                    // Deallocate the heap slot we allocated in init_arenas().
                    let layout = Layout::new::<ArenaManager>();
                    System.dealloc(ptr as *mut u8, layout);
                }
            }
        }
    }
}

// Safety: NAlloc uses atomic operations for all shared state
unsafe impl Send for NAlloc {}
unsafe impl Sync for NAlloc {}

unsafe impl GlobalAlloc for NAlloc {
    #[inline(always)]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        debug_assert!(layout.size() > 0);
        debug_assert!(layout.align() > 0);
        debug_assert!(layout.align().is_power_of_two());

        // Try to use arenas
        if let Some(arenas) = self.get_arenas() {
            // Strategy:
            // 1. Large allocations (>threshold) go to Polynomial Arena (likely vectors)
            // 2. Smaller allocations go to Scratch Arena
            // 3. User can explicitly use Witness Arena via NAlloc::witness()

            if layout.size() > LARGE_ALLOC_THRESHOLD {
                arenas.polynomial().alloc(layout.size(), layout.align())
            } else {
                arenas.scratch().alloc(layout.size(), layout.align())
            }
        } else {
            // Fallback to system allocator
            System.alloc(layout)
        }
    }

    #[inline(always)]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // In fallback mode, we need to actually deallocate
        if self.is_fallback_mode() {
            System.dealloc(ptr, layout);
            return;
        }

        // Issue #1: Check if this allocation came from fallback
        // Arena allocations are within known address ranges; fallback allocations are not
        if let Some(arenas) = self.get_arenas() {
            let ptr_addr = ptr as usize;
            if !arenas.contains_address(ptr_addr) {
                // This was a fallback allocation - free it via system allocator
                System.dealloc(ptr, layout);
            }
        }

        // For arena allocations, deallocation is a no-op.
        // Memory is reclaimed by calling reset() on the arena.
    }

    #[inline(always)]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        debug_assert!(!ptr.is_null());
        debug_assert!(layout.size() > 0);
        debug_assert!(new_size > 0);

        let old_size = layout.size();

        // If the new size is smaller or equal, just return the same pointer.
        // (The bump allocator doesn't shrink.)
        if new_size <= old_size {
            return ptr;
        }

        // Allocate a new block
        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
        let new_ptr = self.alloc(new_layout);

        if new_ptr.is_null() {
            return null_mut();
        }

        // Copy the old data
        copy_nonoverlapping(ptr, new_ptr, old_size);

        // Dealloc the old pointer (no-op for bump allocator, but semantically correct)
        self.dealloc(ptr, layout);

        new_ptr
    }

    #[inline(always)]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let ptr = self.alloc(layout);
        if !ptr.is_null() {
            // Note: mmap'd memory is already zeroed, but we zero anyway for
            // recycled memory or if user specifically requested zeroed allocation.
            std::ptr::write_bytes(ptr, 0, layout.size());
        }
        ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::alloc::GlobalAlloc;

    #[test]
    fn test_global_alloc_api() {
        let alloc = NAlloc::new();
        let layout = Layout::from_size_align(1024, 8).unwrap();
        unsafe {
            let ptr = alloc.alloc(layout);
            assert!(!ptr.is_null());
            // Check that we can write to it
            ptr.write(42);
            assert_eq!(ptr.read(), 42);
        }
    }

    #[test]
    fn test_try_new() {
        // This should succeed on any reasonable system
        let result = NAlloc::try_new();
        assert!(result.is_ok());

        let alloc = result.unwrap();
        assert!(alloc.is_initialized());
        assert!(!alloc.is_fallback_mode());
    }

    #[test]
    fn test_fallback_mode_detection() {
        let alloc = NAlloc::new();
        // Force initialization
        let _ = alloc.stats();

        // Should be initialized (not fallback) on a normal system
        assert!(alloc.is_initialized() || alloc.is_fallback_mode());
    }

    #[test]
    fn test_try_accessors() {
        let alloc = NAlloc::new();

        // These should return Some on a normal system
        assert!(alloc.try_witness().is_some());
        assert!(alloc.try_polynomial().is_some());
        assert!(alloc.try_scratch().is_some());
    }

    #[test]
    fn test_realloc() {
        let alloc = NAlloc::new();
        let layout = Layout::from_size_align(64, 8).unwrap();
        unsafe {
            let ptr = alloc.alloc(layout);
            assert!(!ptr.is_null());

            // Write some data
            for i in 0..64 {
                ptr.add(i).write(i as u8);
            }

            // Realloc to a larger size
            let new_ptr = alloc.realloc(ptr, layout, 128);
            assert!(!new_ptr.is_null());

            // Verify data was copied
            for i in 0..64 {
                assert_eq!(new_ptr.add(i).read(), i as u8);
            }
        }
    }

    #[test]
    fn test_alloc_zeroed() {
        let alloc = NAlloc::new();
        let layout = Layout::from_size_align(1024, 8).unwrap();
        unsafe {
            let ptr = alloc.alloc_zeroed(layout);
            assert!(!ptr.is_null());

            // Verify memory is zeroed
            for i in 0..1024 {
                assert_eq!(*ptr.add(i), 0);
            }
        }
    }

    #[test]
    fn test_stats() {
        let alloc = NAlloc::new();

        // Trigger arena initialization with an allocation
        let layout = Layout::from_size_align(1024, 8).unwrap();
        unsafe {
            let _ = alloc.alloc(layout);
        }

        let stats = alloc.stats();
        assert!(stats.is_some());

        let stats = stats.unwrap();
        assert!(stats.scratch_used >= 1024);
        assert!(stats.total_capacity() > 0);
    }

    #[test]
    fn test_stats_or_default() {
        let alloc = NAlloc::new();

        // Should work even before initialization
        let stats = alloc.stats_or_default();
        // Just verify it doesn't panic
        let _ = stats.total_capacity();
    }

    #[test]
    fn test_large_allocation_routing() {
        let alloc = NAlloc::new();

        // Small allocation (<1MB) should go to scratch
        let small_layout = Layout::from_size_align(1024, 8).unwrap();
        unsafe {
            let _ = alloc.alloc(small_layout);
        }

        let stats_after_small = alloc.stats().unwrap();
        assert!(stats_after_small.scratch_used >= 1024);

        // Large allocation (>1MB) should go to polynomial
        let large_layout = Layout::from_size_align(2 * 1024 * 1024, 64).unwrap();
        unsafe {
            let _ = alloc.alloc(large_layout);
        }

        let stats_after_large = alloc.stats().unwrap();
        assert!(stats_after_large.polynomial_used >= 2 * 1024 * 1024);
    }

    #[test]
    fn test_drop_deallocates_arena_manager() {
        // Verify that Drop runs without panic and actually frees the ArenaManager.
        // If Drop is missing, valgrind/miri would catch the leak; here we test
        // that drop_in_place + dealloc completes without UB or double-free.
        {
            let alloc = NAlloc::try_new().expect("NAlloc::try_new should succeed");
            assert!(alloc.is_initialized());
            // alloc drops here → Drop impl runs → ArenaManager is freed
        }
        // Reaching this point without a crash or panic means drop_in_place +
        // dealloc completed without double-freeing.
        // Run a second init to confirm the heap is still healthy.
        let alloc2 = NAlloc::try_new().expect("heap still healthy after previous drop");
        assert!(alloc2.is_initialized());
    }

    #[test]
    fn test_concurrent_init() {
        use std::sync::Arc;
        use std::thread;

        let alloc = Arc::new(NAlloc::new());
        let mut handles = vec![];

        // Spawn multiple threads that try to initialize simultaneously
        for _ in 0..8 {
            let alloc = Arc::clone(&alloc);
            handles.push(thread::spawn(move || {
                let layout = Layout::from_size_align(64, 8).unwrap();
                unsafe {
                    let ptr = alloc.alloc(layout);
                    assert!(!ptr.is_null());
                }
            }));
        }

        for h in handles {
            h.join().unwrap();
        }

        // After all threads complete, should be in a consistent state
        assert!(alloc.is_initialized() || alloc.is_fallback_mode());
    }
}