// zombie_rs/runtime.rs

1//! Runtime - Global state manager for zombie computations.
2//!
3//! The global runtime manages:
4//! - Current tock (logical time)
5//! - Timeline (context tree / tock tree)
6//! - Eviction heap (GD algorithm)
7//! - Record stack
8//! - Replay stack
9
10// Allow Box<HashMap> because Option<Box<HashMap>> is 8 bytes when None,
11// while Option<HashMap> is 48 bytes (HashMap has no niche optimization).
12#![allow(clippy::box_collection)]
13
14use std::cell::RefCell;
15use std::collections::HashMap;
16use std::rc::{Rc, Weak};
17
18use crate::config::ZombieConfig;
19use crate::context::{ContextNode, EZombieNode, FullContext, ZombieNode};
20use crate::heap::{GDHeap, HeapIndexNotify};
21use crate::meter::ZombieMeter;
22use crate::record::{Record, Replayer};
23use crate::splay_list::SplayList;
24use crate::tock::Tock;
25use crate::union_find::UFSet;
26
/// Result type for `find_context_and_value`: (context_start_tock, context_ref, typed_node).
/// This combines the context lookup result with the downcast zombie node for efficient caching.
/// The context reference borrows from the runtime's timeline for lifetime `'a`.
pub type ContextLookupResult<'a, T> = (Tock, &'a Rc<dyn ContextNode>, Rc<ZombieNode<T>>);
30
/// Replay state for tracking recomputation target.
pub struct ReplayState {
    /// Tock of the value being recreated (`Tock::MAX` marks the inactive sentinel state).
    pub target: Tock,
    /// Shared slot where the recomputed node is deposited once available.
    pub result: Rc<RefCell<Option<Rc<dyn EZombieNode>>>>,
}
36
37impl ReplayState {
38    pub fn new(target: Tock, result: Rc<RefCell<Option<Rc<dyn EZombieNode>>>>) -> Self {
39        Self { target, result }
40    }
41
42    pub fn has_result(&self) -> bool {
43        self.result.borrow().is_some()
44    }
45
46    pub fn set_result(&self, node: Rc<dyn EZombieNode>) {
47        *self.result.borrow_mut() = Some(node);
48    }
49
50    pub fn take_result(&self) -> Option<Rc<dyn EZombieNode>> {
51        self.result.borrow_mut().take()
52    }
53}
54
/// Handle for evictable context in the heap.
///
/// Implements `HeapIndexNotify` to track its position in the heap,
/// enabling O(log n) touch operations via `pool_index`.
struct EvictHandle {
    // Keeps the FullContext alive while it sits in the eviction heap.
    context: Rc<FullContext>,
}
62
impl HeapIndexNotify for EvictHandle {
    // The heap moved this handle; mirror the new slot into the context so
    // `touch_context` can locate it in O(1) via `pool_index`.
    fn notify_index_changed(&self, new_index: usize) {
        self.context.set_pool_index(new_index);
    }

    // The handle left the heap (popped/evicted); invalidate the cached slot.
    fn notify_removed(&self) {
        self.context.clear_pool_index();
    }
}
72
// Equality is identity: two handles are equal only when they wrap the very
// same FullContext allocation (pointer equality, not structural comparison).
impl PartialEq for EvictHandle {
    fn eq(&self, other: &Self) -> bool {
        Rc::ptr_eq(&self.context, &other.context)
    }
}

impl Eq for EvictHandle {}

impl PartialOrd for EvictHandle {
    // Canonical delegation to `Ord` keeps the two orderings consistent.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for EvictHandle {
    // Ordered by the context's start tock (position on the timeline).
    // NOTE(review): `cmp` can return `Equal` for two distinct handles sharing a
    // start tock, which disagrees with the pointer-based `PartialEq` above —
    // confirm the heap never relies on Ord/Eq agreement.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.context.start_tock().cmp(&other.context.start_tock())
    }
}
92
/// Global state manager for the Zombie runtime.
pub struct Runtime {
    /// Immutable configuration (metric, memory limit, unroll factor, ...).
    config: ZombieConfig,

    /// Current logical time.
    current_tock: Tock,

    /// Timeline tree mapping tocks to contexts.
    timeline: SplayList<Tock, Rc<dyn ContextNode>>,

    /// Eviction heap using GD algorithm.
    heap: GDHeap<EvictHandle>,

    /// Record stack.
    ///
    /// # Invariant
    ///
    /// This stack is NEVER empty after initialization. It always contains at least
    /// the `RootRecord` at the bottom. This invariant is established in `new()` and
    /// maintained by all operations. Methods like `current_record()` rely on this
    /// invariant and use `unwrap()` knowing it cannot fail in correctly initialized state.
    records: Vec<Record>,

    /// Replay stack. Always holds at least the sentinel state (target `Tock::MAX`)
    /// pushed in `with_config`, so `len() == 1` means "not replaying".
    replays: Vec<ReplayState>,

    /// Time meter.
    meter: ZombieMeter,

    /// Total space used by evictable contexts (O(1) tracking instead of O(N) iteration).
    total_space_used: usize,

    /// Backedges map: stores backedges for each context (keyed by start_tock).
    /// Only allocated when memory_limit is set (eviction enabled).
    /// Using Option<Box<...>> ensures None variant is only 8 bytes (pointer size).
    backedges: Option<Box<HashMap<Tock, UFSet>>>,
}
130
// One Runtime per thread; `None` until `Runtime::init` / `init_with_config` runs.
thread_local! {
    static RUNTIME: RefCell<Option<Runtime>> = const { RefCell::new(None) };
}
134
135impl Runtime {
136    /// Create a new Runtime with default config.
137    pub fn new() -> Self {
138        Self::with_config(ZombieConfig::default())
139    }
140
141    /// Create with custom config.
142    ///
143    /// Note: This also initializes UFArena if not already done.
144    /// This ensures Runtime::new() can be used directly in tests.
145    pub fn with_config(config: ZombieConfig) -> Self {
146        // Ensure UFArena is initialized (idempotent)
147        if !crate::union_find::UFArena::is_initialized() {
148            crate::union_find::UFArena::init();
149        }
150
151        // Initialize with root record at tock 0
152        let root_record = Record::new_root(Tock(0));
153
154        // Default replay state with MAX target (no active replay)
155        let default_replay = ReplayState::new(
156            Tock::MAX,
157            Rc::new(RefCell::new(None)),
158        );
159
160        // Only create backedges map if eviction is enabled
161        let backedges = if config.memory_limit.is_some() {
162            Some(Box::new(HashMap::new()))
163        } else {
164            None
165        };
166
167        // Create GDHeap with config's approx_factor and staleness
168        let heap = GDHeap::with_config(config.approx_factor, config.staleness);
169
170        Self {
171            config,
172            current_tock: Tock(1), // Start at 1 like C++
173            timeline: SplayList::new(),
174            heap,
175            records: vec![root_record],
176            replays: vec![default_replay],
177            meter: ZombieMeter::new(),
178            total_space_used: 0,
179            backedges,
180        }
181    }
182
183    /// Initialize thread-local Runtime.
184    ///
185    /// # Panics
186    ///
187    /// This function does not panic. If already initialized, the old state
188    /// is replaced (useful for test isolation).
189    pub fn init() {
190        // Initialize UFArena for GhostCell-based Union-Find
191        if !crate::union_find::UFArena::is_initialized() {
192            crate::union_find::UFArena::init();
193        }
194        RUNTIME.with(|t| {
195            *t.borrow_mut() = Some(Runtime::new());
196        });
197    }
198
199    /// Initialize with custom config.
200    ///
201    /// # Panics
202    ///
203    /// This function does not panic. If already initialized, the old state
204    /// is replaced (useful for test isolation).
205    pub fn init_with_config(config: ZombieConfig) {
206        // Initialize UFArena for GhostCell-based Union-Find
207        if !crate::union_find::UFArena::is_initialized() {
208            crate::union_find::UFArena::init();
209        }
210        RUNTIME.with(|t| {
211            *t.borrow_mut() = Some(Runtime::with_config(config));
212        });
213    }
214
215    /// Check if Runtime is initialized on the current thread.
216    #[inline]
217    pub fn is_initialized() -> bool {
218        RUNTIME.with(|t| t.borrow().is_some())
219    }
220
221    /// Access thread-local Runtime (immutable).
222    ///
223    /// # Panics
224    ///
225    /// Panics if `Runtime::init()` has not been called on this thread.
226    #[inline]
227    pub fn with<R, F: FnOnce(&Runtime) -> R>(f: F) -> R {
228        RUNTIME.with(|t| {
229            let guard = t.borrow();
230            let trailokya = guard.as_ref().expect(
231                "Runtime not initialized. Call Runtime::init() before using Zombie."
232            );
233            f(trailokya)
234        })
235    }
236
237    /// Access thread-local Runtime (mutable).
238    ///
239    /// # Panics
240    ///
241    /// Panics if `Runtime::init()` has not been called on this thread.
242    #[inline]
243    pub fn with_mut<R, F: FnOnce(&mut Runtime) -> R>(f: F) -> R {
244        RUNTIME.with(|t| {
245            let mut guard = t.borrow_mut();
246            let trailokya = guard.as_mut().expect(
247                "Runtime not initialized. Call Runtime::init() before using Zombie."
248            );
249            f(trailokya)
250        })
251    }
252
253    /// Try to access thread-local Runtime without panicking.
254    ///
255    /// Returns `None` if not initialized.
256    #[inline]
257    pub fn try_with<R, F: FnOnce(&Runtime) -> R>(f: F) -> Option<R> {
258        RUNTIME.with(|t| {
259            let guard = t.borrow();
260            guard.as_ref().map(f)
261        })
262    }
263
264    /// Try to access thread-local Runtime mutably without panicking.
265    ///
266    /// Returns `None` if not initialized.
267    #[inline]
268    pub fn try_with_mut<R, F: FnOnce(&mut Runtime) -> R>(f: F) -> Option<R> {
269        RUNTIME.with(|t| {
270            let mut guard = t.borrow_mut();
271            guard.as_mut().map(f)
272        })
273    }
274
275    // ========== Config Queries ==========
276
    /// Check if memory_limit is set (eviction enabled).
    ///
    /// This is a simple field access, used to skip expensive operations
    /// (backedge registration, heap maintenance) when eviction is disabled.
    #[inline]
    pub fn has_memory_limit(&self) -> bool {
        self.config.memory_limit.is_some()
    }
285
286    // ========== Tock Management ==========
287
288    /// Allocate a tock and return the old value (post-increment).
289    #[inline]
290    pub fn tick(&mut self) -> Tock {
291        let old = self.current_tock;
292        self.current_tock = Tock(self.current_tock.0 + 1);
293        old
294    }
295
    /// Get current tock without advancing (read-only counterpart of `tick`).
    #[inline]
    pub fn current_tock(&self) -> Tock {
        self.current_tock
    }
301
    /// Set current tock (for replay).
    ///
    /// Unlike `tick`, this jumps the logical clock directly to `tock`.
    pub fn set_tock(&mut self, tock: Tock) {
        self.current_tock = tock;
    }
306
307    // ========== Timeline (Context Tree) ==========
308
309    /// Find context containing a tock (with its start key).
310    pub fn find_context_with_key(&mut self, tock: Tock) -> Option<(Tock, &Rc<dyn ContextNode>)> {
311        self.timeline.find_le_with_key(&tock).map(|(k, v)| (*k, v))
312    }
313
314    /// Find context and typed value for a given tock.
315    /// Returns (context_start_tock, context_ref, typed_node) if found.
316    /// This is an optimized version that returns the context for caching during bind setup.
317    pub fn find_context_and_value<T: 'static + Clone + crate::meter::ZombieOps>(
318        &mut self,
319        tock: Tock,
320    ) -> Option<ContextLookupResult<'_, T>> {
321        let (ctx_start, ctx) = self.timeline.find_le_with_key(&tock)?;
322        let ctx_start = *ctx_start;
323
324        // Compute index within context
325        let idx = crate::tock::try_tock_to_index(tock, ctx_start)?;
326
327        // Verify tock is within this context's range
328        if tock >= ctx.end_tock() {
329            return None;
330        }
331
332        // Get the value at this index
333        let node = ctx.get_value_at(idx)?;
334
335        // Downcast from type-erased to typed node
336        let typed_rc = crate::context::downcast_zombie::<T>(&node)?;
337
338        Some((ctx_start, ctx, typed_rc))
339    }
340
341    /// Finds the context to replay from for a given target tock.
342    ///
343    /// Returns the context that can replay to recreate the target value.
344    /// There are two cases:
345    /// 1. Target is in a context that still exists: use that context's start_tock
346    /// 2. Target was in a context that was deleted (evicted): use predecessor's end_tock
347    ///
348    /// Returns (replay_from_tock, context) for replay initialization.
349    pub fn find_replay_context(&mut self, target_tock: Tock) -> Option<(Tock, &Rc<dyn ContextNode>)> {
350        // Find the context containing or preceding the target tock
351        let ctx = self.timeline.find_le(&target_tock)?;
352
353        // Case 1: Context still contains target [start, end) where end > target
354        if ctx.end_tock() > target_tock {
355            // Context exists and contains target - replay from its start
356            return Some((ctx.start_tock(), ctx));
357        }
358
359        // Case 2: Context ends before or at target (end <= target)
360        // This means target was in a context that was deleted (evicted from timeline)
361        // We need to replay from this context's end_tock
362        // The replayer must exist for replay to work
363        if ctx.get_replayer().is_some() {
364            return Some((ctx.end_tock(), ctx));
365        }
366
367        // Fallback: no replayer available (e.g., RootContext)
368        None
369    }
370
    /// Insert a context into the timeline, keyed by its start tock.
    pub fn insert_context(&mut self, start: Tock, ctx: Rc<dyn ContextNode>) {
        self.timeline.insert(start, ctx);
    }
375
    /// Remove the context keyed by `start` from the timeline, if present.
    pub fn remove_context(&mut self, start: Tock) {
        self.timeline.remove(&start);
    }
380
381    /// Look up a zombie node at a specific tock.
382    ///
383    /// This is a common pattern used in multiple places:
384    /// 1. Find context containing the tock
385    /// 2. Compute index within context
386    /// 3. Verify tock is within range
387    /// 4. Get the value
388    ///
389    /// Returns None if tock is not in any context or context doesn't contain value.
390    #[inline]
391    fn lookup_node_at(&mut self, tock: Tock) -> Option<Rc<dyn EZombieNode>> {
392        let (ctx_start, ctx) = self.timeline.find_le_with_key(&tock)?;
393        let idx = crate::tock::try_tock_to_index(tock, *ctx_start)?;
394        if tock < ctx.end_tock() {
395            ctx.get_value_at(idx)
396        } else {
397            None
398        }
399    }
400
401    // ========== Record Stack ==========
402
    /// Get current record (top of record stack).
    ///
    /// # Safety Invariant
    ///
    /// Uses `unwrap()` because the record stack is guaranteed non-empty after
    /// initialization. See the documentation on the `records` field.
    #[inline]
    pub fn current_record(&self) -> &Record {
        // Safe: `records` always holds at least the root record (see field docs).
        self.records.last().unwrap()
    }
413
    /// Get current record mutably.
    ///
    /// # Safety Invariant
    ///
    /// Uses `unwrap()` because the record stack is guaranteed non-empty after
    /// initialization. See the documentation on the `records` field.
    #[inline]
    pub fn current_record_mut(&mut self) -> &mut Record {
        // Safe: `records` always holds at least the root record (see field docs).
        self.records.last_mut().unwrap()
    }
424
425    /// Push a HeadRecord for a new computation.
426    pub fn push_head_record(&mut self, replayer: Rc<Replayer>, start_tock: Tock) {
427        let time = self.meter.time();
428        self.records.push(Record::new_head(replayer, start_tock, time));
429    }
430
431    /// Check if current record is a HeadRecord (tailcall context)
432    pub fn is_current_record_tailcall(&self) -> bool {
433        self.records.last().is_some_and(|r| r.is_tailcall())
434    }
435
    /// Suspend current record (creates context).
    /// Returns true if this was a tailcall (HeadRecord suspend).
    pub fn suspend_record(&mut self, replayer: Rc<Replayer>) -> bool {
        let current_tock = self.current_tock;
        let is_tailcall = self.is_current_record_tailcall();
        // Safe: record stack is never empty (root-record invariant on `records`).
        let record = self.records.last_mut().unwrap();

        // Create heap push closure for HeadRecord suspend (adds FullContext to heap)
        // Borrows are split field-by-field so `record` (from `self.records`) can
        // coexist with mutable borrows of the heap/timeline/counters.
        let heap = &mut self.heap;
        let metric = self.config.metric;
        let total_space_used = &mut self.total_space_used;

        let mut heap_push_fn = |ctx: Rc<FullContext>| {
            // Track space for O(1) total_space queries.
            *total_space_used += ctx.space().as_bytes();
            let cost = ctx.cost(metric);
            heap.push_notify(EvictHandle { context: ctx }, cost);
        };

        // Create timeline insert closure
        let timeline = &mut self.timeline;
        let mut insert_fn = |start: Tock, ctx: Rc<dyn ContextNode>| {
            timeline.insert(start, ctx);
        };

        // Suspend record - creates RootContext or FullContext
        // HeadRecord suspend saves state to FullContext for potential eviction
        let backedge_info = record.suspend(replayer, current_tock, &mut insert_fn, Some(&mut heap_push_fn));

        // Register backedges if memory_limit is set (eviction enabled).
        // Uses timeline lookup with splay tree locality - recently accessed contexts
        // are near the root, making lookups nearly O(1).
        // Backedges are stored in Runtime's HashMap, not in FullContext.
        if let Some(ref mut backedges_map) = self.backedges
            && let Some((deps, forward_uf)) = backedge_info
        {
            for dep_tock in deps.iter() {
                if let Some((dep_start, _dep_ctx)) = self.timeline.find_le_with_key(dep_tock) {
                    // Store backedge using dependency's start_tock as key
                    backedges_map
                        .entry(*dep_start)
                        .or_insert_with(UFSet::new)
                        .insert(forward_uf);
                }
            }
        }

        is_tailcall
    }
484
485    /// Replace current record with a new HeadRecord (for tailcall semantics).
486    /// This is used when an inner bind happens inside a HeadRecord.
487    pub fn replace_head_record(&mut self, replayer: Rc<Replayer>, start_tock: Tock) {
488        let time = self.meter.time();
489        // Pop the old HeadRecord (which was already suspended/completed)
490        self.records.pop();
491        // Push the new HeadRecord
492        self.records.push(Record::new_head(replayer, start_tock, time));
493    }
494
    /// Finish record with result (creates ValueRecord).
    /// Returns true if actually finished, false if record was already replaced by tailcall.
    pub fn finish_record(&mut self, result_tock: Tock) -> bool {
        // Check if current record is a HeadRecord.
        // If not, our HeadRecord was replaced by an inner bind's tailcall.
        if !self.records.last().is_some_and(|r| r.is_head()) {
            return false;
        }

        // Complete current record
        let current_tock = self.current_tock;
        let metric = self.config.metric;

        // Safe: record stack is never empty (root-record invariant on `records`).
        let record = self.records.last_mut().unwrap();

        // Scope the timeline/heap/counter borrows so `self` is free again for
        // the backedge registration below.
        let forward_uf_result = {
            let timeline = &mut self.timeline;
            let heap = &mut self.heap;
            let total_space_used = &mut self.total_space_used;

            let mut insert_fn = |start: Tock, ctx: Rc<dyn ContextNode>| {
                timeline.insert(start, ctx);
            };

            let mut heap_push_fn = |ctx: Rc<FullContext>| {
                // Track space for O(1) total_space queries
                *total_space_used += ctx.space().as_bytes();
                let cost = ctx.cost(metric);
                heap.push_notify(EvictHandle { context: ctx }, cost);
            };

            record.complete(current_tock, None, &mut insert_fn, &mut heap_push_fn)
        };

        // Only register backedges if memory_limit is set (eviction is enabled).
        // Uses timeline lookup with splay tree locality for O(1) amortized performance.
        // Backedges are stored in Runtime's HashMap, not in FullContext.
        if let Some(ref mut backedges_map) = self.backedges
            && let Some((deps, new_ctx_forward_uf)) = forward_uf_result
        {
            for dep_tock in deps.iter() {
                if let Some((dep_start, _dep_ctx)) = self.timeline.find_le_with_key(dep_tock) {
                    // Store backedge using dependency's start_tock as key
                    backedges_map
                        .entry(*dep_start)
                        .or_insert_with(UFSet::new)
                        .insert(new_ctx_forward_uf);
                }
            }
        }

        // Capture the start_tock of the just-created context before replacing record
        let context_start_tock = record.start_tock();

        // Replace with ValueRecord (includes context_start_tock for eviction protection)
        self.records.pop();
        self.records.push(Record::new_value(result_tock, context_start_tock));

        // During replay: explicitly set the result node for the replay target.
        // This is necessary because nested binds create zombies with new tocks during replay
        // that don't match the original target.
        // (`replays.len() > 1` means a real replay is active beyond the sentinel.)
        if self.replays.len() > 1
            && result_tock != Tock::MAX
            && let Some(node) = self.lookup_node_at(result_tock)
        {
            self.set_replay_result(node);
        }

        // Check memory limit and evict if necessary.
        self.maybe_evict_with_protection(Some(context_start_tock));

        true
    }
568
569    /// Get a weak reference to a zombie node by its tock.
570    pub fn get_weak(&mut self, tock: Tock) -> Option<Weak<dyn EZombieNode>> {
571        self.lookup_node_at(tock).map(|rc| Rc::downgrade(&rc))
572    }
573
    /// Pop value record and resume parent, return result tock.
    ///
    /// # Panics
    ///
    /// Panics if the record stack is empty or its top is not a `ValueRecord`.
    pub fn pop_value_record(&mut self) -> Tock {
        let value_record = self.records.pop().unwrap();
        let result_tock = value_record.get_result_tock().expect("Expected ValueRecord");
        let context_start_tock = value_record.get_context_start_tock();

        // Resume parent record at a freshly allocated tock.
        let new_tock = self.tick();
        if let Some(parent) = self.records.last_mut() {
            parent.resume(new_tock);
        }

        // Check memory limit and evict if necessary.
        // The context containing the result is protected from eviction because
        // the Zombie hasn't been fully returned yet - its ptr_cache (a Weak reference)
        // would become invalid if the context is evicted during nested bind operations.
        self.maybe_evict_with_protection(context_start_tock);

        result_tock
    }
594
    /// Prepare replay for current record.
    ///
    /// Builds an input getter backed by the timeline and hands it to the
    /// record, which resolves its recorded input tocks into live nodes.
    pub fn prepare_replay_current_record(
        &mut self,
    ) -> (Rc<Replayer>, crate::record::ReplayInputs) {
        // Safe: record stack is never empty (root-record invariant on `records`).
        let record = self.records.last_mut().unwrap();

        // Create input getter.
        // NOTE: duplicates `lookup_node_at` logic because `record` already
        // mutably borrows `self.records`, so `&mut self` methods are unavailable.
        let timeline = &mut self.timeline;
        let mut get_input = |tock: Tock| -> Option<Rc<dyn EZombieNode>> {
            let (ctx_start, ctx) = timeline.find_le_with_key(&tock)?;
            // Use try_tock_to_index which safely handles boundary cases
            let idx = crate::tock::try_tock_to_index(tock, *ctx_start)?;
            if tock < ctx.end_tock() {
                ctx.get_value_at(idx)
            } else {
                // Tock falls past this context's [start, end) range.
                None
            }
        };

        record.prepare_replay(&mut get_input)
    }
616
617    /// Get replay inputs for a replayer without marking as played.
618    /// Used when resuming from a nested replay (WaitingForInput state).
619    pub fn get_replay_inputs_for_replayer(
620        &mut self,
621        replayer: &Rc<Replayer>,
622    ) -> crate::record::ReplayInputs {
623        let mut inputs = crate::record::ReplayInputs::new();
624        for &input_tock in replayer.inputs.iter() {
625            let node = self.lookup_node_at(input_tock);
626            inputs.push((input_tock, node));
627        }
628        inputs
629    }
630
631    /// Pop record after replay completes (should be empty HeadRecord).
632    pub fn pop_record_after_replay(&mut self) {
633        let record = self.records.pop().unwrap();
634        debug_assert!(record.values().is_empty(), "Record should be empty after replay");
635    }
636
    /// Register a dependency on the current record.
    ///
    /// Delegates to the record on top of the stack; the recorded tock later
    /// feeds replay-input resolution and backedge registration.
    pub fn register_dependency(&mut self, tock: Tock) {
        self.current_record_mut().register_dependency(tock);
    }
641
642    // ========== Replay Stack ==========
643
644    /// Push replay state.
645    pub fn push_replay(&mut self, target: Tock, result: Rc<RefCell<Option<Rc<dyn EZombieNode>>>>) {
646        self.replays.push(ReplayState::new(target, result));
647    }
648
649    /// Pop replay state and return result.
650    pub fn pop_replay(&mut self) -> Option<Rc<dyn EZombieNode>> {
651        self.replays.pop().and_then(|state| state.take_result())
652    }
653
654    /// Check if we've reached current replay target.
655    pub fn at_replay_target(&self, tock: Tock) -> bool {
656        self.replays.last().is_some_and(|r| r.target == tock)
657    }
658
659    /// Set replay result.
660    pub fn set_replay_result(&self, node: Rc<dyn EZombieNode>) {
661        if let Some(replay) = self.replays.last() {
662            replay.set_result(node);
663        }
664    }
665
666    /// Check if current replay has result.
667    pub fn replay_has_result(&self) -> bool {
668        self.replays.last().is_none_or(|r| r.has_result())
669    }
670
671    /// Get current replay target.
672    pub fn current_replay_target(&self) -> Tock {
673        self.replays.last().map_or(Tock::MAX, |r| r.target)
674    }
675
    /// Replay depth (1 = not replaying).
    ///
    /// The stack always contains the sentinel state pushed in `with_config`,
    /// so a depth of 1 means no replay is active.
    pub fn replay_depth(&self) -> usize {
        self.replays.len()
    }
680
681    // ========== Unroll Optimization ==========
682
683    /// Check if we should unroll (skip creating context).
684    ///
685    /// C++ implementation:
686    /// ```cpp
687    /// if (t.records.back()->is_tailcall() && t.current_tock - t.records.back()->t < unroll_factor) {
688    ///     // unroll
689    /// }
690    /// ```
691    ///
692    /// We only unroll when:
693    /// 1. Current record is a tailcall (HeadRecord)
694    /// 2. The tock delta is within unroll_factor
695    #[inline]
696    pub fn should_unroll(&self) -> bool {
697        let record = self.current_record();
698        // Only unroll in tailcall context (HeadRecord)
699        if !record.is_tailcall() {
700            return false;
701        }
702        let tock_delta = (self.current_tock.0 - record.start_tock().0).max(0) as usize;
703        tock_delta < self.config.unroll_factor
704    }
705
    /// Get unroll factor from config (see `should_unroll` for its use).
    pub fn unroll_factor(&self) -> usize {
        self.config.unroll_factor
    }
710
711    // ========== GD Touch ==========
712
713    /// Touch a context in the GD heap to update its priority - O(log n).
714    ///
715    /// This implements the "access recency" part of the Greedy-Dual algorithm.
716    /// When a context is accessed (its values read), we touch it to delay eviction.
717    ///
718    /// Uses `pool_index` stored in FullContext for O(1) lookup + O(log n) rebalance,
719    /// matching the C++ implementation's performance characteristics.
720    ///
721    /// # Performance
722    ///
723    /// - O(log n) using pool_index (like C++ implementation)
724    /// - Only called when `memory_limit` is set (eviction enabled)
725    /// - Only called from slow path (cache miss)
726    #[inline]
727    pub fn touch_context(&mut self, pool_index: usize) {
728        // Update last_accessed timestamp (C++ does this in accessed())
729        if let Some(handle) = self.heap.get_at(pool_index) {
730            handle.context.accessed();
731        }
732
733        // C++ touch() only updates L_at_insert, doesn't recalculate cost
734        // Matching C++ behavior exactly - cost is only recalculated during eviction (pop)
735        self.heap.touch_at(pool_index);
736    }
737
738    // ========== Full Cost Calculation ==========
739
    /// Compute the complete time cost for a context, matching C++ implementation.
    ///
    /// C++ `FullContextNode::time_cost()` includes:
    /// 1. time_taken (base computation cost)
    /// 2. forward_uf.value() (propagated forward cost)
    /// 3. backward_uf.value() (propagated backward cost)
    /// 4. backedges.sum(counted) (cost from contexts depending on this one)
    /// 5. Each dependency's backward_uf (cost from dependencies)
    /// 6. Parent context's backward_uf
    ///
    /// All UF values are deduplicated using a HashSet to avoid double-counting
    /// merged sets.
    ///
    /// Note: Requires &mut self because SplayList::find_le_with_key performs splay operations.
    fn compute_full_time_cost(&mut self, ctx: &FullContext) -> crate::meter::Time {
        use std::collections::HashSet;
        use crate::union_find::UF;

        // `counted` deduplicates union-find sets: `insert` returns false for a
        // set already tallied, so merged sets contribute their value only once.
        let mut counted: HashSet<UF> = HashSet::new();
        let mut cost = ctx.time_taken();

        // 1. Add forward_uf value (if not already counted)
        let forward_uf = *ctx.forward_uf();
        if counted.insert(forward_uf) {
            cost = cost + forward_uf.value();
        }

        // 2. Add backward_uf value (if not already counted)
        let backward_uf = *ctx.backward_uf();
        if counted.insert(backward_uf) {
            cost = cost + backward_uf.value();
        }

        // 3. Add backedges sum (contexts that depend on this one)
        if let Some(ref mut backedges_map) = self.backedges
            && let Some(backedges) = backedges_map.get_mut(&ctx.start_tock())
        {
            cost = cost + backedges.sum_excluding(&mut counted);
        }

        // 4. Add each dependency's backward_uf
        // Clone dependencies to avoid borrow issues with self.timeline
        let dependencies: Vec<_> = ctx.dependencies().to_vec();
        for dep_tock in dependencies {
            if let Some((_, dep_ctx)) = self.timeline.find_le_with_key(&dep_tock)
                && let Some(full_ctx) = dep_ctx.as_any().downcast_ref::<FullContext>()
            {
                let dep_backward_uf = *full_ctx.backward_uf();
                if counted.insert(dep_backward_uf) {
                    cost = cost + dep_backward_uf.value();
                }
            }
        }

        // 5. Add parent context's backward_uf
        // NOTE(review): assumes Tock wraps a signed integer so that a start_tock
        // of 0 yields parent -1, failing the `>= Tock(0)` guard — confirm.
        let parent_tock = crate::tock::Tock(ctx.start_tock().0 - 1);
        if parent_tock >= crate::tock::Tock(0)
            && let Some((_, parent_ctx)) = self.timeline.find_le_with_key(&parent_tock)
            && let Some(full_ctx) = parent_ctx.as_any().downcast_ref::<FullContext>()
        {
            let parent_backward_uf = *full_ctx.backward_uf();
            if counted.insert(parent_backward_uf) {
                cost = cost + parent_backward_uf.value();
            }
        }

        cost
    }
808
809    /// Compute full eviction cost using the metric function.
810    ///
811    /// This is the complete cost calculation matching C++ behavior.
812    /// Note: Requires &mut self because it calls compute_full_time_cost.
813    #[allow(dead_code)]  // Available for testing/debugging
814    fn compute_full_cost(&mut self, ctx: &FullContext) -> crate::config::Cost {
815        let time_cost = self.compute_full_time_cost(ctx);
816        (self.config.metric)(ctx.time_taken(), time_cost, ctx.space())
817    }
818
819    // ========== Eviction ==========
820
821    /// Evict one context from the heap.
822    /// `protected_tock` specifies a context start_tock that should be skipped.
823    pub fn evict_one_with_protection(&mut self, protected_tock: Option<Tock>) -> bool {
824        let metric = self.config.metric;
825
826        // Pop contexts from heap, skipping protected one
827        // Track if we've already skipped the protected context once
828        // to prevent infinite loop when it's the only context in the heap
829        let mut skipped_protected = false;
830
831        let result = loop {
832            let handle = match self.heap.pop_with_cost_check_notify(|h| h.context.cost(metric)) {
833                Some(h) => h,
834                None => break None,
835            };
836
837            // Check if this context is protected
838            if let Some(pt) = protected_tock
839                && handle.context.start_tock() == pt
840            {
841                // If we've already skipped this context once, it means it's the only
842                // context in the heap. Return None to avoid infinite loop.
843                if skipped_protected {
844                    // Put it back and return None - can't evict anything
845                    let cost = handle.context.cost(metric);
846                    self.heap.push_notify(handle, cost);
847                    break None;
848                }
849                // First time seeing this protected context - skip it
850                skipped_protected = true;
851                let cost = handle.context.cost(metric);
852                self.heap.push_notify(handle, cost);
853                continue;
854            }
855
856            break Some(handle);
857        };
858
859        if let Some(handle) = result {
860            // Decrease total space used (before evict clears the space)
861            let space_freed = handle.context.space().as_bytes();
862            self.total_space_used = self.total_space_used.saturating_sub(space_freed);
863
864            // Create UF with this context's time cost for propagation
865            let cost_uf = crate::union_find::UF::new(handle.context.time_taken());
866
867            // Propagate cost to dependencies' backward_uf
868            // When evicting A, all contexts that A depends on get increased backward cost
869            // (because replaying A will need to access them)
870            for &dep_tock in handle.context.dependencies() {
871                if let Some((_, dep_ctx)) = self.timeline.find_le_with_key(&dep_tock)
872                    && let Some(full_ctx) = dep_ctx.as_any().downcast_ref::<FullContext>()
873                {
874                    full_ctx.backward_uf().merge(&cost_uf);
875                }
876            }
877
878            // Propagate cost to backedges' forward_uf (O(1) merge operation)
879            // When evicting A, all contexts that depend on A get increased forward cost.
880            // Backedges are stored in Runtime's HashMap.
881            let ctx_start = handle.context.start_tock();
882            if let Some(ref backedges_map) = self.backedges
883                && let Some(backedges) = backedges_map.get(&ctx_start)
884            {
885                backedges.merge_all(&cost_uf);
886            }
887
888            // Propagate cost to parent context's backward_uf
889            let parent_tock = crate::tock::Tock(handle.context.start_tock().0 - 1);
890            if parent_tock >= crate::tock::Tock(0)
891                && let Some((_, parent_ctx)) = self.timeline.find_le_with_key(&parent_tock)
892                && let Some(full_ctx) = parent_ctx.as_any().downcast_ref::<FullContext>()
893            {
894                full_ctx.backward_uf().merge(&cost_uf);
895            }
896
897            // Evict the context itself (clears values and merges internal UFs)
898            handle.context.evict();
899
900            // Remove backedges for the evicted context
901            if let Some(ref mut backedges_map) = self.backedges {
902                backedges_map.remove(&ctx_start);
903            }
904
905            // Remove from timeline to avoid metadata leaks (O(N) memory).
906            // Replay will find the parent context via find_le_with_key.
907            self.remove_context(handle.context.start_tock());
908            true
909        } else {
910            false
911        }
912    }
913
    /// Get total space used by all contexts in the heap (O(1)).
    ///
    /// Maintained incrementally (`total_space_used` is decreased on
    /// eviction via `saturating_sub`), so no traversal is needed.
    pub fn total_space(&self) -> usize {
        self.total_space_used
    }
918
    /// Check memory limit and evict if necessary.
    /// This is called after creating new zombie values.
    ///
    /// Convenience wrapper: same as `maybe_evict_with_protection(None)`,
    /// i.e. no context is protected from eviction.
    #[inline]
    pub fn maybe_evict(&mut self) {
        self.maybe_evict_with_protection(None);
    }
925
926    /// Check memory limit and evict if necessary, with a protected context.
927    /// The context with start_tock = protected_tock will not be evicted.
928    pub fn maybe_evict_with_protection(&mut self, protected_tock: Option<Tock>) {
929
930        if let Some(limit) = self.config.memory_limit {
931            while self.total_space_used > limit && self.evict_one_with_protection(protected_tock) {
932                // Keep evicting until under limit or no more to evict
933            }
934        }
935    }
936
    /// Evict one context (convenience wrapper).
    ///
    /// Same as `evict_one_with_protection(None)`; returns whether a
    /// context was actually evicted.
    #[inline]
    pub fn evict_one(&mut self) -> bool {
        self.evict_one_with_protection(None)
    }
942
943    // ========== Meter ==========
944
    /// Get meter reference.
    ///
    /// Read-only borrow of the runtime's [`ZombieMeter`].
    pub fn meter(&self) -> &ZombieMeter {
        &self.meter
    }
949
    /// Get meter mutable reference.
    ///
    /// Mutable borrow of the runtime's [`ZombieMeter`].
    pub fn meter_mut(&mut self) -> &mut ZombieMeter {
        &mut self.meter
    }
954
955    // ========== Stats ==========
956
    /// Number of contexts in tock tree.
    ///
    /// Equals the number of entries in the timeline (SplayList).
    pub fn context_count(&self) -> usize {
        self.timeline.len()
    }
961
962    /// Estimated metadata memory usage in bytes.
963    ///
964    /// This includes:
965    /// - Contexts (FullContext, RootContext) with their SmallVecs
966    /// - GD heap entries
967    /// - Tock tree (SplayList) nodes
968    ///
969    /// Does NOT include user data (the actual Zombie values).
970    pub fn metadata_bytes(&self) -> usize {
971        use std::mem::size_of;
972
973        // Use actual struct sizes - SmallVec vs Vec difference is reflected here
974        let context_size = size_of::<FullContext>(); // Includes SmallVec inline storage
975        let context_overhead = self.timeline.len() * context_size;
976
977        // Heap entry: (Cost, Weak<FullContext>) ≈ 24 bytes
978        let heap_overhead = self.heap.len() * 24;
979
980        // SplayList node overhead (ptr + key + value ref)
981        let splay_overhead = self.timeline.len() * 48;
982
983        // Record: RecordNode enum with Replayer reference
984        let record_overhead = self.records.len() * 64;
985
986        context_overhead + heap_overhead + splay_overhead + record_overhead
987    }
988
    /// Number of entries in the GD heap.
    ///
    /// One entry per context currently tracked by the eviction heap.
    pub fn heap_len(&self) -> usize {
        self.heap.len()
    }
993}
994
995impl Default for Runtime {
996    fn default() -> Self {
997        Self::new()
998    }
999}
1000
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tick() {
        let mut rt = Runtime::new();
        // tick() hands out the current tock, then advances the clock.
        assert_eq!(rt.tick(), Tock(1));
        assert_eq!(rt.tick(), Tock(2));
        assert_eq!(rt.current_tock(), Tock(3));
    }

    #[test]
    fn test_thread_local() {
        Runtime::init();
        Runtime::with_mut(|rt| {
            assert_eq!(rt.tick(), Tock(1));
        });
        Runtime::with(|rt| {
            assert_eq!(rt.current_tock(), Tock(2));
        });
    }

    #[test]
    fn test_replay_depth() {
        let rt = Runtime::new();
        // A fresh runtime carries exactly the default replay state.
        assert_eq!(rt.replay_depth(), 1);
    }

    // =========================================================================
    // Edge Case Tests
    // =========================================================================

    /// Operations on a runtime with an empty timeline must not panic.
    #[test]
    fn test_empty_timeline_operations() {
        let mut rt = Runtime::new();

        // lookup_node_at misses for any key.
        assert!(rt.lookup_node_at(Tock(0)).is_none());
        assert!(rt.lookup_node_at(Tock(100)).is_none());

        // Context queries also find nothing.
        assert!(rt.find_context_with_key(Tock(0)).is_none());
        assert!(rt.find_replay_context(Tock(0)).is_none());
        assert!(rt.get_weak(Tock(0)).is_none());

        // Removing a non-existent context is a no-op, not a panic.
        rt.remove_context(Tock(0));

        // Stats report an empty runtime.
        assert_eq!(rt.context_count(), 0);
        assert_eq!(rt.total_space(), 0);
    }

    /// Lookups and clock operations at tock boundary values.
    #[test]
    fn test_tock_boundary_values() {
        let mut rt = Runtime::new();

        // Smallest and largest keys both miss on an empty timeline.
        assert!(rt.lookup_node_at(Tock(0)).is_none());
        assert!(rt.lookup_node_at(Tock::MAX).is_none());

        // set_tock / current_tock round-trip.
        rt.set_tock(Tock(100));
        assert_eq!(rt.current_tock(), Tock(100));

        // tick() returns the current tock and advances past it.
        assert_eq!(rt.tick(), Tock(100));
        assert_eq!(rt.current_tock(), Tock(101));
    }

    /// The record stack always holds at least the root record.
    #[test]
    fn test_record_stack_invariant() {
        let rt = Runtime::new();

        let rec = rt.current_record();
        // The root record begins at Tock(0) ...
        assert_eq!(rec.start_tock(), Tock(0));
        // ... and is neither a head nor a value record.
        assert!(!rec.is_head());
        assert!(!rec.is_value());
    }

    /// Memory-limit presence is reflected by has_memory_limit().
    #[test]
    fn test_memory_limit_config() {
        use crate::config::ZombieConfig;

        // Default config: unlimited.
        let rt = Runtime::with_config(ZombieConfig::default());
        assert!(!rt.has_memory_limit());

        // Explicit limit: reported as limited.
        let limited = ZombieConfig::default().with_memory_limit(1024 * 1024);
        let rt = Runtime::with_config(limited);
        assert!(rt.has_memory_limit());
    }

    /// Repeated thread-local initialization is harmless.
    #[test]
    fn test_init_idempotent() {
        Runtime::init();
        Runtime::init();
        Runtime::init();

        // The runtime remains usable afterwards.
        Runtime::with(|rt| {
            assert!(rt.current_tock() >= Tock(1));
        });
    }

    /// metadata_bytes() reports a non-zero estimate even when fresh.
    #[test]
    fn test_metadata_bytes() {
        let rt = Runtime::new();
        // The record stack alone contributes some overhead.
        assert!(rt.metadata_bytes() > 0);
    }

    /// try_with returns None on a thread where Runtime::init never ran.
    #[test]
    fn test_try_with_uninitialized() {
        let child = std::thread::spawn(|| {
            // Note: Runtime::new() calls UFArena::init(), but the
            // thread-local Runtime only exists once Runtime::init() has
            // been called — so try_with must still report None here.
            Runtime::try_with(|_| 42).is_none()
        });

        let uninitialized = child.join().expect("Thread should not panic");
        assert!(uninitialized, "try_with should return None in uninitialized thread");
    }

    /// Pushing and popping replay states adjusts the replay stack.
    #[test]
    fn test_replay_stack_operations() {
        let mut rt = Runtime::new();

        // One sentinel entry (target = MAX) exists from the start.
        assert_eq!(rt.replays.len(), 1);
        assert_eq!(rt.replays[0].target, Tock::MAX);

        // Push a replay targeting Tock(100).
        let pushed = ReplayState::new(Tock(100), Rc::new(RefCell::new(None)));
        rt.replays.push(pushed);
        assert_eq!(rt.replays.len(), 2);

        // Pop it again.
        rt.replays.pop();
        assert_eq!(rt.replays.len(), 1);
    }

    /// should_unroll() is false outside of tail-call records.
    #[test]
    fn test_should_unroll() {
        use crate::config::ZombieConfig;

        // Default config (unroll_factor = 32): the root record is NOT a
        // tailcall, so should_unroll returns false regardless of tock
        // delta. This matches C++ behavior: unrolling only happens
        // inside TailCall contexts.
        let rt = Runtime::with_config(ZombieConfig::default());
        assert!(!rt.should_unroll());

        // Even with unroll_factor = 0, a non-tailcall record keeps
        // should_unroll() false.
        let rt = Runtime::with_config(ZombieConfig::default().with_unroll_factor(0));
        assert!(!rt.should_unroll());
    }

    // =========================================================================
    // Additional Edge Case Tests
    // =========================================================================

    /// find_context_with_key is a find-le lookup over context start tocks.
    #[test]
    fn test_timeline_find_boundary() {
        use crate::context::RootContext;

        let mut rt = Runtime::new();

        // Insert a context starting at Tock(10).
        let ctx = RootContext::new(Tock(10));
        rt.insert_context(Tock(10), ctx.clone());

        // An exact key hits the context.
        let found = rt.find_context_with_key(Tock(10));
        assert!(found.is_some());
        assert_eq!(found.unwrap().0, Tock(10));

        // A later key still resolves to the same context (find_le).
        let found = rt.find_context_with_key(Tock(12));
        assert!(found.is_some());
        assert_eq!(found.unwrap().0, Tock(10));

        // A key before every context finds nothing.
        let found = rt.find_context_with_key(Tock(5));
        assert!(found.is_none());
    }

    /// lookup_node_at misses for any tock when the timeline is empty.
    #[test]
    fn test_lookup_node_out_of_range() {
        let mut rt = Runtime::new();

        assert!(rt.lookup_node_at(Tock(0)).is_none());
        assert!(rt.lookup_node_at(Tock(100)).is_none());
        assert!(rt.lookup_node_at(Tock::MAX).is_none());
    }

    /// at_replay_target tracks the target of the innermost replay state.
    #[test]
    fn test_at_replay_target() {
        let mut rt = Runtime::new();

        // Sentinel state: target is MAX.
        assert!(rt.at_replay_target(Tock::MAX));
        assert!(!rt.at_replay_target(Tock(100)));

        // A pushed replay with target 100 becomes the active target.
        rt.push_replay(Tock(100), Rc::new(RefCell::new(None)));
        assert!(rt.at_replay_target(Tock(100)));
        assert!(!rt.at_replay_target(Tock(50)));
        assert!(!rt.at_replay_target(Tock::MAX));

        // Popping restores the sentinel.
        rt.pop_replay();
        assert!(rt.at_replay_target(Tock::MAX));
    }

    /// total_space starts at zero even with a memory limit configured.
    #[test]
    fn test_total_space_tracking() {
        use crate::config::ZombieConfig;

        let config = ZombieConfig::default().with_memory_limit(1024 * 1024);
        let rt = Runtime::with_config(config);
        assert_eq!(rt.total_space(), 0);
    }
}
1267