// oxilean_runtime/rc/types.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use std::cell::Cell;
6use std::collections::HashMap;
7
8/// An observer that logs RC events.
9#[allow(dead_code)]
10pub struct RcObserver {
11    events: Vec<RcEvent>,
12    max_events: usize,
13}
14#[allow(dead_code)]
15impl RcObserver {
16    /// Create a new observer.
17    pub fn new(max_events: usize) -> Self {
18        Self {
19            events: Vec::new(),
20            max_events,
21        }
22    }
23    /// Record an event.
24    pub fn record(&mut self, id: u64, kind: RcEventKind, count_after: u64) {
25        if self.events.len() >= self.max_events {
26            self.events.remove(0);
27        }
28        self.events.push(RcEvent {
29            id,
30            kind,
31            count_after,
32        });
33    }
34    /// Get all events.
35    pub fn events(&self) -> &[RcEvent] {
36        &self.events
37    }
38    /// Count events of a specific kind.
39    pub fn count_kind(&self, kind: &RcEventKind) -> usize {
40        self.events.iter().filter(|e| &e.kind == kind).count()
41    }
42    /// Number of Drop events.
43    pub fn drop_count(&self) -> usize {
44        self.count_kind(&RcEventKind::Drop)
45    }
46    /// Number of Alloc events.
47    pub fn alloc_count(&self) -> usize {
48        self.count_kind(&RcEventKind::Alloc)
49    }
50    /// Clear the event log.
51    pub fn clear(&mut self) {
52        self.events.clear();
53    }
54}
55/// Results of RC elision analysis for a function.
56#[derive(Clone, Debug)]
57pub struct RcElisionAnalysis {
58    /// Hints for each variable in the function.
59    pub variable_hints: HashMap<String, RcElisionHint>,
60    /// Total number of RC operations that can be elided.
61    pub elided_ops: u32,
62    /// Total number of RC operations remaining.
63    pub remaining_ops: u32,
64    /// Whether the function is fully linear (no RC needed).
65    pub fully_linear: bool,
66}
67impl RcElisionAnalysis {
68    /// Create a new empty analysis.
69    pub fn new() -> Self {
70        RcElisionAnalysis {
71            variable_hints: HashMap::new(),
72            elided_ops: 0,
73            remaining_ops: 0,
74            fully_linear: true,
75        }
76    }
77    /// Add a hint for a variable.
78    pub fn add_hint(&mut self, var: String, hint: RcElisionHint) {
79        if hint == RcElisionHint::None {
80            self.fully_linear = false;
81        }
82        self.variable_hints.insert(var, hint);
83    }
84    /// Get the hint for a variable.
85    pub fn get_hint(&self, var: &str) -> RcElisionHint {
86        self.variable_hints
87            .get(var)
88            .cloned()
89            .unwrap_or(RcElisionHint::None)
90    }
91    /// Calculate the elision ratio.
92    pub fn elision_ratio(&self) -> f64 {
93        let total = self.elided_ops + self.remaining_ops;
94        if total == 0 {
95            return 1.0;
96        }
97        self.elided_ops as f64 / total as f64
98    }
99    /// Merge with another analysis (for nested scopes).
100    pub fn merge(&mut self, other: &RcElisionAnalysis) {
101        for (var, hint) in &other.variable_hints {
102            let existing = self.get_hint(var);
103            let combined = existing.combine(hint);
104            self.add_hint(var.clone(), combined);
105        }
106        self.elided_ops += other.elided_ops;
107        self.remaining_ops += other.remaining_ops;
108        self.fully_linear = self.fully_linear && other.fully_linear;
109    }
110}
/// A map where values are reference counted; removing all refs drops the value.
#[allow(dead_code)]
pub struct RefcountedMap<K: Eq + std::hash::Hash + Clone, V: Clone> {
    /// Each entry pairs the stored value with its current refcount.
    map: HashMap<K, (V, u32)>,
}
#[allow(dead_code)]
impl<K: Eq + std::hash::Hash + Clone, V: Clone> RefcountedMap<K, V> {
    /// Create an empty map.
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
        }
    }
    /// Insert a value with refcount 1 (replacing any previous entry).
    pub fn insert(&mut self, key: K, value: V) {
        self.map.insert(key, (value, 1));
    }
    /// Increment the refcount of a key (saturating; no-op for absent keys).
    pub fn inc_ref(&mut self, key: &K) {
        if let Some(entry) = self.map.get_mut(key) {
            entry.1 = entry.1.saturating_add(1);
        }
    }
    /// Decrement the refcount. Returns true if the value was dropped.
    pub fn dec_ref(&mut self, key: &K) -> bool {
        let dropped = match self.map.get_mut(key) {
            Some((_, rc)) if *rc > 0 => {
                *rc -= 1;
                *rc == 0
            }
            _ => false,
        };
        if dropped {
            self.map.remove(key);
        }
        dropped
    }
    /// Get a reference to the value.
    pub fn get(&self, key: &K) -> Option<&V> {
        self.map.get(key).map(|entry| &entry.0)
    }
    /// Refcount of a key (0 if absent).
    pub fn refcount(&self, key: &K) -> u32 {
        match self.map.get(key) {
            Some((_, rc)) => *rc,
            None => 0,
        }
    }
    /// Number of live entries.
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Whether the map is empty.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
}
/// The kind of reference-counting event recorded in an `RcEvent`.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcEventKind {
    /// Reference count was incremented.
    Inc,
    /// Reference count was decremented.
    Dec,
    /// Object was dropped.
    Drop,
    /// Object was allocated.
    Alloc,
}
172/// A reference counter that saturates at `MAX` (immortal objects).
173#[allow(dead_code)]
174#[derive(Clone, Debug, PartialEq, Eq)]
175pub struct StickyRc {
176    pub(super) count: u32,
177    pub(super) max: u32,
178}
179#[allow(dead_code)]
180impl StickyRc {
181    /// Create a counter with `initial` count and given `max`.
182    pub fn new(initial: u32, max: u32) -> Self {
183        Self {
184            count: initial.min(max),
185            max,
186        }
187    }
188    /// Increment (saturates at `max`).
189    pub fn inc(&mut self) {
190        if self.count < self.max {
191            self.count += 1;
192        }
193    }
194    /// Decrement (saturates at 0). Returns true if now zero.
195    pub fn dec(&mut self) -> bool {
196        if self.count > 0 {
197            self.count -= 1;
198        }
199        self.count == 0
200    }
201    /// Whether at max (immortal).
202    pub fn is_immortal(&self) -> bool {
203        self.count >= self.max
204    }
205    /// Current count.
206    pub fn count(&self) -> u32 {
207        self.count
208    }
209    /// Whether the count is zero.
210    pub fn is_zero(&self) -> bool {
211        self.count == 0
212    }
213}
/// The inner data shared between all `Rc` references.
pub(super) struct RcInner<T> {
    /// The actual value.
    pub(super) value: T,
    /// Weak reference count (strong count is tracked by the outer std::rc::Rc).
    weak_count: Cell<u32>,
}
impl<T> RcInner<T> {
    /// Wrap `value` with a weak count of zero.
    fn new(value: T) -> Self {
        RcInner {
            value,
            weak_count: Cell::new(0),
        }
    }
}
229/// An atomic reference-counted pointer for shared data.
230///
231/// This is similar to `std::sync::Arc` but with additional features
232/// for the OxiLean runtime.
233pub struct RtArc<T> {
234    /// Shared inner data; uses std::sync::Arc for proper reference-count sharing.
235    pub(super) inner: std::sync::Arc<ArcInner<T>>,
236}
237impl<T> RtArc<T> {
238    /// Create a new `RtArc` wrapping the given value.
239    pub fn new(value: T) -> Self {
240        RtArc {
241            inner: std::sync::Arc::new(ArcInner::new(value)),
242        }
243    }
244    /// Get the current strong reference count.
245    pub fn strong_count(&self) -> u32 {
246        std::sync::Arc::strong_count(&self.inner) as u32
247    }
248    /// Get the current weak reference count.
249    pub fn weak_count(&self) -> u32 {
250        self.inner
251            .weak_count
252            .load(std::sync::atomic::Ordering::Acquire)
253    }
254    /// Check if this is the only strong reference.
255    pub fn is_unique(&self) -> bool {
256        self.strong_count() == 1
257    }
258    /// Get a reference to the inner value.
259    #[allow(clippy::should_implement_trait)]
260    pub fn as_ref(&self) -> &T {
261        &self.inner.value
262    }
263    /// Try to get a mutable reference if this is the only strong reference.
264    pub fn get_mut(&mut self) -> Option<&mut T> {
265        if self.weak_count() == 0 {
266            std::sync::Arc::get_mut(&mut self.inner).map(|r| &mut r.value)
267        } else {
268            None
269        }
270    }
271    /// Try to unwrap the value if this is the only reference.
272    pub fn try_unwrap(self) -> Result<T, Self> {
273        if self.weak_count() == 0 {
274            match std::sync::Arc::try_unwrap(self.inner) {
275                Ok(inner) => Ok(inner.value),
276                Err(inner) => Err(RtArc { inner }),
277            }
278        } else {
279            Err(self)
280        }
281    }
282    /// Create a clone sharing the same allocation (increments strong count).
283    pub fn clone_arc(&self) -> Self {
284        RtArc {
285            inner: std::sync::Arc::clone(&self.inner),
286        }
287    }
288}
289/// The inner data shared between all `RtArc` references.
290pub(super) struct ArcInner<T> {
291    /// The actual value.
292    pub(super) value: T,
293    /// Weak reference count (strong count is tracked by the outer std::sync::Arc).
294    weak_count: std::sync::atomic::AtomicU32,
295}
296impl<T> ArcInner<T> {
297    fn new(value: T) -> Self {
298        ArcInner {
299            value,
300            weak_count: std::sync::atomic::AtomicU32::new(0),
301        }
302    }
303}
304/// Compact reference count tracking using a bitmask.
305/// Supports up to 64 objects, each with a 1-bit "alive" flag.
306#[allow(dead_code)]
307pub struct RcBitmask {
308    pub(super) mask: u64,
309}
310#[allow(dead_code)]
311impl RcBitmask {
312    /// Create an empty bitmask (all dead).
313    pub fn new() -> Self {
314        Self { mask: 0 }
315    }
316    /// Mark slot `i` as alive (0 <= i < 64).
317    pub fn set_alive(&mut self, i: u32) {
318        debug_assert!(i < 64);
319        self.mask |= 1u64 << i;
320    }
321    /// Mark slot `i` as dead.
322    pub fn set_dead(&mut self, i: u32) {
323        debug_assert!(i < 64);
324        self.mask &= !(1u64 << i);
325    }
326    /// Check if slot `i` is alive.
327    pub fn is_alive(&self, i: u32) -> bool {
328        (self.mask >> i) & 1 == 1
329    }
330    /// Count alive slots.
331    pub fn alive_count(&self) -> u32 {
332        self.mask.count_ones()
333    }
334    /// Count dead slots.
335    pub fn dead_count(&self) -> u32 {
336        self.mask.count_zeros()
337    }
338    /// Find the first dead slot (for allocation).
339    pub fn first_dead(&self) -> Option<u32> {
340        let inv = !self.mask;
341        if inv == 0 {
342            None
343        } else {
344            Some(inv.trailing_zeros())
345        }
346    }
347    /// Find the first alive slot.
348    pub fn first_alive(&self) -> Option<u32> {
349        if self.mask == 0 {
350            None
351        } else {
352            Some(self.mask.trailing_zeros())
353        }
354    }
355    /// Raw bitmask value.
356    pub fn raw(&self) -> u64 {
357        self.mask
358    }
359}
/// Borrow state for runtime borrow checking.
///
/// This tracks whether a value is currently borrowed mutably or immutably,
/// similar to `RefCell` but for the runtime system. Transitions are driven
/// by `BorrowFlag`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BorrowState {
    /// Value is not borrowed.
    Unborrowed,
    /// Value is borrowed immutably N times (N >= 1 in practice).
    ImmutableBorrow(u32),
    /// Value is borrowed mutably (exclusive).
    MutableBorrow,
}
/// A node in the RC graph.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct RcGraphNode {
    /// Node identifier.
    pub id: u32,
    /// Per-node payload (opaque here; semantics defined by the consumer).
    pub data: u64,
    /// IDs of the nodes this node points to.
    pub out_edges: Vec<u32>,
}
/// Index into an `RcPool`.
///
/// Wraps the slot position; stable for the lifetime of the entry, but slots
/// are reused after the refcount drops to zero.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct RcPoolIdx(pub usize);
385/// A copy-on-write smart pointer.
386///
387/// `CowBox<T>` wraps a value that is shared (read-only) until a mutation
388/// is requested, at which point it copies the value to create a unique owner.
389pub struct CowBox<T> {
390    /// The inner RC.
391    pub(super) inner: Rc<T>,
392    /// Whether a COW copy has been made.
393    pub(super) copied: Cell<bool>,
394}
395impl<T: Clone> CowBox<T> {
396    /// Create a new CowBox.
397    pub fn new(value: T) -> Self {
398        CowBox {
399            inner: Rc::new(value),
400            copied: Cell::new(false),
401        }
402    }
403    /// Get a read-only reference.
404    #[allow(clippy::should_implement_trait)]
405    pub fn as_ref(&self) -> &T {
406        self.inner.as_ref()
407    }
408    /// Get a mutable reference, copying if necessary.
409    #[allow(clippy::should_implement_trait)]
410    pub fn as_mut(&mut self) -> &mut T {
411        if !self.inner.is_unique() {
412            let new_value = self.inner.as_ref().clone();
413            self.inner = Rc::new(new_value);
414            self.copied.set(true);
415        }
416        self.inner
417            .get_mut()
418            .unwrap_or_else(|| unreachable!("COW clone guarantees unique ownership before get_mut"))
419    }
420    /// Check if a COW copy has been made.
421    pub fn was_copied(&self) -> bool {
422        self.copied.get()
423    }
424    /// Check if this is the unique owner.
425    pub fn is_unique(&self) -> bool {
426        self.inner.is_unique()
427    }
428    /// Unwrap the value, cloning if not unique.
429    pub fn into_owned(self) -> T {
430        match self.inner.try_unwrap() {
431            Ok(v) => v,
432            Err(rc) => rc.as_ref().clone(),
433        }
434    }
435}
/// Records an ownership transfer event.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct OwnershipEvent {
    /// Identifier of the transferred object.
    pub object_id: u64,
    /// Name of the previous owner.
    pub from_owner: String,
    /// Name of the new owner.
    pub to_owner: String,
    /// Caller-supplied timestamp.
    pub timestamp: u64,
}
/// A log of ownership transfer events for debugging.
///
/// Holds at most `max_events` entries; the oldest entry is evicted first.
#[allow(dead_code)]
pub struct OwnershipLog {
    /// Stored events, oldest first.
    events: Vec<OwnershipEvent>,
    /// Capacity bound for the log.
    max_events: usize,
}
#[allow(dead_code)]
impl OwnershipLog {
    /// Create a log retaining at most `max_events` entries.
    pub fn new(max_events: usize) -> Self {
        Self {
            events: Vec::new(),
            max_events,
        }
    }
    /// Record a transfer, evicting the oldest entry when full.
    pub fn record_transfer(&mut self, object_id: u64, from: &str, to: &str, ts: u64) {
        let event = OwnershipEvent {
            object_id,
            from_owner: from.to_string(),
            to_owner: to.to_string(),
            timestamp: ts,
        };
        if self.events.len() >= self.max_events {
            self.events.remove(0);
        }
        self.events.push(event);
    }
    /// Events for a specific object, oldest first.
    pub fn events_for(&self, object_id: u64) -> Vec<&OwnershipEvent> {
        let mut matching = Vec::new();
        for event in &self.events {
            if event.object_id == object_id {
                matching.push(event);
            }
        }
        matching
    }
    /// Total events currently stored.
    pub fn len(&self) -> usize {
        self.events.len()
    }
    /// Whether no events are stored.
    pub fn is_empty(&self) -> bool {
        self.events.is_empty()
    }
}
/// Policy for how reference counting behaves.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcPolicy {
    /// Standard reference counting (increment on share, decrement on drop).
    Standard,
    /// Deferred reference counting (batch decrements at scope boundaries).
    Deferred,
    /// Aggressive elision (skip RC for provably linear values).
    AggressiveElision,
    /// No reference counting (for debugging, everything leaks).
    Disabled,
}
impl RcPolicy {
    /// True for the deferred-decrement policy.
    pub fn is_deferred(&self) -> bool {
        *self == RcPolicy::Deferred
    }
    /// True for policies that allow eliding RC operations.
    pub fn allows_elision(&self) -> bool {
        *self == RcPolicy::AggressiveElision || *self == RcPolicy::Deferred
    }
    /// True for every policy except `Disabled`.
    pub fn is_enabled(&self) -> bool {
        *self != RcPolicy::Disabled
    }
}
/// A node in the GC graph.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct GcNode {
    /// Direct references to other nodes (by node ID).
    pub refs: Vec<u32>,
    /// Whether this node is a GC root.
    pub is_root: bool,
    /// Whether the node has been marked (reachable from roots);
    /// set/cleared by `GcTracer::mark`/`collect`.
    pub marked: bool,
}
/// A log entry for RC changes.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct RcEvent {
    /// Identifier of the object the event applies to.
    pub id: u64,
    /// What happened (inc/dec/drop/alloc).
    pub kind: RcEventKind,
    /// Reference count after the event took effect.
    pub count_after: u64,
}
/// A weak reference for atomic reference counting.
///
/// NOTE(review): unlike `std::sync::Weak`, this type stores its own clone of
/// the value, and `upgrade` allocates a brand-new `RtArc` around that clone
/// rather than reviving the original allocation — confirm this aliasing
/// behavior is intended.
pub struct ArcWeak<T> {
    /// Whether the value is still alive (cleared by `invalidate`).
    pub(super) alive: std::sync::atomic::AtomicBool,
    /// A copy of the value for upgrade attempts.
    value: Option<T>,
}
impl<T: Clone + Send + Sync> ArcWeak<T> {
    /// Create a new weak reference from a strong reference.
    ///
    /// Bumps the source's weak count (the Release here pairs with the Acquire
    /// load in `RtArc::weak_count`). NOTE(review): nothing ever decrements
    /// that count — `ArcWeak` has no `Drop` impl — so the source `RtArc` will
    /// refuse `get_mut`/`try_unwrap` permanently afterwards; verify intended.
    pub fn from_arc(arc: &RtArc<T>) -> Self {
        arc.inner
            .weak_count
            .fetch_add(1, std::sync::atomic::Ordering::Release);
        ArcWeak {
            alive: std::sync::atomic::AtomicBool::new(true),
            value: Some(arc.inner.value.clone()),
        }
    }
    /// Try to upgrade to a strong reference.
    ///
    /// Returns a freshly allocated `RtArc` wrapping a clone of the stored
    /// value, or `None` once `invalidate` has been called.
    pub fn upgrade(&self) -> Option<RtArc<T>> {
        if self.alive.load(std::sync::atomic::Ordering::Acquire) {
            self.value.as_ref().map(|v| RtArc::new(v.clone()))
        } else {
            None
        }
    }
    /// Check if the referenced value is still alive (i.e. not invalidated).
    pub fn is_alive(&self) -> bool {
        self.alive.load(std::sync::atomic::Ordering::Acquire)
    }
    /// Mark as dead; subsequent `upgrade` calls return `None`.
    pub fn invalidate(&self) {
        self.alive
            .store(false, std::sync::atomic::Ordering::Release);
    }
}
569/// A borrow flag for tracking runtime borrows.
570pub struct BorrowFlag {
571    /// Current borrow state.
572    state: Cell<BorrowState>,
573}
574impl BorrowFlag {
575    /// Create a new unborrowed flag.
576    pub fn new() -> Self {
577        BorrowFlag {
578            state: Cell::new(BorrowState::Unborrowed),
579        }
580    }
581    /// Try to acquire an immutable borrow.
582    pub fn try_borrow(&self) -> bool {
583        match self.state.get() {
584            BorrowState::Unborrowed => {
585                self.state.set(BorrowState::ImmutableBorrow(1));
586                true
587            }
588            BorrowState::ImmutableBorrow(n) => {
589                self.state.set(BorrowState::ImmutableBorrow(n + 1));
590                true
591            }
592            BorrowState::MutableBorrow => false,
593        }
594    }
595    /// Release an immutable borrow.
596    pub fn release_borrow(&self) {
597        match self.state.get() {
598            BorrowState::ImmutableBorrow(1) => {
599                self.state.set(BorrowState::Unborrowed);
600            }
601            BorrowState::ImmutableBorrow(n) if n > 1 => {
602                self.state.set(BorrowState::ImmutableBorrow(n - 1));
603            }
604            _ => {}
605        }
606    }
607    /// Try to acquire a mutable borrow.
608    pub fn try_borrow_mut(&self) -> bool {
609        match self.state.get() {
610            BorrowState::Unborrowed => {
611                self.state.set(BorrowState::MutableBorrow);
612                true
613            }
614            _ => false,
615        }
616    }
617    /// Release a mutable borrow.
618    pub fn release_borrow_mut(&self) {
619        if self.state.get() == BorrowState::MutableBorrow {
620            self.state.set(BorrowState::Unborrowed);
621        }
622    }
623    /// Get the current borrow state.
624    pub fn state(&self) -> BorrowState {
625        self.state.get()
626    }
627    /// Check if the value is currently borrowed.
628    pub fn is_borrowed(&self) -> bool {
629        self.state.get() != BorrowState::Unborrowed
630    }
631    /// Check if the value is mutably borrowed.
632    pub fn is_mutably_borrowed(&self) -> bool {
633        self.state.get() == BorrowState::MutableBorrow
634    }
635}
636/// A pool of reference-counted slot values, indexed by integer ID.
637#[allow(dead_code)]
638pub struct RcPool<T: Clone> {
639    slots: Vec<Option<T>>,
640    refcounts: Vec<u32>,
641    free: Vec<usize>,
642    alloc_count: u64,
643}
644#[allow(dead_code)]
645impl<T: Clone> RcPool<T> {
646    /// Create an empty pool.
647    pub fn new() -> Self {
648        Self {
649            slots: Vec::new(),
650            refcounts: Vec::new(),
651            free: Vec::new(),
652            alloc_count: 0,
653        }
654    }
655    /// Insert a value, returning an index with refcount 1.
656    pub fn insert(&mut self, value: T) -> RcPoolIdx {
657        self.alloc_count += 1;
658        if let Some(idx) = self.free.pop() {
659            self.slots[idx] = Some(value);
660            self.refcounts[idx] = 1;
661            RcPoolIdx(idx)
662        } else {
663            let idx = self.slots.len();
664            self.slots.push(Some(value));
665            self.refcounts.push(1);
666            RcPoolIdx(idx)
667        }
668    }
669    /// Increment the refcount of an index.
670    pub fn inc_ref(&mut self, idx: RcPoolIdx) {
671        if let Some(rc) = self.refcounts.get_mut(idx.0) {
672            *rc = rc.saturating_add(1);
673        }
674    }
675    /// Decrement the refcount. Returns `true` if the object was freed.
676    pub fn dec_ref(&mut self, idx: RcPoolIdx) -> bool {
677        if let Some(rc) = self.refcounts.get_mut(idx.0) {
678            if *rc == 0 {
679                return false;
680            }
681            *rc -= 1;
682            if *rc == 0 {
683                self.slots[idx.0] = None;
684                self.free.push(idx.0);
685                return true;
686            }
687        }
688        false
689    }
690    /// Get the refcount of an index.
691    pub fn refcount(&self, idx: RcPoolIdx) -> u32 {
692        self.refcounts.get(idx.0).copied().unwrap_or(0)
693    }
694    /// Get a reference to the value.
695    pub fn get(&self, idx: RcPoolIdx) -> Option<&T> {
696        self.slots.get(idx.0)?.as_ref()
697    }
698    /// Get a mutable reference (unique access).
699    pub fn get_mut(&mut self, idx: RcPoolIdx) -> Option<&mut T> {
700        if self.refcount(idx) != 1 {
701            return None;
702        }
703        self.slots.get_mut(idx.0)?.as_mut()
704    }
705    /// Clone-on-write: if refcount > 1, clone the value into a new slot.
706    pub fn cow(&mut self, idx: RcPoolIdx) -> Option<RcPoolIdx> {
707        let rc = self.refcount(idx);
708        if rc <= 1 {
709            return Some(idx);
710        }
711        let value = self.get(idx)?.clone();
712        self.dec_ref(idx);
713        Some(self.insert(value))
714    }
715    /// Number of live slots.
716    pub fn live_count(&self) -> usize {
717        self.slots.iter().filter(|s| s.is_some()).count()
718    }
719    /// Total allocation count.
720    pub fn alloc_count(&self) -> u64 {
721        self.alloc_count
722    }
723    /// Capacity.
724    pub fn capacity(&self) -> usize {
725        self.slots.len()
726    }
727}
728/// A non-atomic reference-counted pointer.
729///
730/// This is similar to `std::rc::Rc` but with additional features:
731/// - Elision hints from the compiler
732/// - Statistics tracking
733/// - Unique-owner optimization
734///
735/// ## Safety
736///
737/// This type is `!Send` and `!Sync` — it must only be used on a single thread.
738pub struct Rc<T> {
739    /// Shared inner data; uses std::rc::Rc for proper reference-count sharing.
740    pub(super) inner: std::rc::Rc<RcInner<T>>,
741}
742impl<T> Rc<T> {
743    /// Create a new `Rc` wrapping the given value.
744    pub fn new(value: T) -> Self {
745        Rc {
746            inner: std::rc::Rc::new(RcInner::new(value)),
747        }
748    }
749    /// Get the current strong reference count.
750    pub fn strong_count(&self) -> u32 {
751        std::rc::Rc::strong_count(&self.inner) as u32
752    }
753    /// Get the current weak reference count.
754    pub fn weak_count(&self) -> u32 {
755        self.inner.weak_count.get()
756    }
757    /// Check if this is the only strong reference.
758    pub fn is_unique(&self) -> bool {
759        self.strong_count() == 1
760    }
761    /// Try to get a mutable reference if this is the only strong reference.
762    pub fn get_mut(&mut self) -> Option<&mut T> {
763        if self.weak_count() == 0 {
764            std::rc::Rc::get_mut(&mut self.inner).map(|r| &mut r.value)
765        } else {
766            None
767        }
768    }
769    /// Get a reference to the inner value.
770    #[allow(clippy::should_implement_trait)]
771    pub fn as_ref(&self) -> &T {
772        &self.inner.value
773    }
774    /// Try to unwrap the value if this is the only reference.
775    pub fn try_unwrap(self) -> Result<T, Self> {
776        if self.weak_count() == 0 {
777            match std::rc::Rc::try_unwrap(self.inner) {
778                Ok(inner) => Ok(inner.value),
779                Err(inner) => Err(Rc { inner }),
780            }
781        } else {
782            Err(self)
783        }
784    }
785    /// Create a clone sharing the same allocation (increments strong count).
786    pub fn clone_rc(&self) -> Self {
787        Rc {
788            inner: std::rc::Rc::clone(&self.inner),
789        }
790    }
791    /// Increment the weak count.
792    fn inc_weak(&self) {
793        let c = self.inner.weak_count.get();
794        self.inner.weak_count.set(c.saturating_add(1));
795    }
796    /// Decrement the weak count.
797    fn dec_weak(&self) {
798        let c = self.inner.weak_count.get();
799        if c > 0 {
800            self.inner.weak_count.set(c - 1);
801        }
802    }
803}
/// Hints from the compiler about when RC operations can be elided.
///
/// During compilation, the OxiLean compiler analyzes usage patterns
/// to determine when reference counting operations can be skipped.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcElisionHint {
    /// No elision possible — standard RC behavior.
    None,
    /// Value is consumed exactly once (linear use).
    /// RC increment/decrement can both be elided.
    LinearUse,
    /// Value is created and immediately consumed (ephemeral).
    /// No RC needed at all.
    Ephemeral,
    /// Value is borrowed (read-only reference).
    /// Only the borrow needs tracking, not the object itself.
    Borrowed,
    /// Value is owned uniquely by the current scope.
    /// Mutations can be done in-place.
    UniqueOwner,
    /// Value is shared but immutable.
    /// RC operations needed but no copy-on-write.
    SharedImmutable,
    /// Value is in a tail position (will be returned).
    /// The caller's reference can be transferred.
    TailPosition,
    /// Value is a function argument that is not captured.
    /// RC can be deferred to call site.
    UncapturedArg,
    /// Value is stored in a data structure and then forgotten.
    /// RC can be merged with the structure's RC.
    StructField,
    /// Value is part of a known-dead path (e.g., unreachable branch).
    /// All RC operations can be elided.
    DeadPath,
}
impl RcElisionHint {
    /// Whether the RC increment can be skipped for this hint.
    pub fn can_elide_inc(&self) -> bool {
        match self {
            RcElisionHint::LinearUse
            | RcElisionHint::Ephemeral
            | RcElisionHint::TailPosition
            | RcElisionHint::DeadPath => true,
            _ => false,
        }
    }
    /// Whether the RC decrement can be skipped for this hint.
    pub fn can_elide_dec(&self) -> bool {
        match self {
            RcElisionHint::LinearUse | RcElisionHint::Ephemeral | RcElisionHint::DeadPath => true,
            _ => false,
        }
    }
    /// Whether this hint allows in-place mutation.
    pub fn can_mutate_inplace(&self) -> bool {
        match self {
            RcElisionHint::UniqueOwner | RcElisionHint::LinearUse => true,
            _ => false,
        }
    }
    /// Combine two hints (conservative: take the least optimistic).
    /// `DeadPath` is the identity; any other disagreement collapses to `None`.
    pub fn combine(&self, other: &RcElisionHint) -> RcElisionHint {
        match (self, other) {
            (a, b) if a == b => a.clone(),
            (RcElisionHint::DeadPath, b) => b.clone(),
            (a, RcElisionHint::DeadPath) => a.clone(),
            _ => RcElisionHint::None,
        }
    }
}
876/// A simple mark-and-sweep GC tracer over an abstract object graph.
877#[allow(dead_code)]
878pub struct GcTracer {
879    nodes: Vec<GcNode>,
880}
881#[allow(dead_code)]
882impl GcTracer {
883    /// Create an empty tracer.
884    pub fn new() -> Self {
885        Self { nodes: Vec::new() }
886    }
887    /// Add a node and return its ID.
888    pub fn add_node(&mut self, is_root: bool) -> u32 {
889        let id = self.nodes.len() as u32;
890        self.nodes.push(GcNode {
891            refs: Vec::new(),
892            is_root,
893            marked: false,
894        });
895        id
896    }
897    /// Add a reference from `from` to `to`.
898    pub fn add_ref(&mut self, from: u32, to: u32) {
899        if let Some(node) = self.nodes.get_mut(from as usize) {
900            if !node.refs.contains(&to) {
901                node.refs.push(to);
902            }
903        }
904    }
905    /// Mark all nodes reachable from roots.
906    pub fn mark(&mut self) {
907        let roots: Vec<u32> = self
908            .nodes
909            .iter()
910            .enumerate()
911            .filter(|(_, n)| n.is_root)
912            .map(|(i, _)| i as u32)
913            .collect();
914        let mut worklist = roots;
915        while let Some(id) = worklist.pop() {
916            if let Some(node) = self.nodes.get_mut(id as usize) {
917                if node.marked {
918                    continue;
919                }
920                node.marked = true;
921                let refs = node.refs.clone();
922                for next in refs {
923                    worklist.push(next);
924                }
925            }
926        }
927    }
928    /// Sweep: return IDs of unreachable (non-marked) nodes.
929    pub fn sweep(&self) -> Vec<u32> {
930        self.nodes
931            .iter()
932            .enumerate()
933            .filter(|(_, n)| !n.marked)
934            .map(|(i, _)| i as u32)
935            .collect()
936    }
937    /// Full collection: mark then sweep.
938    pub fn collect(&mut self) -> Vec<u32> {
939        for node in self.nodes.iter_mut() {
940            node.marked = false;
941        }
942        self.mark();
943        self.sweep()
944    }
945    /// Number of nodes.
946    pub fn node_count(&self) -> usize {
947        self.nodes.len()
948    }
949    /// Number of marked (live) nodes.
950    pub fn live_count(&self) -> usize {
951        self.nodes.iter().filter(|n| n.marked).count()
952    }
953}
/// A simple atomic reference counter (non-owning, just counts).
#[allow(dead_code)]
pub struct AtomicRefCounter {
    /// The shared counter value.
    count: std::sync::atomic::AtomicU64,
}
#[allow(dead_code)]
impl AtomicRefCounter {
    /// Create a new counter starting at `initial`.
    pub fn new(initial: u64) -> Self {
        Self {
            count: std::sync::atomic::AtomicU64::new(initial),
        }
    }
    /// Increment and return the new value.
    pub fn inc(&self) -> u64 {
        let previous = self.count.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        previous + 1
    }
    /// Decrement and return the new value (saturating at 0).
    pub fn dec(&self) -> u64 {
        // checked_sub refuses to go below zero, making fetch_update bail out
        // with the unchanged value; report 0 in that case.
        let result = self.count.fetch_update(
            std::sync::atomic::Ordering::SeqCst,
            std::sync::atomic::Ordering::SeqCst,
            |v| v.checked_sub(1),
        );
        match result {
            Ok(previous) => previous - 1,
            Err(_) => 0,
        }
    }
    /// Load the current count.
    pub fn load(&self) -> u64 {
        self.count.load(std::sync::atomic::Ordering::SeqCst)
    }
    /// Reset to a new value.
    pub fn reset(&self, val: u64) {
        self.count.store(val, std::sync::atomic::Ordering::SeqCst);
    }
    /// Whether the count is zero.
    pub fn is_zero(&self) -> bool {
        self.load() == 0
    }
}
/// A table of weak references to values.
///
/// Values are registered through strong `Arc` handles but stored only as
/// `Weak` pointers, so the table never keeps a value alive by itself.
#[allow(dead_code)]
pub struct WeakTable<T: Clone> {
    /// Key -> weak pointer; entries may be dead until looked up or pruned.
    entries: HashMap<u64, std::sync::Weak<T>>,
    /// Next key handed out by `register`.
    next_key: u64,
    /// How many lookups hit an already-dead entry.
    miss_count: u64,
}
#[allow(dead_code)]
impl<T: Clone> WeakTable<T> {
    /// Create an empty weak table.
    pub fn new() -> Self {
        WeakTable {
            entries: HashMap::new(),
            next_key: 0,
            miss_count: 0,
        }
    }
    /// Register a strong reference, returning its key.
    pub fn register(&mut self, value: std::sync::Arc<T>) -> u64 {
        let key = self.next_key;
        self.next_key = key + 1;
        self.entries.insert(key, std::sync::Arc::downgrade(&value));
        key
    }
    /// Try to upgrade a weak reference.
    ///
    /// A lookup that finds a dead entry removes it and counts a miss;
    /// a lookup for an unknown key is not counted as a miss.
    pub fn get(&mut self, key: u64) -> Option<std::sync::Arc<T>> {
        let weak = self.entries.get(&key)?;
        match weak.upgrade() {
            Some(strong) => Some(strong),
            None => {
                self.entries.remove(&key);
                self.miss_count += 1;
                None
            }
        }
    }
    /// Prune all expired (dead) weak references, returning how many were removed.
    pub fn prune(&mut self) -> usize {
        let dead: Vec<u64> = self
            .entries
            .iter()
            .filter(|(_, weak)| weak.upgrade().is_none())
            .map(|(key, _)| *key)
            .collect();
        for key in &dead {
            self.entries.remove(key);
        }
        dead.len()
    }
    /// Number of registered entries (including dead ones).
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// Whether the table is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Number of times a lookup found a dead reference.
    pub fn miss_count(&self) -> u64 {
        self.miss_count
    }
}
1050/// A directed graph where nodes are reference-counted.
1051#[allow(dead_code)]
1052pub struct RcGraph {
1053    nodes: HashMap<u32, (RcGraphNode, u32)>,
1054    next_id: u32,
1055    edge_count: usize,
1056}
1057#[allow(dead_code)]
1058impl RcGraph {
1059    /// Create an empty graph.
1060    pub fn new() -> Self {
1061        Self {
1062            nodes: HashMap::new(),
1063            next_id: 0,
1064            edge_count: 0,
1065        }
1066    }
1067    /// Add a node with the given data.
1068    pub fn add_node(&mut self, data: u64) -> u32 {
1069        let id = self.next_id;
1070        self.next_id += 1;
1071        self.nodes.insert(
1072            id,
1073            (
1074                RcGraphNode {
1075                    id,
1076                    data,
1077                    out_edges: Vec::new(),
1078                },
1079                1,
1080            ),
1081        );
1082        id
1083    }
1084    /// Add a directed edge from `src` to `dst`.
1085    pub fn add_edge(&mut self, src: u32, dst: u32) {
1086        if let Some((node, _)) = self.nodes.get_mut(&src) {
1087            if !node.out_edges.contains(&dst) {
1088                node.out_edges.push(dst);
1089                self.edge_count += 1;
1090            }
1091        }
1092        if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1093            *rc = rc.saturating_add(1);
1094        }
1095    }
1096    /// Remove an edge and decrement destination refcount.
1097    pub fn remove_edge(&mut self, src: u32, dst: u32) -> bool {
1098        let removed = if let Some((node, _)) = self.nodes.get_mut(&src) {
1099            let before = node.out_edges.len();
1100            node.out_edges.retain(|&e| e != dst);
1101            node.out_edges.len() < before
1102        } else {
1103            false
1104        };
1105        if removed {
1106            self.edge_count = self.edge_count.saturating_sub(1);
1107            if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1108                *rc = rc.saturating_sub(1);
1109            }
1110        }
1111        removed
1112    }
1113    /// Get node data.
1114    pub fn node_data(&self, id: u32) -> Option<u64> {
1115        self.nodes.get(&id).map(|(n, _)| n.data)
1116    }
1117    /// Get refcount.
1118    pub fn refcount(&self, id: u32) -> u32 {
1119        self.nodes.get(&id).map(|(_, rc)| *rc).unwrap_or(0)
1120    }
1121    /// Get out-edges.
1122    pub fn out_edges(&self, id: u32) -> Vec<u32> {
1123        self.nodes
1124            .get(&id)
1125            .map(|(n, _)| n.out_edges.clone())
1126            .unwrap_or_default()
1127    }
1128    /// Remove a node (and all edges from it).
1129    pub fn remove_node(&mut self, id: u32) {
1130        if let Some((node, _)) = self.nodes.remove(&id) {
1131            for dst in node.out_edges {
1132                if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1133                    *rc = rc.saturating_sub(1);
1134                }
1135                self.edge_count = self.edge_count.saturating_sub(1);
1136            }
1137        }
1138    }
1139    /// Number of nodes.
1140    pub fn node_count(&self) -> usize {
1141        self.nodes.len()
1142    }
1143    /// Number of edges.
1144    pub fn edge_count(&self) -> usize {
1145        self.edge_count
1146    }
1147    /// Nodes with zero refcount (potential garbage).
1148    pub fn zero_refcount_nodes(&self) -> Vec<u32> {
1149        self.nodes
1150            .iter()
1151            .filter(|(_, (_, rc))| *rc == 0)
1152            .map(|(id, _)| *id)
1153            .collect()
1154    }
1155}
/// Statistics about reference counting operations.
#[derive(Clone, Debug, Default)]
pub struct RcStats {
    /// Total number of RC increments.
    pub increments: u64,
    /// Total number of RC decrements.
    pub decrements: u64,
    /// Total number of deallocations (rc reached 0).
    pub deallocations: u64,
    /// Total number of elided increments.
    pub elided_increments: u64,
    /// Total number of elided decrements.
    pub elided_decrements: u64,
    /// Total number of in-place mutations (unique owner).
    pub inplace_mutations: u64,
    /// Total number of copy-on-write operations.
    pub copy_on_write: u64,
    /// Peak reference count observed.
    pub peak_rc: u32,
}
impl RcStats {
    /// Increment a single counter field (shared by the `record_*` methods).
    fn bump(counter: &mut u64) {
        *counter += 1;
    }
    /// Create new empty statistics.
    pub fn new() -> Self {
        RcStats::default()
    }
    /// Record an RC increment.
    pub fn record_inc(&mut self) {
        Self::bump(&mut self.increments);
    }
    /// Record an RC decrement.
    pub fn record_dec(&mut self) {
        Self::bump(&mut self.decrements);
    }
    /// Record a deallocation.
    pub fn record_dealloc(&mut self) {
        Self::bump(&mut self.deallocations);
    }
    /// Record an elided increment.
    pub fn record_elided_inc(&mut self) {
        Self::bump(&mut self.elided_increments);
    }
    /// Record an elided decrement.
    pub fn record_elided_dec(&mut self) {
        Self::bump(&mut self.elided_decrements);
    }
    /// Record an in-place mutation.
    pub fn record_inplace_mutation(&mut self) {
        Self::bump(&mut self.inplace_mutations);
    }
    /// Record a copy-on-write operation.
    pub fn record_cow(&mut self) {
        Self::bump(&mut self.copy_on_write);
    }
    /// Update the peak RC if necessary.
    pub fn update_peak(&mut self, rc: u32) {
        self.peak_rc = self.peak_rc.max(rc);
    }
    /// Total RC operations (not elided).
    pub fn total_ops(&self) -> u64 {
        self.increments + self.decrements
    }
    /// Total elided operations.
    pub fn total_elided(&self) -> u64 {
        self.elided_increments + self.elided_decrements
    }
    /// Elision ratio: fraction of all RC operations that were elided.
    ///
    /// Defined as 1.0 when no operations have been recorded at all.
    pub fn elision_ratio(&self) -> f64 {
        let total = self.total_ops() + self.total_elided();
        if total == 0 {
            1.0
        } else {
            self.total_elided() as f64 / total as f64
        }
    }
    /// Merge with another stats instance (sums counters, keeps max peak).
    pub fn merge(&mut self, other: &RcStats) {
        self.increments += other.increments;
        self.decrements += other.decrements;
        self.deallocations += other.deallocations;
        self.elided_increments += other.elided_increments;
        self.elided_decrements += other.elided_decrements;
        self.inplace_mutations += other.inplace_mutations;
        self.copy_on_write += other.copy_on_write;
        self.update_peak(other.peak_rc);
    }
    /// Reset all statistics.
    pub fn reset(&mut self) {
        *self = RcStats::default();
    }
}
1249/// A typed retain/release counter attached to a value.
1250#[allow(dead_code)]
1251pub struct RetainRelease<T> {
1252    pub(super) value: T,
1253    retain_count: u64,
1254    release_count: u64,
1255}
1256#[allow(dead_code)]
1257impl<T> RetainRelease<T> {
1258    /// Create a new retained value.
1259    pub fn new(value: T) -> Self {
1260        Self {
1261            value,
1262            retain_count: 1,
1263            release_count: 0,
1264        }
1265    }
1266    /// Retain (increment refcount).
1267    pub fn retain(&mut self) {
1268        self.retain_count += 1;
1269    }
1270    /// Release (decrement refcount). Returns true if the object should be dropped.
1271    pub fn release(&mut self) -> bool {
1272        self.release_count += 1;
1273        self.retain_count <= self.release_count
1274    }
1275    /// Current live refcount.
1276    pub fn live_count(&self) -> u64 {
1277        self.retain_count.saturating_sub(self.release_count)
1278    }
1279    /// Access the inner value.
1280    pub fn get(&self) -> &T {
1281        &self.value
1282    }
1283    /// Mutably access the inner value.
1284    pub fn get_mut(&mut self) -> &mut T {
1285        &mut self.value
1286    }
1287    /// Total retains.
1288    pub fn retain_count(&self) -> u64 {
1289        self.retain_count
1290    }
1291    /// Total releases.
1292    pub fn release_count(&self) -> u64 {
1293        self.release_count
1294    }
1295}
/// Manages reference counting within a scope.
///
/// The `RcManager` tracks all live references and their counts,
/// applies elision hints, and collects statistics.
pub struct RcManager {
    /// Elision analysis for the current scope; consulted per-variable to
    /// decide whether an inc/dec can be skipped.
    analysis: RcElisionAnalysis,
    /// Statistics for the current scope.
    stats: RcStats,
    /// Whether RC tracking is enabled (can be disabled for benchmarking).
    /// When false, `inc`/`dec` record nothing.
    enabled: bool,
    /// Maximum reference count before triggering a warning.
    /// NOTE(review): only read via its getter in this impl — the warning is
    /// presumably emitted elsewhere; verify against callers.
    max_rc_threshold: u32,
    /// Pending decrements (batched for efficiency), keyed by variable name;
    /// drained by `flush_pending`.
    pending_decrements: Vec<String>,
}
1312impl RcManager {
1313    /// Create a new RC manager.
1314    pub fn new() -> Self {
1315        RcManager {
1316            analysis: RcElisionAnalysis::new(),
1317            stats: RcStats::new(),
1318            enabled: true,
1319            max_rc_threshold: 1_000_000,
1320            pending_decrements: Vec::new(),
1321        }
1322    }
1323    /// Create a new RC manager with elision analysis.
1324    pub fn with_analysis(analysis: RcElisionAnalysis) -> Self {
1325        RcManager {
1326            analysis,
1327            stats: RcStats::new(),
1328            enabled: true,
1329            max_rc_threshold: 1_000_000,
1330            pending_decrements: Vec::new(),
1331        }
1332    }
1333    /// Record an RC increment for a variable.
1334    pub fn inc(&mut self, var: &str) {
1335        if !self.enabled {
1336            return;
1337        }
1338        let hint = self.analysis.get_hint(var);
1339        if hint.can_elide_inc() {
1340            self.stats.record_elided_inc();
1341        } else {
1342            self.stats.record_inc();
1343        }
1344    }
1345    /// Record an RC decrement for a variable.
1346    pub fn dec(&mut self, var: &str) {
1347        if !self.enabled {
1348            return;
1349        }
1350        let hint = self.analysis.get_hint(var);
1351        if hint.can_elide_dec() {
1352            self.stats.record_elided_dec();
1353        } else {
1354            self.stats.record_dec();
1355        }
1356    }
1357    /// Schedule a decrement for later (batch processing).
1358    pub fn schedule_dec(&mut self, var: String) {
1359        self.pending_decrements.push(var);
1360    }
1361    /// Process all pending decrements.
1362    pub fn flush_pending(&mut self) {
1363        let pending = std::mem::take(&mut self.pending_decrements);
1364        for var in &pending {
1365            self.dec(var);
1366        }
1367    }
1368    /// Check if a variable can be mutated in-place.
1369    pub fn can_mutate_inplace(&self, var: &str) -> bool {
1370        self.analysis.get_hint(var).can_mutate_inplace()
1371    }
1372    /// Record an in-place mutation.
1373    pub fn record_inplace_mutation(&mut self) {
1374        self.stats.record_inplace_mutation();
1375    }
1376    /// Record a copy-on-write operation.
1377    pub fn record_cow(&mut self) {
1378        self.stats.record_cow();
1379    }
1380    /// Get the current statistics.
1381    pub fn stats(&self) -> &RcStats {
1382        &self.stats
1383    }
1384    /// Get the elision analysis.
1385    pub fn analysis(&self) -> &RcElisionAnalysis {
1386        &self.analysis
1387    }
1388    /// Enable or disable RC tracking.
1389    pub fn set_enabled(&mut self, enabled: bool) {
1390        self.enabled = enabled;
1391    }
1392    /// Set the maximum RC threshold.
1393    pub fn set_max_rc_threshold(&mut self, threshold: u32) {
1394        self.max_rc_threshold = threshold;
1395    }
1396    /// Get the max RC threshold.
1397    pub fn max_rc_threshold(&self) -> u32 {
1398        self.max_rc_threshold
1399    }
1400    /// Reset statistics.
1401    pub fn reset_stats(&mut self) {
1402        self.stats.reset();
1403    }
1404}
/// A weak reference that does not prevent deallocation.
///
/// Weak references can be upgraded to strong references if the value
/// is still alive.
///
/// NOTE(review): unlike `std::rc::Weak`, this stores a *clone* of the
/// value taken at creation time, and `upgrade` builds a brand-new `Rc`
/// from that snapshot rather than reviving the original allocation —
/// confirm this snapshot semantics is intended for the runtime.
pub struct Weak<T> {
    /// Type marker; carries no data.
    _marker: std::marker::PhantomData<T>,
    /// Whether the value is still alive; cleared by `invalidate`.
    pub(super) alive: Cell<bool>,
    /// A copy of the value for upgrade attempts.
    value: Option<T>,
}
1417impl<T: Clone> Weak<T> {
1418    /// Create a new weak reference from a strong reference.
1419    pub fn from_rc(rc: &Rc<T>) -> Self {
1420        rc.inc_weak();
1421        Weak {
1422            _marker: std::marker::PhantomData,
1423            alive: Cell::new(true),
1424            value: Some(rc.inner.value.clone()),
1425        }
1426    }
1427    /// Try to upgrade this weak reference to a strong reference.
1428    pub fn upgrade(&self) -> Option<Rc<T>> {
1429        if self.alive.get() {
1430            self.value.as_ref().map(|v| Rc::new(v.clone()))
1431        } else {
1432            None
1433        }
1434    }
1435    /// Check if the referenced value is still alive.
1436    pub fn is_alive(&self) -> bool {
1437        self.alive.get()
1438    }
1439    /// Mark this weak reference as dead (the strong references are all gone).
1440    pub fn invalidate(&self) {
1441        self.alive.set(false);
1442    }
1443}