1use std::cell::Cell;
6use std::collections::HashMap;
7
8#[allow(dead_code)]
10pub struct RcObserver {
11 events: Vec<RcEvent>,
12 max_events: usize,
13}
14#[allow(dead_code)]
15impl RcObserver {
16 pub fn new(max_events: usize) -> Self {
18 Self {
19 events: Vec::new(),
20 max_events,
21 }
22 }
23 pub fn record(&mut self, id: u64, kind: RcEventKind, count_after: u64) {
25 if self.events.len() >= self.max_events {
26 self.events.remove(0);
27 }
28 self.events.push(RcEvent {
29 id,
30 kind,
31 count_after,
32 });
33 }
34 pub fn events(&self) -> &[RcEvent] {
36 &self.events
37 }
38 pub fn count_kind(&self, kind: &RcEventKind) -> usize {
40 self.events.iter().filter(|e| &e.kind == kind).count()
41 }
42 pub fn drop_count(&self) -> usize {
44 self.count_kind(&RcEventKind::Drop)
45 }
46 pub fn alloc_count(&self) -> usize {
48 self.count_kind(&RcEventKind::Alloc)
49 }
50 pub fn clear(&mut self) {
52 self.events.clear();
53 }
54}
55#[derive(Clone, Debug)]
57pub struct RcElisionAnalysis {
58 pub variable_hints: HashMap<String, RcElisionHint>,
60 pub elided_ops: u32,
62 pub remaining_ops: u32,
64 pub fully_linear: bool,
66}
67impl RcElisionAnalysis {
68 pub fn new() -> Self {
70 RcElisionAnalysis {
71 variable_hints: HashMap::new(),
72 elided_ops: 0,
73 remaining_ops: 0,
74 fully_linear: true,
75 }
76 }
77 pub fn add_hint(&mut self, var: String, hint: RcElisionHint) {
79 if hint == RcElisionHint::None {
80 self.fully_linear = false;
81 }
82 self.variable_hints.insert(var, hint);
83 }
84 pub fn get_hint(&self, var: &str) -> RcElisionHint {
86 self.variable_hints
87 .get(var)
88 .cloned()
89 .unwrap_or(RcElisionHint::None)
90 }
91 pub fn elision_ratio(&self) -> f64 {
93 let total = self.elided_ops + self.remaining_ops;
94 if total == 0 {
95 return 1.0;
96 }
97 self.elided_ops as f64 / total as f64
98 }
99 pub fn merge(&mut self, other: &RcElisionAnalysis) {
101 for (var, hint) in &other.variable_hints {
102 let existing = self.get_hint(var);
103 let combined = existing.combine(hint);
104 self.add_hint(var.clone(), combined);
105 }
106 self.elided_ops += other.elided_ops;
107 self.remaining_ops += other.remaining_ops;
108 self.fully_linear = self.fully_linear && other.fully_linear;
109 }
110}
#[allow(dead_code)]
/// Map whose entries carry an explicit reference count; an entry is evicted
/// when its count is decremented to zero.
pub struct RefcountedMap<K: Eq + std::hash::Hash + Clone, V: Clone> {
    map: HashMap<K, (V, u32)>,
}
#[allow(dead_code)]
impl<K: Eq + std::hash::Hash + Clone, V: Clone> RefcountedMap<K, V> {
    /// Empty map.
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
        }
    }
    /// Inserts `value` under `key` with a refcount of 1, replacing (and
    /// resetting the count of) any previous entry.
    pub fn insert(&mut self, key: K, value: V) {
        self.map.insert(key, (value, 1));
    }
    /// Bumps the refcount (saturating); missing keys are ignored.
    pub fn inc_ref(&mut self, key: &K) {
        if let Some((_, count)) = self.map.get_mut(key) {
            *count = count.saturating_add(1);
        }
    }
    /// Drops one reference. Returns `true` iff this removed the entry
    /// (count reached zero); unknown keys and zero counts return `false`.
    pub fn dec_ref(&mut self, key: &K) -> bool {
        let expired = match self.map.get_mut(key) {
            Some((_, count)) if *count > 0 => {
                *count -= 1;
                *count == 0
            }
            _ => false,
        };
        if expired {
            self.map.remove(key);
        }
        expired
    }
    /// Borrows the stored value, if present.
    pub fn get(&self, key: &K) -> Option<&V> {
        self.map.get(key).map(|entry| &entry.0)
    }
    /// Current refcount (0 for unknown keys).
    pub fn refcount(&self, key: &K) -> u32 {
        self.map.get(key).map_or(0, |entry| entry.1)
    }
    /// Number of live entries.
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// True when no entries are stored.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
}
#[allow(dead_code)]
/// Kind of reference-count event recorded by `RcObserver`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcEventKind {
    /// Refcount increment.
    Inc,
    /// Refcount decrement.
    Dec,
    /// Object destroyed.
    Drop,
    /// Object allocated.
    Alloc,
}
172#[allow(dead_code)]
174#[derive(Clone, Debug, PartialEq, Eq)]
175pub struct StickyRc {
176 pub(super) count: u32,
177 pub(super) max: u32,
178}
179#[allow(dead_code)]
180impl StickyRc {
181 pub fn new(initial: u32, max: u32) -> Self {
183 Self {
184 count: initial.min(max),
185 max,
186 }
187 }
188 pub fn inc(&mut self) {
190 if self.count < self.max {
191 self.count += 1;
192 }
193 }
194 pub fn dec(&mut self) -> bool {
196 if self.count > 0 {
197 self.count -= 1;
198 }
199 self.count == 0
200 }
201 pub fn is_immortal(&self) -> bool {
203 self.count >= self.max
204 }
205 pub fn count(&self) -> u32 {
207 self.count
208 }
209 pub fn is_zero(&self) -> bool {
211 self.count == 0
212 }
213}
/// Shared heap cell behind `Rc`: the payload plus a non-atomic weak count
/// (single-threaded, hence `Cell`).
pub(super) struct RcInner<T> {
    pub(super) value: T,
    // Bumped by `Rc::inc_weak` / lowered by `Rc::dec_weak`; counts `Weak`
    // handles created from this cell.
    weak_count: Cell<u32>,
}
impl<T> RcInner<T> {
    // New cell with no outstanding weak handles.
    fn new(value: T) -> Self {
        RcInner {
            value,
            weak_count: Cell::new(0),
        }
    }
}
229pub struct RtArc<T> {
234 pub(super) inner: std::sync::Arc<ArcInner<T>>,
236}
237impl<T> RtArc<T> {
238 pub fn new(value: T) -> Self {
240 RtArc {
241 inner: std::sync::Arc::new(ArcInner::new(value)),
242 }
243 }
244 pub fn strong_count(&self) -> u32 {
246 std::sync::Arc::strong_count(&self.inner) as u32
247 }
248 pub fn weak_count(&self) -> u32 {
250 self.inner
251 .weak_count
252 .load(std::sync::atomic::Ordering::Acquire)
253 }
254 pub fn is_unique(&self) -> bool {
256 self.strong_count() == 1
257 }
258 #[allow(clippy::should_implement_trait)]
260 pub fn as_ref(&self) -> &T {
261 &self.inner.value
262 }
263 pub fn get_mut(&mut self) -> Option<&mut T> {
265 if self.weak_count() == 0 {
266 std::sync::Arc::get_mut(&mut self.inner).map(|r| &mut r.value)
267 } else {
268 None
269 }
270 }
271 pub fn try_unwrap(self) -> Result<T, Self> {
273 if self.weak_count() == 0 {
274 match std::sync::Arc::try_unwrap(self.inner) {
275 Ok(inner) => Ok(inner.value),
276 Err(inner) => Err(RtArc { inner }),
277 }
278 } else {
279 Err(self)
280 }
281 }
282 pub fn clone_arc(&self) -> Self {
284 RtArc {
285 inner: std::sync::Arc::clone(&self.inner),
286 }
287 }
288}
/// Shared heap cell behind `RtArc`: the payload plus a manual atomic weak
/// count (the strong count is the one maintained by `std::sync::Arc`).
pub(super) struct ArcInner<T> {
    pub(super) value: T,
    // Incremented by `ArcWeak::from_arc`; read by `RtArc::weak_count`.
    weak_count: std::sync::atomic::AtomicU32,
}
impl<T> ArcInner<T> {
    // New cell with no recorded weak handles.
    fn new(value: T) -> Self {
        ArcInner {
            value,
            weak_count: std::sync::atomic::AtomicU32::new(0),
        }
    }
}
304#[allow(dead_code)]
307pub struct RcBitmask {
308 pub(super) mask: u64,
309}
310#[allow(dead_code)]
311impl RcBitmask {
312 pub fn new() -> Self {
314 Self { mask: 0 }
315 }
316 pub fn set_alive(&mut self, i: u32) {
318 debug_assert!(i < 64);
319 self.mask |= 1u64 << i;
320 }
321 pub fn set_dead(&mut self, i: u32) {
323 debug_assert!(i < 64);
324 self.mask &= !(1u64 << i);
325 }
326 pub fn is_alive(&self, i: u32) -> bool {
328 (self.mask >> i) & 1 == 1
329 }
330 pub fn alive_count(&self) -> u32 {
332 self.mask.count_ones()
333 }
334 pub fn dead_count(&self) -> u32 {
336 self.mask.count_zeros()
337 }
338 pub fn first_dead(&self) -> Option<u32> {
340 let inv = !self.mask;
341 if inv == 0 {
342 None
343 } else {
344 Some(inv.trailing_zeros())
345 }
346 }
347 pub fn first_alive(&self) -> Option<u32> {
349 if self.mask == 0 {
350 None
351 } else {
352 Some(self.mask.trailing_zeros())
353 }
354 }
355 pub fn raw(&self) -> u64 {
357 self.mask
358 }
359}
/// Dynamic borrow state tracked by `BorrowFlag` (RefCell-style discipline).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BorrowState {
    /// No active borrows.
    Unborrowed,
    /// Shared borrows are active; the payload is the reader count.
    ImmutableBorrow(u32),
    /// A single exclusive borrow is active.
    MutableBorrow,
}
#[allow(dead_code)]
/// Node payload stored in `RcGraph`: an id, opaque data, outgoing edges.
#[derive(Clone, Debug)]
pub struct RcGraphNode {
    /// Identifier assigned by `RcGraph::add_node`.
    pub id: u32,
    /// Opaque per-node payload.
    pub data: u64,
    /// Ids of nodes this node points to (deduplicated by `add_edge`).
    pub out_edges: Vec<u32>,
}
#[allow(dead_code)]
/// Newtype index of a slot inside `RcPool`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct RcPoolIdx(pub usize);
385pub struct CowBox<T> {
390 pub(super) inner: Rc<T>,
392 pub(super) copied: Cell<bool>,
394}
395impl<T: Clone> CowBox<T> {
396 pub fn new(value: T) -> Self {
398 CowBox {
399 inner: Rc::new(value),
400 copied: Cell::new(false),
401 }
402 }
403 #[allow(clippy::should_implement_trait)]
405 pub fn as_ref(&self) -> &T {
406 self.inner.as_ref()
407 }
408 #[allow(clippy::should_implement_trait)]
410 pub fn as_mut(&mut self) -> &mut T {
411 if !self.inner.is_unique() {
412 let new_value = self.inner.as_ref().clone();
413 self.inner = Rc::new(new_value);
414 self.copied.set(true);
415 }
416 self.inner
417 .get_mut()
418 .unwrap_or_else(|| unreachable!("COW clone guarantees unique ownership before get_mut"))
419 }
420 pub fn was_copied(&self) -> bool {
422 self.copied.get()
423 }
424 pub fn is_unique(&self) -> bool {
426 self.inner.is_unique()
427 }
428 pub fn into_owned(self) -> T {
430 match self.inner.try_unwrap() {
431 Ok(v) => v,
432 Err(rc) => rc.as_ref().clone(),
433 }
434 }
435}
#[allow(dead_code)]
/// One recorded transfer of an object between named owners.
#[derive(Clone, Debug)]
pub struct OwnershipEvent {
    /// Object the transfer applies to.
    pub object_id: u64,
    /// Owner the object moved from.
    pub from_owner: String,
    /// Owner the object moved to.
    pub to_owner: String,
    /// Caller-supplied timestamp of the transfer.
    pub timestamp: u64,
}
#[allow(dead_code)]
/// Bounded FIFO log of ownership transfers (oldest entries evicted first).
pub struct OwnershipLog {
    events: Vec<OwnershipEvent>,
    max_events: usize,
}
#[allow(dead_code)]
impl OwnershipLog {
    /// Log retaining at most `max_events` entries; 0 disables recording.
    pub fn new(max_events: usize) -> Self {
        Self {
            events: Vec::new(),
            max_events,
        }
    }
    /// Appends a transfer, evicting the oldest entry when at capacity.
    ///
    /// Fix: the original called `events.remove(0)` whenever
    /// `len >= max_events`, which panics on the empty vec when
    /// `max_events == 0`; a zero-capacity log now silently drops records.
    pub fn record_transfer(&mut self, object_id: u64, from: &str, to: &str, ts: u64) {
        if self.max_events == 0 {
            return;
        }
        if self.events.len() >= self.max_events {
            // O(n) front removal; fine for small bounded logs.
            self.events.remove(0);
        }
        self.events.push(OwnershipEvent {
            object_id,
            from_owner: from.to_string(),
            to_owner: to.to_string(),
            timestamp: ts,
        });
    }
    /// All retained events for `object_id`, oldest first.
    pub fn events_for(&self, object_id: u64) -> Vec<&OwnershipEvent> {
        self.events
            .iter()
            .filter(|e| e.object_id == object_id)
            .collect()
    }
    /// Number of retained events.
    pub fn len(&self) -> usize {
        self.events.len()
    }
    /// True when nothing is retained.
    pub fn is_empty(&self) -> bool {
        self.events.is_empty()
    }
}
/// Reference-counting strategy selected for generated code.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcPolicy {
    Standard,
    Deferred,
    AggressiveElision,
    Disabled,
}
impl RcPolicy {
    /// True only for `Deferred`.
    pub fn is_deferred(&self) -> bool {
        *self == RcPolicy::Deferred
    }
    /// True for the policies that permit dropping redundant RC ops.
    pub fn allows_elision(&self) -> bool {
        match self {
            RcPolicy::AggressiveElision | RcPolicy::Deferred => true,
            RcPolicy::Standard | RcPolicy::Disabled => false,
        }
    }
    /// True unless reference counting is turned off entirely.
    pub fn is_enabled(&self) -> bool {
        *self != RcPolicy::Disabled
    }
}
#[allow(dead_code)]
/// Node in `GcTracer`'s mark-and-sweep graph.
#[derive(Clone, Debug, Default)]
pub struct GcNode {
    /// Indices of nodes this node references.
    pub refs: Vec<u32>,
    /// Roots are the starting points of the mark phase.
    pub is_root: bool,
    /// Set by `GcTracer::mark` when the node is reachable from a root.
    pub marked: bool,
}
#[allow(dead_code)]
/// One reference-count event as recorded by `RcObserver`.
#[derive(Clone, Debug)]
pub struct RcEvent {
    /// Object the event applies to.
    pub id: u64,
    /// What happened (inc/dec/drop/alloc).
    pub kind: RcEventKind,
    /// Reference count observed after the event.
    pub count_after: u64,
}
533pub struct ArcWeak<T> {
535 pub(super) alive: std::sync::atomic::AtomicBool,
537 value: Option<T>,
539}
540impl<T: Clone + Send + Sync> ArcWeak<T> {
541 pub fn from_arc(arc: &RtArc<T>) -> Self {
543 arc.inner
544 .weak_count
545 .fetch_add(1, std::sync::atomic::Ordering::Release);
546 ArcWeak {
547 alive: std::sync::atomic::AtomicBool::new(true),
548 value: Some(arc.inner.value.clone()),
549 }
550 }
551 pub fn upgrade(&self) -> Option<RtArc<T>> {
553 if self.alive.load(std::sync::atomic::Ordering::Acquire) {
554 self.value.as_ref().map(|v| RtArc::new(v.clone()))
555 } else {
556 None
557 }
558 }
559 pub fn is_alive(&self) -> bool {
561 self.alive.load(std::sync::atomic::Ordering::Acquire)
562 }
563 pub fn invalidate(&self) {
565 self.alive
566 .store(false, std::sync::atomic::Ordering::Release);
567 }
568}
569pub struct BorrowFlag {
571 state: Cell<BorrowState>,
573}
574impl BorrowFlag {
575 pub fn new() -> Self {
577 BorrowFlag {
578 state: Cell::new(BorrowState::Unborrowed),
579 }
580 }
581 pub fn try_borrow(&self) -> bool {
583 match self.state.get() {
584 BorrowState::Unborrowed => {
585 self.state.set(BorrowState::ImmutableBorrow(1));
586 true
587 }
588 BorrowState::ImmutableBorrow(n) => {
589 self.state.set(BorrowState::ImmutableBorrow(n + 1));
590 true
591 }
592 BorrowState::MutableBorrow => false,
593 }
594 }
595 pub fn release_borrow(&self) {
597 match self.state.get() {
598 BorrowState::ImmutableBorrow(1) => {
599 self.state.set(BorrowState::Unborrowed);
600 }
601 BorrowState::ImmutableBorrow(n) if n > 1 => {
602 self.state.set(BorrowState::ImmutableBorrow(n - 1));
603 }
604 _ => {}
605 }
606 }
607 pub fn try_borrow_mut(&self) -> bool {
609 match self.state.get() {
610 BorrowState::Unborrowed => {
611 self.state.set(BorrowState::MutableBorrow);
612 true
613 }
614 _ => false,
615 }
616 }
617 pub fn release_borrow_mut(&self) {
619 if self.state.get() == BorrowState::MutableBorrow {
620 self.state.set(BorrowState::Unborrowed);
621 }
622 }
623 pub fn state(&self) -> BorrowState {
625 self.state.get()
626 }
627 pub fn is_borrowed(&self) -> bool {
629 self.state.get() != BorrowState::Unborrowed
630 }
631 pub fn is_mutably_borrowed(&self) -> bool {
633 self.state.get() == BorrowState::MutableBorrow
634 }
635}
636#[allow(dead_code)]
638pub struct RcPool<T: Clone> {
639 slots: Vec<Option<T>>,
640 refcounts: Vec<u32>,
641 free: Vec<usize>,
642 alloc_count: u64,
643}
644#[allow(dead_code)]
645impl<T: Clone> RcPool<T> {
646 pub fn new() -> Self {
648 Self {
649 slots: Vec::new(),
650 refcounts: Vec::new(),
651 free: Vec::new(),
652 alloc_count: 0,
653 }
654 }
655 pub fn insert(&mut self, value: T) -> RcPoolIdx {
657 self.alloc_count += 1;
658 if let Some(idx) = self.free.pop() {
659 self.slots[idx] = Some(value);
660 self.refcounts[idx] = 1;
661 RcPoolIdx(idx)
662 } else {
663 let idx = self.slots.len();
664 self.slots.push(Some(value));
665 self.refcounts.push(1);
666 RcPoolIdx(idx)
667 }
668 }
669 pub fn inc_ref(&mut self, idx: RcPoolIdx) {
671 if let Some(rc) = self.refcounts.get_mut(idx.0) {
672 *rc = rc.saturating_add(1);
673 }
674 }
675 pub fn dec_ref(&mut self, idx: RcPoolIdx) -> bool {
677 if let Some(rc) = self.refcounts.get_mut(idx.0) {
678 if *rc == 0 {
679 return false;
680 }
681 *rc -= 1;
682 if *rc == 0 {
683 self.slots[idx.0] = None;
684 self.free.push(idx.0);
685 return true;
686 }
687 }
688 false
689 }
690 pub fn refcount(&self, idx: RcPoolIdx) -> u32 {
692 self.refcounts.get(idx.0).copied().unwrap_or(0)
693 }
694 pub fn get(&self, idx: RcPoolIdx) -> Option<&T> {
696 self.slots.get(idx.0)?.as_ref()
697 }
698 pub fn get_mut(&mut self, idx: RcPoolIdx) -> Option<&mut T> {
700 if self.refcount(idx) != 1 {
701 return None;
702 }
703 self.slots.get_mut(idx.0)?.as_mut()
704 }
705 pub fn cow(&mut self, idx: RcPoolIdx) -> Option<RcPoolIdx> {
707 let rc = self.refcount(idx);
708 if rc <= 1 {
709 return Some(idx);
710 }
711 let value = self.get(idx)?.clone();
712 self.dec_ref(idx);
713 Some(self.insert(value))
714 }
715 pub fn live_count(&self) -> usize {
717 self.slots.iter().filter(|s| s.is_some()).count()
718 }
719 pub fn alloc_count(&self) -> u64 {
721 self.alloc_count
722 }
723 pub fn capacity(&self) -> usize {
725 self.slots.len()
726 }
727}
728pub struct Rc<T> {
739 pub(super) inner: std::rc::Rc<RcInner<T>>,
741}
742impl<T> Rc<T> {
743 pub fn new(value: T) -> Self {
745 Rc {
746 inner: std::rc::Rc::new(RcInner::new(value)),
747 }
748 }
749 pub fn strong_count(&self) -> u32 {
751 std::rc::Rc::strong_count(&self.inner) as u32
752 }
753 pub fn weak_count(&self) -> u32 {
755 self.inner.weak_count.get()
756 }
757 pub fn is_unique(&self) -> bool {
759 self.strong_count() == 1
760 }
761 pub fn get_mut(&mut self) -> Option<&mut T> {
763 if self.weak_count() == 0 {
764 std::rc::Rc::get_mut(&mut self.inner).map(|r| &mut r.value)
765 } else {
766 None
767 }
768 }
769 #[allow(clippy::should_implement_trait)]
771 pub fn as_ref(&self) -> &T {
772 &self.inner.value
773 }
774 pub fn try_unwrap(self) -> Result<T, Self> {
776 if self.weak_count() == 0 {
777 match std::rc::Rc::try_unwrap(self.inner) {
778 Ok(inner) => Ok(inner.value),
779 Err(inner) => Err(Rc { inner }),
780 }
781 } else {
782 Err(self)
783 }
784 }
785 pub fn clone_rc(&self) -> Self {
787 Rc {
788 inner: std::rc::Rc::clone(&self.inner),
789 }
790 }
791 fn inc_weak(&self) {
793 let c = self.inner.weak_count.get();
794 self.inner.weak_count.set(c.saturating_add(1));
795 }
796 fn dec_weak(&self) {
798 let c = self.inner.weak_count.get();
799 if c > 0 {
800 self.inner.weak_count.set(c - 1);
801 }
802 }
803}
/// Per-variable hint produced by the elision analysis, describing how a
/// reference-counted value is used.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RcElisionHint {
    None,
    LinearUse,
    Ephemeral,
    Borrowed,
    UniqueOwner,
    SharedImmutable,
    TailPosition,
    UncapturedArg,
    StructField,
    DeadPath,
}
impl RcElisionHint {
    /// Hints under which an increment may be dropped.
    pub fn can_elide_inc(&self) -> bool {
        [
            RcElisionHint::LinearUse,
            RcElisionHint::Ephemeral,
            RcElisionHint::TailPosition,
            RcElisionHint::DeadPath,
        ]
        .contains(self)
    }
    /// Hints under which a decrement may be dropped.
    pub fn can_elide_dec(&self) -> bool {
        [
            RcElisionHint::LinearUse,
            RcElisionHint::Ephemeral,
            RcElisionHint::DeadPath,
        ]
        .contains(self)
    }
    /// Hints that permit mutating the value without a defensive copy.
    pub fn can_mutate_inplace(&self) -> bool {
        [RcElisionHint::UniqueOwner, RcElisionHint::LinearUse].contains(self)
    }
    /// Conservative join of two hints: equal hints survive, `DeadPath`
    /// yields to the other side, and any other disagreement degrades to
    /// `None`.
    pub fn combine(&self, other: &RcElisionHint) -> RcElisionHint {
        match (self, other) {
            (a, b) if a == b => a.clone(),
            (RcElisionHint::DeadPath, b) => b.clone(),
            (a, RcElisionHint::DeadPath) => a.clone(),
            _ => RcElisionHint::None,
        }
    }
}
876#[allow(dead_code)]
878pub struct GcTracer {
879 nodes: Vec<GcNode>,
880}
881#[allow(dead_code)]
882impl GcTracer {
883 pub fn new() -> Self {
885 Self { nodes: Vec::new() }
886 }
887 pub fn add_node(&mut self, is_root: bool) -> u32 {
889 let id = self.nodes.len() as u32;
890 self.nodes.push(GcNode {
891 refs: Vec::new(),
892 is_root,
893 marked: false,
894 });
895 id
896 }
897 pub fn add_ref(&mut self, from: u32, to: u32) {
899 if let Some(node) = self.nodes.get_mut(from as usize) {
900 if !node.refs.contains(&to) {
901 node.refs.push(to);
902 }
903 }
904 }
905 pub fn mark(&mut self) {
907 let roots: Vec<u32> = self
908 .nodes
909 .iter()
910 .enumerate()
911 .filter(|(_, n)| n.is_root)
912 .map(|(i, _)| i as u32)
913 .collect();
914 let mut worklist = roots;
915 while let Some(id) = worklist.pop() {
916 if let Some(node) = self.nodes.get_mut(id as usize) {
917 if node.marked {
918 continue;
919 }
920 node.marked = true;
921 let refs = node.refs.clone();
922 for next in refs {
923 worklist.push(next);
924 }
925 }
926 }
927 }
928 pub fn sweep(&self) -> Vec<u32> {
930 self.nodes
931 .iter()
932 .enumerate()
933 .filter(|(_, n)| !n.marked)
934 .map(|(i, _)| i as u32)
935 .collect()
936 }
937 pub fn collect(&mut self) -> Vec<u32> {
939 for node in self.nodes.iter_mut() {
940 node.marked = false;
941 }
942 self.mark();
943 self.sweep()
944 }
945 pub fn node_count(&self) -> usize {
947 self.nodes.len()
948 }
949 pub fn live_count(&self) -> usize {
951 self.nodes.iter().filter(|n| n.marked).count()
952 }
953}
#[allow(dead_code)]
/// Thread-safe counter with a zero floor: `dec` never wraps below 0.
pub struct AtomicRefCounter {
    count: std::sync::atomic::AtomicU64,
}
#[allow(dead_code)]
impl AtomicRefCounter {
    /// Counter starting at `initial`.
    pub fn new(initial: u64) -> Self {
        Self {
            count: std::sync::atomic::AtomicU64::new(initial),
        }
    }
    /// Atomically increments; returns the value after the increment.
    pub fn inc(&self) -> u64 {
        use std::sync::atomic::Ordering;
        1 + self.count.fetch_add(1, Ordering::SeqCst)
    }
    /// Atomically decrements, refusing to go below zero; returns the value
    /// after the decrement (0 when it was already 0).
    pub fn dec(&self) -> u64 {
        use std::sync::atomic::Ordering;
        let result = self
            .count
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| v.checked_sub(1));
        match result {
            Ok(previous) => previous - 1,
            Err(_) => 0,
        }
    }
    /// Current value.
    pub fn load(&self) -> u64 {
        use std::sync::atomic::Ordering;
        self.count.load(Ordering::SeqCst)
    }
    /// Overwrites the counter with `val`.
    pub fn reset(&self, val: u64) {
        use std::sync::atomic::Ordering;
        self.count.store(val, Ordering::SeqCst);
    }
    /// True when the counter reads zero.
    pub fn is_zero(&self) -> bool {
        self.load() == 0
    }
}
#[allow(dead_code)]
/// Registry of `Weak` handles keyed by a monotonically increasing id; dead
/// entries are removed lazily on lookup (counting misses) or via `prune`.
pub struct WeakTable<T: Clone> {
    entries: HashMap<u64, std::sync::Weak<T>>,
    next_key: u64,
    miss_count: u64,
}
#[allow(dead_code)]
impl<T: Clone> WeakTable<T> {
    /// Empty table; keys start at 0.
    pub fn new() -> Self {
        Self {
            entries: HashMap::new(),
            next_key: 0,
            miss_count: 0,
        }
    }
    /// Stores a downgraded handle to `value` and returns its key.
    pub fn register(&mut self, value: std::sync::Arc<T>) -> u64 {
        let key = self.next_key;
        self.next_key += 1;
        self.entries.insert(key, std::sync::Arc::downgrade(&value));
        key
    }
    /// Upgrades the entry at `key`. A dead entry is removed and counted as
    /// a miss; an unknown key is simply `None` (not a miss).
    pub fn get(&mut self, key: u64) -> Option<std::sync::Arc<T>> {
        let strong = self.entries.get(&key).and_then(|weak| weak.upgrade());
        if strong.is_none() && self.entries.remove(&key).is_some() {
            self.miss_count += 1;
        }
        strong
    }
    /// Drops every dead entry; returns how many were removed.
    pub fn prune(&mut self) -> usize {
        let initial = self.entries.len();
        self.entries.retain(|_, weak| weak.upgrade().is_some());
        initial - self.entries.len()
    }
    /// Entries currently stored (live or dead-but-unpruned).
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// True when no entries remain.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    /// Number of lookups that found a dead entry.
    pub fn miss_count(&self) -> u64 {
        self.miss_count
    }
}
1050#[allow(dead_code)]
1052pub struct RcGraph {
1053 nodes: HashMap<u32, (RcGraphNode, u32)>,
1054 next_id: u32,
1055 edge_count: usize,
1056}
1057#[allow(dead_code)]
1058impl RcGraph {
1059 pub fn new() -> Self {
1061 Self {
1062 nodes: HashMap::new(),
1063 next_id: 0,
1064 edge_count: 0,
1065 }
1066 }
1067 pub fn add_node(&mut self, data: u64) -> u32 {
1069 let id = self.next_id;
1070 self.next_id += 1;
1071 self.nodes.insert(
1072 id,
1073 (
1074 RcGraphNode {
1075 id,
1076 data,
1077 out_edges: Vec::new(),
1078 },
1079 1,
1080 ),
1081 );
1082 id
1083 }
1084 pub fn add_edge(&mut self, src: u32, dst: u32) {
1086 if let Some((node, _)) = self.nodes.get_mut(&src) {
1087 if !node.out_edges.contains(&dst) {
1088 node.out_edges.push(dst);
1089 self.edge_count += 1;
1090 }
1091 }
1092 if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1093 *rc = rc.saturating_add(1);
1094 }
1095 }
1096 pub fn remove_edge(&mut self, src: u32, dst: u32) -> bool {
1098 let removed = if let Some((node, _)) = self.nodes.get_mut(&src) {
1099 let before = node.out_edges.len();
1100 node.out_edges.retain(|&e| e != dst);
1101 node.out_edges.len() < before
1102 } else {
1103 false
1104 };
1105 if removed {
1106 self.edge_count = self.edge_count.saturating_sub(1);
1107 if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1108 *rc = rc.saturating_sub(1);
1109 }
1110 }
1111 removed
1112 }
1113 pub fn node_data(&self, id: u32) -> Option<u64> {
1115 self.nodes.get(&id).map(|(n, _)| n.data)
1116 }
1117 pub fn refcount(&self, id: u32) -> u32 {
1119 self.nodes.get(&id).map(|(_, rc)| *rc).unwrap_or(0)
1120 }
1121 pub fn out_edges(&self, id: u32) -> Vec<u32> {
1123 self.nodes
1124 .get(&id)
1125 .map(|(n, _)| n.out_edges.clone())
1126 .unwrap_or_default()
1127 }
1128 pub fn remove_node(&mut self, id: u32) {
1130 if let Some((node, _)) = self.nodes.remove(&id) {
1131 for dst in node.out_edges {
1132 if let Some((_, rc)) = self.nodes.get_mut(&dst) {
1133 *rc = rc.saturating_sub(1);
1134 }
1135 self.edge_count = self.edge_count.saturating_sub(1);
1136 }
1137 }
1138 }
1139 pub fn node_count(&self) -> usize {
1141 self.nodes.len()
1142 }
1143 pub fn edge_count(&self) -> usize {
1145 self.edge_count
1146 }
1147 pub fn zero_refcount_nodes(&self) -> Vec<u32> {
1149 self.nodes
1150 .iter()
1151 .filter(|(_, (_, rc))| *rc == 0)
1152 .map(|(id, _)| *id)
1153 .collect()
1154 }
1155}
/// Aggregate counters describing RC activity (performed vs. elided ops).
#[derive(Clone, Debug, Default)]
pub struct RcStats {
    pub increments: u64,
    pub decrements: u64,
    pub deallocations: u64,
    pub elided_increments: u64,
    pub elided_decrements: u64,
    pub inplace_mutations: u64,
    pub copy_on_write: u64,
    pub peak_rc: u32,
}
impl RcStats {
    /// All counters zeroed.
    pub fn new() -> Self {
        Self::default()
    }
    /// Counts one performed increment.
    pub fn record_inc(&mut self) {
        self.increments += 1;
    }
    /// Counts one performed decrement.
    pub fn record_dec(&mut self) {
        self.decrements += 1;
    }
    /// Counts one deallocation.
    pub fn record_dealloc(&mut self) {
        self.deallocations += 1;
    }
    /// Counts one elided increment.
    pub fn record_elided_inc(&mut self) {
        self.elided_increments += 1;
    }
    /// Counts one elided decrement.
    pub fn record_elided_dec(&mut self) {
        self.elided_decrements += 1;
    }
    /// Counts one in-place mutation.
    pub fn record_inplace_mutation(&mut self) {
        self.inplace_mutations += 1;
    }
    /// Counts one copy-on-write.
    pub fn record_cow(&mut self) {
        self.copy_on_write += 1;
    }
    /// Raises `peak_rc` to `rc` if it is a new maximum.
    pub fn update_peak(&mut self, rc: u32) {
        self.peak_rc = self.peak_rc.max(rc);
    }
    /// Performed increments + decrements.
    pub fn total_ops(&self) -> u64 {
        self.increments + self.decrements
    }
    /// Elided increments + decrements.
    pub fn total_elided(&self) -> u64 {
        self.elided_increments + self.elided_decrements
    }
    /// Share of all RC ops that were elided; trivially 1.0 with no ops.
    pub fn elision_ratio(&self) -> f64 {
        match self.total_ops() + self.total_elided() {
            0 => 1.0,
            total => self.total_elided() as f64 / total as f64,
        }
    }
    /// Adds `other`'s counters into `self`; peaks combine via max.
    pub fn merge(&mut self, other: &RcStats) {
        self.increments += other.increments;
        self.decrements += other.decrements;
        self.deallocations += other.deallocations;
        self.elided_increments += other.elided_increments;
        self.elided_decrements += other.elided_decrements;
        self.inplace_mutations += other.inplace_mutations;
        self.copy_on_write += other.copy_on_write;
        self.peak_rc = self.peak_rc.max(other.peak_rc);
    }
    /// Zeroes every counter.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
1249#[allow(dead_code)]
1251pub struct RetainRelease<T> {
1252 pub(super) value: T,
1253 retain_count: u64,
1254 release_count: u64,
1255}
1256#[allow(dead_code)]
1257impl<T> RetainRelease<T> {
1258 pub fn new(value: T) -> Self {
1260 Self {
1261 value,
1262 retain_count: 1,
1263 release_count: 0,
1264 }
1265 }
1266 pub fn retain(&mut self) {
1268 self.retain_count += 1;
1269 }
1270 pub fn release(&mut self) -> bool {
1272 self.release_count += 1;
1273 self.retain_count <= self.release_count
1274 }
1275 pub fn live_count(&self) -> u64 {
1277 self.retain_count.saturating_sub(self.release_count)
1278 }
1279 pub fn get(&self) -> &T {
1281 &self.value
1282 }
1283 pub fn get_mut(&mut self) -> &mut T {
1285 &mut self.value
1286 }
1287 pub fn retain_count(&self) -> u64 {
1289 self.retain_count
1290 }
1291 pub fn release_count(&self) -> u64 {
1293 self.release_count
1294 }
1295}
1296pub struct RcManager {
1301 analysis: RcElisionAnalysis,
1303 stats: RcStats,
1305 enabled: bool,
1307 max_rc_threshold: u32,
1309 pending_decrements: Vec<String>,
1311}
1312impl RcManager {
1313 pub fn new() -> Self {
1315 RcManager {
1316 analysis: RcElisionAnalysis::new(),
1317 stats: RcStats::new(),
1318 enabled: true,
1319 max_rc_threshold: 1_000_000,
1320 pending_decrements: Vec::new(),
1321 }
1322 }
1323 pub fn with_analysis(analysis: RcElisionAnalysis) -> Self {
1325 RcManager {
1326 analysis,
1327 stats: RcStats::new(),
1328 enabled: true,
1329 max_rc_threshold: 1_000_000,
1330 pending_decrements: Vec::new(),
1331 }
1332 }
1333 pub fn inc(&mut self, var: &str) {
1335 if !self.enabled {
1336 return;
1337 }
1338 let hint = self.analysis.get_hint(var);
1339 if hint.can_elide_inc() {
1340 self.stats.record_elided_inc();
1341 } else {
1342 self.stats.record_inc();
1343 }
1344 }
1345 pub fn dec(&mut self, var: &str) {
1347 if !self.enabled {
1348 return;
1349 }
1350 let hint = self.analysis.get_hint(var);
1351 if hint.can_elide_dec() {
1352 self.stats.record_elided_dec();
1353 } else {
1354 self.stats.record_dec();
1355 }
1356 }
1357 pub fn schedule_dec(&mut self, var: String) {
1359 self.pending_decrements.push(var);
1360 }
1361 pub fn flush_pending(&mut self) {
1363 let pending = std::mem::take(&mut self.pending_decrements);
1364 for var in &pending {
1365 self.dec(var);
1366 }
1367 }
1368 pub fn can_mutate_inplace(&self, var: &str) -> bool {
1370 self.analysis.get_hint(var).can_mutate_inplace()
1371 }
1372 pub fn record_inplace_mutation(&mut self) {
1374 self.stats.record_inplace_mutation();
1375 }
1376 pub fn record_cow(&mut self) {
1378 self.stats.record_cow();
1379 }
1380 pub fn stats(&self) -> &RcStats {
1382 &self.stats
1383 }
1384 pub fn analysis(&self) -> &RcElisionAnalysis {
1386 &self.analysis
1387 }
1388 pub fn set_enabled(&mut self, enabled: bool) {
1390 self.enabled = enabled;
1391 }
1392 pub fn set_max_rc_threshold(&mut self, threshold: u32) {
1394 self.max_rc_threshold = threshold;
1395 }
1396 pub fn max_rc_threshold(&self) -> u32 {
1398 self.max_rc_threshold
1399 }
1400 pub fn reset_stats(&mut self) {
1402 self.stats.reset();
1403 }
1404}
1405pub struct Weak<T> {
1410 _marker: std::marker::PhantomData<T>,
1412 pub(super) alive: Cell<bool>,
1414 value: Option<T>,
1416}
1417impl<T: Clone> Weak<T> {
1418 pub fn from_rc(rc: &Rc<T>) -> Self {
1420 rc.inc_weak();
1421 Weak {
1422 _marker: std::marker::PhantomData,
1423 alive: Cell::new(true),
1424 value: Some(rc.inner.value.clone()),
1425 }
1426 }
1427 pub fn upgrade(&self) -> Option<Rc<T>> {
1429 if self.alive.get() {
1430 self.value.as_ref().map(|v| Rc::new(v.clone()))
1431 } else {
1432 None
1433 }
1434 }
1435 pub fn is_alive(&self) -> bool {
1437 self.alive.get()
1438 }
1439 pub fn invalidate(&self) {
1441 self.alive.set(false);
1442 }
1443}