kovan/reclaim.rs

//! Memory reclamation trait and implementation

use crate::retired::RetiredNode;
use alloc::boxed::Box;

/// Trait for types that can be reclaimed
///
/// Types implementing this trait can be safely retired and reclaimed
/// by the memory reclamation system.
///
/// # Safety
///
/// Implementors must ensure that `retired_node` and `retired_node_mut` return
/// the `RetiredNode` embedded in `Self`, and that `dealloc` is safe to call
/// once the object is no longer reachable.
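///
/// # Example
///
/// A minimal sketch of an implementor (illustrative only: the struct and its
/// field names are hypothetical and not part of this crate's API):
///
/// ```ignore
/// struct MyNode {
///     value: u64,
///     // The intrusive node used by the reclamation system.
///     retired: RetiredNode,
/// }
///
/// // SAFETY: `retired_node`/`retired_node_mut` return the embedded RetiredNode,
/// // and nodes are assumed to be Box-allocated so the default `dealloc` is sound.
/// unsafe impl Reclaimable for MyNode {
///     fn retired_node(&self) -> &RetiredNode {
///         &self.retired
///     }
///
///     fn retired_node_mut(&mut self) -> &mut RetiredNode {
///         &mut self.retired
///     }
/// }
/// ```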
pub unsafe trait Reclaimable: Sized {
    /// Get a reference to the embedded RetiredNode
    fn retired_node(&self) -> &RetiredNode;

    /// Get a mutable reference to the embedded RetiredNode
    fn retired_node_mut(&mut self) -> &mut RetiredNode;

    /// Deallocate this node
    ///
    /// # Safety
    ///
    /// This must only be called once, when the node is no longer accessible.
    /// The default implementation frees the node with `Box::from_raw`, so it
    /// is only sound if the node was originally allocated via `Box`.
    unsafe fn dealloc(ptr: *mut Self) {
        // Default implementation: drop via Box
        // SAFETY: Caller guarantees `ptr` is a valid, Box-allocated pointer
        // and that this is called at most once.
        unsafe {
            drop(Box::from_raw(ptr));
        }
    }
}

/// Adjust reference count of a batch
///
/// # Safety
///
/// - `node_ptr` must be zero (treated as a no-op) or point to a valid `RetiredNode`
/// - The batch must still be valid
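///
/// # Example
///
/// A sketch of a typical call (illustrative only; real call sites live
/// elsewhere in the crate, and the `retired` binding here is hypothetical):
///
/// ```ignore
/// // `retired` is a valid, still-live RetiredNode tracked by the reclaimer.
/// let node_ptr = &retired as *const RetiredNode as usize;
/// // Drop one reference from the node's batch; if this was the last
/// // reference, the whole batch is destructed and freed.
/// unsafe { adjust_refs(node_ptr, -1) };
/// ```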
pub(crate) unsafe fn adjust_refs(node_ptr: usize, delta: isize) {
    if node_ptr == 0 {
        return;
    }

    let node = unsafe { &*(node_ptr as *const RetiredNode) };

    // Check for null nref_ptr
    if node.nref_ptr.is_null() {
        return;
    }

    let nref_node = unsafe { &*node.nref_ptr };

    // Handle increment and decrement separately: only a decrement can bring
    // the count to zero, so only that path may free the batch.
    if delta < 0 {
        // Decrement: use fetch_sub and check whether this call brought the count to zero
        let prev = nref_node
            .nref
            .fetch_sub(delta.abs(), core::sync::atomic::Ordering::AcqRel);

        // Only free if this call brought the count from delta.abs() down to 0
        if prev == delta.abs() {
            // SAFETY: NRef reached 0, so all threads that could see the batch have left
            unsafe {
                // Copy the pointer out first: `node` may itself belong to the
                // batch that is about to be freed.
                let nref_ptr = node.nref_ptr;

                // Call the type-erased destructor to free every node in the batch
                let destructor = nref_node.destructor;
                let mut curr = nref_node.batch_first;

                while !curr.is_null() {
                    let next = (*curr).batch_next;
                    destructor(curr);
                    curr = next;
                }

                // Free the NRefNode itself
                drop(Box::from_raw(nref_ptr));
            }
        }
    } else {
        // Increment: just add, no free needed
        nref_node
            .nref
            .fetch_add(delta, core::sync::atomic::Ordering::AcqRel);
    }
}

/// Traverse a retirement list and decrement batch references
///
/// # Safety
///
/// - `start` must be 0 (null) or point to a valid `RetiredNode`
/// - `stop` is the address at which to stop traversing (exclusive); use 0 to traverse the whole list
/// - `slot` must be a valid slot index (it is only used with the `robust` feature)
/// - All nodes in the list must still be valid
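///
/// # Example
///
/// A sketch of a typical call (illustrative only; `list_head` and `my_slot`
/// are hypothetical bindings, not part of this crate's API):
///
/// ```ignore
/// // Walk the entire retirement list starting at `list_head`, dropping one
/// // batch reference per node; `my_slot`'s ack counter is adjusted when the
/// // `robust` feature is enabled.
/// unsafe { traverse_and_decrement(list_head as usize, 0, my_slot) };
/// ```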
#[allow(unused_variables)]
pub(crate) unsafe fn traverse_and_decrement(start: usize, stop: usize, slot: usize) {
    let mut curr = start as *mut RetiredNode;
    #[cfg(feature = "robust")]
    let mut count = 0usize;

    while !curr.is_null() && curr as usize != stop {
        let node = unsafe { &*curr };

        // Read the next link before decrementing: if the decrement frees this
        // node's batch, `node` itself may be freed along with it.
        let next = node.smr_next;

        // Check for null nref_ptr before dereferencing
        if !node.nref_ptr.is_null() {
            let nref_node = unsafe { &*node.nref_ptr };

            // Atomic decrement
            let prev_nref = nref_node
                .nref
                .fetch_sub(1, core::sync::atomic::Ordering::AcqRel);
            #[cfg(feature = "robust")]
            {
                count += 1;
            }

            // If the count reaches zero, free the entire batch
            if prev_nref == 1 {
                // SAFETY: NRef reached 0, so all threads that could see the batch have left
                unsafe {
                    // Copy the pointer out first: `node` may itself belong to
                    // the batch that is about to be freed.
                    let nref_ptr = node.nref_ptr;

                    // Call the type-erased destructor to free every node in the batch
                    let destructor = nref_node.destructor;
                    let mut batch_node = nref_node.batch_first;

                    while !batch_node.is_null() {
                        let batch_next = (*batch_node).batch_next;
                        destructor(batch_node);
                        batch_node = batch_next;
                    }

                    // Free the NRefNode itself
                    drop(Box::from_raw(nref_ptr));
                }
            }
        }

        // Move to the next node in the list
        curr = next;
    }

    // Decrement the ack counter for robustness
    #[cfg(feature = "robust")]
    if count > 0 {
        let global = crate::slot::global();
        global
            .slot(slot)
            .ack_counter
            .fetch_sub(count as isize, core::sync::atomic::Ordering::Relaxed);
    }
}