light_qsbr/shared_manager.rs
//! This module provides the [`SharedManager`].

use std::sync::atomic::Ordering::{AcqRel, Relaxed};
use orengine_utils::cache_padded::CachePaddedAtomicUsize;
use orengine_utils::hints::unlikely;
use orengine_utils::light_arc::LightArc;
use crate::local_manager::{LocalManager, LOCAL_MANAGER};
use crate::number_of_executors::NumberOfExecutorsInEpoch;

/// The internal state of the [`SharedManager`].
///
/// This struct is reference-counted via [`LightArc`] and shared between all
/// [`SharedManager`] instances. It contains the global epoch, the number of
/// executors in each epoch, and (in tests) a counter of deallocated bytes.
struct Inner {
    current_epoch: CachePaddedAtomicUsize,
    number_of_executors_in_epoch: NumberOfExecutorsInEpoch,
    #[cfg(test)]
    bytes_deallocated: CachePaddedAtomicUsize,
}

/// A shared manager that coordinates memory reclamation across executors.
///
/// The [`SharedManager`] is the central structure that tracks the number of registered
/// executors, the number of executors that have passed the current epoch, and the
/// current epoch number.
///
/// Executors register themselves with the [`SharedManager`] and get a thread-local
/// [`LocalManager`], which they use to schedule memory for deallocation or dropping.
///
/// The [`SharedManager`] ensures that memory is only freed when it is safe, that is,
/// once all executors have advanced past the epoch in which the memory was retired.
///
/// # Usage
///
/// 1. Create a new [`SharedManager`] with [`SharedManager::new`].
/// 2. For each executor (thread or runtime worker), call
///    [`SharedManager::register_new_executor`] to install a thread-local [`LocalManager`].
/// 3. Executors periodically call [`LocalManager::maybe_pass_epoch`] to advance epochs
///    and allow reclamation.
/// 4. When an executor is done, it must deregister with
///    `unsafe { LocalManager::deregister() }`.
///
/// # Example
///
/// You can find a very detailed example in the [`LocalManager`]'s docs. A minimal
/// sketch of the lifecycle is shown below.
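///
/// The sketch assumes a single executor thread; the reclamation work in the middle
/// is an illustrative placeholder, not a literal API:
///
/// ```ignore
/// let shared_manager = SharedManager::new();
///
/// // Install the thread-local `LocalManager` for this executor.
/// shared_manager.register_new_executor();
///
/// // ... the executor schedules memory for deallocation or dropping via its
/// // `LocalManager` and periodically calls `LocalManager::maybe_pass_epoch()`
/// // so the global epoch can advance and retired memory can be reclaimed ...
///
/// // When the executor shuts down, it must deregister itself.
/// unsafe { LocalManager::deregister() };
/// ```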
#[derive(Clone)]
pub struct SharedManager {
    inner: LightArc<Inner>,
}

impl SharedManager {
    /// Creates a new [`SharedManager`].
    ///
    /// This function initializes the global epoch and prepares the internal state
    /// for executor registration.
    pub fn new() -> Self {
        Self {
            inner: LightArc::new(Inner {
                current_epoch: CachePaddedAtomicUsize::new(),
                number_of_executors_in_epoch: NumberOfExecutorsInEpoch::new(),
                #[cfg(test)]
                bytes_deallocated: CachePaddedAtomicUsize::new(),
            }),
        }
    }

    /// Increments the counter of deallocated bytes (test-only).
    #[cfg(test)]
    pub(crate) fn increment_bytes_deallocated(&self, bytes: usize) {
        self.inner.bytes_deallocated.fetch_add(bytes, std::sync::atomic::Ordering::SeqCst);
    }

    /// Returns the total number of bytes deallocated since creation (test-only).
    #[cfg(test)]
    pub(crate) fn bytes_deallocated(&self) -> usize {
        self.inner.bytes_deallocated.load(std::sync::atomic::Ordering::SeqCst)
    }

    /// Registers a new executor in the current thread.
    ///
    /// This creates and installs a thread-local [`LocalManager`] associated
    /// with this [`SharedManager`].
    ///
    /// # Panics
    ///
    /// Panics if the current thread already has a registered [`LocalManager`].
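    ///
    /// # Example
    ///
    /// A minimal sketch of the double-registration failure mode:
    ///
    /// ```ignore
    /// let shared_manager = SharedManager::new();
    ///
    /// shared_manager.register_new_executor(); // OK: installs the thread-local manager
    /// shared_manager.register_new_executor(); // panics: this thread is already registered
    /// ```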
    pub fn register_new_executor(&self) {
        self.inner.number_of_executors_in_epoch.register_new_executor();

        LOCAL_MANAGER.with(|local_manager_| {
            // The cell is thread-local, so no other thread can observe this
            // mutable access.
            let local_manager = unsafe { &mut *local_manager_.get() };

            assert!(
                local_manager.replace(LocalManager::new(self)).is_none(),
                "Attempt to register local manager in a thread that already has a local manager. \
                 Each thread can be registered only once"
            );
        });
    }

    /// Deregisters an executor from the current epoch.
    ///
    /// Returns `true` if all executors have left the current epoch, which means
    /// the global epoch can safely be advanced.
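    ///
    /// A minimal sketch of how a caller is expected to use the return value
    /// (the reclamation step in the comment is an illustrative placeholder,
    /// not a method of this type):
    ///
    /// ```ignore
    /// if shared_manager.deregister_executor() {
    ///     // The last executor left the epoch: the global epoch was advanced,
    ///     // so memory retired in earlier epochs may now be reclaimed.
    /// }
    /// ```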
    pub(crate) fn deregister_executor(&self) -> bool {
        if unlikely(
            self.inner
                .number_of_executors_in_epoch
                .deregister_executor_and_decrement_counter(),
        ) {
            self.inner.current_epoch.fetch_add(1, AcqRel);

            return true;
        }

        false
    }

    /// Marks the calling executor as having passed the current epoch.
    ///
    /// Returns `true` if this was the last executor in the epoch, meaning
    /// the global epoch is incremented and memory can be reclaimed.
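    ///
    /// A minimal sketch of the intended call pattern; executors normally reach
    /// this through [`LocalManager::maybe_pass_epoch`] rather than calling it
    /// directly:
    ///
    /// ```ignore
    /// // At a quiescent point, report that this executor passed the epoch.
    /// if shared_manager.executor_passed_epoch() {
    ///     // This was the last executor: the global epoch has been advanced.
    /// }
    /// ```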
    pub(crate) fn executor_passed_epoch(&self) -> bool {
        if unlikely(self.inner.number_of_executors_in_epoch.executor_passed_epoch()) {
            // All executors passed the epoch, so we can update the current epoch.
            self.inner.current_epoch.fetch_add(1, AcqRel);

            return true;
        }

        false
    }

    /// Returns the current global epoch.
    pub(crate) fn current_epoch(&self) -> usize {
        self.inner.current_epoch.load(Relaxed)
    }
}

impl Default for SharedManager {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for Inner {
    fn drop(&mut self) {
        let data = self.number_of_executors_in_epoch.parsed_data_for_drop();

        assert!(
            data.in_current_epoch == 0 && data.all == 0,
            "Some executors are still registered when the shared manager is dropped. \
             Make sure to call `deregister_executor` for all registered executors \
             (use {code}).",
            code = "unsafe { LocalManager::deregister() }"
        );
    }
}