// linked/static_instance_per_thread_sync.rs

1// Copyright (c) Microsoft Corporation.
2// Copyright (c) Folo authors.
3
4use std::sync::Arc;
5use std::thread::LocalKey;
6
/// This is the real type of variables wrapped in the [`linked::thread_local_arc!` macro][1].
/// See macro documentation for more details.
///
/// Instances of this type are created by the [`linked::thread_local_arc!` macro][1],
/// never directly by user code, which can call `.with()` or `.to_arc()`
/// to work with or obtain a linked instance of `T`.
///
/// [1]: crate::thread_local_arc
#[derive(Debug)]
pub struct StaticInstancePerThreadSync<T>
where
    T: linked::Object + Send + Sync,
{
    // Accessor for the macro-generated `thread_local!` storage holding this
    // thread's `Arc<T>`. A plain fn pointer (not a closure type) keeps this
    // struct constructible via `const fn new`, so the macro can declare it
    // as a `const` item.
    get_storage: fn() -> &'static LocalKey<Arc<T>>,
}
22
impl<T> StaticInstancePerThreadSync<T>
where
    T: linked::Object + Send + Sync,
{
    /// Note: this function exists to serve the inner workings of the
    /// `linked::thread_local_arc!` macro and should not be used directly.
    /// It is not part of the public API and may be removed or changed at any time.
    #[doc(hidden)]
    #[must_use]
    pub const fn new(get_storage: fn() -> &'static LocalKey<Arc<T>>) -> Self {
        Self { get_storage }
    }

    /// Executes a closure with the current thread's linked instance from
    /// the object family referenced by the static variable.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicUsize, Ordering};
    /// # use std::sync::{Arc, Mutex};
    /// #
    /// # #[linked::object]
    /// # struct MetricsCollector {
    /// #     local_requests: AtomicUsize,
    /// #     global_total: Arc<Mutex<usize>>,
    /// # }
    /// #
    /// # impl MetricsCollector {
    /// #     pub fn new() -> Self {
    /// #         let global_total = Arc::new(Mutex::new(0));
    /// #         linked::new!(Self {
    /// #             local_requests: AtomicUsize::new(0),
    /// #             global_total: Arc::clone(&global_total),
    /// #         })
    /// #     }
    /// #
    /// #     pub fn record_request(&self) {
    /// #         self.local_requests.fetch_add(1, Ordering::Relaxed);
    /// #         *self.global_total.lock().unwrap() += 1;
    /// #     }
    /// #
    /// #     pub fn local_count(&self) -> usize {
    /// #         self.local_requests.load(Ordering::Relaxed)
    /// #     }
    /// #
    /// #     pub fn global_count(&self) -> usize {
    /// #         *self.global_total.lock().unwrap()
    /// #     }
    /// # }
    /// use std::thread;
    ///
    /// linked::thread_local_arc!(static METRICS: MetricsCollector = MetricsCollector::new());
    ///
    /// // Use .with() for efficient access when you do not need to store the Arc
    /// METRICS.with(|metrics| {
    ///     metrics.record_request();
    ///     assert_eq!(metrics.local_count(), 1);
    ///     assert_eq!(metrics.global_count(), 1);
    /// });
    ///
    /// // Multiple calls to .with() access the same thread-local instance
    /// METRICS.with(|metrics| {
    ///     assert_eq!(metrics.local_count(), 1); // Still 1 from previous call
    ///     assert_eq!(metrics.global_count(), 1); // Still 1 globally
    /// });
    ///
    /// // Each thread gets its own instance with fresh local state but shared global state
    /// thread::spawn(|| {
    ///     METRICS.with(|metrics| {
    ///         assert_eq!(metrics.local_count(), 0); // Fresh local count for this thread
    ///         assert_eq!(metrics.global_count(), 1); // But sees global count from main thread
    ///
    ///         metrics.record_request();
    ///         assert_eq!(metrics.local_count(), 1); // Local count incremented
    ///         assert_eq!(metrics.global_count(), 2); // Global count now 2
    ///     });
    /// }).join().unwrap();
    ///
    /// // Back on main thread: local state unchanged, global state updated
    /// METRICS.with(|metrics| {
    ///     assert_eq!(metrics.local_count(), 1); // Still 1 locally
    ///     assert_eq!(metrics.global_count(), 2); // But sees update from other thread
    /// });
    /// ```
    ///
    /// # Performance
    ///
    /// For repeated access to the current thread's linked instance, prefer reusing an `Arc<T>`
    /// obtained from `.to_arc()`.
    ///
    /// If your code is not in a situation where it can reuse an existing `Arc<T>`, this method is
    /// the optimal way to access the current thread's linked instance of `T`.
    #[inline]
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&Arc<T>) -> R,
    {
        // Resolve the macro-generated `thread_local!` storage and lend the
        // closure a shared reference to this thread's `Arc<T>` without
        // cloning the `Arc` (and therefore without touching its refcount).
        (self.get_storage)().with(f)
    }

    /// Gets an `Arc<T>` to the current thread's linked instance from
    /// the object family referenced by the static variable.
    ///
    /// The instance behind this `Arc` is the same one accessed by all other calls through the static
    /// variable on this thread. Note that it is still possible to create multiple instances on a
    /// single thread, e.g. by cloning the `T` within. The "one instance per thread" logic only
    /// applies when the instances are accessed through the static variable.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicUsize, Ordering};
    /// # use std::sync::{Arc, Mutex};
    /// #
    /// # #[linked::object]
    /// # struct ServiceMonitor {
    /// #     local_checks: AtomicUsize,
    /// #     global_failures: Arc<Mutex<usize>>,
    /// # }
    /// #
    /// # impl ServiceMonitor {
    /// #     pub fn new() -> Self {
    /// #         let global_failures = Arc::new(Mutex::new(0));
    /// #         linked::new!(Self {
    /// #             local_checks: AtomicUsize::new(0),
    /// #             global_failures: Arc::clone(&global_failures),
    /// #         })
    /// #     }
    /// #
    /// #     pub fn check_service(&self, success: bool) {
    /// #         self.local_checks.fetch_add(1, Ordering::Relaxed);
    /// #         if !success {
    /// #             *self.global_failures.lock().unwrap() += 1;
    /// #         }
    /// #     }
    /// #
    /// #     pub fn local_checks(&self) -> usize {
    /// #         self.local_checks.load(Ordering::Relaxed)
    /// #     }
    /// #
    /// #     pub fn global_failures(&self) -> usize {
    /// #         *self.global_failures.lock().unwrap()
    /// #     }
    /// # }
    /// use std::thread;
    ///
    /// linked::thread_local_arc!(static MONITOR: ServiceMonitor = ServiceMonitor::new());
    ///
    /// // Get an Arc to reuse across multiple operations
    /// let monitor = MONITOR.to_arc();
    /// monitor.check_service(true);
    /// monitor.check_service(false); // This will increment global failures
    /// assert_eq!(monitor.local_checks(), 2);
    /// assert_eq!(monitor.global_failures(), 1);
    ///
    /// // Multiple calls to to_arc() return Arc to the same instance
    /// let monitor2 = MONITOR.to_arc();
    /// assert_eq!(monitor2.local_checks(), 2); // Same instance as monitor
    /// assert_eq!(monitor2.global_failures(), 1);
    ///
    /// // Clone the Arc for efficiency when passing around
    /// let monitor_clone = Arc::clone(&monitor);
    /// monitor_clone.check_service(true);
    /// assert_eq!(monitor.local_checks(), 3);
    /// assert_eq!(monitor.global_failures(), 1);
    ///
    /// // You can send the Arc to other threads (since T: Send + Sync)
    /// let monitor_for_thread = Arc::clone(&monitor);
    /// thread::spawn(move || {
    ///     // This Arc still refers to the original thread's instance
    ///     monitor_for_thread.check_service(false);
    /// }).join().unwrap();
    /// assert_eq!(monitor.local_checks(), 4); // Local checks on main thread: 4
    /// assert_eq!(monitor.global_failures(), 2); // Global failures from both threads: 2
    ///
    /// // But each thread gets its own instance when accessing through the static
    /// thread::spawn(|| {
    ///     let thread_monitor = MONITOR.to_arc();
    ///     assert_eq!(thread_monitor.local_checks(), 0); // Fresh local state
    ///     assert_eq!(thread_monitor.global_failures(), 2); // But sees shared global state
    ///
    ///     thread_monitor.check_service(false);
    ///     assert_eq!(thread_monitor.local_checks(), 1); // Local: 1
    ///     assert_eq!(thread_monitor.global_failures(), 3); // Global: 3
    /// }).join().unwrap();
    ///
    /// // Back on main thread: local state unchanged, global state updated
    /// assert_eq!(monitor.local_checks(), 4); // Still 4 locally
    /// assert_eq!(monitor.global_failures(), 3); // But sees update from other thread
    /// ```
    ///
    /// # Performance
    ///
    /// This function merely clones an `Arc`, which is relatively fast but still more work than
    /// doing nothing. The most efficient way to access the current thread's linked instance is
    /// to reuse the `Arc<T>` returned from this method.
    ///
    /// If you are not in a situation where you can reuse the `Arc<T>` and a shared reference is
    /// satisfactory, prefer calling [`.with()`][Self::with] instead, which does not create an
    /// `Arc` and thereby saves a few nanoseconds.
    #[must_use]
    #[inline]
    pub fn to_arc(&self) -> Arc<T> {
        // Clone the `Arc` out of the thread-local storage. This bumps the
        // reference count; it does NOT clone the `T` inside, so the returned
        // handle refers to this thread's one shared instance.
        (self.get_storage)().with(Arc::clone)
    }
}
230
/// Declares that all static variables within the macro body
/// contain thread-local [linked objects][crate].
///
/// A single instance from the same family is maintained per-thread, represented
/// as an `Arc<T>`. This implies `T: Send + Sync`.
///
/// Call [`.with()`][2] to execute a closure with a shared reference to the current thread's
/// instance of the linked object. This is the most efficient way to use the static variable,
/// although the closure style can sometimes be cumbersome.
///
/// Call [`.to_arc()`][1] on the static variable to obtain a thread-specific linked instance
/// of the object, wrapped in an `Arc`. This does not limit you to a closure. Every [`.to_arc()`][1]
/// call returns an `Arc` for the same instance per thread (though you may clone the `T` inside to
/// create additional instances not governed by the mechanics of this macro).
///
/// # Accessing linked instances
///
/// If you are making multiple calls from the same thread, prefer calling [`.to_arc()`][1] and
/// reusing the returned `Arc<T>` for optimal performance.
///
/// If you are not making multiple calls, you may yield optimal performance by calling
/// [`.with()`][2] to execute a closure with a shared reference to the current thread's
/// linked instance.
///
/// # Example
///
/// ```
/// # #[linked::object]
/// # struct TokenCache { }
/// # impl TokenCache { fn with_capacity(capacity: usize) -> Self { linked::new!(Self { } ) } fn get_token(&self) -> usize { 42 } }
/// linked::thread_local_arc!(static TOKEN_CACHE: TokenCache = TokenCache::with_capacity(1000));
///
/// fn do_something() {
///     // `.with()` is the most efficient way to access the instance of the current thread.
///     let token = TOKEN_CACHE.with(|cache| cache.get_token());
/// }
/// ```
///
/// # Dynamic family relationships
///
/// If you need fully `Arc`-style dynamic storage (i.e. not a single static variable) then consider
/// either passing instances of [`InstancePerThreadSync<T>`][6] between threads or using
/// [`Family`][3] to manually control instance creation.
///
/// # Cross-thread usage
///
/// While you can pass the `Arc` returned by [`.to_arc()`][1] between threads, the object within
/// will typically (depending on implementation choices) remain aligned to the original thread it
/// was created on, which may lead to suboptimal performance if you try to use it on a different
/// thread. For optimal multithreaded behavior, call `.with()` or `.to_arc()` on the thread the
/// instance will be used on.
///
/// [1]: StaticInstancePerThreadSync::to_arc
/// [2]: StaticInstancePerThreadSync::with
/// [3]: crate::Family
/// [6]: crate::InstancePerThreadSync
#[macro_export]
macro_rules! thread_local_arc {
    // Base case: no declarations remaining, emit nothing.
    () => {};

    // Recursive case: peel off the first `static NAME: T = expr;` declaration,
    // process it via the single-declaration arm below, then recurse on the rest.
    ($(#[$attr:meta])* $vis:vis static $NAME:ident: $t:ty = $e:expr; $($rest:tt)*) => (
        $crate::thread_local_arc!($(#[$attr])* $vis static $NAME: $t = $e);
        $crate::thread_local_arc!($($rest)*);
    );

    // Single declaration: the actual implementation.
    ($(#[$attr:meta])* $vis:vis static $NAME:ident: $t:ty = $e:expr) => {
        $crate::__private::paste! {
            // Hidden process-wide initializer from which each thread's linked
            // instance of the family is derived.
            $crate::instances!(#[doc(hidden)] static [< $NAME _INITIALIZER >]: $t = $e;);

            // Hidden per-thread storage: one `Arc<T>` per thread, created lazily
            // on the thread's first access through the static variable.
            ::std::thread_local!(#[doc(hidden)] static [< $NAME _ARC >]: ::std::sync::Arc<$t> = ::std::sync::Arc::new([< $NAME _INITIALIZER >].get()));

            // The visible static: a `const` wrapper whose fn pointer resolves the
            // hidden thread-local storage on demand.
            $(#[$attr])* $vis const $NAME: $crate::StaticInstancePerThreadSync<$t> =
                $crate::StaticInstancePerThreadSync::new(move || &[< $NAME _ARC >]);
        }
    };
}
308
#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests {
    use std::sync::atomic::{self, AtomicUsize};
    use std::thread;

    // Minimal linked object: a per-instance counter plus no shared state,
    // so per-thread isolation is directly observable via the counter.
    #[linked::object]
    struct TokenCache {
        local_value: AtomicUsize,
    }

    impl TokenCache {
        // Creates a new family whose instances all start at `initial`.
        fn new(initial: usize) -> Self {
            linked::new!(Self {
                local_value: AtomicUsize::new(initial)
            })
        }

        // Reads the counter of this particular instance.
        fn value(&self) -> usize {
            self.local_value.load(atomic::Ordering::Relaxed)
        }

        // Bumps the counter of this particular instance by one.
        fn increment(&self) {
            self.local_value.fetch_add(1, atomic::Ordering::Relaxed);
        }
    }

    #[test]
    fn smoke_test() {
        linked::thread_local_arc! {
            static BLUE_TOKEN_CACHE: TokenCache = TokenCache::new(1000);
            static YELLOW_TOKEN_CACHE: TokenCache = TokenCache::new(2000);
        }

        // Initial values are observable both via `.to_arc()` and via `.with()`.
        assert_eq!(BLUE_TOKEN_CACHE.to_arc().value(), 1000);
        assert_eq!(YELLOW_TOKEN_CACHE.to_arc().value(), 2000);

        BLUE_TOKEN_CACHE.with(|cache| assert_eq!(cache.value(), 1000));
        YELLOW_TOKEN_CACHE.with(|cache| assert_eq!(cache.value(), 2000));

        // Every access on this thread resolves to the same instance, so an
        // increment through one `.to_arc()` handle is seen by later accesses.
        BLUE_TOKEN_CACHE.to_arc().increment();
        YELLOW_TOKEN_CACHE.to_arc().increment();

        assert_eq!(BLUE_TOKEN_CACHE.to_arc().value(), 1001);
        assert_eq!(YELLOW_TOKEN_CACHE.to_arc().value(), 2001);

        // A different thread resolves the statics to its own fresh instances,
        // starting from the initial values; its increments stay on that thread.
        let worker = thread::spawn(move || {
            assert_eq!(BLUE_TOKEN_CACHE.to_arc().value(), 1000);
            assert_eq!(YELLOW_TOKEN_CACHE.to_arc().value(), 2000);

            BLUE_TOKEN_CACHE.to_arc().increment();
            YELLOW_TOKEN_CACHE.to_arc().increment();

            assert_eq!(BLUE_TOKEN_CACHE.to_arc().value(), 1001);
            assert_eq!(YELLOW_TOKEN_CACHE.to_arc().value(), 2001);
        });
        worker.join().unwrap();

        // The other thread's increments did not affect this thread's instances.
        assert_eq!(BLUE_TOKEN_CACHE.to_arc().value(), 1001);
        assert_eq!(YELLOW_TOKEN_CACHE.to_arc().value(), 2001);

        // An `Arc` obtained here keeps pointing at THIS thread's instance even
        // after being moved to another thread.
        let blue_handle = BLUE_TOKEN_CACHE.to_arc();
        let yellow_handle = YELLOW_TOKEN_CACHE.to_arc();

        let mover = thread::spawn(move || {
            assert_eq!(blue_handle.value(), 1001);
            assert_eq!(yellow_handle.value(), 2001);

            blue_handle.increment();
            yellow_handle.increment();

            assert_eq!(blue_handle.value(), 1002);
            assert_eq!(yellow_handle.value(), 2002);
        });
        mover.join().unwrap();
    }
}
393}