//! This module exists because `lazy_static` causes TSAN to
//! be very unhappy. We rely heavily on TSAN for finding
//! races, so we don't use `lazy_static`.
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering::SeqCst};
/// A lazily initialized value.
pub struct Lazy<T, F> {
    /// Pointer to the boxed value; null until initialization completes.
    value: AtomicPtr<T>,
    /// Spinlock flag guarding initialization: `true` while a thread is initializing.
    init_mu: AtomicBool,
    /// The initialization function, run at most once.
    init: F,
}
impl<T, F> Lazy<T, F> {
    /// Create a new `Lazy`.
    pub const fn new(init: F) -> Self
    where
        F: Sized,
    {
        Self {
            value: AtomicPtr::new(std::ptr::null_mut()),
            init_mu: AtomicBool::new(false),
            init,
        }
    }
}
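// Construction sketch (illustrative, not part of the original module):
// because `new` is a `const fn`, a `Lazy` can also live in a `static`
// when the init function is a plain `fn` pointer, e.g.
//
//     fn default_page_size() -> usize { 4096 }
//     static PAGE_SIZE: Lazy<usize, fn() -> usize> = Lazy::new(default_page_size);
//
// `default_page_size` and `PAGE_SIZE` are made-up names for illustration.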
impl<T, F> Drop for Lazy<T, F> {
    fn drop(&mut self) {
        let value_ptr = self.value.load(SeqCst);
        // Only free the boxed value if initialization actually ran.
        if !value_ptr.is_null() {
            #[allow(unsafe_code)]
            unsafe {
                drop(Box::from_raw(value_ptr))
            }
        }
    }
}
impl<T, F> std::ops::Deref for Lazy<T, F>
where
    F: Fn() -> T,
{
    type Target = T;

    fn deref(&self) -> &T {
        // Fast path: if the value has already been initialized,
        // return a reference to it without touching the lock.
        {
            let value_ptr = self.value.load(SeqCst);
            if !value_ptr.is_null() {
                #[allow(unsafe_code)]
                unsafe {
                    return &*value_ptr;
                }
            }
        }

        // compare_and_swap returns the previous value. While another
        // thread holds the lock it stays `true`, so we spin until we
        // observe `false`, at which point we have acquired the lock.
        // (In newer Rust, `compare_exchange_weak(false, true, SeqCst, SeqCst).is_err()`
        // is the non-deprecated spelling of this loop condition.)
        while self.init_mu.compare_and_swap(false, true, SeqCst) {}

        {
            let value_ptr = self.value.load(SeqCst);
            // We need to check this again because another thread may
            // have completed the initialization while we were spinning.
            if !value_ptr.is_null() {
                let unlock = self.init_mu.swap(false, SeqCst);
                assert!(unlock);
                #[allow(unsafe_code)]
                unsafe {
                    return &*value_ptr;
                }
            }
        }

        // We hold the lock and the value is still uninitialized:
        // run the init function, publish the pointer, then unlock.
        {
            let value = (self.init)();
            let value_ptr = Box::into_raw(Box::new(value));
            self.value.store(value_ptr, SeqCst);
            let unlock = self.init_mu.swap(false, SeqCst);
            assert!(unlock);
            #[allow(unsafe_code)]
            unsafe {
                &*value_ptr
            }
        }
    }
}
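
// Illustrative tests (a sketch, not part of the original module): they
// exercise single-threaded laziness only; the concurrent paths are what
// TSAN is expected to cover. Names below are made up for the example.
#[cfg(test)]
mod tests {
    use super::Lazy;
    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

    #[test]
    fn initializes_once_and_derefs() {
        static CALLS: AtomicUsize = AtomicUsize::new(0);

        let lazy = Lazy::new(|| {
            CALLS.fetch_add(1, SeqCst);
            vec![1, 2, 3]
        });

        // The init closure has not run yet.
        assert_eq!(CALLS.load(SeqCst), 0);

        // First deref runs the init closure and stores the boxed result.
        assert_eq!(lazy.len(), 3);

        // Subsequent derefs reuse the already initialized value.
        assert_eq!(*lazy, vec![1, 2, 3]);
        assert_eq!(CALLS.load(SeqCst), 1);

        // Dropping `lazy` here exercises the `Drop` impl, freeing the box.
    }
}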