1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
//! This module contains `EpochCounter` and `EpochId` types used for change detection.
use core::{
cell::Cell,
fmt::{self, Debug},
sync::atomic::{AtomicU64, Ordering},
};
/// Monotonically incremented epoch counter.
///
/// It is assumed that the underlying value cannot overflow in any reasonable amount of time.
/// For this purpose only the increment operation is provided and the counter starts at 0.
/// If incremented every nanosecond the counter will overflow in 14'029 years.
/// Vibes tell me no currently written software will run in 14'000 years, let alone 14'029.
pub struct EpochCounter {
    value: AtomicU64,
}

impl Default for EpochCounter {
    #[inline(always)]
    fn default() -> Self {
        // Same as `new`: counter starts at epoch 0.
        EpochCounter::new()
    }
}

impl EpochCounter {
    /// Returns new epoch counter starting at epoch 0.
    pub const fn new() -> Self {
        EpochCounter {
            value: AtomicU64::new(0),
        }
    }

    /// Returns current epoch id.
    #[inline(always)]
    pub fn current(&self) -> EpochId {
        EpochId {
            value: self.value.load(Ordering::Relaxed),
        }
    }

    /// Returns current epoch id.
    /// But faster: exclusive access allows a plain (non-atomic) read via `get_mut`.
    #[inline(always)]
    pub fn current_mut(&mut self) -> EpochId {
        EpochId {
            value: *self.value.get_mut(),
        }
    }

    /// Bumps to the next epoch and returns new epoch id.
    #[inline(always)]
    pub fn next(&self) -> EpochId {
        let old = self.value.fetch_add(1, Ordering::Relaxed);
        // Overflow is considered impossible in practice (see type docs);
        // still check it in debug builds to catch a broken invariant.
        debug_assert!(old < u64::MAX);
        EpochId { value: old + 1 }
    }

    /// Bumps to the next epoch and returns new epoch id.
    /// But faster: exclusive access allows a plain (non-atomic) increment via `get_mut`.
    #[inline(always)]
    pub fn next_mut(&mut self) -> EpochId {
        let value = self.value.get_mut();
        // See `next` for why overflow is only a debug assertion.
        debug_assert!(*value < u64::MAX);
        *value += 1;
        EpochId { value: *value }
    }

    /// Bumps to the next epoch and returns new epoch id if `cond` is true.
    /// Otherwise returns current epoch id.
    #[inline(always)]
    pub fn next_if(&self, cond: bool) -> EpochId {
        if cond {
            self.next()
        } else {
            self.current()
        }
    }
}
/// Epoch identifier.
///
/// An opaque, copyable wrapper around a monotonically increasing `u64`
/// produced by `EpochCounter`. Compared with `before`/`after` and advanced
/// with the `bump*` family of methods.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct EpochId {
    value: u64,
}

impl Default for EpochId {
    fn default() -> Self {
        // The default epoch is the starting epoch (0).
        EpochId::start()
    }
}

impl Debug for EpochId {
    #[inline(always)]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        // Delegate to the inner integer's `Debug` so formatter flags
        // (width, fill, etc.) are honored.
        <u64 as Debug>::fmt(&self.value, f)
    }
}

impl EpochId {
    /// Returns id of starting epoch.
    #[inline(always)]
    pub const fn start() -> Self {
        EpochId { value: 0 }
    }

    /// Returns true if this epoch comes strictly before the `other`.
    #[inline(always)]
    pub const fn before(&self, other: EpochId) -> bool {
        self.value < other.value
    }

    /// Returns true if this epoch comes strictly after the `other`.
    #[inline(always)]
    pub const fn after(&self, other: EpochId) -> bool {
        self.value > other.value
    }

    /// Updates epoch id to the later of this and the `other`.
    #[inline(always)]
    pub fn update(&mut self, other: EpochId) {
        self.value = self.value.max(other.value);
    }

    /// Bumps epoch to specified one.
    /// Assumes this epoch is strictly before epoch `to`.
    #[inline(always)]
    pub fn bump(&mut self, to: EpochId) {
        debug_assert!(
            self.before(to),
            "`EpochId::bump` must be used only for older epochs"
        );
        *self = to;
    }

    /// Bumps epoch to specified one.
    /// Assumes this epoch is before epoch `to` or the same.
    #[inline(always)]
    pub fn bump_again(&mut self, to: EpochId) {
        debug_assert!(
            !self.after(to),
            "`EpochId::bump_again` must be used only for older or same epochs"
        );
        *self = to;
    }

    /// Bumps epoch inside the `Cell` to specified one.
    /// Assumes the contained epoch is before epoch `to` or the same.
    #[inline(always)]
    pub fn bump_cell(cell: &Cell<Self>, to: EpochId) {
        debug_assert!(
            !cell.get().after(to),
            "`EpochId::bump_cell` must be used only for older or same epochs"
        );
        cell.set(to);
    }
}