openentropy_core/sources/frontier/cas_contention.rs

use crate::source::{EntropySource, Platform, SourceCategory, SourceInfo};
use crate::sources::helpers::{mach_time, xor_fold_u64};

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::thread;

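// Default worker count and shared-target layout: NUM_TARGETS contended slots
// spaced TARGET_SPACING u64s (128 bytes) apart, so no two slots share a cache
// line, matching the description in the `physics` string below.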
const NUM_THREADS: usize = 4;
const NUM_TARGETS: usize = 64;
const TARGET_SPACING: usize = 16;

#[derive(Debug, Clone)]
pub struct CASContentionConfig {
    pub num_threads: usize,
}

impl Default for CASContentionConfig {
    fn default() -> Self {
        Self {
            num_threads: NUM_THREADS,
        }
    }
}

pub struct CASContentionSource {
    config: CASContentionConfig,
}

impl CASContentionSource {
    pub fn new(config: CASContentionConfig) -> Self {
        Self { config }
    }
}

impl Default for CASContentionSource {
    fn default() -> Self {
        Self::new(CASContentionConfig::default())
    }
}

static CAS_CONTENTION_INFO: SourceInfo = SourceInfo {
    name: "cas_contention",
    description: "Multi-thread atomic CAS arbitration contention jitter",
    physics: "Spawns 4 threads performing atomic compare-and-swap operations on \
              shared targets spread across 128-byte-aligned cache lines. The \
              hardware coherence engine (MOESI protocol on Apple Silicon) must \
              arbitrate concurrent exclusive-access requests. This arbitration is \
              physically nondeterministic due to interconnect fabric latency \
              variations, thermal state, and traffic from other cores/devices. \
              XOR-combining timing measurements from all threads amplifies the \
              arbitration entropy.",
    category: SourceCategory::Microarch,
    platform: Platform::Any,
    requirements: &[],
    entropy_rate_estimate: 2000.0,
    composite: false,
};

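// Timing samples produced by a single worker thread.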
struct ThreadResult {
    timings: Vec<u64>,
}

impl EntropySource for CASContentionSource {
    fn info(&self) -> &SourceInfo {
        &CAS_CONTENTION_INFO
    }

    fn is_available(&self) -> bool {
        true
    }

    fn collect(&self, n_samples: usize) -> Vec<u8> {
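        // Oversample: each worker records several times the requested count plus
        // slack so the post-processing below (first differences, pairwise XOR,
        // truncation) still yields n_samples bytes.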
        let samples_per_thread = n_samples * 4 + 64;
        let nthreads = self.config.num_threads;

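        // One flat array of atomics; workers index it in strides of TARGET_SPACING
        // so concurrently contended slots land on distinct cache lines.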
        let total_atomics = NUM_TARGETS * TARGET_SPACING;
        let targets: Arc<Vec<AtomicU64>> =
            Arc::new((0..total_atomics).map(|_| AtomicU64::new(0)).collect());

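        // `go` releases every worker at once; `stop` is checked each iteration so
        // the sampling loop can be cut short.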
        let go = Arc::new(AtomicU64::new(0));
        let stop = Arc::new(AtomicU64::new(0));

        let mut handles = Vec::with_capacity(nthreads);

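        // Each worker seeds a thread-local LCG, spins until released, then hammers
        // randomly chosen targets with CAS while timing every attempt.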
        for thread_id in 0..nthreads {
            let targets = targets.clone();
            let go = go.clone();
            let stop = stop.clone();
            let count = samples_per_thread;

            handles.push(thread::spawn(move || {
                let mut timings = Vec::with_capacity(count);
                let mut lcg: u64 = (mach_time() ^ ((thread_id as u64) << 32)) | 1;

                while go.load(Ordering::Acquire) == 0 {
                    std::hint::spin_loop();
                }

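                // One CAS per iteration against a pseudo-randomly chosen slot; the
                // elapsed mach_time() ticks capture coherence-arbitration jitter.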
                for _ in 0..count {
                    if stop.load(Ordering::Relaxed) != 0 {
                        break;
                    }

                    lcg = lcg.wrapping_mul(6364136223846793005).wrapping_add(1);
                    let idx = ((lcg >> 32) as usize % NUM_TARGETS) * TARGET_SPACING;

                    let t0 = mach_time();

                    let expected = targets[idx].load(Ordering::Relaxed);
                    let _ = targets[idx].compare_exchange_weak(
                        expected,
                        expected.wrapping_add(1),
                        Ordering::AcqRel,
                        Ordering::Relaxed,
                    );

                    let t1 = mach_time();
                    timings.push(t1.wrapping_sub(t0));
                }

                ThreadResult { timings }
            }));
        }

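        // Open the starting gate so all workers begin their CAS loops together.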
        go.store(1, Ordering::Release);

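        // Join every worker; a panicked thread contributes an empty timing vector.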
        let results: Vec<ThreadResult> = handles
            .into_iter()
            .map(|h| h.join().unwrap_or(ThreadResult { timings: vec![] }))
            .collect();

        stop.store(1, Ordering::Release);

        let min_len = results.iter().map(|r| r.timings.len()).min().unwrap_or(0);
        if min_len < 4 {
            return Vec::new();
        }

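        // XOR the i-th timing from every thread so each combined sample mixes the
        // arbitration jitter observed by all workers.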
        let mut combined: Vec<u64> = Vec::with_capacity(min_len);
        for i in 0..min_len {
            let mut val = 0u64;
            for result in &results {
                val ^= result.timings[i];
            }
            combined.push(val);
        }

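        // First differences remove the slowly varying baseline latency; XOR-ing
        // adjacent deltas and folding each u64 to a byte whitens the raw output.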
        let deltas: Vec<u64> = combined
            .windows(2)
            .map(|w| w[1].wrapping_sub(w[0]))
            .collect();
        let xored: Vec<u64> = deltas.windows(2).map(|w| w[0] ^ w[1]).collect();
        let mut raw: Vec<u8> = xored.iter().map(|&x| xor_fold_u64(x)).collect();
        raw.truncate(n_samples);
        raw
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn info() {
        let src = CASContentionSource::default();
        assert_eq!(src.info().name, "cas_contention");
        assert!(matches!(src.info().category, SourceCategory::Microarch));
        assert!(!src.info().composite);
    }

    #[test]
    fn custom_config() {
        let config = CASContentionConfig { num_threads: 2 };
        let src = CASContentionSource::new(config);
        assert_eq!(src.config.num_threads, 2);
    }

    #[test]
    #[ignore]
    fn collects_bytes() {
        let src = CASContentionSource::default();
        assert!(src.is_available());
        let data = src.collect(64);
        assert!(!data.is_empty());
        let unique: std::collections::HashSet<u8> = data.iter().copied().collect();
        assert!(unique.len() > 1, "Expected variation in collected bytes");
    }
}
220}