// openentropy_core/sources/novel.rs

use std::process::Command;
use std::ptr;
use std::sync::mpsc;
use std::thread;
use std::time::{Duration, Instant};

use crate::source::{EntropySource, Platform, SourceCategory, SourceInfo};

use super::helpers::extract_timing_entropy;
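
// `extract_timing_entropy` lives in helpers.rs; from the way it is used here and from
// the assertions in the tests below, it is assumed to whiten raw nanosecond timings
// down to at most `n_samples` output bytes. Each source therefore oversamples,
// collecting `n_samples * 10 + 64` raw readings to give the extractor headroom.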

static DISPATCH_QUEUE_INFO: SourceInfo = SourceInfo {
    name: "dispatch_queue",
    description: "Thread scheduling latency jitter from concurrent dispatch queue operations",
    physics: "Dispatches work to worker threads over channels and measures scheduling \
              latency (a portable analogue of submitting blocks to GCD dispatch queues). \
              macOS dynamically migrates work between P-cores (performance) and E-cores \
              (efficiency) based on thermal state and load. The migration decisions, \
              queue priority inversions, and QoS tier scheduling create non-deterministic \
              dispatch timing.",
    category: SourceCategory::Scheduling,
    platform: Platform::Any,
    requirements: &[],
    entropy_rate_estimate: 1500.0,
    composite: false,
};

pub struct DispatchQueueSource;

impl EntropySource for DispatchQueueSource {
    fn info(&self) -> &SourceInfo {
        &DISPATCH_QUEUE_INFO
    }

    fn is_available(&self) -> bool {
        true
    }

    fn collect(&self, n_samples: usize) -> Vec<u8> {
        let raw_count = n_samples * 10 + 64;
        let mut timings: Vec<u64> = Vec::with_capacity(raw_count);

        // One channel per worker; each worker reports how long the message spent
        // in flight (send -> wakeup -> recv), i.e. the scheduling latency.
        let num_workers = 4;
        let mut senders: Vec<mpsc::Sender<Instant>> = Vec::with_capacity(num_workers);
        let (result_tx, result_rx) = mpsc::channel::<u64>();

        for _ in 0..num_workers {
            let (tx, rx) = mpsc::channel::<Instant>();
            let rtx = result_tx.clone();
            senders.push(tx);

            thread::spawn(move || {
                while let Ok(sent_at) = rx.recv() {
                    let latency_ns = sent_at.elapsed().as_nanos() as u64;
                    if rtx.send(latency_ns).is_err() {
                        break;
                    }
                }
            });
        }

        // Round-robin work across the workers and record each round-trip latency.
        for i in 0..raw_count {
            let worker_idx = i % num_workers;
            let sent_at = Instant::now();
            if senders[worker_idx].send(sent_at).is_err() {
                break;
            }
            match result_rx.recv() {
                Ok(latency_ns) => timings.push(latency_ns),
                Err(_) => break,
            }
        }

        // Dropping the senders closes the channels so the worker threads exit.
        drop(senders);

        extract_timing_entropy(&timings, n_samples)
    }
}

const PAGE_SIZE: usize = 4096;
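// Note: this is the mapping length handed to mmap/munmap, not necessarily the
// hardware page size (Apple Silicon uses 16 KiB pages). The kernel rounds the
// range to whole pages, so each iteration still pays for at least one real
// map/unmap cycle.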

static VM_PAGE_TIMING_INFO: SourceInfo = SourceInfo {
    name: "vm_page_timing",
    description: "Mach VM page fault timing jitter from mmap/munmap cycles",
    physics: "Times Mach VM operations (mmap/munmap cycles). Each operation requires: \
              VM map entry allocation, page table updates, TLB shootdown across cores \
              (IPI interrupt), and physical page management. The timing depends on: \
              VM map fragmentation, physical memory pressure, and cross-core \
              synchronization latency.",
    category: SourceCategory::Timing,
    platform: Platform::Any,
    requirements: &[],
    entropy_rate_estimate: 1300.0,
    composite: false,
};

pub struct VMPageTimingSource;

impl EntropySource for VMPageTimingSource {
    fn info(&self) -> &SourceInfo {
        &VM_PAGE_TIMING_INFO
    }

    fn is_available(&self) -> bool {
        cfg!(unix)
    }

    fn collect(&self, n_samples: usize) -> Vec<u8> {
        let raw_count = n_samples * 10 + 64;
        let mut timings: Vec<u64> = Vec::with_capacity(raw_count);

        for _ in 0..raw_count {
            let t0 = Instant::now();

            let addr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    PAGE_SIZE,
                    libc::PROT_READ | libc::PROT_WRITE,
                    libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                    -1,
                    0,
                )
            };

            if addr == libc::MAP_FAILED {
                continue;
            }

            // Touch both ends of the mapping so the page is actually faulted in;
            // volatile accesses keep the compiler from optimising the writes away.
            unsafe {
                ptr::write_volatile(addr as *mut u8, 0xBE);
                ptr::write_volatile((addr as *mut u8).add(PAGE_SIZE - 1), 0xEF);

                let _v = ptr::read_volatile(addr as *const u8);
            }

            unsafe {
                libc::munmap(addr, PAGE_SIZE);
            }

            let elapsed_ns = t0.elapsed().as_nanos() as u64;
            timings.push(elapsed_ns);
        }

        extract_timing_entropy(&timings, n_samples)
    }
}

const SPOTLIGHT_FILES: &[&str] = &[
    "/usr/bin/true",
    "/usr/bin/false",
    "/usr/bin/env",
    "/usr/bin/which",
];

const MDLS_PATH: &str = "/usr/bin/mdls";

const MDLS_TIMEOUT: Duration = Duration::from_secs(2);
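// std::process::Child has no built-in timed wait, so collect() below enforces this
// timeout by polling try_wait() in a short sleep loop and killing the child once
// the deadline passes.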

static SPOTLIGHT_TIMING_INFO: SourceInfo = SourceInfo {
    name: "spotlight_timing",
    description: "Spotlight metadata index query timing jitter via mdls",
    physics: "Queries Spotlight\u{2019}s metadata index (mdls) and measures response time. \
              The index is a complex B-tree/inverted index structure. Query timing depends \
              on: index size, disk cache residency, concurrent indexing activity, and \
              filesystem metadata state. When Spotlight is actively indexing new files, \
              query latency becomes highly variable.",
    category: SourceCategory::Signal,
    platform: Platform::MacOS,
    requirements: &[],
    entropy_rate_estimate: 800.0,
    composite: false,
};

pub struct SpotlightTimingSource;

impl EntropySource for SpotlightTimingSource {
    fn info(&self) -> &SourceInfo {
        &SPOTLIGHT_TIMING_INFO
    }

    fn is_available(&self) -> bool {
        std::path::Path::new(MDLS_PATH).exists()
    }

    fn collect(&self, n_samples: usize) -> Vec<u8> {
        // Spawning mdls is expensive, so cap the number of raw measurements.
        let raw_count = (n_samples * 10 + 64).min(200);
        let mut timings: Vec<u64> = Vec::with_capacity(raw_count);
        let file_count = SPOTLIGHT_FILES.len();

        for i in 0..raw_count {
            let file = SPOTLIGHT_FILES[i % file_count];

            let t0 = Instant::now();

            let child = Command::new(MDLS_PATH)
                .args(["-name", "kMDItemFSName", file])
                .stdout(std::process::Stdio::null())
                .stderr(std::process::Stdio::null())
                .spawn();

            if let Ok(mut child) = child {
                let deadline = Instant::now() + MDLS_TIMEOUT;
                loop {
                    match child.try_wait() {
                        Ok(Some(_)) => break,
                        Ok(None) => {
                            if Instant::now() >= deadline {
                                let _ = child.kill();
                                let _ = child.wait();
                                break;
                            }
                            std::thread::sleep(Duration::from_millis(10));
                        }
                        Err(_) => break,
                    }
                }
            }

            let elapsed_ns = t0.elapsed().as_nanos() as u64;
            timings.push(elapsed_ns);
        }

        extract_timing_entropy(&timings, n_samples)
    }
}

#[cfg(test)]
mod tests {
    use super::super::helpers::extract_lsbs_u64;
    use super::*;

    #[test]
    fn dispatch_queue_info() {
        let src = DispatchQueueSource;
        assert_eq!(src.name(), "dispatch_queue");
        assert_eq!(src.info().category, SourceCategory::Scheduling);
        assert!((src.info().entropy_rate_estimate - 1500.0).abs() < f64::EPSILON);
    }

    #[test]
    #[ignore]
    fn dispatch_queue_collects_bytes() {
        let src = DispatchQueueSource;
        assert!(src.is_available());
        let data = src.collect(64);
        assert!(!data.is_empty());
        assert!(data.len() <= 64);
    }

    #[test]
    fn vm_page_timing_info() {
        let src = VMPageTimingSource;
        assert_eq!(src.name(), "vm_page_timing");
        assert_eq!(src.info().category, SourceCategory::Timing);
        assert!((src.info().entropy_rate_estimate - 1300.0).abs() < f64::EPSILON);
    }

    #[test]
    #[cfg(unix)]
    #[ignore]
    fn vm_page_timing_collects_bytes() {
        let src = VMPageTimingSource;
        assert!(src.is_available());
        let data = src.collect(64);
        assert!(!data.is_empty());
        assert!(data.len() <= 64);
    }

    #[test]
    fn spotlight_timing_info() {
        let src = SpotlightTimingSource;
        assert_eq!(src.name(), "spotlight_timing");
        assert_eq!(src.info().category, SourceCategory::Signal);
        assert!((src.info().entropy_rate_estimate - 800.0).abs() < f64::EPSILON);
    }

    #[test]
    #[cfg(target_os = "macos")]
    #[ignore]
    fn spotlight_timing_collects_bytes() {
        let src = SpotlightTimingSource;
        if src.is_available() {
            let data = src.collect(32);
            assert!(!data.is_empty());
            assert!(data.len() <= 32);
        }
    }
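
    // Sketch (not part of the original suite): the three sources share the
    // EntropySource trait, so a caller can drive them generically. Marked #[ignore]
    // like the other collection tests because it performs real timing work.
    #[test]
    #[ignore]
    fn novel_sources_share_trait_contract() {
        fn check<S: EntropySource>(src: S, n: usize) {
            if src.is_available() {
                let data = src.collect(n);
                assert!(data.len() <= n);
            }
        }
        check(DispatchQueueSource, 32);
        check(VMPageTimingSource, 32);
        check(SpotlightTimingSource, 32);
    }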

    #[test]
    fn extract_lsbs_packing() {
        // LSBs are packed most-significant bit first: 1,0,1,0,1,0,1,0 -> 0xAA.
        let deltas = vec![1u64, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0];
        let bytes = extract_lsbs_u64(&deltas);
        assert_eq!(bytes.len(), 2);
        assert_eq!(bytes[0], 0xAA);
        assert_eq!(bytes[1], 0xF0);
    }
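
    // Sketch of the delta-then-LSB idea the timing sources lean on: successive
    // timing differences carry the jitter, and extract_lsbs_u64 keeps only their
    // least-significant bits (packed MSB-first, as the test above shows). The full
    // extract_timing_entropy pipeline in helpers.rs may do more than this.
    #[test]
    fn lsb_extraction_from_timing_deltas() {
        let timings: Vec<u64> = vec![100, 103, 105, 110, 112, 119, 121, 130, 131];
        let deltas: Vec<u64> = timings.windows(2).map(|w| w[1] - w[0]).collect();
        // deltas = [3, 2, 5, 2, 7, 2, 9, 1] -> LSBs 1,0,1,0,1,0,1,1 -> 0b1010_1011
        assert_eq!(extract_lsbs_u64(&deltas), vec![0xAB]);
    }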
}