//! Silicon-level entropy sources: DRAM row-buffer, cache-contention,
//! page-fault, and speculative-execution timing.

use rand::Rng;
6
7use crate::source::{EntropySource, SourceCategory, SourceInfo};
8
9use super::helpers::{extract_timing_entropy, mach_time};
10
/// Entropy source that times pairs of random reads over a large buffer to
/// capture DRAM row-buffer hit/miss timing jitter.
pub struct DRAMRowBufferSource;

/// Static metadata returned by `DRAMRowBufferSource::info()`.
static DRAM_ROW_BUFFER_INFO: SourceInfo = SourceInfo {
    name: "dram_row_buffer",
    description: "DRAM row buffer hit/miss timing from random memory accesses",
    physics: "Measures DRAM row buffer hit/miss timing by accessing different memory rows. \
              DRAM is organized into rows of capacitor cells. Accessing an open row (hit) \
              is fast; accessing a different row requires precharge + activate (miss), \
              which is slower. The exact timing depends on: physical address mapping, \
              row buffer state from ALL system activity, memory controller scheduling, \
              and DRAM refresh interference.",
    category: SourceCategory::Silicon,
    // No special requirements: plain memory reads work on every target.
    platform_requirements: &[],
    // Rough bits/sec figure — presumably empirical; TODO confirm methodology.
    entropy_rate_estimate: 3000.0,
    composite: false,
};
36
37impl EntropySource for DRAMRowBufferSource {
38 fn info(&self) -> &SourceInfo {
39 &DRAM_ROW_BUFFER_INFO
40 }
41
42 fn is_available(&self) -> bool {
43 true
44 }
45
46 fn collect(&self, n_samples: usize) -> Vec<u8> {
47 const BUF_SIZE: usize = 32 * 1024 * 1024; let num_accesses = n_samples * 4 + 64;
52
53 let mut buffer: Vec<u8> = vec![0u8; BUF_SIZE];
55 for i in (0..BUF_SIZE).step_by(4096) {
56 buffer[i] = i as u8;
57 }
58
59 let mut rng = rand::rng();
60 let mut timings = Vec::with_capacity(num_accesses);
61
62 for _ in 0..num_accesses {
63 let idx1 = rng.random_range(0..BUF_SIZE);
66 let idx2 = rng.random_range(0..BUF_SIZE);
67
68 let t0 = mach_time();
69 let _v1 = unsafe { std::ptr::read_volatile(&buffer[idx1]) };
72 let _v2 = unsafe { std::ptr::read_volatile(&buffer[idx2]) };
73 let t1 = mach_time();
74
75 timings.push(t1.wrapping_sub(t0));
76 }
77
78 std::hint::black_box(&buffer);
80
81 extract_timing_entropy(&timings, n_samples)
82 }
83}
84
/// Entropy source that alternates sequential, random, and strided access
/// patterns and times each round to capture cache hit/miss variation.
pub struct CacheContentionSource;

/// Static metadata returned by `CacheContentionSource::info()`.
static CACHE_CONTENTION_INFO: SourceInfo = SourceInfo {
    name: "cache_contention",
    description: "L1/L2 cache contention timing from alternating access patterns",
    physics: "Measures L1/L2 cache miss patterns by alternating access patterns. Cache \
              timing depends on what every other process and hardware unit is doing \
              \u{2014} the cache is a shared resource whose state is fundamentally \
              unpredictable. A cache miss requires main memory access (100+ ns vs \
              1 ns for L1 hit).",
    category: SourceCategory::Silicon,
    // No special requirements: ordinary loads work on every target.
    platform_requirements: &[],
    // Rough bits/sec figure — presumably empirical; TODO confirm methodology.
    entropy_rate_estimate: 2500.0,
    composite: false,
};
109
110impl EntropySource for CacheContentionSource {
111 fn info(&self) -> &SourceInfo {
112 &CACHE_CONTENTION_INFO
113 }
114
115 fn is_available(&self) -> bool {
116 true
117 }
118
119 fn collect(&self, n_samples: usize) -> Vec<u8> {
120 const BUF_SIZE: usize = 8 * 1024 * 1024; let mut buffer: Vec<u8> = vec![0u8; BUF_SIZE];
123 for i in (0..BUF_SIZE).step_by(4096) {
125 buffer[i] = i as u8;
126 }
127
128 let num_rounds = n_samples * 4 + 64;
130 let mut rng = rand::rng();
131 let mut timings = Vec::with_capacity(num_rounds);
132
133 for round in 0..num_rounds {
134 let t0 = mach_time();
135
136 match round % 3 {
139 0 => {
140 let start = rng.random_range(0..BUF_SIZE.saturating_sub(512));
142 let mut sink: u8 = 0;
143 for offset in 0..512 {
144 sink ^= unsafe { std::ptr::read_volatile(&buffer[start + offset]) };
146 }
147 std::hint::black_box(sink);
148 }
149 1 => {
150 let mut sink: u8 = 0;
152 for _ in 0..512 {
153 let idx = rng.random_range(0..BUF_SIZE);
154 sink ^= unsafe { std::ptr::read_volatile(&buffer[idx]) };
156 }
157 std::hint::black_box(sink);
158 }
159 _ => {
160 let start = rng.random_range(0..BUF_SIZE.saturating_sub(512 * 64));
162 let mut sink: u8 = 0;
163 for i in 0..512 {
164 sink ^= unsafe { std::ptr::read_volatile(&buffer[start + i * 64]) };
166 }
167 std::hint::black_box(sink);
168 }
169 }
170
171 let t1 = mach_time();
172 timings.push(t1.wrapping_sub(t0));
173 }
174
175 std::hint::black_box(&buffer);
176
177 extract_timing_entropy(&timings, n_samples)
178 }
179}
180
/// Entropy source that maps anonymous memory and times the first touch of
/// each page (a minor page fault), then unmaps and repeats.
pub struct PageFaultTimingSource;

/// Static metadata returned by `PageFaultTimingSource::info()`.
static PAGE_FAULT_TIMING_INFO: SourceInfo = SourceInfo {
    name: "page_fault_timing",
    description: "Minor page fault timing via mmap/munmap cycles",
    physics: "Triggers and times minor page faults via mmap/munmap. Page fault resolution \
              requires: TLB lookup, hardware page table walk (up to 4 levels on ARM64), \
              physical page allocation from the kernel free list, and zero-fill for \
              security. The timing depends on physical memory fragmentation.",
    category: SourceCategory::Silicon,
    // Requires only POSIX mmap/munmap, available on all supported targets.
    platform_requirements: &[],
    // Rough bits/sec figure — presumably empirical; TODO confirm methodology.
    entropy_rate_estimate: 1500.0,
    composite: false,
};
203
204impl EntropySource for PageFaultTimingSource {
205 fn info(&self) -> &SourceInfo {
206 &PAGE_FAULT_TIMING_INFO
207 }
208
209 fn is_available(&self) -> bool {
210 true
211 }
212
213 fn collect(&self, n_samples: usize) -> Vec<u8> {
214 let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
216 let num_pages: usize = 8;
217 let map_size = page_size * num_pages;
218
219 let num_cycles = (n_samples * 4 / num_pages) + 4;
221
222 let mut timings = Vec::with_capacity(num_cycles * num_pages);
223
224 for _ in 0..num_cycles {
225 let addr = unsafe {
228 libc::mmap(
229 std::ptr::null_mut(),
230 map_size,
231 libc::PROT_READ | libc::PROT_WRITE,
232 libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
233 -1,
234 0,
235 )
236 };
237
238 if addr == libc::MAP_FAILED {
239 continue;
240 }
241
242 for p in 0..num_pages {
245 let page_ptr = unsafe { (addr as *mut u8).add(p * page_size) };
248
249 let t0 = mach_time();
250 unsafe {
253 std::ptr::write_volatile(page_ptr, 0xAA);
254 let _v = std::ptr::read_volatile(page_ptr);
255 }
256 let t1 = mach_time();
257
258 timings.push(t1.wrapping_sub(t0));
259 }
260
261 unsafe {
263 libc::munmap(addr, map_size);
264 }
265 }
266
267 extract_timing_entropy(&timings, n_samples)
268 }
269}
270
/// Entropy source that times batches of data-dependent branches, capturing
/// branch-predictor state as timing variation.
pub struct SpeculativeExecutionSource;

/// Static metadata returned by `SpeculativeExecutionSource::info()`.
static SPECULATIVE_EXECUTION_INFO: SourceInfo = SourceInfo {
    name: "speculative_execution",
    description: "Branch predictor state timing via data-dependent branches",
    physics: "Measures timing variations from the CPU's speculative execution engine. \
              The branch predictor maintains per-address history that depends on ALL \
              previously executed code. Mispredictions cause pipeline flushes (~15 cycle \
              penalty on M4). By running data-dependent branches and measuring timing, \
              we capture the predictor's internal state.",
    category: SourceCategory::Silicon,
    // Pure ALU + branch work; no platform requirements.
    platform_requirements: &[],
    // Rough bits/sec figure — presumably empirical; TODO confirm methodology.
    entropy_rate_estimate: 2000.0,
    composite: false,
};
296
297impl EntropySource for SpeculativeExecutionSource {
298 fn info(&self) -> &SourceInfo {
299 &SPECULATIVE_EXECUTION_INFO
300 }
301
302 fn is_available(&self) -> bool {
303 true
304 }
305
306 fn collect(&self, n_samples: usize) -> Vec<u8> {
307 let num_batches = n_samples * 4 + 64;
309 let mut timings = Vec::with_capacity(num_batches);
310
311 let mut lcg_state: u64 = mach_time() ^ 0xDEAD_BEEF_CAFE_BABE;
314
315 for batch_idx in 0..num_batches {
316 let batch_size = 10 + (batch_idx % 31);
319
320 let t0 = mach_time();
321
322 let mut accumulator: u64 = 0;
325 for _ in 0..batch_size {
326 lcg_state = lcg_state
328 .wrapping_mul(6364136223846793005)
329 .wrapping_add(1442695040888963407);
330
331 if lcg_state & 0x8000_0000 != 0 {
333 accumulator = accumulator.wrapping_add(lcg_state);
334 } else {
335 accumulator = accumulator.wrapping_mul(lcg_state | 1);
336 }
337
338 if (lcg_state >> 16) & 0xFF > 128 {
340 accumulator ^= lcg_state.rotate_left(7);
341 } else {
342 accumulator ^= lcg_state.rotate_right(11);
343 }
344
345 if (lcg_state >> 32) & 0x1 != 0 {
347 accumulator = accumulator.wrapping_add(batch_idx as u64);
348 }
349 }
350
351 std::hint::black_box(accumulator);
353
354 let t1 = mach_time();
355 timings.push(t1.wrapping_sub(t0));
356 }
357
358 extract_timing_entropy(&timings, n_samples)
359 }
360}
361
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared checks for a timing source: reports available, yields between
    /// 1 and `n` bytes, and (optionally) the bytes are not all identical.
    fn exercise<S: EntropySource>(src: S, n: usize, require_variation: bool) {
        assert!(src.is_available());
        let data = src.collect(n);
        assert!(!data.is_empty());
        assert!(data.len() <= n);
        if require_variation && data.len() > 1 {
            let first = data[0];
            assert!(data.iter().any(|&b| b != first), "all bytes were identical");
        }
    }

    // The collection tests are #[ignore]d: timing-based sampling is slow and
    // environment-sensitive, so they run only via `cargo test -- --ignored`.

    #[test]
    #[ignore]
    fn dram_row_buffer_collects_bytes() {
        exercise(DRAMRowBufferSource, 128, true);
    }

    #[test]
    #[ignore]
    fn cache_contention_collects_bytes() {
        exercise(CacheContentionSource, 128, true);
    }

    #[test]
    #[ignore]
    fn page_fault_timing_collects_bytes() {
        // Note: no variation requirement here, matching the weaker contract.
        exercise(PageFaultTimingSource, 64, false);
    }

    #[test]
    #[ignore]
    fn speculative_execution_collects_bytes() {
        exercise(SpeculativeExecutionSource, 128, true);
    }

    #[test]
    fn source_info_categories() {
        for info in [
            DRAMRowBufferSource.info(),
            CacheContentionSource.info(),
            PageFaultTimingSource.info(),
            SpeculativeExecutionSource.info(),
        ] {
            assert_eq!(info.category, SourceCategory::Silicon);
        }
    }

    #[test]
    fn source_info_names() {
        assert_eq!(DRAMRowBufferSource.name(), "dram_row_buffer");
        assert_eq!(CacheContentionSource.name(), "cache_contention");
        assert_eq!(PageFaultTimingSource.name(), "page_fault_timing");
        assert_eq!(SpeculativeExecutionSource.name(), "speculative_execution");
    }
}
447}