// openentropy_core/sources/frontier/fsync_journal.rs

1//! Filesystem journal commit timing — full storage stack entropy.
2//!
3//! APFS uses copy-on-write with a journal. Each fsync crosses:
4//!   CPU → filesystem → NVMe controller → NAND flash → back
5//!
6//! Each layer adds independent noise:
7//! - Checksum computation (CPU pipeline state)
8//! - NVMe command queuing and arbitration
9//! - Flash cell program timing (temperature-dependent)
10//! - B-tree update (memory allocation nondeterminism)
11//! - Barrier flush (controller firmware scheduling)
12//!
13//! Different from disk_io because this specifically measures the full
14//! journal commit path, not just raw block reads.
15//!
16
17use std::io::Write;
18
19use crate::source::{EntropySource, Platform, SourceCategory, SourceInfo};
20use crate::sources::helpers::extract_timing_entropy;
21
22static FSYNC_JOURNAL_INFO: SourceInfo = SourceInfo {
23    name: "fsync_journal",
24    description: "APFS journal commit timing from full storage stack traversal",
25    physics: "Creates a file, writes data, and calls fsync to force a full journal commit. \
26              Each commit traverses the entire storage stack: CPU \u{2192} APFS filesystem \
27              (B-tree update, copy-on-write allocation, checksum) \u{2192} NVMe controller \
28              (command queuing, arbitration) \u{2192} NAND flash (cell programming, charge \
29              injection timing, wear-dependent oxide characteristics) \u{2192} barrier flush \
30              (controller firmware scheduling). Every layer contributes independent \
31              timing noise from physically distinct sources.",
32    category: SourceCategory::IO,
33    platform: Platform::Any,
34    requirements: &[],
35    entropy_rate_estimate: 2000.0,
36    composite: false,
37};
38
/// Entropy source from filesystem journal commit timing.
///
/// Stateless unit struct; each [`EntropySource::collect`] call creates
/// temporary files and times the write → flush → fsync path. See
/// `FSYNC_JOURNAL_INFO` for the physics rationale.
pub struct FsyncJournalSource;
41
42impl EntropySource for FsyncJournalSource {
43    fn info(&self) -> &SourceInfo {
44        &FSYNC_JOURNAL_INFO
45    }
46
47    fn is_available(&self) -> bool {
48        true
49    }
50
51    fn collect(&self, n_samples: usize) -> Vec<u8> {
52        let raw_count = n_samples * 4 + 64;
53        let mut timings: Vec<u64> = Vec::with_capacity(raw_count);
54        let write_data = [0xAAu8; 512];
55
56        for i in 0..raw_count {
57            // Create a new temp file each iteration to exercise the full
58            // APFS allocation + B-tree insert + journal commit path.
59            let mut tmpfile = match tempfile::NamedTempFile::new() {
60                Ok(f) => f,
61                Err(_) => continue,
62            };
63
64            // Vary the first bytes to prevent APFS deduplication.
65            let mut buf = write_data;
66            buf[0] = (i & 0xFF) as u8;
67            buf[1] = ((i >> 8) & 0xFF) as u8;
68
69            let t0 = std::time::Instant::now();
70            if tmpfile.write_all(&buf).is_err() {
71                continue;
72            }
73            if tmpfile.flush().is_err() {
74                continue;
75            }
76            // fsync forces the full journal commit.
77            let file = tmpfile.as_file();
78            if file.sync_all().is_err() {
79                continue;
80            }
81            let elapsed = t0.elapsed();
82
83            timings.push(elapsed.as_nanos() as u64);
84            // tmpfile is automatically deleted on drop.
85        }
86
87        extract_timing_entropy(&timings, n_samples)
88    }
89}
90
#[cfg(test)]
mod tests {
    use super::*;

    /// Metadata sanity: name, category, and non-composite flag are fixed.
    #[test]
    fn info() {
        let source = FsyncJournalSource;
        assert_eq!(source.name(), "fsync_journal");
        assert_eq!(source.info().category, SourceCategory::IO);
        assert!(!source.info().composite);
    }

    /// Exercises real disk I/O, so it is ignored in normal test runs.
    #[test]
    #[ignore] // I/O dependent
    fn collects_bytes() {
        let source = FsyncJournalSource;
        assert!(source.is_available());
        let bytes = source.collect(64);
        assert!(!bytes.is_empty());
        assert!(bytes.len() <= 64);
    }
}