// openentropy_core/sources/scheduling/dispatch_queue_timing.rs

//! GCD dispatch queue timing — entropy from the libdispatch thread pool.
//!
//! Grand Central Dispatch maintains a system-wide worker thread pool shared
//! across all running processes. When we dispatch a block to the low-priority
//! global queue and wait for it to complete, the round-trip latency encodes the
//! instantaneous state of that shared pool: how many threads are busy, what
//! their current work items are, and how the kernel's scheduler has decided
//! to interleave our dispatch with all other queued work.
//!
//! ## Physics
//!
//! Unlike process-local scheduling (which only sees our own threads), the GCD
//! global queues are **process-shared**. Every background task from every
//! running process — Spotlight indexing, Time Machine backups, photo analysis,
//! iCloud sync, app prewarming — competes for the same thread pool slots.
//!
//! The BACKGROUND priority queue shows the highest variance (CV up to 248%)
//! because it receives the lowest-priority work: when the system is under load,
//! background tasks get preempted and queued behind higher-priority work.
//! Our timing measurement captures the full system-wide load distribution at
//! the moment of dispatch.
//!
//! Empirically measured on M4 Mac mini:
//! - `LOW` queue: CV up to 248%, range 131–16,412 ticks
//! - `BACKGROUND` queue: CV ~50%, H≈7.3 bits/low-byte
//! - `HIGH` queue: CV ~41%, H≈7.1 bits/low-byte (less entropy, less jitter)
//!
//! Each measurement completes in ~200–600 ticks (~8–25 µs), making this one
//! of the faster frontier sources.
//!
//! ## Uniqueness
//!
//! This is the first entropy source to exploit GCD's process-shared thread pool.
//! Every other scheduling-based entropy source is per-process. This source
//! aggregates nondeterminism from the entire running system in a single
//! measurement.
use crate::source::{EntropySource, Platform, SourceCategory, SourceInfo};
#[cfg(target_os = "macos")]
use crate::sources::helpers::{extract_timing_entropy, mach_time};

/// Static metadata for this source, returned by [`EntropySource::info`].
/// The `physics` text is user-facing documentation surfaced by the registry.
static DISPATCH_QUEUE_TIMING_INFO: SourceInfo = SourceInfo {
    name: "dispatch_queue_timing",
    description: "GCD libdispatch global queue timing — system-wide thread pool entropy",
    physics: "Dispatches a no-op block to the BACKGROUND and LOW GCD global queues and \
              measures round-trip latency. GCD\u{2019}s global queues are shared across all \
              running processes; our measurement reflects the instantaneous load from \
              every background task on the system (Spotlight, iCloud, photo analysis, \
              app prewarming, etc.). LOW queue CV up to 248%, BACKGROUND H\u{2248}7.3 bits/byte \
              on M4 Mac mini. Unlike thread scheduling sources, this captures \
              system-wide nondeterminism from a single dispatch call.",
    category: SourceCategory::Scheduling,
    platform: Platform::MacOS,
    requirements: &[],
    // Conservative estimate in bits/byte; empirical H is higher (see module docs).
    entropy_rate_estimate: 3.0,
    composite: false,
    // ~8–25 µs per sample, but not in the "fast" tier — TODO confirm tier criteria.
    is_fast: false,
};
59
/// Entropy source harvesting scheduling jitter from GCD's process-shared
/// global queues. Zero-sized: all state lives in the dispatch round-trip.
pub struct DispatchQueueTimingSource;
62
/// libdispatch FFI (macOS only).
///
/// We use `dispatch_async_f` (C function pointer variant of `dispatch_async`)
/// because Rust cannot construct Objective-C blocks directly. The block
/// alternative `dispatch_async` takes a Block_literal — `dispatch_async_f`
/// takes a plain `void (*)(void *)` function pointer and a context pointer,
/// which maps cleanly to Rust.
#[cfg(target_os = "macos")]
mod libdispatch {
    use std::ffi::c_void;

    /// Opaque GCD queue handle (`dispatch_queue_t` in C).
    pub type DispatchQueueT = *mut c_void;
    /// Opaque GCD semaphore handle (`dispatch_semaphore_t` in C).
    pub type DispatchSemaphoreT = *mut c_void;
    /// `DISPATCH_TIME_NOW` — base for computing dispatch timeouts.
    pub const DISPATCH_TIME_NOW: u64 = 0;

    // GCD priority levels (constants from <dispatch/queue.h>).
    pub const _DISPATCH_QUEUE_PRIORITY_HIGH: i64 = 2;
    pub const DISPATCH_QUEUE_PRIORITY_LOW: i64 = -2;
    // DISPATCH_QUEUE_PRIORITY_BACKGROUND is defined as INT16_MIN in the C headers.
    pub const DISPATCH_QUEUE_PRIORITY_BACKGROUND: i64 = i16::MIN as i64;

    /// 100ms timeout in nanoseconds — more than enough for a GCD dispatch
    /// round-trip (typical: 8–25 µs). Prevents indefinite blocking if the
    /// thread pool is saturated.
    pub const SEMAPHORE_TIMEOUT_NS: i64 = 100_000_000;

    // libdispatch lives inside libSystem on macOS, hence linking "System".
    #[link(name = "System", kind = "dylib")]
    unsafe extern "C" {
        pub fn dispatch_get_global_queue(identifier: i64, flags: usize) -> DispatchQueueT;
        pub fn dispatch_semaphore_create(value: isize) -> DispatchSemaphoreT;
        pub fn dispatch_semaphore_signal(dsema: DispatchSemaphoreT) -> isize;
        pub fn dispatch_semaphore_wait(dsema: DispatchSemaphoreT, timeout: u64) -> isize;
        pub fn dispatch_time(when: u64, delta: i64) -> u64;
        pub fn dispatch_async_f(
            queue: DispatchQueueT,
            context: *mut c_void,
            work: unsafe extern "C" fn(*mut c_void),
        );
        pub fn dispatch_release(obj: *mut c_void);
    }

    /// The work function dispatched to the GCD queue.
    ///
    /// `ctx` IS the semaphore handle itself (a `DispatchSemaphoreT` smuggled
    /// through the `void *` context argument of `dispatch_async_f`), not a
    /// pointer to one. The function simply signals it.
    ///
    /// # Safety
    /// `ctx` must be a live `DispatchSemaphoreT` that is not released until
    /// after this signal has run.
    pub unsafe extern "C" fn signal_semaphore(ctx: *mut std::ffi::c_void) {
        let sem = ctx as DispatchSemaphoreT;
        // SAFETY: caller ensures sem stays valid until this signal completes.
        unsafe { dispatch_semaphore_signal(sem) };
    }
}
119
#[cfg(target_os = "macos")]
mod imp {
    use super::libdispatch::*;
    use super::*;

    impl EntropySource for DispatchQueueTimingSource {
        fn info(&self) -> &SourceInfo {
            &DISPATCH_QUEUE_TIMING_INFO
        }

        fn is_available(&self) -> bool {
            // libdispatch ships with every macOS; no capability probe needed.
            true
        }

        /// Collects `n_samples` bytes of entropy from GCD dispatch round-trip
        /// timing, alternating between the LOW and BACKGROUND global queues.
        fn collect(&self, n_samples: usize) -> Vec<u8> {
            // 8× oversampling for robust extraction.
            let raw_count = n_samples * 8 + 64;
            let mut timings = Vec::with_capacity(raw_count);

            // Rotate between LOW and BACKGROUND queues to capture both
            // interrupt-phase jitter (LOW, higher peaks) and steady-state
            // pool saturation (BACKGROUND, higher mean entropy).
            let priorities = [
                DISPATCH_QUEUE_PRIORITY_LOW,
                DISPATCH_QUEUE_PRIORITY_BACKGROUND,
            ];

            // Warm up: let the GCD thread pool reach its normal distribution.
            // Measurements are discarded.
            for i in 0..32_usize {
                let _ = measure_once(priorities[i % priorities.len()]);
            }

            for i in 0..raw_count {
                if let Some(elapsed) = measure_once(priorities[i % priorities.len()]) {
                    // Skip suspend/resume artifacts (>10ms at 24MHz tick rate
                    // — presumably mach ticks; TODO confirm mach_time units).
                    if elapsed < 240_000 {
                        timings.push(elapsed);
                    }
                }
            }

            extract_timing_entropy(&timings, n_samples)
        }
    }

    /// Performs one dispatch round-trip on the global queue with the given
    /// priority and returns the elapsed time in mach ticks, or `None` if the
    /// semaphore could not be created or the wait timed out.
    fn measure_once(priority: i64) -> Option<u64> {
        let queue = unsafe { dispatch_get_global_queue(priority, 0) };
        let sem = unsafe { dispatch_semaphore_create(0) };
        if sem.is_null() {
            return None;
        }

        let t0 = mach_time();
        let timed_out = unsafe {
            let timeout = dispatch_time(DISPATCH_TIME_NOW, SEMAPHORE_TIMEOUT_NS);
            dispatch_async_f(queue, sem, signal_semaphore);
            dispatch_semaphore_wait(sem, timeout) != 0
        };
        let elapsed = mach_time().wrapping_sub(t0);

        if timed_out {
            // BUG FIX: the dispatched block still holds the raw `sem` pointer
            // and will signal it whenever it finally runs. Releasing the
            // semaphore here would make that signal a use-after-free inside
            // libdispatch. We deliberately leak the semaphore instead — a
            // bounded, rare leak (timeouts require a 100ms+ stall) in exchange
            // for soundness.
            return None;
        }

        // SAFETY: the block has already signalled, so no other reference to
        // `sem` remains and releasing it here is sound.
        unsafe { dispatch_release(sem) };
        Some(elapsed)
    }
}
193
194#[cfg(not(target_os = "macos"))]
195impl EntropySource for DispatchQueueTimingSource {
196    fn info(&self) -> &SourceInfo {
197        &DISPATCH_QUEUE_TIMING_INFO
198    }
199
200    fn is_available(&self) -> bool {
201        false
202    }
203
204    fn collect(&self, _n_samples: usize) -> Vec<u8> {
205        Vec::new()
206    }
207}
208
#[cfg(test)]
mod tests {
    use super::*;

    /// The static metadata matches what the registry expects for this source.
    #[test]
    fn info() {
        let info = DispatchQueueTimingSource.info();
        assert_eq!(info.name, "dispatch_queue_timing");
        assert!(matches!(info.category, SourceCategory::Scheduling));
        assert_eq!(info.platform, Platform::MacOS);
        assert!(!info.composite);
    }

    /// libdispatch is unconditionally present on macOS.
    #[test]
    #[cfg(target_os = "macos")]
    fn is_available_on_macos() {
        assert!(DispatchQueueTimingSource.is_available());
    }

    /// Smoke test against the live GCD pool: output is non-empty and varied.
    #[test]
    #[ignore] // Requires live GCD thread pool
    fn collects_bytes_with_variation() {
        let src = DispatchQueueTimingSource;
        if !src.is_available() {
            return;
        }
        let data = src.collect(32);
        assert!(!data.is_empty());
        let distinct = data
            .iter()
            .copied()
            .collect::<std::collections::HashSet<u8>>()
            .len();
        assert!(distinct > 4, "expected variation from GCD pool jitter");
    }
}