preemptive_threads/security/crypto_rng.rs

use crate::errors::ThreadError;
use portable_atomic::{AtomicU64, AtomicUsize, Ordering};
use core::arch::asm;

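/// ChaCha20-based CSPRNG: a 32-word entropy pool gathered from several
/// sources keys a ChaCha20 stream generator, with counters tracking output
/// volume (for periodic reseeding) and successful hardware entropy draws.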
pub struct SecureRng {
    /// Raw entropy mixed from hardware, timing, and system sources.
    entropy_pool: [u64; 32],
    /// Cursor into the pool, reserved for incremental mixing (not yet used).
    pool_position: AtomicUsize,
    /// Total bytes handed out; drives the periodic reseed.
    bytes_generated: AtomicU64,
    /// Count of successful hardware entropy draws.
    entropy_collected: AtomicU64,
    /// ChaCha20 keystream generator keyed from the pool.
    state: ChaCha20State,
}

impl SecureRng {
    const fn new() -> Self {
        Self {
            entropy_pool: [0; 32],
            pool_position: AtomicUsize::new(0),
            bytes_generated: AtomicU64::new(0),
            entropy_collected: AtomicU64::new(0),
            state: ChaCha20State::new(),
        }
    }

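    /// Gather entropy from every available source, then key the ChaCha20
    /// state from the pool. Must succeed before any bytes are generated.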
    pub fn init(&mut self) -> Result<(), ThreadError> {
        self.collect_hardware_entropy()?;
        self.collect_timing_entropy()?;
        self.collect_system_entropy()?;

        self.state.initialize(&self.entropy_pool)?;

        Ok(())
    }

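    /// Fill `dest` with keystream bytes, rekeying the generator roughly
    /// once per MiB of total output.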
    pub fn fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), ThreadError> {
        if dest.is_empty() {
            return Ok(());
        }

        self.state.generate_bytes(dest)?;
        let prev = self.bytes_generated.fetch_add(dest.len() as u64, Ordering::Relaxed);

        // Reseed whenever this request crosses a 1 MiB boundary. Comparing
        // the interval index before and after the update means no boundary
        // is skipped, even when requests are not exact multiples of it.
        const RESEED_INTERVAL: u64 = 1024 * 1024;
        if prev / RESEED_INTERVAL != (prev + dest.len() as u64) / RESEED_INTERVAL {
            self.reseed()?;
        }

        Ok(())
    }

    pub fn next_u64(&mut self) -> Result<u64, ThreadError> {
        let mut bytes = [0u8; 8];
        self.fill_bytes(&mut bytes)?;
        Ok(u64::from_ne_bytes(bytes))
    }

    pub fn next_u32(&mut self) -> Result<u32, ThreadError> {
        let mut bytes = [0u8; 4];
        self.fill_bytes(&mut bytes)?;
        Ok(u32::from_ne_bytes(bytes))
    }

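    /// Uniform value in `[0, max)` via rejection sampling; returns 0 when
    /// `max == 0`.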
    pub fn gen_range(&mut self, max: u64) -> Result<u64, ThreadError> {
        if max == 0 {
            return Ok(0);
        }

        // Largest multiple of `max` that fits in u64; draws at or above it
        // are rejected, otherwise the final modulo would bias low residues.
        let limit = u64::MAX - (u64::MAX % max);
        loop {
            let value = self.next_u64()?;
            if value < limit {
                return Ok(value % max);
            }
        }
    }

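    /// XOR hardware randomness into the pool: RDRAND fills the first half
    /// and RDSEED the second on x86_64, or the platform source on arm64.
    /// Fails unless at least eight draws succeed.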
    fn collect_hardware_entropy(&mut self) -> Result<(), ThreadError> {
        let mut entropy_count = 0u64;

        #[cfg(feature = "x86_64")]
        if is_rdrand_available() {
            for i in 0..16 {
                if let Some(value) = rdrand_u64() {
                    self.entropy_pool[i] ^= value;
                    entropy_count += 1;
                }
            }
        }

        #[cfg(feature = "x86_64")]
        if is_rdseed_available() {
            for i in 16..32 {
                if let Some(value) = rdseed_u64() {
                    self.entropy_pool[i] ^= value;
                    entropy_count += 1;
                }
            }
        }

        #[cfg(feature = "arm64")]
        if is_arm64_rng_available() {
            for i in 0..16 {
                if let Some(value) = arm64_random_u64() {
                    self.entropy_pool[i] ^= value;
                    entropy_count += 1;
                }
            }
        }

        self.entropy_collected.fetch_add(entropy_count, Ordering::Relaxed);

        // Timing and system entropy alone are too weak to key a CSPRNG;
        // refuse to proceed without a minimum of hardware-sourced draws.
        if entropy_count < 8 {
            return Err(ThreadError::UnsupportedOperation(
                "Insufficient hardware entropy sources".into()
            ));
        }

        Ok(())
    }

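    /// Mix timing jitter into every pool word: each iteration times a
    /// volatile memory walk plus a cache touch, then folds the cycle delta
    /// in with a multiply-add mix.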
    fn collect_timing_entropy(&mut self) -> Result<(), ThreadError> {
        for i in 0..32 {
            let start_time = get_cycle_count();

            volatile_memory_access();
            cache_timing_variation();

            let end_time = get_cycle_count();
            let timing_entropy = end_time.wrapping_sub(start_time);

            self.entropy_pool[i] = self.entropy_pool[i]
                .wrapping_mul(0x6c078965)
                .wrapping_add(timing_entropy);
        }

        Ok(())
    }

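    /// XOR in low-grade, environment-dependent values (addresses and
    /// clocks); these only supplement the hardware sources.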
    fn collect_system_entropy(&mut self) -> Result<(), ThreadError> {
        // A stack address and `self`'s address (both ASLR/layout dependent)
        // plus two clock readings. A null pointer would XOR in a constant
        // zero, so a live stack marker is used for the first entry.
        let stack_marker = 0u8;
        let system_entropy = [
            &stack_marker as *const u8 as usize as u64,
            self as *const _ as usize as u64,
            get_cycle_count(),
            crate::time::get_monotonic_time().as_nanos() as u64,
        ];

        for (i, &entropy) in system_entropy.iter().enumerate() {
            self.entropy_pool[i % 32] ^= entropy;
        }

        Ok(())
    }

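    /// Refresh the pool with new timing jitter, then fold it into the
    /// generator's key material.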
    fn reseed(&mut self) -> Result<(), ThreadError> {
        self.collect_timing_entropy()?;

        self.state.reseed(&self.entropy_pool)?;

        Ok(())
    }
}

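/// ChaCha20 state in the original DJB layout: words 0-3 hold the
/// "expand 32-byte k" constants, 4-11 the 256-bit key, 12-13 a 64-bit
/// block counter, and 14-15 the nonce. Aligned to a cache line.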
#[repr(align(64))]
struct ChaCha20State {
    state: [u32; 16],
    counter: u64,
    initialized: bool,
}

impl ChaCha20State {
    const fn new() -> Self {
        Self {
            state: [0; 16],
            counter: 0,
            initialized: false,
        }
    }

    fn initialize(&mut self, entropy: &[u64; 32]) -> Result<(), ThreadError> {
        // "expand 32-byte k" constants.
        self.state[0] = 0x61707865;
        self.state[1] = 0x3320646e;
        self.state[2] = 0x79622d32;
        self.state[3] = 0x6b206574;

        // Key words 4-11: even-indexed pool words contribute their low
        // half, odd-indexed words their high half.
        for i in 0..8 {
            let entropy_u64 = entropy[i];
            self.state[4 + i] = (entropy_u64 >> (32 * (i & 1))) as u32;
        }

        // Counter starts at zero; the nonce comes from the pool tail.
        self.state[12] = 0;
        self.state[13] = 0;
        self.state[14] = (entropy[30] & 0xFFFFFFFF) as u32;
        self.state[15] = (entropy[31] & 0xFFFFFFFF) as u32;
        self.counter = 0;
        self.initialized = true;

        Ok(())
    }

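    /// XOR fresh pool words into the key and nonce rather than replacing
    /// them, so a poor reseed can never reduce the entropy already there.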
    fn reseed(&mut self, entropy: &[u64; 32]) -> Result<(), ThreadError> {
        if !self.initialized {
            return self.initialize(entropy);
        }

        for i in 0..8 {
            let entropy_u64 = entropy[i + 8];
            self.state[4 + i] ^= (entropy_u64 >> (32 * (i & 1))) as u32;
        }

        self.state[14] ^= (entropy[30] & 0xFFFFFFFF) as u32;
        self.state[15] ^= (entropy[31] & 0xFFFFFFFF) as u32;

        Ok(())
    }

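    /// Write keystream into `dest`, advancing the 64-bit block counter
    /// (split across state words 12-13) after every 64-byte block.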
    fn generate_bytes(&mut self, dest: &mut [u8]) -> Result<(), ThreadError> {
        if !self.initialized {
            return Err(ThreadError::InvalidState());
        }

        let mut dest_pos = 0;
        let mut block = [0u8; 64];

        while dest_pos < dest.len() {
            self.chacha20_block(&mut block);

            let copy_len = core::cmp::min(64, dest.len() - dest_pos);
            dest[dest_pos..dest_pos + copy_len].copy_from_slice(&block[..copy_len]);
            dest_pos += copy_len;

            self.counter = self.counter.wrapping_add(1);
            self.state[12] = (self.counter & 0xFFFFFFFF) as u32;
            self.state[13] = (self.counter >> 32) as u32;
        }

        Ok(())
    }

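    /// The ChaCha20 block function: ten double rounds (a column round then
    /// a diagonal round) over a working copy, a feed-forward add of the
    /// input state, then little-endian serialization.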
    fn chacha20_block(&self, output: &mut [u8; 64]) {
        let mut working_state = self.state;

        for _ in 0..10 {
            // Column round.
            quarter_round_by_indices(&mut working_state, 0, 4, 8, 12);
            quarter_round_by_indices(&mut working_state, 1, 5, 9, 13);
            quarter_round_by_indices(&mut working_state, 2, 6, 10, 14);
            quarter_round_by_indices(&mut working_state, 3, 7, 11, 15);

            // Diagonal round.
            quarter_round_by_indices(&mut working_state, 0, 5, 10, 15);
            quarter_round_by_indices(&mut working_state, 1, 6, 11, 12);
            quarter_round_by_indices(&mut working_state, 2, 7, 8, 13);
            quarter_round_by_indices(&mut working_state, 3, 4, 9, 14);
        }

        // Feed-forward keeps the block function one-way.
        for i in 0..16 {
            working_state[i] = working_state[i].wrapping_add(self.state[i]);
        }

        for (i, &word) in working_state.iter().enumerate() {
            let bytes = word.to_le_bytes();
            output[i * 4..(i + 1) * 4].copy_from_slice(&bytes);
        }
    }
}

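/// ChaCha quarter round: four add-rotate-xor steps with rotation counts
/// 16, 12, 8, and 7 (RFC 8439, section 2.1).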
fn quarter_round(a: &mut u32, b: &mut u32, c: &mut u32, d: &mut u32) {
    *a = a.wrapping_add(*b);
    *d ^= *a;
    *d = d.rotate_left(16);

    *c = c.wrapping_add(*d);
    *b ^= *c;
    *b = b.rotate_left(12);

    *a = a.wrapping_add(*b);
    *d ^= *a;
    *d = d.rotate_left(8);

    *c = c.wrapping_add(*d);
    *b ^= *c;
    *b = b.rotate_left(7);
}

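/// Quarter round applied in place to four distinct words of the state.
/// The words are copied out and written back because Rust cannot hand out
/// four simultaneous `&mut` borrows into one array.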
fn quarter_round_by_indices(state: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize) {
    let (mut wa, mut wb, mut wc, mut wd) = (state[a], state[b], state[c], state[d]);
    quarter_round(&mut wa, &mut wb, &mut wc, &mut wd);
    state[a] = wa;
    state[b] = wb;
    state[c] = wc;
    state[d] = wd;
}

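// Global instance. SAFETY: the wrappers below must be externally
// serialized; in practice the RNG is initialized before secondary threads
// are started.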
351static mut SECURE_RNG: SecureRng = SecureRng::new();

#[cfg(feature = "x86_64")]
fn is_rdrand_available() -> bool {
    // CPUID leaf 1: ECX bit 30 advertises RDRAND. The `__cpuid` intrinsic
    // is used because CPUID clobbers RBX, which Rust inline asm cannot
    // name as an operand (LLVM reserves it).
    let cpuid = unsafe { core::arch::x86_64::__cpuid(1) };
    (cpuid.ecx & (1 << 30)) != 0
}

#[cfg(feature = "x86_64")]
fn is_rdseed_available() -> bool {
    // CPUID leaf 7, sub-leaf 0: EBX bit 18 advertises RDSEED.
    let cpuid = unsafe { core::arch::x86_64::__cpuid_count(7, 0) };
    (cpuid.ebx & (1 << 18)) != 0
}

#[cfg(feature = "x86_64")]
fn rdrand_u64() -> Option<u64> {
    let value: u64;
    let success: u8;

    // RDRAND reports success in the carry flag; SETC captures it. Callers
    // draw repeatedly, which doubles as the recommended retry policy.
    unsafe {
        asm!(
            "rdrand {}",
            "setc {}",
            out(reg) value,
            out(reg_byte) success,
            options(nomem, nostack)
        );
    }

    if success != 0 {
        Some(value)
    } else {
        None
    }
}

#[cfg(feature = "x86_64")]
fn rdseed_u64() -> Option<u64> {
    let value: u64;
    let success: u8;

    // RDSEED also reports success in the carry flag; it draws from the
    // conditioned entropy source, so failures are routine and the caller
    // simply skips them.
    unsafe {
        asm!(
            "rdseed {}",
            "setc {}",
            out(reg) value,
            out(reg_byte) success,
            options(nomem, nostack)
        );
    }

    if success != 0 {
        Some(value)
    } else {
        None
    }
}

#[cfg(feature = "arm64")]
fn is_arm64_rng_available() -> bool {
    // Placeholder: a complete implementation would check the FEAT_RNG
    // field of ID_AA64ISAR0_EL1 before claiming support.
    true
}

#[cfg(feature = "arm64")]
fn arm64_random_u64() -> Option<u64> {
    // Placeholder: falls back to the cycle counter until RNDR support is
    // wired up; this is not cryptographic-quality entropy on its own.
    Some(get_cycle_count())
}
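
// A minimal sketch of real FEAT_RNG support, assuming EL1 execution and an
// assembler that accepts the generic S3_3_C2_C4_0 name for RNDR; the
// "feat_rng" feature gate is hypothetical and keeps this out of the build.
#[cfg(all(feature = "arm64", feature = "feat_rng"))]
fn arm64_rndr_u64() -> Option<u64> {
    let value: u64;
    let nzcv: u64;
    unsafe {
        asm!(
            "mrs {val}, s3_3_c2_c4_0", // RNDR: random value, success in flags
            "mrs {flags}, nzcv",
            val = out(reg) value,
            flags = out(reg) nzcv,
            options(nomem, nostack)
        );
    }
    // RNDR sets Z (bit 30 of NZCV) when no entropy was available.
    if nzcv & (1 << 30) == 0 {
        Some(value)
    } else {
        None
    }
}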

fn get_cycle_count() -> u64 {
    #[cfg(feature = "x86_64")]
    unsafe {
        // RDTSC returns the timestamp counter split across EDX:EAX.
        let low: u32;
        let high: u32;
        asm!(
            "rdtsc",
            out("eax") low,
            out("edx") high,
            options(nomem, nostack)
        );
        ((high as u64) << 32) | (low as u64)
    }

    #[cfg(feature = "arm64")]
    unsafe {
        // Virtual counter-timer register; monotonic and readable at EL0.
        let counter: u64;
        asm!(
            "mrs {}, cntvct_el0",
            out(reg) counter,
            options(nomem, nostack)
        );
        counter
    }

    #[cfg(not(any(feature = "x86_64", feature = "arm64")))]
    {
        crate::time::get_monotonic_time().as_nanos() as u64
    }
}

fn volatile_memory_access() {
    static mut DUMMY: [u8; 64] = [0; 64];

    // Volatile writes the compiler cannot elide; raw pointers avoid taking
    // a reference to a `static mut`.
    unsafe {
        let base = core::ptr::addr_of_mut!(DUMMY) as *mut u8;
        for i in 0..64 {
            core::ptr::write_volatile(base.add(i), i as u8);
        }
    }
}

fn cache_timing_variation() {
    static mut CACHE_DATA: [u64; 256] = [0; 256];

    // Touch a pseudo-random cache line so the timing loop around this call
    // observes hit/miss variation.
    unsafe {
        let index = (get_cycle_count() as usize) % 256;
        let base = core::ptr::addr_of!(CACHE_DATA) as *const u64;
        core::ptr::read_volatile(base.add(index));
    }
}

/// Snapshot of the global RNG's counters, for diagnostics.
#[derive(Debug, Clone)]
pub struct SecureRngStats {
    pub bytes_generated: u64,
    pub entropy_collected: u64,
    pub initialized: bool,
}

/// Initialize the global RNG. Must be called once, before any other
/// wrapper and ideally before secondary threads start.
pub fn init_secure_rng() -> Result<(), ThreadError> {
    // SAFETY: callers serialize access to the global instance; raw-pointer
    // access avoids creating a reference to a `static mut`.
    unsafe { (*core::ptr::addr_of_mut!(SECURE_RNG)).init() }
}

pub fn secure_random_bytes(dest: &mut [u8]) -> Result<(), ThreadError> {
    // SAFETY: see `init_secure_rng`.
    unsafe { (*core::ptr::addr_of_mut!(SECURE_RNG)).fill_bytes(dest) }
}

pub fn secure_random_u64() -> Result<u64, ThreadError> {
    // SAFETY: see `init_secure_rng`.
    unsafe { (*core::ptr::addr_of_mut!(SECURE_RNG)).next_u64() }
}

pub fn secure_random_u32() -> Result<u32, ThreadError> {
    // SAFETY: see `init_secure_rng`.
    unsafe { (*core::ptr::addr_of_mut!(SECURE_RNG)).next_u32() }
}

/// Uniform value in `[0, max)`; returns 0 when `max == 0`.
pub fn secure_random_range(max: u64) -> Result<u64, ThreadError> {
    // SAFETY: see `init_secure_rng`.
    unsafe { (*core::ptr::addr_of_mut!(SECURE_RNG)).gen_range(max) }
}

pub fn get_secure_rng_stats() -> SecureRngStats {
    // SAFETY: the counters are atomics and safe to read concurrently;
    // `initialized` is only written during single-threaded init.
    unsafe {
        let rng = &*core::ptr::addr_of!(SECURE_RNG);
        SecureRngStats {
            bytes_generated: rng.bytes_generated.load(Ordering::Relaxed),
            entropy_collected: rng.entropy_collected.load(Ordering::Relaxed),
            initialized: rng.state.initialized,
        }
    }
}
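
// Usage sketch (illustrative; assumes the kernel's boot path calls
// `init_secure_rng` exactly once before secondary threads start):
//
//     init_secure_rng().expect("no usable entropy source");
//     let mut key = [0u8; 32];
//     secure_random_bytes(&mut key)?;
//     let die = secure_random_range(6)?; // uniform in 0..=5

// Host-side sanity tests; kernel builds compile these out. The quarter
// round is checked against the RFC 8439 section 2.1.1 vector, and the
// block function only for determinism and counter sensitivity (a fuller
// suite would compare whole blocks against the RFC block vectors).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn quarter_round_matches_rfc_8439() {
        let (mut a, mut b, mut c, mut d) =
            (0x11111111u32, 0x01020304u32, 0x9b8d6f43u32, 0x01234567u32);
        quarter_round(&mut a, &mut b, &mut c, &mut d);
        assert_eq!((a, b, c, d), (0xea2a92f4, 0xcb1cf8ce, 0x4581472e, 0x5881c4bb));
    }

    #[test]
    fn block_function_is_deterministic_and_counter_sensitive() {
        let entropy = [0x0123_4567_89ab_cdefu64; 32];
        let mut st = ChaCha20State::new();
        assert!(st.initialize(&entropy).is_ok());

        let mut first = [0u8; 64];
        let mut second = [0u8; 64];
        st.chacha20_block(&mut first);
        st.chacha20_block(&mut second);
        assert_eq!(first, second); // same state, same keystream block

        st.state[12] = st.state[12].wrapping_add(1); // bump the counter word
        st.chacha20_block(&mut second);
        assert_ne!(first, second); // new counter, new keystream block
    }
}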