use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::time::{interval, Duration};
use tracing::{debug, info, warn};
use sha2::{Digest, Sha256};
use zeroize::Zeroize;
use crate::constant_time::ct_eq;
use chacha20::{ChaCha20, cipher::{KeyIvInit, StreamCipher}};
// Below this many buffered bytes (256 KiB) the pool is considered critically low.
const MIN_BUFFER_SIZE: usize = 1024 * 256;
// Steady-state pool size (1 MiB); refills are triggered below half of this.
const TARGET_BUFFER_SIZE: usize = 1024 * 1024;
#[allow(dead_code)]
// Intended per-request fetch size (32 KiB); currently unused.
const FETCH_CHUNK_SIZE: usize = 1024 * 32;
// Minimum acceptable quality score.
// NOTE(review): appears unused — `validate_entropy_quality` hard-codes its own
// thresholds; confirm whether this constant is still needed.
const MIN_ENTROPY_QUALITY: f64 = 0.95;
/// Heuristic sanity checks that `bytes` plausibly came from a random source.
///
/// Samples shorter than 32 bytes only get degenerate-pattern checks
/// (all-zeros, all-ones). Larger samples additionally pass through:
/// a chi-square uniformity test (>= 256 bytes), an adjacent-repeat ratio
/// check, and a Shannon-entropy estimate (>= 64 bytes).
///
/// Returns `true` only when every applicable check passes.
fn validate_entropy_quality(bytes: &[u8]) -> bool {
    if bytes.is_empty() {
        debug!("Entropy validation failed: empty bytes");
        return false;
    }
    if bytes.len() < 32 {
        // Too little data for statistics — reject only the degenerate patterns.
        if ct_is_zero(bytes) {
            warn!("Entropy is all zeros");
            return false;
        }
        if bytes.iter().all(|&b| b == 0xFF) {
            warn!("Entropy is all ones");
            return false;
        }
        debug!("Small sample passed basic checks");
        return true;
    }
    // Byte-value histogram, shared by the chi-square and Shannon checks.
    let mut byte_counts = [0u32; 256];
    bytes.iter().for_each(|&b| byte_counts[b as usize] += 1);
    if bytes.len() >= 256 {
        let expected_count = bytes.len() as f64 / 256.0;
        let chi_square: f64 = byte_counts
            .iter()
            .filter(|&&count| count > 0)
            .map(|&count| {
                let diff = count as f64 - expected_count;
                (diff * diff) / expected_count
            })
            .sum();
        debug!("Chi-square test result: {}", chi_square);
        // Reject distributions that are suspiciously uniform OR too skewed.
        if !(100.0..=500.0).contains(&chi_square) {
            warn!("Entropy failed chi-square test: {}", chi_square);
            return false;
        }
    }
    // Count adjacent equal byte pairs (constant-time compare kept from original).
    let repeated_bytes = bytes
        .windows(2)
        .filter(|pair| ct_eq(&pair[1..], &pair[..1]))
        .count();
    let repeat_ratio = repeated_bytes as f64 / bytes.len() as f64;
    debug!("Repeat ratio: {:.2}%", repeat_ratio * 100.0);
    if repeat_ratio > 0.05 {
        warn!("Entropy has too many repeated bytes: {:.2}%", repeat_ratio * 100.0);
        return false;
    }
    if bytes.len() >= 64 {
        // Shannon entropy in bits per byte; 8.0 is the theoretical maximum.
        let total = bytes.len() as f64;
        let shannon_entropy: f64 = byte_counts
            .iter()
            .filter(|&&count| count > 0)
            .map(|&count| {
                let probability = count as f64 / total;
                -(probability * probability.log2())
            })
            .sum();
        debug!("Shannon entropy: {:.2} bits", shannon_entropy);
        if shannon_entropy < 7.5 {
            warn!("Entropy has insufficient Shannon entropy: {:.2} bits", shannon_entropy);
            return false;
        }
    }
    true
}
/// Constant-time check that every byte of `bytes` is zero.
///
/// ORs all bytes into one accumulator so the running time does not depend on
/// where the first non-zero byte appears. An empty slice counts as all-zero.
fn ct_is_zero(bytes: &[u8]) -> bool {
    bytes.iter().fold(0u8, |acc, &b| acc | b) == 0
}
/// FIFO pool of one-time-pad bytes.
///
/// Bytes enter via `push_entropy` (after quality validation) and leave exactly
/// once via `consume`, so no pad byte is ever reused.
#[derive(Debug)]
pub struct TrueVernamBuffer {
    // Unconsumed entropy, oldest byte at the front.
    buffer: VecDeque<u8>,
    // Lifetime total of bytes handed out via `consume`.
    bytes_consumed: u64,
    // Lifetime total of bytes accepted via `push_entropy`.
    bytes_fetched: u64,
}
impl TrueVernamBuffer {
    /// Creates an empty pool with capacity preallocated to the steady-state
    /// target so refills do not repeatedly reallocate.
    pub fn new() -> Self {
        Self {
            buffer: VecDeque::with_capacity(TARGET_BUFFER_SIZE),
            bytes_consumed: 0,
            bytes_fetched: 0,
        }
    }

    /// Validates `bytes` and appends them to the pool.
    ///
    /// # Errors
    /// - `InvalidBufferSize` when fewer than 32 bytes are supplied.
    /// - `InvalidQuality` when `validate_entropy_quality` rejects the data.
    pub fn push_entropy(&mut self, bytes: &[u8]) -> Result<(), EntropyError> {
        if bytes.len() < 32 {
            return Err(EntropyError::InvalidBufferSize { size: bytes.len() });
        }
        if !validate_entropy_quality(bytes) {
            return Err(EntropyError::InvalidQuality);
        }
        // Log only a short hash prefix so raw entropy never reaches the logs.
        debug!(
            "📥 Adding {} bytes to True Vernam buffer (hash: {})",
            bytes.len(),
            hex::encode(&Sha256::digest(bytes)[..8])
        );
        self.buffer.extend(bytes.iter());
        self.bytes_fetched += bytes.len() as u64;
        debug!(
            "📥 Added {} bytes to True Vernam buffer (total: {})",
            bytes.len(),
            self.buffer.len()
        );
        Ok(())
    }

    /// Removes and returns exactly `count` bytes from the front of the pool.
    ///
    /// # Errors
    /// - `BufferEmpty` when the pool holds no bytes at all (this is checked
    ///   first, so it is returned even for `count == 0`, preserving the
    ///   original semantics).
    /// - `InsufficientEntropy` when fewer than `count` bytes are available.
    pub fn consume(&mut self, count: usize) -> Result<Vec<u8>, EntropyError> {
        if self.buffer.is_empty() {
            return Err(EntropyError::BufferEmpty);
        }
        if self.buffer.len() < count {
            return Err(EntropyError::InsufficientEntropy {
                requested: count,
                available: self.buffer.len(),
            });
        }
        // drain() removes the bytes from the pool in one pass, guaranteeing
        // single use (previously a manual pop_front loop).
        let result: Vec<u8> = self.buffer.drain(..count).collect();
        self.bytes_consumed += count as u64;
        debug!(
            "🔑 Consumed {} TRUE random bytes (remaining: {})",
            count,
            self.buffer.len()
        );
        Ok(result)
    }

    /// True once the pool has dropped below half the steady-state target.
    pub fn needs_refill(&self) -> bool {
        self.buffer.len() < TARGET_BUFFER_SIZE / 2
    }

    /// True once the pool is below the critical minimum (`MIN_BUFFER_SIZE`).
    pub fn is_critical(&self) -> bool {
        self.buffer.len() < MIN_BUFFER_SIZE
    }

    /// Number of unconsumed bytes currently buffered.
    pub fn available(&self) -> usize {
        self.buffer.len()
    }

    /// Lifetime `(bytes_consumed, bytes_fetched)` counters.
    pub fn stats(&self) -> (u64, u64) {
        (self.bytes_consumed, self.bytes_fetched)
    }
}
// Best-effort scrubbing of key material when the buffer is dropped.
impl Drop for TrueVernamBuffer {
    fn drop(&mut self) {
        // Overwrite each live byte of unconsumed pad material.
        // NOTE(review): this touches only the elements currently in the deque;
        // bytes left in spare capacity after earlier pops are not overwritten —
        // confirm whether that residue matters for the threat model.
        for byte in &mut self.buffer {
            byte.zeroize();
        }
        // Also clear the usage counters (metadata about key consumption).
        self.bytes_consumed.zeroize();
        self.bytes_fetched.zeroize();
    }
}
impl Default for TrueVernamBuffer {
fn default() -> Self {
Self::new()
}
}
/// Background fetcher that keeps a shared `TrueVernamBuffer` topped up with
/// hybrid entropy (local CSPRNG XOR remote-worker or extra-local material,
/// optionally XORed with a swarm seed).
pub struct TrueVernamFetcher {
    // Base URL of the remote entropy ("vernam") service.
    vernam_url: String,
    // Destination pool, shared with consumers.
    buffer: Arc<Mutex<TrueVernamBuffer>>,
    // Optional swarm-derived seed; when set, remote worker fetches are skipped.
    swarm_seed: Option<[u8; 32]>,
}
impl TrueVernamFetcher {
pub fn new(vernam_url: String, buffer: Arc<Mutex<TrueVernamBuffer>>) -> Self {
Self {
vernam_url,
buffer,
swarm_seed: None,
}
}
pub fn set_swarm_seed(&mut self, seed: [u8; 32]) {
self.swarm_seed = Some(seed);
info!("🔗 True Vernam: Swarm seed set - TRUSTLESS mode activated!");
}
pub fn start_background_task(self) {
tokio::spawn(async move {
info!("🚀 True Vernam: Starting initial buffer fill...");
for _ in 0..32 {
if let Err(e) = self.fetch_hybrid_entropy().await {
warn!("Initial fetch failed: {}", e);
}
}
info!("✅ True Vernam: Initial buffer ready!");
let mut interval = interval(Duration::from_secs(10));
loop {
interval.tick().await;
let needs_refill = {
let buffer = self.buffer.lock().await;
buffer.needs_refill()
};
if needs_refill {
if let Err(e) = self.fetch_hybrid_entropy().await {
warn!("Entropy fetch failed: {}", e);
}
}
}
});
}
async fn fetch_hybrid_entropy(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let mut local_entropy = [0u8; 32];
getrandom::getrandom(&mut local_entropy)
.map_err(|e| format!("CSPRNG unavailable: {}", e))?;
let worker_entropy = if self.swarm_seed.is_some() {
debug!("🚀 Swarm mode: skipping Worker entropy (cost optimization)");
let mut extra_local = [0u8; 32];
getrandom::getrandom(&mut extra_local)
.map_err(|e| format!("CSPRNG unavailable: {}", e))?;
extra_local.to_vec()
} else {
self.fetch_worker_entropy().await.unwrap_or_else(|e| {
warn!(
"Worker entropy fetch failed: {}, using additional local randomness",
e
);
let mut fallback = [0u8; 32];
match getrandom::getrandom(&mut fallback) {
Ok(_) => fallback.to_vec(),
Err(e) => {
tracing::error!("🚨 CRITICAL: Both CSPRNG and Worker entropy failed. Cannot proceed securely.");
panic!("Entropy generation failed: {}. System cannot operate without secure randomness.", e);
}
}
})
};
let mut combined = [0u8; 32];
for i in 0..32 {
combined[i] = local_entropy[i] ^ worker_entropy[i];
}
if let Some(swarm_seed) = &self.swarm_seed {
for i in 0..32 {
combined[i] ^= swarm_seed[i];
}
debug!("🔗 Hybrid entropy: local XOR local2 XOR swarm (INFORMATION-THEORETIC)");
} else {
debug!("⚠️ Hybrid entropy: local XOR worker (information-theoretic)");
}
{
let mut buffer = self.buffer.lock().await;
if let Err(e) = buffer.push_entropy(&combined) {
warn!("Failed to add entropy to buffer: {}", e);
return Err(e.into());
}
}
Ok(())
}
async fn fetch_worker_entropy(
&self,
) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
let url = format!(
"{}/entropy?size=32&n=1024",
self.vernam_url.trim_end_matches('/')
);
let response = reqwest::get(&url).await?;
if !response.status().is_success() {
return Err(format!("Failed to fetch entropy: {}", response.status()).into());
}
let body = response.text().await?;
let json: serde_json::Value = serde_json::from_str(&body)?;
if let Some(entropy_hex) = json["entropy"].as_str() {
let entropy_bytes = hex::decode(entropy_hex)?;
Ok(entropy_bytes)
} else if let Some(entropy_array) = json["entropy"].as_array() {
let mut all_entropy = Vec::new();
for entry in entropy_array {
if let Some(hex_str) = entry.as_str() {
let bytes = hex::decode(hex_str)?;
all_entropy.extend(bytes);
}
}
Ok(all_entropy)
} else {
Err("Missing entropy field".into())
}
}
}
/// Errors produced by `TrueVernamBuffer` operations.
#[derive(Debug, Clone)]
pub enum EntropyError {
    /// Candidate entropy failed the statistical quality checks.
    InvalidQuality,
    /// `consume` was called while the pool held no bytes.
    BufferEmpty,
    /// More bytes were requested than are currently buffered.
    InsufficientEntropy { requested: usize, available: usize },
    /// `push_entropy` was given fewer than the 32-byte minimum.
    InvalidBufferSize { size: usize },
}
impl std::fmt::Display for EntropyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EntropyError::InvalidQuality => write!(f, "Entropy quality validation failed - data does not appear to be truly random"),
EntropyError::BufferEmpty => write!(f, "Buffer is empty - no entropy available for consumption"),
EntropyError::InsufficientEntropy { requested, available } => {
write!(f, "Insufficient entropy: requested {} bytes but only {} available", requested, available)
}
EntropyError::InvalidBufferSize { size } => {
write!(f, "Invalid buffer size: {} bytes (minimum 32 bytes required)", size)
}
}
}
}
impl std::error::Error for EntropyError {}
// Unit tests for `TrueVernamBuffer`: FIFO consumption, the single-use
// guarantee, and entropy quality gating.
#[cfg(test)]
mod tests {
    use super::*;

    // Consuming must return bytes in FIFO order and shrink the pool.
    #[test]
    fn test_buffer_consume_removes_bytes() {
        let mut buffer = TrueVernamBuffer::new();
        let entropy: Vec<u8> = vec![
            0xDE, 0xAD, 0xBE, 0xEF, 0x55, 0xAA, 0x12, 0x34,
            0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA,
            0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11, 0x22,
        ];
        buffer.push_entropy(&entropy).expect("Valid entropy should be accepted");
        assert_eq!(buffer.available(), entropy.len());
        let consumed = buffer.consume(3).unwrap();
        assert_eq!(consumed, vec![0xDE, 0xAD, 0xBE]);
        assert_eq!(buffer.available(), entropy.len() - 3);
        let consumed = buffer.consume(2).unwrap();
        assert_eq!(consumed, vec![0xEF, 0x55]);
        assert_eq!(buffer.available(), entropy.len() - 5);
        // Over-asking must fail and report exactly how much is available.
        match buffer.consume(28) {
            Err(EntropyError::InsufficientEntropy { requested, available }) => {
                assert_eq!(requested, 28);
                assert_eq!(available, 27);
            }
            _ => panic!("Expected InsufficientEntropy error"),
        }
    }

    // Every byte is handed out at most once; a fully drained pool errors.
    #[test]
    fn test_bytes_never_reused() {
        let mut buffer = TrueVernamBuffer::new();
        let entropy: Vec<u8> = (0..200).map(|i| (i * 7 + 13) as u8).collect();
        buffer.push_entropy(&entropy).expect("Valid entropy should be accepted");
        let _chunk1 = buffer.consume(50).unwrap();
        let _chunk2 = buffer.consume(50).unwrap();
        assert_eq!(buffer.available(), 100);
        let _chunk3 = buffer.consume(100).unwrap();
        assert_eq!(buffer.available(), 0);
        match buffer.consume(1) {
            Err(EntropyError::BufferEmpty) => {}, _ => panic!("Expected BufferEmpty error"),
        }
    }

    // Degenerate inputs (all-zero, single repeated byte) must be rejected.
    #[test]
    fn test_entropy_validation() {
        let mut buffer = TrueVernamBuffer::new();
        let valid_entropy: Vec<u8> = vec![
            0xDE, 0xAD, 0xBE, 0xEF, 0x55, 0xAA, 0x12, 0x34,
            0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA,
            0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11, 0x22,
        ];
        assert!(buffer.push_entropy(&valid_entropy).is_ok());
        let low_entropy = vec![0x00; 100]; assert!(buffer.push_entropy(&low_entropy).is_err());
        let repeated_pattern = vec![0xAB; 50]; assert!(buffer.push_entropy(&repeated_pattern).is_err());
    }
}
use std::sync::atomic::{AtomicU64, Ordering};
/// Keystream generator that two peers can run independently and stay in sync:
/// both sides derive identical bytes from a shared seed plus a monotonically
/// advancing byte position, optionally anchored to drand rounds for a
/// "true OTP" path with a ChaCha20 computational fallback.
pub struct SynchronizedVernamBuffer {
    // 32-byte seed known to both peers (see `create_shared_seed`).
    shared_seed: [u8; 32],
    // Next keystream byte offset; advanced atomically by `consume`/`consume_sync`.
    position_counter: AtomicU64,
    // First drand round to draw from; 0 disables the true-OTP path.
    starting_round: u64,
    // Client used to fetch drand round entropy.
    drand_client: Arc<crate::drand::DrandEntropy>,
}
impl SynchronizedVernamBuffer {
    /// Creates a buffer anchored to drand: positions 0..32 map to
    /// `starting_round`, positions 32..64 to the next round, and so on.
    pub fn new_with_drand(shared_seed: [u8; 32], starting_round: u64, drand_client: Arc<crate::drand::DrandEntropy>) -> Self {
        Self {
            shared_seed,
            position_counter: AtomicU64::new(0),
            starting_round,
            drand_client,
        }
    }

    /// Creates a buffer without a drand anchor (`starting_round == 0`); all
    /// keystream then comes from the computational ChaCha20 path.
    pub fn new(shared_seed: [u8; 32]) -> Self {
        let drand_client = Arc::new(crate::drand::DrandEntropy::new());
        Self {
            shared_seed,
            position_counter: AtomicU64::new(0),
            starting_round: 0,
            drand_client,
        }
    }

    /// XOR-combines three independent 32-byte secrets into a shared seed.
    /// The result is unpredictable as long as ANY single input is.
    pub fn create_shared_seed(
        mlkem_secret: [u8; 32],
        drand_entropy: [u8; 32],
        peer_contributions: [u8; 32],
    ) -> [u8; 32] {
        let mut shared_seed = [0u8; 32];
        for i in 0..32 {
            shared_seed[i] = mlkem_secret[i] ^ drand_entropy[i] ^ peer_contributions[i];
        }
        debug!("🔑 Created information-theoretic shared seed (secure if any source is random)");
        shared_seed
    }

    /// Derives up to 32 keystream bytes from the drand round covering `position`.
    ///
    /// # Errors
    /// Fails when `length > 32`, when the requested span would cross a round
    /// boundary, or when the drand fetch itself fails.
    async fn generate_true_otp_keystream(&self, position: u64, length: usize) -> Result<Vec<u8>, crate::drand::DrandError> {
        if length > 32 {
            return Err(crate::drand::DrandError::NetworkError("True OTP limited to 32 bytes".to_string()));
        }
        let start_byte = (position % 32) as usize;
        // BUG FIX: the original clamped the end offset to 32 and then called
        // `copy_from_slice` with a longer destination, which panics whenever
        // `position % 32 + length > 32` (e.g. position 24, length 16).
        // Requests that would straddle two drand rounds now return an error
        // so the caller can fall back to ChaCha20 instead of crashing.
        if start_byte + length > 32 {
            return Err(crate::drand::DrandError::NetworkError(format!(
                "True OTP span crosses drand round boundary (position {}, length {})",
                position, length
            )));
        }
        let round_number = self.starting_round + position / 32;
        let drand_entropy = self.drand_client.fetch_round(round_number).await?;
        let keystream = drand_entropy[start_byte..start_byte + length].to_vec();
        debug!("🔑 Generated TRUE OTP keystream: {} bytes from drand round {} (info-theoretic)", length, round_number);
        Ok(keystream)
    }

    /// Generates `length` keystream bytes for a fixed `position`, preferring
    /// the drand true-OTP path and falling back to ChaCha20 on any error.
    async fn generate_at_position(&self, position: u64, length: usize) -> Vec<u8> {
        if length <= 32 && self.starting_round > 0 {
            match self.generate_true_otp_keystream(position, length).await {
                Ok(keystream) => return keystream,
                Err(e) => {
                    warn!("Failed to fetch drand round for true OTP: {}. Falling back to ChaCha20.", e);
                }
            }
        }
        self.chacha20_keystream(position, length)
    }

    /// Computational fallback: ChaCha20 keyed with the shared seed, using the
    /// byte position as the nonce. Positions are never reused (the counter
    /// only moves forward), so each consume gets a fresh keystream.
    fn chacha20_keystream(&self, position: u64, length: usize) -> Vec<u8> {
        let mut keystream = vec![0u8; length];
        let mut nonce_bytes = [0u8; 12];
        nonce_bytes[4..12].copy_from_slice(&position.to_be_bytes());
        let mut cipher = ChaCha20::new(self.shared_seed.as_ref().into(), nonce_bytes.as_ref().into());
        cipher.apply_keystream(&mut keystream);
        debug!("🔑 Generated {} bytes at position {} (computational ChaCha20)", length, position);
        keystream
    }

    /// Atomically reserves `length` bytes of keystream position and generates
    /// the corresponding bytes (may fetch from drand).
    pub async fn consume(&self, length: usize) -> Vec<u8> {
        let position = self.position_counter.fetch_add(length as u64, Ordering::SeqCst);
        self.generate_at_position(position, length).await
    }

    /// Synchronous variant of `consume`.
    ///
    /// NOTE(review): when a Tokio runtime is present this uses
    /// `block_in_place`, which panics on a current-thread runtime — confirm
    /// callers only invoke this from multi-threaded runtimes.
    pub fn consume_sync(&self, length: usize) -> Vec<u8> {
        let position = self.position_counter.fetch_add(length as u64, Ordering::SeqCst);
        if length <= 32 && self.starting_round > 0 {
            match tokio::runtime::Handle::try_current() {
                Ok(handle) => {
                    match tokio::task::block_in_place(|| {
                        handle.block_on(self.generate_true_otp_keystream(position, length))
                    }) {
                        Ok(keystream) => return keystream,
                        Err(e) => {
                            warn!("Failed to fetch drand round for true OTP: {}. Falling back to ChaCha20.", e);
                        }
                    }
                }
                Err(_) => {
                    warn!("No Tokio runtime available for true OTP. Falling back to ChaCha20.");
                }
            }
        }
        self.chacha20_keystream(position, length)
    }

    /// Current keystream position (next byte offset to be handed out).
    pub fn current_position(&self) -> u64 {
        self.position_counter.load(Ordering::SeqCst)
    }

    /// Forcibly moves the position counter; both peers must do this in
    /// lockstep or their keystreams will diverge.
    pub fn reset_position(&self, new_position: u64) {
        self.position_counter.store(new_position, Ordering::SeqCst);
        warn!("🚨 Position counter reset to {} - synchronization may be broken!", new_position);
    }
}
// Tests for `SynchronizedVernamBuffer`: two peers with the same seed and
// position must derive identical keystreams; diverging positions must not.
#[cfg(test)]
mod synchronized_tests {
    use super::*;

    // Same seed, same positions => identical keystreams; positions advance.
    #[test]
    fn test_synchronized_generation() {
        let seed = [0x42; 32];
        let alice_buffer = SynchronizedVernamBuffer::new(seed);
        let bob_buffer = SynchronizedVernamBuffer::new(seed);
        let alice_keystream = alice_buffer.consume_sync(32);
        let bob_keystream = bob_buffer.consume_sync(32);
        assert_eq!(alice_keystream, bob_keystream);
        assert_eq!(alice_keystream.len(), 32);
        let alice_keystream2 = alice_buffer.consume_sync(32);
        assert_ne!(alice_keystream, alice_keystream2);
    }

    // Round-trip: Alice encrypts with her keystream, Bob decrypts with his.
    #[test]
    fn test_consume_synchronization() {
        let _seed = [0xDE, 0xAD, 0xBE, 0xEF]; let full_seed = [0xDE, 0xAD, 0xBE, 0xEF].repeat(8).try_into().unwrap();
        let alice_buffer = SynchronizedVernamBuffer::new(full_seed);
        let bob_buffer = SynchronizedVernamBuffer::new(full_seed);
        let plaintext = b"Hello, this is secret!";
        let alice_keystream = alice_buffer.consume_sync(plaintext.len());
        let mut alice_ciphertext = plaintext.to_vec();
        for (i, byte) in alice_ciphertext.iter_mut().enumerate() {
            *byte ^= alice_keystream[i];
        }
        let bob_keystream = bob_buffer.consume_sync(plaintext.len());
        let mut bob_plaintext = alice_ciphertext.clone();
        for (i, byte) in bob_plaintext.iter_mut().enumerate() {
            *byte ^= bob_keystream[i];
        }
        assert_eq!(bob_plaintext, plaintext);
        assert_eq!(alice_keystream, bob_keystream); }

    // drand-anchored path: both peers at the same round/position agree.
    #[tokio::test]
    async fn test_true_otp_generation() {
        use crate::drand::DrandEntropy;
        let drand_client = Arc::new(DrandEntropy::new());
        let alice_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        let bob_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client);
        let alice_keystream = alice_buffer.consume(16).await;
        let bob_keystream = bob_buffer.consume(16).await;
        assert_eq!(alice_keystream, bob_keystream);
        assert_eq!(alice_keystream.len(), 16);
        let alice_keystream2 = alice_buffer.consume(16).await;
        assert_ne!(alice_keystream, alice_keystream2);
    }

    // Chunked encrypt/decrypt stays synchronized across multiple consumes.
    #[tokio::test]
    async fn test_true_otp_synchronization() {
        use crate::drand::DrandEntropy;
        let drand_client = Arc::new(DrandEntropy::new());
        let alice_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        let bob_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        let plaintext = b"Hello, World! This is a test message.";
        let mut alice_ciphertext = Vec::new();
        let mut bob_ciphertext = Vec::new();
        for chunk in plaintext.chunks(8) {
            let keystream = alice_buffer.consume(chunk.len()).await;
            let encrypted: Vec<u8> = chunk.iter().zip(keystream.iter()).map(|(p, k)| p ^ k).collect();
            alice_ciphertext.extend(encrypted);
        }
        for chunk in plaintext.chunks(8) {
            let keystream = bob_buffer.consume(chunk.len()).await;
            let encrypted: Vec<u8> = chunk.iter().zip(keystream.iter()).map(|(p, k)| p ^ k).collect();
            bob_ciphertext.extend(encrypted);
        }
        assert_eq!(alice_ciphertext, bob_ciphertext);
        // A fresh buffer (position 0) decrypts Alice's ciphertext.
        let mut bob_plaintext = Vec::new();
        let bob_buffer_decrypt = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        for chunk in alice_ciphertext.chunks(8) {
            let keystream = bob_buffer_decrypt.consume(chunk.len()).await;
            let decrypted: Vec<u8> = chunk.iter().zip(keystream.iter()).map(|(c, k)| c ^ k).collect();
            bob_plaintext.extend(decrypted);
        }
        assert_eq!(bob_plaintext, plaintext);
    }

    // Requests over 32 bytes skip the OTP path and still produce keystream.
    #[tokio::test]
    async fn test_true_otp_large_message_fallback() {
        use crate::drand::DrandEntropy;
        let drand_client = Arc::new(DrandEntropy::new());
        let buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client);
        let keystream = buffer.consume(64).await;
        assert_eq!(keystream.len(), 64);
        let keystream2 = buffer.consume(64).await;
        assert_ne!(keystream, keystream2);
    }

    // `consume_sync` agrees between peers on the drand-anchored path too.
    #[test]
    fn test_true_otp_sync_method() {
        use crate::drand::DrandEntropy;
        let drand_client = Arc::new(DrandEntropy::new());
        let alice_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        let bob_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client);
        let alice_keystream = alice_buffer.consume_sync(16);
        let bob_keystream = bob_buffer.consume_sync(16);
        assert_eq!(alice_keystream, bob_keystream);
        assert_eq!(alice_keystream.len(), 16);
    }

    // Once positions diverge (Bob consumed 8 vs Alice's 16), streams differ.
    #[tokio::test]
    async fn test_synchronization_failure() {
        use crate::drand::DrandEntropy;
        let drand_client = Arc::new(DrandEntropy::new());
        let alice_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client.clone());
        let bob_buffer = SynchronizedVernamBuffer::new_with_drand([0x42; 32], 1000, drand_client);
        let _alice_keystream1 = alice_buffer.consume(16).await;
        let _bob_keystream1 = bob_buffer.consume(8).await;
        let alice_keystream2 = alice_buffer.consume(16).await;
        let bob_keystream2 = bob_buffer.consume(16).await;
        assert_ne!(alice_keystream2, bob_keystream2);
    }
}