#![deny(unsafe_code)]
#![deny(missing_docs)]
#![deny(clippy::unwrap_used)]
#![deny(clippy::panic)]
use chrono::{DateTime, Utc};
use parking_lot::{Mutex, RwLock};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::io::{BufWriter, Write};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::unified_api::error::{CoreError, Result};
/// A single tamper-evident audit log entry.
///
/// Events are hash-chained by [`FileAuditStorage`]: `integrity_hash` is
/// computed over this event's fields plus the previous event's hash, so
/// modifying or removing a persisted event breaks the chain.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    /// Unique identifier (UUID v4 string, generated at construction time).
    pub id: String,
    /// UTC time at which the event was created.
    pub timestamp: DateTime<Utc>,
    /// Broad category of the event (authentication, key operation, ...).
    pub event_type: AuditEventType,
    /// Who performed the action, if known.
    pub actor: Option<String>,
    /// What the action targeted, if applicable.
    pub resource: Option<String>,
    /// Free-form name of the action performed.
    pub action: String,
    /// Whether the action succeeded, failed, or was denied.
    pub outcome: AuditOutcome,
    /// Arbitrary key/value context attached to the event.
    pub metadata: HashMap<String, String>,
    /// Chain hash; empty until the storage backend fills it in on write.
    pub integrity_hash: String,
}
impl AuditEvent {
    /// Creates an event with a fresh UUID and the current UTC timestamp.
    ///
    /// `actor` and `resource` start as `None`, `metadata` starts empty, and
    /// `integrity_hash` is left blank for the storage backend to fill in.
    #[must_use]
    pub fn new(event_type: AuditEventType, action: &str, outcome: AuditOutcome) -> Self {
        Self {
            id: generate_uuid(),
            timestamp: Utc::now(),
            event_type,
            actor: None,
            resource: None,
            action: action.to_owned(),
            outcome,
            metadata: HashMap::new(),
            integrity_hash: String::new(),
        }
    }

    /// Starts an [`AuditEventBuilder`] for fluent construction.
    #[must_use]
    pub fn builder(
        event_type: AuditEventType,
        action: &str,
        outcome: AuditOutcome,
    ) -> AuditEventBuilder {
        AuditEventBuilder::new(event_type, action, outcome)
    }

    /// Returns the event with `actor` set.
    #[must_use]
    pub fn with_actor(mut self, actor: impl Into<String>) -> Self {
        self.actor = Some(actor.into());
        self
    }

    /// Returns the event with `resource` set.
    #[must_use]
    pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
        self.resource = Some(resource.into());
        self
    }

    /// Returns the event with one additional metadata key/value pair.
    #[must_use]
    pub fn with_metadata(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
        self.metadata.insert(key.into(), value.into());
        self
    }

    /// Unique event identifier.
    #[must_use]
    pub fn id(&self) -> &str {
        &self.id
    }

    /// Creation time (UTC).
    #[must_use]
    pub fn timestamp(&self) -> DateTime<Utc> {
        self.timestamp
    }

    /// Event category.
    #[must_use]
    pub fn event_type(&self) -> &AuditEventType {
        &self.event_type
    }

    /// Actor, if one was recorded.
    #[must_use]
    pub fn actor(&self) -> Option<&str> {
        self.actor.as_deref()
    }

    /// Target resource, if one was recorded.
    #[must_use]
    pub fn resource(&self) -> Option<&str> {
        self.resource.as_deref()
    }

    /// Name of the action performed.
    #[must_use]
    pub fn action(&self) -> &str {
        &self.action
    }

    /// Outcome of the action.
    #[must_use]
    pub fn outcome(&self) -> &AuditOutcome {
        &self.outcome
    }

    /// Attached key/value context.
    #[must_use]
    pub fn metadata(&self) -> &HashMap<String, String> {
        &self.metadata
    }

    /// Chain hash (empty until the event has been written to storage).
    #[must_use]
    pub fn integrity_hash(&self) -> &str {
        &self.integrity_hash
    }
}
/// Fluent builder for [`AuditEvent`], obtained via [`AuditEvent::builder`].
pub struct AuditEventBuilder {
    // Event being assembled; returned as-is by `build`.
    event: AuditEvent,
}
impl AuditEventBuilder {
    /// Starts a builder wrapping a freshly constructed [`AuditEvent`].
    #[must_use]
    pub fn new(event_type: AuditEventType, action: &str, outcome: AuditOutcome) -> Self {
        let event = AuditEvent::new(event_type, action, outcome);
        Self { event }
    }

    /// Sets the actor responsible for the action.
    #[must_use]
    pub fn actor(mut self, actor: impl Into<String>) -> Self {
        let actor = actor.into();
        self.event.actor = Some(actor);
        self
    }

    /// Sets the resource the action targeted.
    #[must_use]
    pub fn resource(mut self, resource: impl Into<String>) -> Self {
        let resource = resource.into();
        self.event.resource = Some(resource);
        self
    }

    /// Adds one metadata key/value pair to the event.
    #[must_use]
    pub fn metadata(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
        let (key, value) = (key.into(), value.into());
        self.event.metadata.insert(key, value);
        self
    }

    /// Finalizes the builder and returns the assembled event.
    #[must_use]
    pub fn build(self) -> AuditEvent {
        self.event
    }
}
/// Category of an audit event.
///
/// Marked `#[non_exhaustive]` so new categories can be added without a
/// breaking change; downstream matches must include a wildcard arm.
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum AuditEventType {
    /// Login/logout and credential checks.
    Authentication,
    /// Key generation, rotation, import/export, destruction.
    KeyOperation,
    /// Encrypt/decrypt/sign/verify operations.
    CryptoOperation,
    /// Authorization and policy decisions.
    AccessControl,
    /// Session creation, refresh, and teardown.
    SessionManagement,
    /// Detected anomalies or security-relevant warnings.
    SecurityAlert,
    /// Changes to configuration.
    ConfigurationChange,
    /// General system lifecycle events (startup, shutdown, ...).
    System,
}
impl std::fmt::Display for AuditEventType {
    /// Writes the snake_case name used in serialized audit records.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::Authentication => "authentication",
            Self::KeyOperation => "key_operation",
            Self::CryptoOperation => "crypto_operation",
            Self::AccessControl => "access_control",
            Self::SessionManagement => "session_management",
            Self::SecurityAlert => "security_alert",
            Self::ConfigurationChange => "configuration_change",
            Self::System => "system",
        };
        f.write_str(name)
    }
}
/// Result of the audited action.
///
/// `#[non_exhaustive]` allows new outcomes to be added without a breaking
/// change.
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum AuditOutcome {
    /// The action completed successfully.
    Success,
    /// The action was attempted but failed.
    Failure,
    /// The action was refused by policy or access control.
    Denied,
}
impl std::fmt::Display for AuditOutcome {
    /// Writes the lowercase name used in serialized audit records.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::Success => "success",
            Self::Failure => "failure",
            Self::Denied => "denied",
        };
        f.write_str(name)
    }
}
/// Configuration for [`FileAuditStorage`].
#[derive(Debug, Clone)]
pub struct AuditConfig {
    /// Directory in which audit files are created.
    pub storage_path: PathBuf,
    /// Size threshold (bytes) that triggers file rotation.
    pub max_file_size_bytes: u64,
    /// Age threshold that triggers file rotation.
    pub max_file_age: Duration,
    /// Files older than this many days are deleted during cleanup.
    pub retention_days: u32,
}
impl Default for AuditConfig {
    /// 100 MiB files, rotated daily, retained for 90 days, under `audit_logs/`.
    fn default() -> Self {
        // Named constants instead of inline magic numbers.
        const DEFAULT_MAX_FILE_SIZE_BYTES: u64 = 100 * 1024 * 1024; // 100 MiB
        const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
        Self {
            storage_path: PathBuf::from("audit_logs"),
            max_file_size_bytes: DEFAULT_MAX_FILE_SIZE_BYTES,
            max_file_age: Duration::from_secs(SECONDS_PER_DAY),
            retention_days: 90,
        }
    }
}
impl AuditConfig {
    /// Creates a config rooted at `storage_path`; every other setting is
    /// taken from [`AuditConfig::default`].
    #[must_use]
    pub fn new(storage_path: PathBuf) -> Self {
        Self { storage_path, ..Default::default() }
    }

    /// Sets the rotation size threshold, in bytes.
    #[must_use]
    pub fn with_max_file_size(mut self, max_bytes: u64) -> Self {
        self.max_file_size_bytes = max_bytes;
        self
    }

    /// Sets the rotation age threshold.
    #[must_use]
    pub fn with_max_file_age(mut self, max_age: Duration) -> Self {
        self.max_file_age = max_age;
        self
    }

    /// Sets the retention window used during cleanup, in days.
    #[must_use]
    pub fn with_retention_days(mut self, days: u32) -> Self {
        self.retention_days = days;
        self
    }

    /// Directory in which audit files are created.
    #[must_use]
    pub fn storage_path(&self) -> &PathBuf {
        &self.storage_path
    }

    /// Rotation size threshold, in bytes.
    #[must_use]
    pub fn max_file_size_bytes(&self) -> u64 {
        self.max_file_size_bytes
    }

    /// Rotation age threshold.
    #[must_use]
    pub fn max_file_age(&self) -> Duration {
        self.max_file_age
    }

    /// Retention window, in days.
    #[must_use]
    pub fn retention_days(&self) -> u32 {
        self.retention_days
    }
}
/// Sink for audit events; implementations must be shareable across threads.
pub trait AuditStorage: Send + Sync {
    /// Persists a single event.
    ///
    /// # Errors
    /// Returns an error if the event cannot be recorded.
    fn write(&self, event: &AuditEvent) -> Result<()>;
    /// Flushes any buffered events to durable storage.
    ///
    /// # Errors
    /// Returns an error if buffered data cannot be flushed.
    fn flush(&self) -> Result<()>;
}
// State of the currently open audit file; `None` in `FileAuditStorage`
// until the first write (or after rotation closes the old file).
struct FileState {
    // Buffered writer over the open append-mode file.
    writer: BufWriter<File>,
    // Path of the open file (used in rotation log messages).
    current_path: PathBuf,
    // Bytes written through this state; compared against the size limit.
    current_size: u64,
    // When this file was opened; compared against the age limit.
    created_at: DateTime<Utc>,
}
/// [`AuditStorage`] backend writing hash-chained JSONL files with size/age
/// based rotation and retention-based cleanup.
pub struct FileAuditStorage {
    /// Storage configuration (path, rotation, retention).
    config: AuditConfig,
    /// Currently open file, lazily created on first write.
    file_state: Mutex<Option<FileState>>,
    /// Integrity hash of the most recently written event (chain head);
    /// empty before the first write.
    previous_hash: RwLock<String>,
}
impl FileAuditStorage {
    /// Creates a file-backed audit store rooted at `config.storage_path`.
    ///
    /// The directory is created if missing, and files older than the
    /// configured retention period are removed immediately.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] if the directory cannot be created
    /// or the retention cleanup cannot enumerate it.
    pub fn new(config: AuditConfig) -> Result<Arc<Self>> {
        fs::create_dir_all(&config.storage_path).map_err(|e| {
            CoreError::AuditError(format!(
                "Failed to create audit directory '{}': {}",
                config.storage_path.display(),
                e
            ))
        })?;
        let storage = Arc::new(Self {
            config,
            file_state: Mutex::new(None),
            previous_hash: RwLock::new(String::new()),
        });
        storage.cleanup_old_files()?;
        Ok(storage)
    }

    /// Returns the configuration this store was constructed with.
    #[must_use]
    pub fn config(&self) -> &AuditConfig {
        &self.config
    }

    /// Computes the SHA-256 chain hash for `event`.
    ///
    /// The digest covers `previous_hash` followed by the event's fields;
    /// metadata is folded in with keys in sorted order so the result is
    /// deterministic regardless of `HashMap` iteration order.
    ///
    /// NOTE(review): fields are concatenated without delimiters, so distinct
    /// field splits can hash identically (e.g. action "ab" + outcome "c" vs
    /// "a" + "bc"). Changing the encoding would invalidate already-persisted
    /// chains, so this is flagged rather than fixed here.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] if the underlying hash fails.
    fn compute_integrity_hash(event: &AuditEvent, previous_hash: &str) -> Result<String> {
        let mut buf = Vec::new();
        buf.extend_from_slice(previous_hash.as_bytes());
        buf.extend_from_slice(event.id.as_bytes());
        buf.extend_from_slice(event.timestamp.to_rfc3339().as_bytes());
        buf.extend_from_slice(event.event_type.to_string().as_bytes());
        if let Some(ref actor) = event.actor {
            buf.extend_from_slice(actor.as_bytes());
        }
        if let Some(ref resource) = event.resource {
            buf.extend_from_slice(resource.as_bytes());
        }
        buf.extend_from_slice(event.action.as_bytes());
        buf.extend_from_slice(event.outcome.to_string().as_bytes());
        // Sort keys so the digest does not depend on HashMap iteration order.
        let mut metadata_keys: Vec<&String> = event.metadata.keys().collect();
        metadata_keys.sort();
        for key in metadata_keys {
            buf.extend_from_slice(key.as_bytes());
            if let Some(value) = event.metadata.get(key) {
                buf.extend_from_slice(value.as_bytes());
            }
        }
        let digest = crate::primitives::hash::sha2::sha256(&buf)
            .map_err(|e| CoreError::AuditError(format!("integrity hash failed: {}", e)))?;
        Ok(hex::encode(digest))
    }

    /// Returns `true` when the active file exceeds the size or age limit.
    fn needs_rotation(&self, state: &FileState) -> bool {
        if state.current_size >= self.config.max_file_size_bytes {
            return true;
        }
        // A negative duration (clock moved backwards) fails `to_std`; treat
        // it as zero age rather than rotating spuriously.
        let age =
            Utc::now().signed_duration_since(state.created_at).to_std().unwrap_or(Duration::ZERO);
        age >= self.config.max_file_age
    }

    /// Flushes and closes the active file when rotation is due, then ensures
    /// an open file exists for the next write.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] if the old file cannot be flushed
    /// or a replacement file cannot be created.
    fn rotate_if_needed(&self, state: &mut Option<FileState>) -> Result<()> {
        let should_rotate = state.as_ref().is_some_and(|s| self.needs_rotation(s));
        if should_rotate {
            if let Some(mut old_state) = state.take() {
                tracing::info!(
                    "Rotating audit file: {} (size: {} bytes)",
                    old_state.current_path.display(),
                    old_state.current_size
                );
                old_state.writer.flush().map_err(|e| {
                    CoreError::AuditError(format!("Failed to flush audit file: {}", e))
                })?;
            }
        }
        if state.is_none() {
            *state = Some(self.create_new_file()?);
        }
        Ok(())
    }

    /// Opens a new timestamped `.jsonl` file in append mode.
    ///
    /// NOTE(review): the filename has one-second resolution; two rotations
    /// within the same second reopen the same file. Append mode prevents
    /// data loss, but `current_size` restarts at 0 for the size check.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] if the file cannot be opened.
    fn create_new_file(&self) -> Result<FileState> {
        let now = Utc::now();
        let filename = format!("audit-{}.jsonl", now.format("%Y-%m-%dT%H-%M-%S"));
        let path = self.config.storage_path.join(&filename);
        let file = OpenOptions::new().create(true).append(true).open(&path).map_err(|e| {
            CoreError::AuditError(format!(
                "Failed to create audit file '{}': {}",
                path.display(),
                e
            ))
        })?;
        tracing::debug!("Created new audit file: {}", path.display());
        Ok(FileState {
            writer: BufWriter::new(file),
            current_path: path,
            current_size: 0,
            created_at: now,
        })
    }

    /// Deletes `.jsonl` files whose modification time is older than the
    /// retention cutoff. Unreadable entries and files that fail to delete
    /// are skipped (with a warning), so cleanup is best-effort.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] if the retention period overflows
    /// date arithmetic or the storage directory cannot be read.
    fn cleanup_old_files(&self) -> Result<()> {
        let retention_duration = chrono::Duration::days(i64::from(self.config.retention_days));
        let Some(cutoff) = Utc::now().checked_sub_signed(retention_duration) else {
            return Err(CoreError::AuditError(format!(
                "Retention period of {} days overflows date arithmetic",
                self.config.retention_days
            )));
        };
        let entries = fs::read_dir(&self.config.storage_path).map_err(|e| {
            CoreError::AuditError(format!(
                "Failed to read audit directory '{}': {}",
                self.config.storage_path.display(),
                e
            ))
        })?;
        for entry in entries {
            let Ok(entry) = entry else { continue };
            let path = entry.path();
            // Only audit files are subject to retention.
            if path.extension().and_then(|e| e.to_str()) != Some("jsonl") {
                continue;
            }
            let Ok(metadata) = fs::metadata(&path) else { continue };
            let modified = match metadata.modified() {
                Ok(t) => DateTime::<Utc>::from(t),
                Err(_) => continue,
            };
            if modified < cutoff {
                if let Err(e) = fs::remove_file(&path) {
                    tracing::warn!("Failed to remove old audit file '{}': {}", path.display(), e);
                } else {
                    tracing::info!("Removed old audit file: {}", path.display());
                }
            }
        }
        Ok(())
    }

    /// Hashes, serializes, and appends `event` to the active file, rotating
    /// first if needed. Sets `event.integrity_hash` as a side effect.
    ///
    /// The chain head (`previous_hash`) is advanced only *after* the event
    /// has been handed to the writer; previously it was advanced before
    /// serialization, so a failed write left the chain pointing at an event
    /// that was never persisted, breaking verification of all later events.
    ///
    /// # Errors
    /// Returns [`CoreError::AuditError`] on hashing, serialization, or I/O
    /// failure.
    fn write_event_to_file(&self, event: &mut AuditEvent) -> Result<()> {
        // `file_state` is held for the whole operation, which also serializes
        // access to `previous_hash` between writers.
        let mut file_state = self.file_state.lock();
        self.rotate_if_needed(&mut file_state)?;
        let state = file_state
            .as_mut()
            .ok_or_else(|| CoreError::AuditError("No active audit file".to_string()))?;
        let previous_hash = self.previous_hash.read().clone();
        event.integrity_hash = Self::compute_integrity_hash(event, &previous_hash)?;
        let json = serde_json::to_string(event).map_err(|e| {
            CoreError::AuditError(format!("Failed to serialize audit event: {}", e))
        })?;
        let line = format!("{}\n", json);
        let line_bytes = line.as_bytes();
        state
            .writer
            .write_all(line_bytes)
            .map_err(|e| CoreError::AuditError(format!("Failed to write audit event: {}", e)))?;
        state.current_size = state.current_size.saturating_add(line_bytes.len() as u64);
        // Advance the chain head only now that the write has succeeded.
        self.previous_hash.write().clone_from(&event.integrity_hash);
        Ok(())
    }
}
impl AuditStorage for FileAuditStorage {
    /// Clones the event so its integrity hash can be filled in before the
    /// clone is serialized and written.
    fn write(&self, event: &AuditEvent) -> Result<()> {
        let mut stamped = event.clone();
        self.write_event_to_file(&mut stamped)
    }

    /// Flushes the buffered writer of the active file; a no-op when no file
    /// has been opened yet.
    fn flush(&self) -> Result<()> {
        let mut guard = self.file_state.lock();
        match guard.as_mut() {
            Some(state) => state.writer.flush().map_err(|e| {
                CoreError::AuditError(format!("Failed to flush audit file: {}", e))
            }),
            None => Ok(()),
        }
    }
}
/// Generates a random RFC 4122 version-4 UUID string (8-4-4-4-12 hex groups)
/// from the crate's CSPRNG.
fn generate_uuid() -> String {
    use std::fmt::Write;
    let bytes_vec = crate::primitives::rand::csprng::random_bytes(16);
    let mut bytes = [0u8; 16];
    bytes.copy_from_slice(&bytes_vec);
    // Stamp the version (4) and variant (10xx) bits per RFC 4122.
    bytes[6] = (bytes[6] & 0x0f) | 0x40;
    bytes[8] = (bytes[8] & 0x3f) | 0x80;
    let mut out = String::with_capacity(36);
    for (i, byte) in bytes.iter().enumerate() {
        // Dashes fall before bytes 4, 6, 8, and 10 (8-4-4-4-12 layout).
        if matches!(i, 4 | 6 | 8 | 10) {
            out.push('-');
        }
        // write! into a String cannot fail.
        let _ = write!(out, "{:02x}", byte);
    }
    out
}
#[cfg(test)]
#[allow(
    clippy::panic,
    clippy::unwrap_used,
    clippy::expect_used,
    clippy::indexing_slicing,
    clippy::arithmetic_side_effects,
    clippy::panic_in_result_fn,
    clippy::unnecessary_wraps,
    clippy::redundant_clone,
    clippy::useless_vec,
    clippy::cast_possible_truncation,
    clippy::cast_sign_loss,
    clippy::clone_on_copy,
    clippy::len_zero,
    clippy::single_match,
    clippy::unnested_or_patterns,
    clippy::default_constructed_unit_structs,
    clippy::redundant_closure_for_method_calls,
    clippy::semicolon_if_nothing_returned,
    clippy::unnecessary_unwrap,
    clippy::redundant_pattern_matching,
    clippy::missing_const_for_thread_local,
    clippy::get_first,
    clippy::float_cmp,
    clippy::needless_borrows_for_generic_args,
    unused_qualifications
)]
mod tests {
    // NOTE(review): many tests below use `if let Ok(dir) = TempDir::new()`
    // and silently pass when the temp directory cannot be created; consider
    // `expect` so environment failures surface as test failures.
    use super::*;
    use tempfile::TempDir;

    // A new event gets an id, empty actor/resource, and the given fields.
    #[test]
    fn test_audit_event_creation_has_correct_defaults_succeeds() {
        let event =
            AuditEvent::new(AuditEventType::CryptoOperation, "encrypt_data", AuditOutcome::Success);
        assert!(!event.id.is_empty());
        assert_eq!(event.action, "encrypt_data");
        assert_eq!(event.outcome, AuditOutcome::Success);
        assert!(event.actor.is_none());
        assert!(event.resource.is_none());
    }

    // Builder path sets actor, resource, and metadata.
    #[test]
    fn test_audit_event_builder_sets_actor_resource_and_metadata_succeeds() {
        let event = AuditEvent::builder(
            AuditEventType::KeyOperation,
            "generate_keypair",
            AuditOutcome::Success,
        )
        .actor("user@example.com")
        .resource("key-001")
        .metadata("algorithm", "ML-KEM-768")
        .build();
        assert_eq!(event.actor.as_deref(), Some("user@example.com"));
        assert_eq!(event.resource.as_deref(), Some("key-001"));
        assert_eq!(event.metadata.get("algorithm").map(|s| s.as_str()), Some("ML-KEM-768"));
    }

    // `with_*` chaining sets the same fields as the builder.
    #[test]
    fn test_audit_event_with_methods_sets_fields_correctly_succeeds() {
        let event = AuditEvent::new(AuditEventType::Authentication, "login", AuditOutcome::Success)
            .with_actor("admin")
            .with_resource("system")
            .with_metadata("ip", "192.168.1.1");
        assert_eq!(event.actor(), Some("admin"));
        assert_eq!(event.resource(), Some("system"));
        assert_eq!(event.metadata().get("ip").map(|s| s.as_str()), Some("192.168.1.1"));
    }

    // Defaults: 100 MiB size limit, 24 h age limit, 90-day retention.
    #[test]
    fn test_audit_config_default_has_expected_values_succeeds() {
        let config = AuditConfig::default();
        assert_eq!(config.max_file_size_bytes, 100 * 1024 * 1024);
        assert_eq!(config.max_file_age, Duration::from_secs(24 * 60 * 60));
        assert_eq!(config.retention_days, 90);
    }

    #[test]
    fn test_audit_config_builder_sets_all_fields_correctly_succeeds() {
        let config = AuditConfig::new(std::env::temp_dir().join("audit"))
            .with_max_file_size(50 * 1024 * 1024)
            .with_max_file_age(Duration::from_secs(12 * 60 * 60))
            .with_retention_days(30);
        assert_eq!(config.max_file_size_bytes, 50 * 1024 * 1024);
        assert_eq!(config.max_file_age, Duration::from_secs(12 * 60 * 60));
        assert_eq!(config.retention_days, 30);
    }

    #[test]
    fn test_file_audit_storage_creation_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path);
            let storage = FileAuditStorage::new(config);
            assert!(storage.is_ok());
        }
    }

    // A write followed by flush must leave at least one file on disk.
    #[test]
    fn test_file_audit_storage_write_creates_file_on_disk_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone());
            if let Ok(storage) = FileAuditStorage::new(config) {
                let event = AuditEvent::new(
                    AuditEventType::CryptoOperation,
                    "test_operation",
                    AuditOutcome::Success,
                );
                let result = storage.write(&event);
                assert!(result.is_ok());
                let flush_result = storage.flush();
                assert!(flush_result.is_ok());
                let entries: Vec<_> = fs::read_dir(&temp_path)
                    .map(|r| r.filter_map(|e| e.ok()).collect())
                    .unwrap_or_default();
                assert!(!entries.is_empty());
            }
        }
    }

    // The chain hash must be deterministic and depend on the previous hash.
    #[test]
    fn test_integrity_hash_chain_produces_unique_chained_hashes_are_unique() {
        let event1 =
            AuditEvent::new(AuditEventType::CryptoOperation, "operation1", AuditOutcome::Success);
        let event2 =
            AuditEvent::new(AuditEventType::CryptoOperation, "operation2", AuditOutcome::Success);
        let hash1 = FileAuditStorage::compute_integrity_hash(&event1, "").unwrap();
        let hash2 = FileAuditStorage::compute_integrity_hash(&event2, &hash1).unwrap();
        assert_ne!(hash1, hash2);
        let hash2_again = FileAuditStorage::compute_integrity_hash(&event2, &hash1).unwrap();
        assert_eq!(hash2, hash2_again);
        let hash2_different =
            FileAuditStorage::compute_integrity_hash(&event2, "different").unwrap();
        assert_ne!(hash2, hash2_different);
    }

    // UUIDs must be 36 chars, 8-4-4-4-12 groups, version nibble '4'.
    #[test]
    fn test_uuid_generation_produces_unique_v4_uuids_are_unique() {
        let uuid1 = generate_uuid();
        let uuid2 = generate_uuid();
        assert_eq!(uuid1.len(), 36);
        assert_eq!(uuid2.len(), 36);
        assert_ne!(uuid1, uuid2);
        let parts: Vec<&str> = uuid1.split('-').collect();
        assert_eq!(parts.len(), 5);
        assert_eq!(parts[0].len(), 8);
        assert_eq!(parts[1].len(), 4);
        assert_eq!(parts[2].len(), 4);
        assert_eq!(parts[3].len(), 4);
        assert_eq!(parts[4].len(), 12);
        assert!(parts[2].starts_with('4'));
    }

    #[test]
    fn test_audit_event_type_display_has_correct_format() {
        assert_eq!(AuditEventType::Authentication.to_string(), "authentication");
        assert_eq!(AuditEventType::KeyOperation.to_string(), "key_operation");
        assert_eq!(AuditEventType::CryptoOperation.to_string(), "crypto_operation");
        assert_eq!(AuditEventType::AccessControl.to_string(), "access_control");
        assert_eq!(AuditEventType::SessionManagement.to_string(), "session_management");
        assert_eq!(AuditEventType::SecurityAlert.to_string(), "security_alert");
        assert_eq!(AuditEventType::ConfigurationChange.to_string(), "configuration_change");
        assert_eq!(AuditEventType::System.to_string(), "system");
    }

    #[test]
    fn test_audit_outcome_display_has_correct_format() {
        assert_eq!(AuditOutcome::Success.to_string(), "success");
        assert_eq!(AuditOutcome::Failure.to_string(), "failure");
        assert_eq!(AuditOutcome::Denied.to_string(), "denied");
    }

    #[test]
    fn test_audit_config_accessors_return_configured_values_succeeds() {
        let test_path = std::env::temp_dir().join("latticearc_audit_test");
        let config = AuditConfig::new(test_path.clone())
            .with_max_file_size(1024)
            .with_max_file_age(Duration::from_secs(60))
            .with_retention_days(7);
        assert_eq!(config.storage_path(), &test_path);
        assert_eq!(config.max_file_size_bytes(), 1024);
        assert_eq!(config.max_file_age(), Duration::from_secs(60));
        assert_eq!(config.retention_days(), 7);
    }

    #[test]
    fn test_audit_event_accessors_return_correct_values_succeeds() {
        let event =
            AuditEvent::new(AuditEventType::SecurityAlert, "detect_anomaly", AuditOutcome::Failure)
                .with_actor("system")
                .with_resource("network")
                .with_metadata("severity", "high");
        assert!(!event.id().is_empty());
        assert_eq!(*event.event_type(), AuditEventType::SecurityAlert);
        assert_eq!(event.action(), "detect_anomaly");
        assert_eq!(*event.outcome(), AuditOutcome::Failure);
        assert_eq!(event.actor(), Some("system"));
        assert_eq!(event.resource(), Some("network"));
        assert!(event.metadata().contains_key("severity"));
        assert!(event.integrity_hash().is_empty());
        let now = Utc::now();
        let diff = now.signed_duration_since(event.timestamp());
        assert!(diff.num_seconds() < 5);
    }

    #[test]
    fn test_file_audit_storage_config_accessor_returns_configured_path_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone()).with_retention_days(30);
            if let Ok(storage) = FileAuditStorage::new(config) {
                assert_eq!(storage.config().storage_path(), &temp_path);
                assert_eq!(storage.config().retention_days(), 30);
            }
        }
    }

    // Five writes land in a single file; each line carries a non-empty hash.
    #[test]
    fn test_file_audit_storage_multiple_events_writes_all_to_file_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone());
            if let Ok(storage) = FileAuditStorage::new(config) {
                for i in 0..5 {
                    let event = AuditEvent::new(
                        AuditEventType::CryptoOperation,
                        &format!("operation_{}", i),
                        AuditOutcome::Success,
                    );
                    let result = storage.write(&event);
                    assert!(result.is_ok(), "Write {} should succeed", i);
                }
                storage.flush().expect("Flush should succeed");
                let entries: Vec<_> =
                    fs::read_dir(&temp_path).unwrap().filter_map(|e| e.ok()).collect();
                assert_eq!(entries.len(), 1, "Should have one audit file");
                let content = fs::read_to_string(entries[0].path()).unwrap();
                let lines: Vec<&str> = content.lines().collect();
                assert_eq!(lines.len(), 5, "Should have 5 event lines");
                for line in &lines {
                    let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
                    assert!(!parsed["integrity_hash"].as_str().unwrap().is_empty());
                }
            }
        }
    }

    // A 100-byte size limit forces rotation without losing writes.
    #[test]
    fn test_file_audit_storage_rotation_by_size_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone()).with_max_file_size(100);
            if let Ok(storage) = FileAuditStorage::new(config) {
                for i in 0..10 {
                    let event = AuditEvent::new(
                        AuditEventType::CryptoOperation,
                        &format!("operation_{}", i),
                        AuditOutcome::Success,
                    )
                    .with_metadata("data", "some value to make the event larger");
                    let result = storage.write(&event);
                    assert!(result.is_ok(), "Write {} should succeed even with rotation", i);
                }
                storage.flush().expect("Flush should succeed");
                let entries: Vec<_> = fs::read_dir(&temp_path)
                    .unwrap()
                    .filter_map(|e| e.ok())
                    .filter(|e| e.path().extension().and_then(|ext| ext.to_str()) == Some("jsonl"))
                    .collect();
                assert!(!entries.is_empty(), "Should have at least one audit file");
            }
        }
    }

    #[test]
    fn test_flush_without_writes_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path);
            if let Ok(storage) = FileAuditStorage::new(config) {
                let result = storage.flush();
                assert!(result.is_ok());
            }
        }
    }

    #[test]
    fn test_audit_event_serialization_roundtrip_preserves_all_fields_roundtrip() {
        let event =
            AuditEvent::new(AuditEventType::KeyOperation, "rotate_key", AuditOutcome::Success)
                .with_actor("admin")
                .with_resource("key-123")
                .with_metadata("old_algo", "RSA-2048")
                .with_metadata("new_algo", "ML-KEM-768");
        let json = serde_json::to_string(&event).expect("Serialization should succeed");
        let deserialized: AuditEvent =
            serde_json::from_str(&json).expect("Deserialization should succeed");
        assert_eq!(deserialized.action, event.action);
        assert_eq!(deserialized.actor, event.actor);
        assert_eq!(deserialized.resource, event.resource);
        assert_eq!(deserialized.outcome, event.outcome);
        assert_eq!(deserialized.event_type, event.event_type);
        assert_eq!(deserialized.metadata.len(), 2);
    }

    // NOTE(review): the two events differ in id/timestamp as well as
    // metadata, so this shows the hashes differ but not that metadata alone
    // caused it.
    #[test]
    fn test_integrity_hash_includes_metadata_produces_distinct_hashes_are_unique() {
        let event_no_meta =
            AuditEvent::new(AuditEventType::System, "startup", AuditOutcome::Success);
        let event_with_meta =
            AuditEvent::new(AuditEventType::System, "startup", AuditOutcome::Success)
                .with_metadata("version", "1.0");
        let hash1 = FileAuditStorage::compute_integrity_hash(&event_no_meta, "").unwrap();
        let hash2 = FileAuditStorage::compute_integrity_hash(&event_with_meta, "").unwrap();
        assert_ne!(hash1, hash2, "Different metadata should produce different hashes");
    }

    // Every (type, outcome) combination must be writable.
    #[test]
    fn test_audit_event_all_types_and_outcomes_write_successfully_succeeds() {
        let types = [
            AuditEventType::Authentication,
            AuditEventType::KeyOperation,
            AuditEventType::CryptoOperation,
            AuditEventType::AccessControl,
            AuditEventType::SessionManagement,
            AuditEventType::SecurityAlert,
            AuditEventType::ConfigurationChange,
            AuditEventType::System,
        ];
        let outcomes = [AuditOutcome::Success, AuditOutcome::Failure, AuditOutcome::Denied];
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path);
            if let Ok(storage) = FileAuditStorage::new(config) {
                for event_type in &types {
                    for outcome in &outcomes {
                        let event = AuditEvent::new(*event_type, "test", *outcome);
                        assert!(storage.write(&event).is_ok());
                    }
                }
                assert!(storage.flush().is_ok());
            }
        }
    }

    // A zero age limit makes every subsequent write rotate.
    #[test]
    fn test_file_audit_storage_rotation_by_age_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config =
                AuditConfig::new(temp_path.clone()).with_max_file_age(Duration::from_secs(0));
            if let Ok(storage) = FileAuditStorage::new(config) {
                let event1 =
                    AuditEvent::new(AuditEventType::CryptoOperation, "op_1", AuditOutcome::Success);
                assert!(storage.write(&event1).is_ok());
                std::thread::sleep(Duration::from_millis(10));
                let event2 =
                    AuditEvent::new(AuditEventType::CryptoOperation, "op_2", AuditOutcome::Success);
                assert!(storage.write(&event2).is_ok());
                assert!(storage.flush().is_ok());
            }
        }
    }

    // NOTE(review): despite the `_fails` suffix, this test asserts that a
    // freshly written file *survives* cleanup under a very long retention.
    #[test]
    fn test_cleanup_removes_old_jsonl_files_without_error_fails() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let old_file = temp_path.join("audit-old.jsonl");
            fs::write(&old_file, "old data\n").unwrap();
            let config = AuditConfig::new(temp_path.clone()).with_retention_days(36500);
            let storage = FileAuditStorage::new(config);
            assert!(storage.is_ok());
            assert!(old_file.exists());
        }
    }

    // Non-.jsonl files are never touched by cleanup, even at retention 0.
    #[test]
    fn test_cleanup_skips_non_jsonl_files_leaving_them_intact_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let txt_file = temp_path.join("notes.txt");
            fs::write(&txt_file, "not an audit file\n").unwrap();
            let config = AuditConfig::new(temp_path).with_retention_days(0);
            let storage = FileAuditStorage::new(config);
            assert!(storage.is_ok());
            assert!(txt_file.exists());
        }
    }

    // Persisted hash must be hex-encoded SHA-256 (64 chars).
    #[test]
    fn test_write_sets_integrity_hash_to_64_char_hex_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone());
            if let Ok(storage) = FileAuditStorage::new(config) {
                let event = AuditEvent::new(
                    AuditEventType::CryptoOperation,
                    "hash_test",
                    AuditOutcome::Success,
                );
                storage.write(&event).unwrap();
                storage.flush().unwrap();
                let entries: Vec<_> =
                    fs::read_dir(&temp_path).unwrap().filter_map(|e| e.ok()).collect();
                let content = fs::read_to_string(entries[0].path()).unwrap();
                let parsed: serde_json::Value = serde_json::from_str(content.trim()).unwrap();
                let hash = parsed["integrity_hash"].as_str().unwrap();
                assert!(!hash.is_empty(), "Integrity hash should be set after write");
                assert_eq!(hash.len(), 64, "SHA-256 hash should be 64 hex chars");
            }
        }
    }

    // Consecutive persisted events must carry pairwise-distinct hashes.
    #[test]
    fn test_integrity_hash_chain_consistency_produces_unique_hashes_per_event_are_unique() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let config = AuditConfig::new(temp_path.clone());
            if let Ok(storage) = FileAuditStorage::new(config) {
                for i in 0..3 {
                    let event = AuditEvent::new(
                        AuditEventType::CryptoOperation,
                        &format!("chain_op_{}", i),
                        AuditOutcome::Success,
                    );
                    storage.write(&event).unwrap();
                }
                storage.flush().unwrap();
                let entries: Vec<_> =
                    fs::read_dir(&temp_path).unwrap().filter_map(|e| e.ok()).collect();
                let content = fs::read_to_string(entries[0].path()).unwrap();
                let events: Vec<AuditEvent> =
                    content.lines().map(|line| serde_json::from_str(line).unwrap()).collect();
                assert_eq!(events.len(), 3);
                let hashes: Vec<&str> = events.iter().map(|e| e.integrity_hash.as_str()).collect();
                assert!(hashes.iter().all(|h| !h.is_empty()));
                assert_ne!(hashes[0], hashes[1]);
                assert_ne!(hashes[1], hashes[2]);
            }
        }
    }

    #[test]
    fn test_compute_integrity_hash_with_actor_and_resource_differs_from_without_succeeds() {
        let event = AuditEvent::new(AuditEventType::System, "test", AuditOutcome::Success)
            .with_actor("user1")
            .with_resource("resource1");
        let hash_with = FileAuditStorage::compute_integrity_hash(&event, "").unwrap();
        let event_without = AuditEvent::new(AuditEventType::System, "test", AuditOutcome::Success);
        let hash_without = FileAuditStorage::compute_integrity_hash(&event_without, "").unwrap();
        assert_ne!(hash_with, hash_without);
    }

    #[test]
    fn test_audit_event_serde_roundtrip_all_fields_roundtrip() {
        let event =
            AuditEvent::new(AuditEventType::AccessControl, "policy_eval", AuditOutcome::Denied)
                .with_actor("service-account")
                .with_resource("secrets/key-001")
                .with_metadata("policy_id", "pol-42")
                .with_metadata("deny_reason", "insufficient_privileges");
        let json = serde_json::to_string(&event).unwrap();
        let deserialized: AuditEvent = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.event_type, AuditEventType::AccessControl);
        assert_eq!(deserialized.outcome, AuditOutcome::Denied);
        assert_eq!(deserialized.actor.as_deref(), Some("service-account"));
        assert_eq!(deserialized.resource.as_deref(), Some("secrets/key-001"));
        assert_eq!(deserialized.metadata.len(), 2);
        assert_eq!(
            deserialized.metadata.get("deny_reason").map(|s| s.as_str()),
            Some("insufficient_privileges")
        );
    }

    // A 1-byte limit must rotate after each write; a 100 MiB limit must not.
    #[test]
    fn test_max_file_size_bytes_influences_rotation_trigger_has_correct_size() {
        let config_tiny = AuditConfig::default().with_max_file_size(1);
        let config_large = AuditConfig::default().with_max_file_size(100 * 1024 * 1024);
        assert_ne!(
            config_tiny.max_file_size_bytes(),
            config_large.max_file_size_bytes(),
            "max_file_size_bytes must differ between the two configs"
        );
        assert_eq!(config_tiny.max_file_size_bytes(), 1);
        assert_eq!(config_large.max_file_size_bytes(), 100 * 1024 * 1024);
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let dir_tiny = dir.path().join("tiny");
            fs::create_dir_all(&dir_tiny).unwrap();
            let storage_tiny =
                FileAuditStorage::new(AuditConfig::new(dir_tiny.clone()).with_max_file_size(1));
            if let Ok(s) = storage_tiny {
                let event1 = AuditEvent::new(
                    AuditEventType::CryptoOperation,
                    "op_first",
                    AuditOutcome::Success,
                );
                s.write(&event1).unwrap();
                let event2 = AuditEvent::new(
                    AuditEventType::CryptoOperation,
                    "op_second",
                    AuditOutcome::Success,
                );
                assert!(
                    s.write(&event2).is_ok(),
                    "Write after size-triggered rotation must succeed"
                );
                s.flush().unwrap();
            }
            let dir_large = dir.path().join("large");
            fs::create_dir_all(&dir_large).unwrap();
            let storage_large = FileAuditStorage::new(
                AuditConfig::new(dir_large.clone()).with_max_file_size(100 * 1024 * 1024),
            );
            if let Ok(s) = storage_large {
                for i in 0..5 {
                    let event = AuditEvent::new(
                        AuditEventType::CryptoOperation,
                        &format!("op_{}", i),
                        AuditOutcome::Success,
                    );
                    s.write(&event).unwrap();
                }
                s.flush().unwrap();
                let file_count = fs::read_dir(&dir_large)
                    .unwrap()
                    .filter_map(|e| e.ok())
                    .filter(|e| e.path().extension().and_then(|x| x.to_str()) == Some("jsonl"))
                    .count();
                assert_eq!(
                    file_count, 1,
                    "max_file_size_bytes=100MB must not rotate for 5 small events (got {})",
                    file_count
                );
            }
        }
    }

    #[test]
    fn test_retention_days_influences_cleanup_cutoff_succeeds() {
        let config_short = AuditConfig::default().with_retention_days(1);
        let config_long = AuditConfig::default().with_retention_days(365);
        assert_ne!(
            config_short.retention_days(),
            config_long.retention_days(),
            "retention_days must influence the cleanup cutoff"
        );
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let temp_path = dir.path().to_path_buf();
            let new_file = temp_path.join("current.jsonl");
            fs::write(&new_file, "fresh event\n").unwrap();
            let config = AuditConfig::new(temp_path).with_retention_days(0);
            let storage = FileAuditStorage::new(config);
            assert!(storage.is_ok(), "Storage creation with retention_days=0 must succeed");
        }
    }

    #[test]
    fn test_max_file_age_influences_rotation_trigger_succeeds() {
        let config_short = AuditConfig::default().with_max_file_age(Duration::from_secs(1));
        let config_long = AuditConfig::default().with_max_file_age(Duration::from_secs(86400));
        assert_ne!(
            config_short.max_file_age(),
            config_long.max_file_age(),
            "max_file_age must influence when file rotation is triggered"
        );
    }

    // Distinct storage paths must yield distinct, independently created files.
    #[test]
    fn test_storage_path_influences_file_location_succeeds() {
        let temp_dir = TempDir::new();
        if let Ok(dir) = temp_dir {
            let path_a = dir.path().join("audit_a");
            let path_b = dir.path().join("audit_b");
            let config_a = AuditConfig::new(path_a.clone());
            let config_b = AuditConfig::new(path_b.clone());
            assert_ne!(
                config_a.storage_path(),
                config_b.storage_path(),
                "storage_path must differ between configs"
            );
            if let Ok(storage_a) = FileAuditStorage::new(config_a) {
                let event = AuditEvent::new(AuditEventType::System, "start", AuditOutcome::Success);
                storage_a.write(&event).unwrap();
                storage_a.flush().unwrap();
                assert!(path_a.exists(), "Storage path A must be created by FileAuditStorage::new");
            }
            if let Ok(storage_b) = FileAuditStorage::new(config_b) {
                let event = AuditEvent::new(AuditEventType::System, "start", AuditOutcome::Success);
                storage_b.write(&event).unwrap();
                storage_b.flush().unwrap();
                assert!(path_b.exists(), "Storage path B must be created by FileAuditStorage::new");
            }
            let files_a: Vec<_> = fs::read_dir(&path_a)
                .unwrap()
                .filter_map(|e| e.ok())
                .filter(|e| e.path().extension().and_then(|x| x.to_str()) == Some("jsonl"))
                .collect();
            let files_b: Vec<_> = fs::read_dir(&path_b)
                .unwrap()
                .filter_map(|e| e.ok())
                .filter(|e| e.path().extension().and_then(|x| x.to_str()) == Some("jsonl"))
                .collect();
            assert_eq!(files_a.len(), 1, "storage_path_a must contain exactly one .jsonl file");
            assert_eq!(files_b.len(), 1, "storage_path_b must contain exactly one .jsonl file");
            assert_ne!(
                files_a[0].path(),
                files_b[0].path(),
                "Files in different storage paths must have different absolute paths"
            );
        }
    }
}