use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::io::Write;
use std::path::PathBuf;
use std::sync::{Mutex, OnceLock};
use super::events::AuditEvent;
use crate::error::Error;
/// Maximum number of per-root append locks cached in the registry before
/// the least-recently-used entry is evicted.
const APPEND_LOCK_CAP: usize = 256;
/// Registry slot pairing one audit root's in-process append lock with an
/// LRU stamp used for eviction.
struct AppendLockEntry {
    // In-process mutex serializing appends for a single audit root.
    lock: std::sync::Arc<Mutex<()>>,
    // Tick from `next_lock_tick` at last use; the smallest stamp is evicted first.
    last_used: u64,
}
/// Process-wide registry mapping audit roots to their in-process append
/// locks; lazily initialized on first use.
fn append_locks() -> &'static Mutex<HashMap<PathBuf, AppendLockEntry>> {
    static LOCKS: OnceLock<Mutex<HashMap<PathBuf, AppendLockEntry>>> = OnceLock::new();
    LOCKS.get_or_init(Mutex::default)
}
/// Hands out a process-wide, monotonically increasing ticket used to stamp
/// lock-registry entries for LRU eviction.
fn next_lock_tick() -> u64 {
    use std::sync::atomic::{AtomicU64, Ordering};
    static TICK: AtomicU64 = AtomicU64::new(0);
    // Relaxed is sufficient: each caller only needs a unique, increasing value.
    TICK.fetch_add(1, Ordering::Relaxed)
}
/// Hard cap (256 MiB) on how much of the audit log a single read loads;
/// larger logs are read from the tail only.
const AUDIT_LOG_MAX_READ: u64 = 256 * 1024 * 1024;
/// Reads the audit log as text, returning at most the last
/// `AUDIT_LOG_MAX_READ` bytes. When the file exceeds the cap, the first
/// (likely partial) line of the tail is discarded so callers only ever see
/// whole lines.
///
/// Symlink traversal is refused via `open_read_no_traverse`; its project
/// errors are folded back into `std::io::Error` so this stays a plain I/O
/// interface.
fn read_audit_log_tail(path: &std::path::Path) -> std::io::Result<String> {
    use std::io::{Read, Seek, SeekFrom};
    let mut file = crate::file::atomic_open::open_read_no_traverse(path).map_err(|e| match e {
        // Tamper detection maps to PermissionDenied so callers treat it as unreadable.
        crate::error::Error::PolicyTampered(msg) => {
            std::io::Error::new(std::io::ErrorKind::PermissionDenied, msg)
        }
        crate::error::Error::StorageIo(io) => io,
        other => std::io::Error::other(format!("audit log open failed: {other}")),
    })?;
    let len = file.metadata()?.len();
    // Small enough: read the whole file (requires valid UTF-8).
    if len <= AUDIT_LOG_MAX_READ {
        let mut s = String::new();
        file.read_to_string(&mut s)?;
        return Ok(s);
    }
    // Oversized: seek so exactly the last AUDIT_LOG_MAX_READ bytes remain.
    file.seek(SeekFrom::Start(len - AUDIT_LOG_MAX_READ))?;
    let cap = usize::try_from(AUDIT_LOG_MAX_READ).unwrap_or(usize::MAX);
    let mut buf = Vec::with_capacity(cap);
    file.take(AUDIT_LOG_MAX_READ).read_to_end(&mut buf)?;
    // Lossy conversion: the seek may have landed mid-UTF-8-sequence.
    let s = String::from_utf8_lossy(&buf).to_string();
    // Drop everything up to and including the first newline — the seek
    // almost certainly landed mid-line.
    let trimmed = s.split_once('\n').map_or("", |(_, rest)| rest).to_string();
    Ok(trimmed)
}
/// Returns the in-process append lock for `root`, creating one on demand.
///
/// Roots are keyed by canonical path when canonicalization succeeds, so two
/// spellings of the same directory share a lock. The registry is capped at
/// `APPEND_LOCK_CAP`; when full, the least-recently-used entry is evicted.
/// An evicted lock may still be held via its `Arc`, in which case the
/// cross-process file lock taken in `log_at` remains the backstop.
fn append_lock_for(root: &std::path::Path) -> std::sync::Arc<Mutex<()>> {
    let key = std::fs::canonicalize(root).unwrap_or_else(|_| root.to_path_buf());
    let mut registry = append_locks()
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner);
    let stamp = next_lock_tick();
    // Fast path: refresh the LRU stamp and hand back the existing lock.
    if let Some(entry) = registry.get_mut(&key) {
        entry.last_used = stamp;
        return entry.lock.clone();
    }
    // At capacity: drop the stalest entry before inserting a fresh one.
    if registry.len() >= APPEND_LOCK_CAP {
        let stalest = registry
            .iter()
            .min_by_key(|(_, entry)| entry.last_used)
            .map(|(path, _)| path.clone());
        if let Some(path) = stalest {
            registry.remove(&path);
        }
    }
    let fresh = std::sync::Arc::new(Mutex::new(()));
    registry.insert(
        key,
        AppendLockEntry {
            lock: fresh.clone(),
            last_used: stamp,
        },
    );
    fresh
}
/// Appends `event` to the audit log under the default vault root.
///
/// # Errors
/// Propagates any failure from resolving the root or from [`log_at`].
pub fn log(event: &AuditEvent) -> Result<(), Error> {
    log_at(&default_audit_dir()?, event)
}
/// Appends one sealed, hash-chained entry to `root/audit.log`.
///
/// Ordering matters here: the in-process lock is taken before the
/// cross-process advisory lock, the chain is verified (rotating a corrupted
/// log aside) before the previous chain value is read, and only then is the
/// new line computed and appended.
///
/// # Errors
/// Returns [`Error::AuditLogFailed`] for directory/lock/serialize/write
/// failures, and propagates errors from the symlink guards and the cipher.
pub fn log_at(root: &std::path::Path, event: &AuditEvent) -> Result<(), Error> {
    std::fs::create_dir_all(root)
        .map_err(|e| Error::AuditLogFailed(format!("failed to create audit directory: {e}")))?;
    // Owner-only directory permissions on Unix.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        std::fs::set_permissions(root, std::fs::Permissions::from_mode(0o700)).map_err(|e| {
            Error::AuditLogFailed(format!("failed to set audit directory permissions: {e}"))
        })?;
    }
    let log_path = root.join("audit.log");
    crate::guard::verify_not_symlink(&log_path)?;
    // In-process serialization first (threads in this process)...
    let lock = append_lock_for(root);
    let _append_guard = lock
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner);
    // ...then the cross-process advisory file lock (other processes).
    let lock_path = root.join("audit.log.lock");
    crate::guard::verify_not_symlink(&lock_path)?;
    let _xprocess_lock = crate::config::persistence::advisory_lock::acquire(&lock_path, true)
        .map_err(|e| {
            Error::AuditLogFailed(format!("audit cross-process lock acquire failed: {e}"))
        })?;
    // Seal the canonical event JSON; the plaintext buffer is zeroized on drop.
    let event_plaintext = zeroize::Zeroizing::new(canonical_event_json(event)?);
    let sealed_hex = super::cipher::encrypt_event(root, event_plaintext.as_bytes())?;
    let body_payload = canonical_sealed_payload(&sealed_hex);
    // Verify (and possibly rotate) the existing chain BEFORE reading the
    // previous chain value, so we never extend a corrupted chain.
    verify_chain_if_exists(&log_path)?;
    let prev = last_chain(&log_path).unwrap_or_else(|| "genesis".to_string());
    let chain = compute_chain(&prev, &body_payload);
    let mut line = build_entry_line(&now_iso8601(), std::process::id(), &chain, &sealed_hex)?;
    line.push('\n');
    // NOTE(review): exists() check before open is a small TOCTOU window; it
    // only gates the best-effort directory fsync below.
    let was_new = !log_path.exists();
    let mut file = crate::file::atomic_open::open_append_no_traverse(&log_path)
        .map_err(|e| Error::AuditLogFailed(format!("failed to open audit log: {e}")))?;
    // Best-effort append-only attribute on Linux; failure is ignored.
    #[cfg(target_os = "linux")]
    {
        let _ = std::process::Command::new("chattr")
            .args(["+a", "--", log_path.to_string_lossy().as_ref()])
            .output();
    }
    file.write_all(line.as_bytes())
        .map_err(|e| Error::AuditLogFailed(format!("failed to write audit log: {e}")))?;
    // Durability is best-effort: sync failures do not fail the append.
    let _ = file.sync_data();
    if was_new {
        fsync_dir_best_effort(root);
    }
    Ok(())
}
/// Best-effort fsync of a directory so a freshly created file's directory
/// entry becomes durable; all errors are deliberately ignored. No-op on
/// non-Unix targets.
#[allow(unused_variables)]
fn fsync_dir_best_effort(dir: &std::path::Path) {
    #[cfg(unix)]
    let _ = std::fs::File::open(dir).and_then(|d| d.sync_all());
}
/// Appends `event` to the default audit log. Currently delegates straight to
/// [`log`]; presumably kept as a distinct entry point for call sites where
/// audit loss must not be tolerated — TODO confirm the intended distinction.
pub fn log_required(event: &AuditEvent) -> Result<(), Error> {
    log(event)
}
/// Root-specific variant of [`log_required`]; currently identical to [`log_at`].
pub fn log_required_at(root: &std::path::Path, event: &AuditEvent) -> Result<(), Error> {
    log_at(root, event)
}
/// Pulls the raw (still-escaped) string value for `key` out of a flat JSON
/// object without a full parse.
///
/// Returns `None` when the key is absent or the string is unterminated.
/// Escape sequences are skipped, not decoded, so the returned text is the
/// verbatim source between the quotes (e.g. `\"` and `\uXXXX` stay escaped).
#[must_use]
pub fn extract_json_field(json: &str, key: &str) -> Option<String> {
    let needle = format!("\"{key}\":\"");
    let value_start = json.find(&needle)? + needle.len();
    let raw = json.as_bytes();
    let mut idx = value_start;
    while idx < raw.len() {
        match raw[idx] {
            // Escape sequence: skip it whole so an escaped quote does not
            // terminate the value early.
            b'\\' if idx + 1 < raw.len() => {
                if raw[idx + 1] == b'u' && idx + 5 < raw.len() {
                    let hi = (hex_nibble(raw[idx + 2]), hex_nibble(raw[idx + 3]));
                    // \uD8xx..\uDBxx opens a surrogate pair; when a low
                    // surrogate follows, consume both units together.
                    if matches!(hi, (Some(0xD), Some(0x8..=0xB)))
                        && idx + 11 < raw.len()
                        && raw[idx + 6] == b'\\'
                        && raw[idx + 7] == b'u'
                    {
                        let lo = (hex_nibble(raw[idx + 8]), hex_nibble(raw[idx + 9]));
                        if matches!(lo, (Some(0xD), Some(0xC..=0xF))) {
                            idx += 12;
                            continue;
                        }
                    }
                    idx += 6;
                } else {
                    idx += 2;
                }
            }
            // Unescaped closing quote ends the value.
            b'"' => break,
            // Plain byte (including UTF-8 continuation bytes): walk past it.
            _ => idx += 1,
        }
    }
    if idx >= raw.len() || raw[idx] != b'"' {
        return None;
    }
    Some(json[value_start..idx].to_string())
}
/// Decodes one ASCII hex digit to its numeric value; `None` for non-hex bytes.
fn hex_nibble(b: u8) -> Option<u8> {
    match b {
        b'0'..=b'9' => Some(b - b'0'),
        b'a'..=b'f' => Some(b - b'a' + 10),
        b'A'..=b'F' => Some(b - b'A' + 10),
        _ => None,
    }
}
/// One fully decoded audit-log line.
#[derive(Debug)]
pub struct ParsedEntry {
    /// Timestamp string as written by `now_iso8601` (UTC, `...Z`).
    pub ts: String,
    /// PID of the process that wrote the entry.
    pub pid: u32,
    /// Hex SHA-256 hash-chain value for this entry.
    pub chain: String,
    /// Decoded event; `EncryptedUnreadable` when a sealed body cannot be
    /// decrypted on this device.
    pub event: super::AuditEvent,
    /// Which on-disk envelope the line used.
    pub envelope: EnvelopeKind,
}
/// On-disk envelope format of a log line.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnvelopeKind {
    /// Body encrypted via `super::cipher` and stored in a `"sealed"` field.
    Sealed,
    /// Older format with event fields inline in plaintext.
    LegacyPlaintext,
}
/// Resolves the default audit root for [`parse_entry`]; `None` when the
/// vault root cannot be determined.
fn parse_entry_default_root() -> Option<std::path::PathBuf> {
    default_audit_dir().ok()
}
/// Parses one raw log line using the default audit root for decryption.
/// See [`parse_entry_at`].
#[must_use]
pub fn parse_entry(line: &str) -> Option<ParsedEntry> {
    let root = parse_entry_default_root()?;
    parse_entry_at(&root, line)
}
/// Parses one raw log line into a [`ParsedEntry`], decrypting sealed bodies
/// with the key material for `root`. Returns `None` for oversized or
/// structurally invalid lines (missing/ill-typed `ts`, `pid`, or `chain`).
#[must_use]
pub fn parse_entry_at(root: &std::path::Path, line: &str) -> Option<ParsedEntry> {
    // Reject absurdly long lines (>8 MiB) before handing them to serde.
    if line.len() > 8 * 1024 * 1024 {
        return None;
    }
    let value: serde_json::Value = serde_json::from_str(line).ok()?;
    let obj = value.as_object()?;
    let ts = obj.get("ts")?.as_str()?.to_string();
    let pid = u32::try_from(obj.get("pid")?.as_u64()?).ok()?;
    let chain = obj.get("chain")?.as_str()?.to_string();
    // Sealed envelope: decrypt the body. Decryption or deserialization
    // failure degrades to a placeholder event rather than dropping the line.
    if let Some(sealed_hex) = obj.get("sealed").and_then(serde_json::Value::as_str) {
        let event = super::cipher::decrypt_event(root, sealed_hex)
            .ok()
            .and_then(|pt| serde_json::from_slice::<super::AuditEvent>(&pt).ok())
            .unwrap_or_else(|| super::AuditEvent::EncryptedUnreadable {
                reason: "audit key unavailable or entry corrupt on this device".to_string(),
            });
        return Some(ParsedEntry {
            ts,
            pid,
            chain,
            event,
            envelope: EnvelopeKind::Sealed,
        });
    }
    // Legacy plaintext envelope: event fields live inline beside the
    // ts/pid/chain metadata, so strip the metadata and deserialize the rest.
    let mut body = obj.clone();
    body.remove("ts");
    body.remove("pid");
    body.remove("chain");
    let event = serde_json::from_value::<super::AuditEvent>(serde_json::Value::Object(body))
        .unwrap_or_else(|_| super::AuditEvent::EncryptedUnreadable {
            reason: "legacy entry failed to parse".to_string(),
        });
    Some(ParsedEntry {
        ts,
        pid,
        chain,
        event,
        envelope: EnvelopeKind::LegacyPlaintext,
    })
}
/// Reads and decodes up to `n` entries from `root` (newest first), counting
/// lines that fail to parse instead of dropping them silently.
#[must_use]
pub fn read_last_parsed_at(root: &std::path::Path, n: usize) -> ParsedReadResult {
    let raw = read_last_at(root, n);
    let mut result = ParsedReadResult {
        entries: Vec::with_capacity(raw.len()),
        dropped_lines: 0,
    };
    for line in &raw {
        if let Some(parsed) = parse_entry_at(root, line) {
            result.entries.push(parsed);
        } else {
            result.dropped_lines += 1;
        }
    }
    result
}
/// Outcome of a parsed read: decoded entries plus a count of unparseable
/// lines, kept so callers can surface possible tampering.
#[derive(Debug, Default)]
pub struct ParsedReadResult {
    /// Successfully decoded entries, newest first.
    pub entries: Vec<ParsedEntry>,
    /// Number of lines skipped because they failed to parse.
    pub dropped_lines: usize,
}
/// Returns up to `n` raw log lines (newest first) from the default audit
/// root; empty when the root cannot be resolved.
#[must_use]
pub fn read_last(n: usize) -> Vec<String> {
    default_audit_dir()
        .map(|dir| read_last_at(&dir, n))
        .unwrap_or_default()
}
/// Reads up to `n` raw lines from `root/audit.log`, newest first. A missing
/// or unreadable log yields an empty vec.
#[must_use]
pub fn read_last_at(root: &std::path::Path, n: usize) -> Vec<String> {
    match read_audit_log_tail(&root.join("audit.log")) {
        Ok(contents) => contents.lines().rev().take(n).map(String::from).collect(),
        Err(_) => Vec::new(),
    }
}
/// Criteria for filtering raw audit-log lines.
#[derive(Debug, Default, Clone)]
pub struct AuditFilter {
    /// Case-insensitive substring that must appear somewhere in the line.
    pub query: Option<String>,
    /// Event type name, matched against the serialized `"event":"…"` field.
    pub event_type: Option<String>,
}
impl AuditFilter {
    /// `true` when no criteria are set, i.e. every line matches.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.query.is_none() && self.event_type.is_none()
    }
    /// Case-insensitive match of `line` against the configured criteria;
    /// all set criteria must match.
    #[must_use]
    pub fn matches_line(&self, line: &str) -> bool {
        if self.is_empty() {
            return true;
        }
        // Lowercase the haystack once; the previous version allocated a
        // fresh lowercased copy of the whole line per criterion.
        let haystack = line.to_ascii_lowercase();
        if let Some(q) = &self.query {
            if !haystack.contains(&q.to_ascii_lowercase()) {
                return false;
            }
        }
        if let Some(et) = &self.event_type {
            let pattern = format!("\"event\":\"{}\"", et.to_ascii_lowercase());
            if !haystack.contains(&pattern) {
                return false;
            }
        }
        true
    }
}
/// Filtered variant of [`read_last`], reading from the default audit root;
/// empty when the root cannot be resolved.
#[must_use]
pub fn read_last_filtered(n: usize, filter: &AuditFilter) -> Vec<String> {
    default_audit_dir()
        .map(|dir| read_last_filtered_at(&dir, n, filter))
        .unwrap_or_default()
}
/// Returns up to `n` raw lines (newest first) whose decrypted view matches
/// `filter`. The returned lines themselves stay in their on-disk (sealed)
/// form; only the matching happens against decrypted content.
#[must_use]
pub fn read_last_filtered_at(
    root: &std::path::Path,
    n: usize,
    filter: &AuditFilter,
) -> Vec<String> {
    // No criteria: skip the per-line decryption entirely.
    if filter.is_empty() {
        return read_last_at(root, n);
    }
    let contents = match read_audit_log_tail(&root.join("audit.log")) {
        Ok(c) => c,
        Err(_) => return Vec::new(),
    };
    let mut matched = Vec::new();
    for line in contents.lines().rev() {
        if matched.len() == n {
            break;
        }
        if filter.matches_line(&decrypted_view_for_filter(root, line)) {
            matched.push(line.to_string());
        }
    }
    matched
}
/// Renders a log line as the JSON a filter should match against: for sealed
/// entries, the ts/pid/chain metadata merged with the decrypted event
/// fields. Any failure along the way falls back to the raw line so
/// filtering degrades gracefully instead of hiding entries.
fn decrypted_view_for_filter(root: &std::path::Path, line: &str) -> String {
    let Some(parsed) = parse_entry_at(root, line) else {
        return line.to_string();
    };
    // Legacy plaintext lines already expose their event fields verbatim.
    if parsed.envelope == EnvelopeKind::LegacyPlaintext {
        return line.to_string();
    }
    let Ok(event_value) = serde_json::to_value(&parsed.event) else {
        return line.to_string();
    };
    let Some(event_obj) = event_value.as_object() else {
        return line.to_string();
    };
    // Rebuild a flat object mirroring the legacy layout: metadata first,
    // then the event's own fields.
    let mut out = serde_json::Map::new();
    out.insert("ts".into(), serde_json::Value::String(parsed.ts.clone()));
    out.insert("pid".into(), serde_json::Value::Number(parsed.pid.into()));
    out.insert(
        "chain".into(),
        serde_json::Value::String(parsed.chain.clone()),
    );
    for (k, v) in event_obj {
        out.insert(k.clone(), v.clone());
    }
    serde_json::to_string(&serde_json::Value::Object(out)).unwrap_or_else(|_| line.to_string())
}
/// Filtered variant of [`read_last_parsed_at`]: decodes up to `n` matching
/// entries (newest first), counting lines that fail to parse.
#[must_use]
pub fn read_last_parsed_filtered_at(
    root: &std::path::Path,
    n: usize,
    filter: &AuditFilter,
) -> ParsedReadResult {
    let raw = read_last_filtered_at(root, n, filter);
    let mut result = ParsedReadResult {
        entries: Vec::with_capacity(raw.len()),
        dropped_lines: 0,
    };
    for line in &raw {
        match parse_entry_at(root, line) {
            Some(entry) => result.entries.push(entry),
            None => result.dropped_lines += 1,
        }
    }
    result
}
/// The audit log lives directly in the default vault root; failures to
/// resolve it are reported as `AuditLogFailed`.
pub(crate) fn default_audit_dir() -> Result<std::path::PathBuf, Error> {
    crate::vault::store::default_vault_root()
        .map_err(|e| Error::AuditLogFailed(format!("cannot determine audit directory: {e}")))
}
/// Formats the current wall-clock time as `YYYY-MM-DDThh:mm:ssZ` (UTC)
/// without pulling in a date-time crate. Clock readings before the Unix
/// epoch collapse to `1970-01-01T00:00:00Z`.
fn now_iso8601() -> String {
    let secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    // Split into whole days and a time-of-day remainder.
    let clock = secs % 86400;
    let (hours, minutes, seconds) = (clock / 3600, (clock % 3600) / 60, clock % 60);
    #[allow(clippy::cast_possible_wrap)]
    let mut day_index = (secs / 86400) as i64;
    // Consume whole years until less than a year of days remains.
    let mut year = 1970_i64;
    loop {
        let span = if is_leap(year) { 366 } else { 365 };
        if day_index < span {
            break;
        }
        day_index -= span;
        year += 1;
    }
    // Consume whole months the same way; `day_index` ends as the 0-based
    // day of month.
    let february = if is_leap(year) { 29 } else { 28 };
    let lengths: [i64; 12] = [31, february, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
    let mut month = 1;
    for len in &lengths {
        if day_index < *len {
            break;
        }
        day_index -= *len;
        month += 1;
    }
    format!(
        "{year:04}-{month:02}-{:02}T{hours:02}:{minutes:02}:{seconds:02}Z",
        day_index + 1
    )
}
/// Gregorian leap-year rule: every 4th year, except centuries, except every
/// 400th year.
fn is_leap(y: i64) -> bool {
    (y % 4 == 0 && y % 100 != 0) || y % 400 == 0
}
/// Serializes `event` to JSON, round-tripping through `serde_json::Value`
/// to normalize the textual form before it is sealed.
/// NOTE(review): the resulting key order depends on serde_json's map
/// implementation (alphabetical unless the `preserve_order` feature is
/// enabled) — verify this matches what `check_one` recomputes.
fn canonical_event_json(event: &super::AuditEvent) -> Result<String, Error> {
    let raw = serde_json::to_string(event)
        .map_err(|e| Error::AuditLogFailed(format!("audit event serialize: {e}")))?;
    let value: serde_json::Value = serde_json::from_str(&raw)
        .map_err(|e| Error::AuditLogFailed(format!("audit event roundtrip parse: {e}")))?;
    serde_json::to_string(&value)
        .map_err(|e| Error::AuditLogFailed(format!("audit event roundtrip serialize: {e}")))
}
fn canonical_sealed_payload(sealed_hex: &str) -> String {
let mut obj = serde_json::Map::new();
obj.insert(
"sealed".to_string(),
serde_json::Value::String(sealed_hex.to_string()),
);
serde_json::to_string(&serde_json::Value::Object(obj)).unwrap_or_default()
}
/// Serializes one audit entry line containing `ts`, `pid`, `chain`, and the
/// sealed ciphertext. Field insertion order matches the original writer so
/// the serialized form (and therefore the chain) is unchanged.
fn build_entry_line(ts: &str, pid: u32, chain: &str, sealed_hex: &str) -> Result<String, Error> {
    let entry = serde_json::json!({
        "ts": ts,
        "pid": pid,
        "chain": chain,
        "sealed": sealed_hex,
    });
    serde_json::to_string(&entry)
        .map_err(|e| Error::AuditLogFailed(format!("failed to serialize audit entry: {e}")))
}
/// Next hash-chain value: SHA-256 over the previous chain value
/// concatenated with the canonical payload, rendered as lowercase hex.
fn compute_chain(prev: &str, payload: &str) -> String {
    let digest = Sha256::new()
        .chain_update(prev.as_bytes())
        .chain_update(payload.as_bytes())
        .finalize();
    format!("{digest:x}")
}
/// Chain value of the newest parseable line in the log, or `None` when the
/// log is missing or empty. Scanning backwards means trailing garbage lines
/// are skipped; the first line that parses decides the result even if it
/// lacks a `chain` field.
fn last_chain(path: &std::path::Path) -> Option<String> {
    let content = read_audit_log_tail(path).ok()?;
    for line in content.lines().rev() {
        if let Ok(value) = serde_json::from_str::<serde_json::Value>(line) {
            return value
                .get("chain")
                .and_then(serde_json::Value::as_str)
                .map(str::to_string);
        }
    }
    None
}
/// Per-line read cap (8 MiB) while verifying the chain, so one huge line
/// cannot exhaust memory.
const PER_LINE_CAP: u64 = 8 * 1024 * 1024;
/// Walks the whole log and verifies the hash chain from "genesis". On the
/// first bad line the file is rotated aside via `rotate_corrupted` and `Ok`
/// is returned, so the caller starts a fresh chain rather than extending a
/// corrupted one.
fn verify_chain_if_exists(path: &std::path::Path) -> Result<(), Error> {
    use std::io::{BufRead, Read};
    if !path.exists() {
        return Ok(());
    }
    let file = match crate::file::atomic_open::open_read_no_traverse(path) {
        Ok(f) => f,
        // Raced deletion between exists() and open is fine: nothing to verify.
        Err(Error::StorageIo(e)) if e.kind() == std::io::ErrorKind::NotFound => return Ok(()),
        Err(e) => return Err(e),
    };
    let reader = std::io::BufReader::new(file);
    let mut prev = "genesis".to_string();
    let mut line_no = 0_usize;
    let mut buf = String::new();
    let mut br = reader;
    loop {
        buf.clear();
        // A fresh Take per iteration applies the cap per line. A line longer
        // than the cap is read in pieces; the pieces fail to parse and
        // trigger rotation, which appears to be the intended containment.
        let mut limited = (&mut br).take(PER_LINE_CAP);
        let n = limited.read_line(&mut buf)?;
        if n == 0 {
            break;
        }
        line_no += 1;
        let trimmed = buf.trim_end_matches('\n').trim_end_matches('\r');
        match check_one(&prev, trimmed) {
            Ok(next_chain) => prev = next_chain,
            // Corruption: rotate the file aside and report success so the
            // caller continues on a fresh log.
            Err(reason) => {
                rotate_corrupted(path, line_no, &reason)?;
                return Ok(());
            }
        }
    }
    Ok(())
}
/// Verifies a single line against the previous chain value, returning this
/// line's chain value on success or a human-readable reason on failure.
///
/// The hashed payload is reconstructed by stripping `chain`/`ts`/`pid` and
/// re-serializing the remainder — for sealed entries that is exactly the
/// `{"sealed":"…"}` body produced by `canonical_sealed_payload`, so the two
/// must stay in sync.
fn check_one(prev: &str, line: &str) -> Result<String, String> {
    let value: serde_json::Value =
        serde_json::from_str(line).map_err(|e| format!("parse failure: {e}"))?;
    let chain = value
        .get("chain")
        .and_then(serde_json::Value::as_str)
        .ok_or_else(|| "chain field missing".to_string())?
        .to_string();
    let mut obj = value
        .as_object()
        .ok_or_else(|| "entry is not a JSON object".to_string())?
        .clone();
    // Metadata is excluded from the hash; only the event/sealed body counts.
    obj.remove("chain");
    obj.remove("ts");
    obj.remove("pid");
    let payload = serde_json::to_string(&serde_json::Value::Object(obj)).unwrap_or_default();
    let expected = compute_chain(prev, &payload);
    // NOTE(review): constant-time comparison — presumably to avoid timing
    // side channels on the chain value; confirm it is required here.
    if crate::guard::constant_time_eq(expected.as_bytes(), chain.as_bytes()) {
        Ok(chain)
    } else {
        Err("hash-chain mismatch".to_string())
    }
}
/// Moves a corrupted log aside to `audit.log.corrupted-<unix-secs>` so a
/// fresh chain can start, preserves the rotated file (best-effort
/// append-only attribute on Linux), and emits a warning signal describing
/// where corruption was found.
fn rotate_corrupted(path: &std::path::Path, bad_line: usize, reason: &str) -> Result<(), Error> {
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_secs());
    let corrupted = path.with_file_name(format!("audit.log.corrupted-{ts}"));
    std::fs::rename(path, &corrupted)
        .map_err(|e| Error::AuditLogFailed(format!("failed to rotate corrupted audit log: {e}")))?;
    // Best-effort: keep the preserved file append-only; failures ignored.
    #[cfg(target_os = "linux")]
    {
        let _ = std::process::Command::new("chattr")
            .args(["+a", "--", corrupted.to_string_lossy().as_ref()])
            .output();
    }
    // Signal emission is best-effort too — rotation already succeeded.
    let _ = crate::guard::emit_signal_inline(
        crate::guard::Signal::new(
            crate::guard::SignalId::new("audit.chain.rotated_corruption"),
            crate::guard::Category::AuditFailure,
            crate::guard::Severity::Warn,
            "audit log corruption rotated",
            format!(
                "audit log corruption detected at line {bad_line} ({reason}). \
                 rotated to {corrupted_path} and started a fresh chain — the \
                 corrupted file is preserved for hand inspection; new entries \
                 continue under {original_path}",
                corrupted_path = corrupted.display(),
                original_path = path.display()
            ),
            "inspect the rotated file for forensic evidence; the chain itself is back to clean",
        ),
        &crate::security_config::load_system_defaults(),
    );
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::audit::events::AuditEvent;

    /// Round-trip: seal events, confirm no plaintext leaks into the file,
    /// decrypt them back, and check the chain stays clean across appends.
    #[test]
    fn end_to_end_seal_chain_verify_decrypt() {
        let dir = tempfile::tempdir().unwrap();
        let root = dir.path();
        log_at(
            root,
            &AuditEvent::SecretStored {
                name: "OPENAI_API_KEY".to_string(),
            },
        )
        .unwrap();
        log_at(
            root,
            &AuditEvent::ApprovalResult {
                binary: "/usr/bin/cat".to_string(),
                secret: "DB_URL".to_string(),
                decision: "AllowOnce".to_string(),
            },
        )
        .unwrap();
        // The on-disk file must not contain any of the sensitive strings.
        let raw = std::fs::read_to_string(root.join("audit.log")).unwrap();
        assert!(
            !raw.contains("OPENAI_API_KEY"),
            "plaintext secret name leaked into audit log: {raw}"
        );
        assert!(!raw.contains("/usr/bin/cat"));
        assert!(!raw.contains("DB_URL"));
        let parsed = read_last_parsed_at(root, 10);
        assert_eq!(parsed.dropped_lines, 0);
        assert_eq!(parsed.entries.len(), 2);
        for entry in &parsed.entries {
            assert_eq!(entry.envelope, EnvelopeKind::Sealed);
        }
        // Entries come back newest-first, so [0] is the ApprovalResult.
        match &parsed.entries[0].event {
            AuditEvent::ApprovalResult {
                binary,
                secret,
                decision,
            } => {
                assert_eq!(binary, "/usr/bin/cat");
                assert_eq!(secret, "DB_URL");
                assert_eq!(decision, "AllowOnce");
            }
            other => panic!("unexpected event: {other:?}"),
        }
        log_at(
            root,
            &AuditEvent::SecretRevoked {
                name: "DB_URL".to_string(),
            },
        )
        .unwrap();
        let after = read_last_at(root, 10);
        assert_eq!(after.len(), 3);
        // A clean append sequence must never trigger corruption rotation.
        assert!(
            !root
                .join("audit.log")
                .parent()
                .unwrap()
                .read_dir()
                .unwrap()
                .any(|e| {
                    e.unwrap()
                        .file_name()
                        .to_string_lossy()
                        .starts_with("audit.log.corrupted-")
                }),
            "chain corruption rotation fired on a clean write sequence"
        );
    }

    /// Filtering must match against the decrypted event body, not the
    /// sealed ciphertext stored on disk.
    #[test]
    fn filter_matches_decrypted_event_body() {
        let dir = tempfile::tempdir().unwrap();
        let root = dir.path();
        log_at(
            root,
            &AuditEvent::SecretStored {
                name: "PROD_DB_PASSWORD".to_string(),
            },
        )
        .unwrap();
        log_at(
            root,
            &AuditEvent::SecretStored {
                name: "STAGING_API_KEY".to_string(),
            },
        )
        .unwrap();
        let filter = AuditFilter {
            query: Some("PROD_DB".to_string()),
            event_type: None,
        };
        let results = read_last_filtered_at(root, 10, &filter);
        assert_eq!(
            results.len(),
            1,
            "filter should decrypt to match event body, got {results:?}"
        );
    }

    /// 16 threads x 8 appends: no entry lost, no corruption rotation.
    #[test]
    fn concurrent_appends_produce_no_chain_corruption() {
        use std::sync::Arc;
        let dir = tempfile::tempdir().unwrap();
        let root = Arc::new(dir.path().to_path_buf());
        let mut handles = Vec::new();
        for i in 0..16 {
            let r = Arc::clone(&root);
            handles.push(std::thread::spawn(move || {
                for j in 0..8 {
                    log_at(
                        &r,
                        &AuditEvent::SecretStored {
                            name: format!("k-{i}-{j}"),
                        },
                    )
                    .unwrap();
                }
            }));
        }
        for h in handles {
            h.join().unwrap();
        }
        // Any corruption under contention would leave a rotated sibling.
        let rotated = std::fs::read_dir(&*root)
            .unwrap()
            .filter_map(Result::ok)
            .filter(|e| {
                e.file_name()
                    .to_string_lossy()
                    .starts_with("audit.log.corrupted-")
            })
            .count();
        assert_eq!(rotated, 0, "race produced corrupted-rotation siblings");
        let n = read_last_at(&root, 1024).len();
        assert_eq!(n, 16 * 8, "lost an entry under contention");
    }

    /// Lines over the 8 MiB parse cap are rejected before hitting serde.
    #[test]
    fn parse_entry_rejects_oversized_line() {
        let dir = tempfile::tempdir().unwrap();
        let huge = "x".repeat(9 * 1024 * 1024);
        assert!(parse_entry_at(dir.path(), &huge).is_none());
    }

    /// Sanity-check the hand-rolled timestamp formatter's shape.
    #[test]
    fn now_iso8601_format() {
        let ts = now_iso8601();
        assert!(
            ts.starts_with("20"),
            "timestamp should start with 20xx: {ts}"
        );
        assert!(ts.ends_with('Z'), "timestamp should end with Z: {ts}");
        assert!(ts.contains('T'), "timestamp should contain T: {ts}");
    }
}