envseal 0.3.10

Write-only secret vault with process-level access control — post-agent secret management
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
//! Append-only audit log with SHA-256 hash chaining.
//!
//! Each entry stores `chain = SHA256(prev_chain || payload_json)`. On every
//! append we re-walk the entire log and verify the chain end-to-end before
//! adding a new line. Tampering with any historic line invalidates the
//! chain from that point forward.
//!
//! On Linux we additionally try to flip the file's `+a` (append-only)
//! attribute via `chattr`, which makes overwrites by the same UID
//! impossible without root.

use serde::Serialize;
use sha2::{Digest, Sha256};
use std::io::Write;

use super::events::AuditEvent;
use crate::error::Error;

/// A single audit log entry as serialized to one line of `audit.log`.
///
/// The envelope fields (`ts`, `pid`, `chain`) wrap the flattened event
/// payload. Note that `chain` is computed over the *canonical* event
/// JSON only — the verifier strips the envelope fields before
/// re-hashing (see `check_one`).
#[derive(Serialize)]
struct AuditEntry<'a> {
    /// ISO 8601 timestamp (UTC, second precision — see `now_iso8601`).
    ts: String,
    /// Process ID of the writing process.
    pid: u32,
    /// SHA-256 chain value over previous hash + canonical payload JSON.
    chain: String,
    /// The event data, flattened into the same JSON object as the envelope.
    #[serde(flatten)]
    event: &'a AuditEvent,
}

/// Append an event to the audit log of the *default* vault.
///
/// Thin convenience wrapper over [`log_at`] for callers that always
/// target the default vault root (CLI, MCP, desktop GUI). Code that
/// already holds a [`crate::vault::Vault`] handle should call
/// [`log_at`] with `vault.root()` instead, keeping the audit trail
/// co-located with the vault it describes — this matters for tests
/// that run against temporary vault roots and for any future
/// multi-vault deployment.
///
/// # Errors
///
/// Returns [`Error::AuditLogFailed`] if the log directory or file cannot be
/// created/opened/written, or if the existing chain fails to verify.
pub fn log(event: &AuditEvent) -> Result<(), Error> {
    log_at(&default_audit_dir()?, event)
}

/// Append an event to the audit log under a specific vault root.
///
/// The audit log lives at `<root>/audit.log`. Each append re-walks
/// the existing chain to verify integrity before adding the new
/// entry — see the module-level docs.
///
/// # Errors
/// Same as [`log`].
pub fn log_at(root: &std::path::Path, event: &AuditEvent) -> Result<(), Error> {
    std::fs::create_dir_all(root)
        .map_err(|e| Error::AuditLogFailed(format!("failed to create audit directory: {e}")))?;

    // Owner-only (0o700) so other local users cannot list or read the
    // audit directory.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        std::fs::set_permissions(root, std::fs::Permissions::from_mode(0o700)).map_err(|e| {
            Error::AuditLogFailed(format!("failed to set audit directory permissions: {e}"))
        })?;
    }

    let log_path = root.join("audit.log");
    // Refuse to write through a symlink planted at the log path
    // (pairs with O_NOFOLLOW on the open below).
    crate::guard::verify_not_symlink(&log_path)?;

    // Canonicalize the chain-input payload so it matches the form
    // the verifier produces. `serde_json::to_string(event)` emits
    // fields in declaration order; `serde_json::to_string(&Value)`
    // emits fields in alphabetical order (BTreeMap iteration). For
    // any AuditEvent variant whose declaration order isn't
    // alphabetical (e.g. `SignalRecorded { tier, classification }`,
    // `SupervisorLeakDetected { secret, binary, leak_count }`) those
    // two paths produce different bytes → different SHA-256 → every
    // single append fails verify on the next read → rotate-corrupted
    // fires for entries the writer just produced. We were masking the
    // bug with the rotation failsafe; this fixes it by making both
    // sides hash the same canonical bytes.
    let payload = canonical_event_json(event);
    // Verify (and, on corruption, rotate) the existing chain BEFORE
    // reading the previous hash, so `last_chain` only ever sees a
    // clean file.
    verify_chain_if_exists(&log_path)?;
    let prev = last_chain(&log_path).unwrap_or_else(|| "genesis".to_string());
    let chain = compute_chain(&prev, &payload);
    let entry = AuditEntry {
        ts: now_iso8601(),
        pid: std::process::id(),
        chain,
        event,
    };

    let mut line = serde_json::to_string(&entry)
        .map_err(|e| Error::AuditLogFailed(format!("failed to serialize audit entry: {e}")))?;
    line.push('\n');

    let mut file = {
        let mut opts = std::fs::OpenOptions::new();
        opts.create(true).append(true);
        // 0o600: owner-only file; O_NOFOLLOW defeats symlink swaps
        // raced against the check above; O_CLOEXEC keeps the fd out
        // of child processes.
        #[cfg(unix)]
        {
            use std::os::unix::fs::OpenOptionsExt;
            opts.mode(0o600)
                .custom_flags(libc::O_NOFOLLOW | libc::O_CLOEXEC);
        }
        opts.open(&log_path)
            .map_err(|e| Error::AuditLogFailed(format!("failed to open audit log: {e}")))?
    };
    // Best-effort append-only flag (requires root / CAP_LINUX_IMMUTABLE);
    // failure is ignored because the hash chain still detects tampering.
    #[cfg(target_os = "linux")]
    {
        let _ = std::process::Command::new("chattr")
            .args(["+a", "--", log_path.to_string_lossy().as_ref()])
            .output();
    }

    file.write_all(line.as_bytes())
        .map_err(|e| Error::AuditLogFailed(format!("failed to write audit log: {e}")))?;

    Ok(())
}

/// Append an audit record under the default vault, failing loudly with
/// [`Error::AuditLogFailed`] when the write cannot complete.
///
/// Security-sensitive operations must go through this (rather than
/// [`log`] directly) so that a full disk or a permission failure can
/// never let a secret access complete without leaving a forensic
/// record behind.
///
/// # Errors
/// See [`log`].
pub fn log_required(event: &AuditEvent) -> Result<(), Error> {
    log(event)
}

/// [`log_required`] variant that writes under a specific vault root.
/// Prefer this when a [`crate::vault::Vault`] handle is already in
/// hand, so the audit trail stays next to the vault it audits.
///
/// # Errors
/// Same as [`log_at`].
pub fn log_required_at(root: &std::path::Path, event: &AuditEvent) -> Result<(), Error> {
    log_at(root, event)
}

/// Extract the value of a string-typed JSON field from a single
/// audit-log line, without pulling in `serde_json` for every caller
/// that just wants a status surface.
///
/// This is the deliberate counterpart to the heavier
/// `serde_json::from_str` path used by chain verification: the log
/// renderer needs only the `ts`, `event`, `binary`, and `secret`
/// fields and knows the lines are well-formed JSON because we
/// produced them. The returned value is the *raw* string content —
/// escape sequences are not decoded. Returns `None` if the field is
/// absent, its value isn't a string literal, or the closing quote is
/// never found.
#[must_use]
pub fn extract_json_field(json: &str, key: &str) -> Option<String> {
    let needle = format!("\"{key}\":\"");
    let value_start = json.find(&needle)? + needle.len();
    let bytes = json.as_bytes();
    let mut idx = value_start;
    loop {
        match bytes.get(idx) {
            // Ran off the end of the line without a closing quote.
            None => return None,
            // Unescaped closing quote terminates the value. Safe to
            // slice: `idx` can only stop on ASCII `"`, never inside a
            // multi-byte UTF-8 sequence.
            Some(b'"') => return Some(json[value_start..idx].to_string()),
            // Skip over escapes so an embedded \" doesn't end the scan;
            // \uXXXX consumes six bytes, every other escape two.
            Some(b'\\') if idx + 1 < bytes.len() => {
                idx += if bytes[idx + 1] == b'u' && idx + 5 < bytes.len() {
                    6
                } else {
                    2
                };
            }
            Some(_) => idx += 1,
        }
    }
}

/// One audit log entry, parsed from a single JSON line. The
/// envelope fields (`ts`, `pid`, `chain`) are co-located with the
/// flattened `AuditEvent` payload — the same shape the writer
/// produces in [`AuditEntry`], so writer and reader stay in lockstep.
///
/// Returned by [`parse_entry`] / [`read_last_parsed_at`] so callers
/// don't have to re-implement the JSON shape. The desktop GUI and
/// CLI both consume this via `ops::audit_log_parsed`.
#[derive(Debug, serde::Deserialize)]
pub struct ParsedEntry {
    /// ISO 8601 timestamp.
    pub ts: String,
    /// Process ID that wrote the entry.
    pub pid: u32,
    /// SHA-256 chain hash for tamper detection.
    pub chain: String,
    /// The flattened event payload — typed, not raw JSON.
    #[serde(flatten)]
    pub event: super::AuditEvent,
}

/// Parse one JSON line into a typed [`ParsedEntry`].
///
/// Returns `None` when the line isn't valid JSON or doesn't match
/// the expected schema — corrupt lines are skipped rather than
/// raised, so a forensic reader can still display the rest of the
/// chain.
#[must_use]
pub fn parse_entry(line: &str) -> Option<ParsedEntry> {
    match serde_json::from_str::<ParsedEntry>(line) {
        Ok(entry) => Some(entry),
        Err(_) => None,
    }
}

/// Read the last `n` entries from the audit log under a specific
/// vault root, parsed into typed [`ParsedEntry`] values. Lines that
/// fail to parse are dropped from the result (reported through the
/// returned `dropped_lines` count so a UI can flag a corrupted
/// chain without locking up).
#[must_use]
pub fn read_last_parsed_at(root: &std::path::Path, n: usize) -> ParsedReadResult {
    let raw = read_last_at(root, n);
    let mut entries = Vec::with_capacity(raw.len());
    let mut dropped = 0_usize;
    for line in &raw {
        match parse_entry(line) {
            Some(p) => entries.push(p),
            None => dropped += 1,
        }
    }
    ParsedReadResult {
        entries,
        dropped_lines: dropped,
    }
}

/// Result of [`read_last_parsed_at`]: the parsed entries plus a
/// count of lines that could not be parsed.
#[derive(Debug, Default)]
pub struct ParsedReadResult {
    /// Successfully parsed entries (newest first, same order as
    /// [`read_last_at`]).
    pub entries: Vec<ParsedEntry>,
    /// Count of lines that failed to parse — non-zero implies log
    /// corruption that the chain-verifier will catch on next write.
    pub dropped_lines: usize,
}

/// Read the last `n` entries from the *default* vault's audit log.
/// Returns raw JSON lines in reverse chronological order (newest
/// first); an unresolvable default directory yields an empty list.
#[must_use]
pub fn read_last(n: usize) -> Vec<String> {
    match default_audit_dir() {
        Ok(dir) => read_last_at(&dir, n),
        Err(_) => Vec::new(),
    }
}

/// Read the last `n` entries from the audit log under a specific
/// vault root. A missing or unreadable log yields an empty list.
#[must_use]
pub fn read_last_at(root: &std::path::Path, n: usize) -> Vec<String> {
    match std::fs::read_to_string(root.join("audit.log")) {
        // Newest first: iterate the file's lines in reverse.
        Ok(contents) => contents.lines().rev().take(n).map(str::to_owned).collect(),
        Err(_) => Vec::new(),
    }
}

/// Filter criteria for audit log queries.
///
/// All fields are optional — an empty filter matches everything. When
/// multiple fields are set, they are `ANDed` (every condition must match).
#[derive(Debug, Default, Clone)]
pub struct AuditFilter {
    /// Case-insensitive substring match against the full JSON line.
    /// Matches secret names, binary paths, signal IDs, event types — any
    /// text in the log entry.
    pub query: Option<String>,
    /// Restrict to a specific event type tag (the `event` JSON field).
    /// Exact match, case-insensitive. Examples: `secret_stored`,
    /// `approval_result`, `signal_recorded`.
    pub event_type: Option<String>,
}

impl AuditFilter {
    /// `true` when neither predicate is set, i.e. the filter matches
    /// every line.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        matches!((&self.query, &self.event_type), (None, None))
    }

    /// Test whether a raw JSON log line passes all filter predicates
    /// (`AND` semantics, each comparison case-insensitive).
    #[must_use]
    pub fn matches_line(&self, line: &str) -> bool {
        // Lowercase once; both predicates compare against the same form.
        let line_lower = line.to_ascii_lowercase();
        if let Some(q) = &self.query {
            if !line_lower.contains(&q.to_ascii_lowercase()) {
                return false;
            }
        }
        if let Some(et) = &self.event_type {
            // Match the "event":"<type>" JSON field exactly.
            let needle = format!("\"event\":\"{}\"", et.to_ascii_lowercase());
            if !line_lower.contains(&needle) {
                return false;
            }
        }
        true
    }
}

/// Read up to `n` entries from the default vault's audit log,
/// applying `filter` to each line before collection. An
/// unresolvable default directory yields an empty list.
#[must_use]
pub fn read_last_filtered(n: usize, filter: &AuditFilter) -> Vec<String> {
    match default_audit_dir() {
        Ok(dir) => read_last_filtered_at(&dir, n, filter),
        Err(_) => Vec::new(),
    }
}

/// Read up to `n` entries from the audit log under a specific vault
/// root, applying `filter` to each line. Newest-first order is
/// preserved; lines that don't match are skipped without counting
/// toward `n`.
#[must_use]
pub fn read_last_filtered_at(
    root: &std::path::Path,
    n: usize,
    filter: &AuditFilter,
) -> Vec<String> {
    // Empty filter → same as the unfiltered read, skip the per-line work.
    if filter.is_empty() {
        return read_last_at(root, n);
    }
    let Ok(contents) = std::fs::read_to_string(root.join("audit.log")) else {
        return Vec::new();
    };
    let mut matched = Vec::new();
    for line in contents.lines().rev() {
        if matched.len() == n {
            break;
        }
        if filter.matches_line(line) {
            matched.push(line.to_string());
        }
    }
    matched
}

/// Like [`read_last_parsed_at`] but with filter support.
#[must_use]
pub fn read_last_parsed_filtered_at(
    root: &std::path::Path,
    n: usize,
    filter: &AuditFilter,
) -> ParsedReadResult {
    let raw = read_last_filtered_at(root, n, filter);
    let mut entries = Vec::with_capacity(raw.len());
    let mut dropped = 0_usize;
    for line in &raw {
        match parse_entry(line) {
            Some(p) => entries.push(p),
            None => dropped += 1,
        }
    }
    ParsedReadResult {
        entries,
        dropped_lines: dropped,
    }
}

/// Resolve the audit log directory for the default vault.
///
/// Co-located with the vault at `$XDG_CONFIG_HOME/envseal/` (Unix) or
/// `$HOME/.config/envseal/` so a single `~/.config/envseal/` directory
/// holds vault, security config, audit log, and policy. Matches
/// `crate::ops::vault_root` exactly so a vault and its audit trail
/// never end up in different parent directories.
pub(crate) fn default_audit_dir() -> Result<std::path::PathBuf, Error> {
    // Re-use the validated path logic from the vault so the audit log
    // and the vault it audits are co-located AND the same XDG_CONFIG_HOME
    // poisoning guards apply.
    crate::vault::store::default_vault_root()
        .map_err(|e| Error::AuditLogFailed(format!("cannot determine audit directory: {e}")))
}

/// Current time as an ISO 8601 `YYYY-MM-DDTHH:MM:SSZ` string,
/// computed directly from the Unix epoch with no external date crates.
fn now_iso8601() -> String {
    let secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    // Clock components from the seconds-within-day remainder.
    let time_of_day = secs % 86400;
    let hours = time_of_day / 3600;
    let minutes = (time_of_day % 3600) / 60;
    let seconds = time_of_day % 60;

    // Walk whole years forward from 1970 until less than one year of
    // days remains.
    #[allow(clippy::cast_possible_wrap)]
    let mut remaining = (secs / 86400) as i64;
    let mut year = 1970_i64;
    loop {
        let year_len = if is_leap(year) { 366 } else { 365 };
        if remaining < year_len {
            break;
        }
        remaining -= year_len;
        year += 1;
    }

    // Then walk months within the final year.
    let month_lengths: [i64; 12] = [
        31,
        if is_leap(year) { 29 } else { 28 },
        31,
        30,
        31,
        30,
        31,
        31,
        30,
        31,
        30,
        31,
    ];
    let mut month = 0_usize;
    while month < 12 && remaining >= month_lengths[month] {
        remaining -= month_lengths[month];
        month += 1;
    }

    format!(
        "{year:04}-{:02}-{:02}T{hours:02}:{minutes:02}:{seconds:02}Z",
        month + 1,
        remaining + 1,
    )
}

/// Gregorian leap-year rule: divisible by 4, except centuries, which
/// must be divisible by 400.
fn is_leap(y: i64) -> bool {
    if y % 400 == 0 {
        return true;
    }
    y % 4 == 0 && y % 100 != 0
}

/// Canonicalize an [`AuditEvent`] to its sorted-keys JSON form.
///
/// Both writer and verifier hash this same form so the chain
/// matches across roundtrips. Without this, the writer hashes
/// declaration-order JSON while the verifier hashes alphabetical-
/// order JSON (because the verifier roundtrips through
/// `serde_json::Value`, which uses `BTreeMap` internally) — every
/// non-alphabetically-declared variant produces a chain mismatch
/// the moment it's verified, and only the rotate-corrupted failsafe
/// kept the system usable.
fn canonical_event_json(event: &super::AuditEvent) -> String {
    // Direct serialization first; a serialization failure is replaced
    // with a sentinel object (which the chain will flag downstream).
    let raw = serde_json::to_string(event)
        .unwrap_or_else(|_| String::from("{\"error\":\"serialize\"}"));
    // Roundtrip through Value to force alphabetical key order — the
    // exact form the verifier reconstructs. If the roundtrip fails,
    // fall back to the direct serialization.
    match serde_json::from_str::<serde_json::Value>(&raw) {
        Ok(value) => serde_json::to_string(&value).unwrap_or(raw),
        Err(_) => raw,
    }
}

/// Next chain value: lowercase-hex `SHA256(prev || payload)`.
fn compute_chain(prev: &str, payload: &str) -> String {
    let mut digest = Sha256::new();
    digest.update(prev.as_bytes());
    digest.update(payload.as_bytes());
    format!("{:x}", digest.finalize())
}

/// Chain hash of the newest parseable entry in the log, or `None`
/// when the file is unreadable, empty, or that entry has no string
/// `chain` field.
fn last_chain(path: &std::path::Path) -> Option<String> {
    let content = std::fs::read_to_string(path).ok()?;
    for line in content.lines().rev() {
        // Only the first line that parses as JSON is consulted.
        if let Ok(value) = serde_json::from_str::<serde_json::Value>(line) {
            return value
                .get("chain")
                .and_then(serde_json::Value::as_str)
                .map(String::from);
        }
    }
    None
}

/// Walk the on-disk chain from genesis. On any mismatch (corruption,
/// tamper, or schema drift between writer versions), rotate the file
/// to an `audit.log.corrupted-<unix_ts>` sibling for forensics and
/// return `Ok` so the caller can start a fresh chain — refusing to
/// continue would otherwise lock the user out of the vault permanently
/// the moment ANY corruption sneaks in. The corrupted file is NEVER
/// deleted; it stays alongside the new chain for hand inspection.
fn verify_chain_if_exists(path: &std::path::Path) -> Result<(), Error> {
    if !path.exists() {
        return Ok(());
    }
    let content = std::fs::read_to_string(path)?;
    let mut prev = "genesis".to_string();
    for (line_no, line) in content.lines().enumerate() {
        match check_one(&prev, line) {
            Ok(next_chain) => prev = next_chain,
            Err(reason) => {
                // The first bad line poisons everything after it:
                // rotate the whole file aside and report success so
                // the next append starts a clean chain.
                rotate_corrupted(path, line_no + 1, &reason)?;
                return Ok(());
            }
        }
    }
    Ok(())
}

/// Verify a single entry against the previous chain hash.
/// Returns the new chain value on success, or a human-readable
/// reason on failure.
fn check_one(prev: &str, line: &str) -> Result<String, String> {
    let value: serde_json::Value =
        serde_json::from_str(line).map_err(|e| format!("parse failure: {e}"))?;
    // Missing/non-string `chain` is reported before the object-shape
    // check, matching the original error precedence.
    let Some(chain) = value
        .get("chain")
        .and_then(serde_json::Value::as_str)
        .map(str::to_string)
    else {
        return Err("chain field missing".to_string());
    };
    let Some(obj) = value.as_object() else {
        return Err("entry is not a JSON object".to_string());
    };
    // Strip the envelope fields; only the canonical event payload is
    // covered by the chain hash.
    let mut payload_fields = obj.clone();
    for envelope in ["chain", "ts", "pid"] {
        payload_fields.remove(envelope);
    }
    let payload =
        serde_json::to_string(&serde_json::Value::Object(payload_fields)).unwrap_or_default();
    let expected = compute_chain(prev, &payload);
    // Constant-time comparison so verification timing leaks nothing
    // about the expected hash.
    if crate::guard::constant_time_eq(expected.as_bytes(), chain.as_bytes()) {
        Ok(chain)
    } else {
        Err("hash-chain mismatch".to_string())
    }
}

/// Rotate a corrupted audit log to a timestamped sibling so the
/// forensic record survives, then return success — the caller will
/// start a fresh chain at the original path on its next append.
///
/// `bad_line` (1-based) and `reason` come from the chain walk in
/// `verify_chain_if_exists` and are surfaced in the emitted signal.
///
/// # Errors
/// Returns [`Error::AuditLogFailed`] only when the rename itself fails;
/// the chattr and signal steps are best-effort.
fn rotate_corrupted(path: &std::path::Path, bad_line: usize, reason: &str) -> Result<(), Error> {
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_secs());
    let corrupted = path.with_file_name(format!("audit.log.corrupted-{ts}"));
    // Rename (not copy) so no window exists where both files hold the
    // tainted chain.
    std::fs::rename(path, &corrupted)
        .map_err(|e| Error::AuditLogFailed(format!("failed to rotate corrupted audit log: {e}")))?;
    // Preserve append-only protection on the rotated file so an
    // attacker cannot destroy forensic evidence after triggering a
    // rotation via tampering.
    #[cfg(target_os = "linux")]
    {
        let _ = std::process::Command::new("chattr")
            .args(["+a", "--", corrupted.to_string_lossy().as_ref()])
            .output();
    }
    // Surface the rotation through the unified Signal pipeline so the
    // user's `security.toml` can demote this to `log` (don't bother
    // me about it — quiet vault) or promote it to `block` (paranoid
    // setup that wants any chain disturbance to halt operations).
    let _ = crate::guard::emit_signal_inline(
        crate::guard::Signal::new(
            crate::guard::SignalId::new("audit.chain.rotated_corruption"),
            crate::guard::Category::AuditFailure,
            crate::guard::Severity::Warn,
            "audit log corruption rotated",
            format!(
                "audit log corruption detected at line {bad_line} ({reason}). \
                 rotated to {corrupted_path} and started a fresh chain — the \
                 corrupted file is preserved for hand inspection; new entries \
                 continue under {original_path}",
                corrupted_path = corrupted.display(),
                original_path = path.display()
            ),
            "inspect the rotated file for forensic evidence; the chain itself is back to clean",
        ),
        &crate::security_config::load_system_defaults(),
    );
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity-check the shape of the hand-rolled ISO 8601 formatter:
    /// `YYYY-MM-DDTHH:MM:SSZ`.
    #[test]
    fn now_iso8601_format() {
        let ts = now_iso8601();
        assert!(ts.ends_with('Z'), "timestamp should end with Z: {ts}");
        assert!(ts.contains('T'), "timestamp should contain T: {ts}");
        assert!(
            ts.starts_with("20"),
            "timestamp should start with 20xx: {ts}"
        );
    }
}