use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
use chrono::{TimeZone, Utc};
use cortex_core::{KeyLifecycleState, TrustTier};
use cortex_store::repo::{AuthorityRepo, KeyTimelineRecord, PrincipalTimelineRecord};
use rusqlite::Connection;
use serde_json::{json, Value};
fn cortex_bin() -> PathBuf {
PathBuf::from(env!("CARGO_BIN_EXE_cortex"))
}
/// Runs the cortex binary with `args`, rooted at `cwd`, with the data dir and
/// every platform home/config env var redirected into the sandbox directory so
/// the test cannot touch the real user environment.
fn run_in(cwd: &Path, args: &[&str]) -> std::process::Output {
    let mut cmd = Command::new(cortex_bin());
    cmd.current_dir(cwd)
        .env("CORTEX_DATA_DIR", default_data_dir(cwd))
        .env("XDG_DATA_HOME", cwd.join("xdg"))
        .env("HOME", cwd)
        .env("APPDATA", cwd.join("AppData").join("Roaming"))
        .args(args);
    cmd.output().expect("spawn cortex")
}
/// Panics unless `out` exited with code `expected`, echoing the captured
/// stdout/stderr in the failure message for easier debugging.
fn assert_exit(out: &std::process::Output, expected: i32) {
    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    // `code()` is None only when the process was killed by a signal.
    let code = out.status.code().expect("process exited via signal");
    assert_eq!(
        code, expected,
        "expected exit {expected}, got {code}\nstdout: {stdout}\nstderr: {stderr}",
    );
}
/// Path to this crate's `tests/fixtures` directory.
fn fixtures_dir() -> PathBuf {
    let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    dir.push("tests");
    dir.push("fixtures");
    dir
}
/// Runs `cortex init` inside `cwd` and requires it to succeed.
fn init_default(cwd: &Path) {
    assert_exit(&run_in(cwd, &["init"]), 0);
}
/// Ingests the minimal session fixture inside `cwd` and requires success.
fn ingest_minimal_session(cwd: &Path) {
    let session_path = fixtures_dir().join("session-minimal.json");
    let session_arg = session_path.to_str().unwrap();
    assert_exit(&run_in(cwd, &["ingest", session_arg]), 0);
}
/// Data directory used by the sandboxed cortex process rooted at `cwd`.
fn default_data_dir(cwd: &Path) -> PathBuf {
    let mut dir = cwd.to_path_buf();
    dir.push("cortex-data");
    dir
}
/// Hex BLAKE3 digest of `path`'s contents, prefixed with the `blake3:` scheme
/// used by the backup manifest.
fn blake3_file(path: &Path) -> String {
    let contents = fs::read(path).expect("read artifact");
    let digest = blake3::hash(&contents);
    format!("blake3:{}", digest.to_hex())
}
/// Panics if `parent` still contains any staging directory left behind by a
/// backup of `output_name` (entries whose name starts with
/// `.{output_name}.staging`).
fn assert_no_staging_dir(parent: &Path, output_name: &str) {
    let prefix = format!(".{output_name}.staging");
    let mut leaked = Vec::new();
    for entry in fs::read_dir(parent).expect("read backup parent") {
        let Ok(entry) = entry else { continue };
        if entry.file_name().to_string_lossy().starts_with(&prefix) {
            leaked.push(entry.path());
        }
    }
    assert!(leaked.is_empty(), "staging directories leaked: {leaked:?}");
}
/// Panics unless `output` contains all three expected backup artifacts.
fn assert_backup_bundle(output: &Path) {
    let required = [
        ("BACKUP_MANIFEST", "manifest missing"),
        ("cortex.db", "SQLite copy missing"),
        ("events.jsonl", "JSONL copy missing"),
    ];
    for (name, message) in required {
        assert!(output.join(name).is_file(), "{message}");
    }
}
/// Seeds the initialized SQLite store under `cwd` with an Operator-tier
/// principal plus an Active key so migrate-time authority checks can pass for
/// the backup fixtures.
fn seed_migrate_operator_authority_for_backup(cwd: &Path) {
    let db_path = default_data_dir(cwd).join("cortex.db");
    let conn = Connection::open(&db_path).expect("open initialized sqlite db");
    cortex_store::migrate::apply_pending(&conn).expect("apply migrations");
    let repo = AuthorityRepo::new(&conn);
    // Fixed timestamp keeps the seeded timeline deterministic across runs.
    let effective_at = Utc.with_ymd_and_hms(2026, 1, 1, 12, 0, 1).unwrap();
    let principal = PrincipalTimelineRecord {
        principal_id: "operator-principal".into(),
        trust_tier: TrustTier::Operator,
        effective_at,
        trust_review_due_at: None,
        removed_at: None,
        audit_ref: None,
    };
    repo.append_principal_state(
        &principal,
        &cortex_store::repo::authority::principal_state_policy_decision_test_allow(),
    )
    .expect("append operator trust state");
    let key = KeyTimelineRecord {
        key_id: "cli-backup-fixture-operator".into(),
        principal_id: "operator-principal".into(),
        state: KeyLifecycleState::Active,
        effective_at,
        reason: None,
        audit_ref: None,
    };
    repo.append_key_state(
        &key,
        &cortex_store::repo::authority::key_state_policy_decision_test_allow(),
    )
    .expect("append active operator key state");
}
/// Writes a deterministic signing seed, invokes
/// `migrate sign-operator-attestation`, and returns the attestation path.
fn produce_operator_attestation(cwd: &Path) -> PathBuf {
    let seed_path = cwd.join("operator-seed.bin");
    // Deterministic 32-byte seed so the attestation is reproducible.
    fs::write(&seed_path, [0x07u8; 32]).expect("write operator signing seed");
    let attestation = cwd.join("operator-attestation.json");
    let args = [
        "migrate",
        "sign-operator-attestation",
        "--output",
        attestation.to_str().unwrap(),
        "--signing-seed",
        seed_path.to_str().unwrap(),
        "--operator-key-id",
        "cli-backup-fixture-operator",
    ];
    assert_exit(&run_in(cwd, &args), 0);
    attestation
}
// Happy path: `backup` publishes a complete bundle (SQLite copy, JSONL copy,
// manifest) whose manifest fields match the copied artifacts exactly, and
// leaves no temporary files behind.
#[test]
fn backup_writes_atomic_bundle_manifest_for_default_store_and_mirror() {
let tmp = tempfile::tempdir().unwrap();
init_default(tmp.path());
ingest_minimal_session(tmp.path());
// Snapshot the live store bytes up front so the published copies can be
// compared byte-for-byte after the backup runs.
let data_dir = default_data_dir(tmp.path());
let expected_sqlite = fs::read(data_dir.join("cortex.db")).unwrap();
let expected_jsonl = fs::read(data_dir.join("events.jsonl")).unwrap();
let output = tmp.path().join("backup-bundle");
let out = run_in(
tmp.path(),
&["backup", "--output", output.to_str().unwrap()],
);
assert_exit(&out, 0);
let stdout = String::from_utf8_lossy(&out.stdout);
// The CLI reports the post-copy JSONL audit result on stdout.
assert!(
stdout.contains("copied JSONL audit = verified"),
"stdout: {stdout}"
);
let manifest_path = output.join("BACKUP_MANIFEST");
assert!(manifest_path.exists(), "manifest should be published last");
// Published copies must be byte-identical to the sources.
assert_eq!(fs::read(output.join("cortex.db")).unwrap(), expected_sqlite);
assert_eq!(
fs::read(output.join("events.jsonl")).unwrap(),
expected_jsonl
);
// Fixed identification fields of the manifest.
let manifest: serde_json::Value =
serde_json::from_slice(&fs::read(&manifest_path).unwrap()).unwrap();
assert_eq!(manifest["kind"], "cortex_pre_v2_backup");
assert_eq!(manifest["schema_version"], json!(1));
assert_eq!(manifest["sqlite_store"], "cortex.db");
assert_eq!(manifest["jsonl_mirror"], "events.jsonl");
assert_eq!(manifest["tool_version"], env!("CARGO_PKG_VERSION"));
// Timestamp must be RFC3339 with an explicit UTC `Z` suffix.
assert!(
manifest["backup_timestamp"]
.as_str()
.is_some_and(|value| value.ends_with('Z')),
"timestamp should be RFC3339 UTC: {manifest}"
);
// Size and BLAKE3 fields must describe the *copied* artifacts.
assert_eq!(
manifest["sqlite_store_size_bytes"],
json!(fs::metadata(output.join("cortex.db")).unwrap().len())
);
assert_eq!(
manifest["sqlite_store_blake3"],
blake3_file(&output.join("cortex.db"))
);
assert_eq!(
manifest["jsonl_mirror_size_bytes"],
json!(fs::metadata(output.join("events.jsonl")).unwrap().len())
);
assert_eq!(
manifest["jsonl_mirror_blake3"],
blake3_file(&output.join("events.jsonl"))
);
// Audit summary: verified, one scanned row per JSONL line, zero failures.
assert_eq!(manifest["jsonl_mirror_audit_status"], json!("verified"));
assert_eq!(
manifest["jsonl_mirror_audit_rows_scanned"],
json!(fs::read_to_string(output.join("events.jsonl"))
.unwrap()
.lines()
.count())
);
assert_eq!(manifest["jsonl_mirror_audit_failures"], json!(0));
// Row-count section must exist with a non-negative integer per table.
let table_row_counts = &manifest["table_row_counts"];
assert!(
table_row_counts.is_object(),
"manifest must include a table_row_counts object: {manifest}"
);
for table in ["events", "traces", "episodes", "memories"] {
assert!(
table_row_counts[table].is_u64(),
"manifest.table_row_counts.{table} must be a non-negative integer: {manifest}"
);
}
// The minimal fixture yields empty tables across the board.
assert_eq!(table_row_counts["events"], json!(0));
assert_eq!(table_row_counts["traces"], json!(0));
assert_eq!(table_row_counts["episodes"], json!(0));
assert_eq!(table_row_counts["memories"], json!(0));
// Atomic publish: the temporary manifest name must not survive success.
assert!(
!output.join("BACKUP_MANIFEST.tmp").exists(),
"temporary manifest must not remain after success"
);
}
// `migrate v2` must fail closed (exit 7) when the backup manifest lacks the
// required `table_row_counts` field, and must report that nothing mutated.
#[test]
fn migrate_v2_rejects_manifest_missing_table_row_counts_without_mutation() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    ingest_minimal_session(tmp.path());
    // Produce a valid backup bundle first.
    let output = tmp.path().join("backup-bundle");
    let backup_out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&backup_out, 0);
    // Strip the required `table_row_counts` field out of the manifest.
    let manifest_path = output.join("BACKUP_MANIFEST");
    let mut stripped: Value =
        serde_json::from_slice(&fs::read(&manifest_path).unwrap()).expect("parse manifest");
    stripped
        .as_object_mut()
        .expect("manifest is object")
        .remove("table_row_counts");
    let stripped_bytes =
        serde_json::to_vec_pretty(&stripped).expect("serialize stripped manifest");
    fs::write(&manifest_path, stripped_bytes).expect("write stripped manifest");
    // The cutover must refuse the incomplete manifest with exit code 7.
    let migrate_out = run_in(
        tmp.path(),
        &[
            "migrate",
            "v2",
            "--backup-manifest",
            manifest_path.to_str().unwrap(),
        ],
    );
    assert_exit(&migrate_out, 7);
    let stderr = String::from_utf8_lossy(&migrate_out.stderr);
    assert!(
        stderr.contains("missing required `table_row_counts` field"),
        "stderr: {stderr}"
    );
    assert!(stderr.contains("no state was changed"), "stderr: {stderr}");
}
// A successful `migrate v2` cutover must echo the backup manifest's table row
// counts on stderr.
#[test]
fn migrate_v2_cutover_path_prints_backup_manifest_table_row_counts() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    ingest_minimal_session(tmp.path());
    // Take a backup whose manifest the cutover will read back.
    let output = tmp.path().join("backup-bundle");
    let backup_out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&backup_out, 0);
    let manifest = output.join("BACKUP_MANIFEST");
    // Authorize the cutover: signed attestation + seeded operator authority.
    let attestation = produce_operator_attestation(tmp.path());
    seed_migrate_operator_authority_for_backup(tmp.path());
    let migrate_out = run_in(
        tmp.path(),
        &[
            "migrate",
            "v2",
            "--backup-manifest",
            manifest.to_str().unwrap(),
            "--operator-attestation",
            attestation.to_str().unwrap(),
        ],
    );
    assert_exit(&migrate_out, 0);
    let stderr = String::from_utf8_lossy(&migrate_out.stderr);
    assert!(
        stderr.contains("backup_manifest_table_row_counts events=0 traces=0 episodes=0 memories=0"),
        "stderr: {stderr}"
    );
}
// When the requested output path is a dangling symlink, backup must fail
// closed (exit 7) without replacing the link or publishing through it.
#[cfg(unix)]
#[test]
fn backup_rejects_dangling_output_symlink_without_replacing_it() {
    use std::os::unix::fs::symlink;
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Point the output path at a target that does not exist.
    let output = tmp.path().join("backup-link");
    let missing_target = tmp.path().join("missing-target");
    symlink(&missing_target, &output).unwrap();
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    // The dangling link itself must survive untouched...
    let link_meta = fs::symlink_metadata(&output).unwrap();
    assert!(
        link_meta.file_type().is_symlink(),
        "backup must leave dangling output symlink untouched"
    );
    // ...and nothing may have been materialized behind it.
    assert!(
        !missing_target.exists(),
        "backup must not publish through symlink"
    );
    assert_no_staging_dir(tmp.path(), "backup-link");
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("output")
            && stderr.contains("symlink")
            && stderr.contains("no state was changed"),
        "stderr: {stderr}"
    );
}
// When the output's *parent* directory is a symlink, backup must fail closed
// (exit 7) without traversing the link to publish into the real directory.
#[cfg(unix)]
#[test]
fn backup_rejects_symlink_output_parent_without_publishing_through_it() {
    use std::os::unix::fs::symlink;
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Build: real-parent/ plus link-parent -> real-parent.
    let real_parent = tmp.path().join("real-parent");
    let symlink_parent = tmp.path().join("link-parent");
    fs::create_dir(&real_parent).unwrap();
    symlink(&real_parent, &symlink_parent).unwrap();
    let output = symlink_parent.join("backup-bundle");
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    // The symlink parent must remain a symlink...
    let parent_meta = fs::symlink_metadata(&symlink_parent).unwrap();
    assert!(
        parent_meta.file_type().is_symlink(),
        "backup must leave symlink parent untouched"
    );
    // ...and nothing may land in the directory it points to.
    assert!(
        !real_parent.join("backup-bundle").exists(),
        "backup must not publish through symlink parent"
    );
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("output parent")
            && stderr.contains("symlink")
            && stderr.contains("no state was changed"),
        "stderr: {stderr}"
    );
}
// An already-existing output path must be rejected (exit 7) with its contents
// left intact and no manifest published.
#[test]
fn backup_rejects_existing_output_without_overwrite() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Pre-create the output directory with a sentinel file inside it.
    let output = tmp.path().join("backup-bundle");
    fs::create_dir(&output).unwrap();
    fs::write(output.join("sentinel"), b"keep").unwrap();
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    // Existing contents are preserved and nothing was published.
    assert_eq!(fs::read(output.join("sentinel")).unwrap(), b"keep");
    assert!(!output.join("BACKUP_MANIFEST").exists());
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("already exists") && stderr.contains("no state was changed"),
        "stderr: {stderr}"
    );
}
// A present WAL sidecar must be checkpointed (and, if it survives, truncated)
// before the SQLite store is copied into the bundle.
#[test]
fn backup_checkpoints_active_sqlite_wal_sidecar_before_publishing() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Plant a WAL sidecar next to the store so backup has something to
    // checkpoint.
    let data_dir = default_data_dir(tmp.path());
    let wal_path = data_dir.join("cortex.db-wal");
    fs::write(&wal_path, b"").unwrap();
    assert!(
        wal_path.exists(),
        "fixture should exercise a present WAL sidecar before backup"
    );
    let output = tmp.path().join("backup-bundle");
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 0);
    assert_backup_bundle(&output);
    // If the sidecar still exists afterward, it must hold zero frames.
    if let Ok(metadata) = fs::metadata(&wal_path) {
        assert_eq!(
            metadata.len(),
            0,
            "backup should checkpoint and truncate active WAL frames before copying"
        );
    }
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        stdout.contains("copied JSONL audit = verified"),
        "stdout: {stdout}"
    );
}
// A stale staging directory from a previous run must abort the backup
// (exit 7) without creating the final output or disturbing the stale state.
#[test]
fn backup_rejects_stale_staging_dir_without_creating_output() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Pre-create exactly the staging path a backup of `backup-bundle` uses.
    let output = tmp.path().join("backup-bundle");
    let staging = tmp.path().join(".backup-bundle.staging");
    fs::create_dir(&staging).unwrap();
    fs::write(staging.join("sentinel"), b"stale").unwrap();
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    assert!(
        !output.exists(),
        "stale staging must not create final output"
    );
    // The stale staging contents must be left exactly as found.
    assert_eq!(fs::read(staging.join("sentinel")).unwrap(), b"stale");
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("staging path")
            && stderr.contains("already exists")
            && stderr.contains("no state was changed"),
        "stderr: {stderr}"
    );
}
// Race four concurrent backups at the same output path: exactly one must win
// and publish a complete bundle; the other three must fail closed with the
// precondition exit code (7), leaving no staging residue.
#[test]
fn backup_concurrent_same_output_publishes_once_without_partial_final_output() {
let tmp = tempfile::tempdir().unwrap();
init_default(tmp.path());
let output = tmp.path().join("backup-bundle");
// Spawn all four processes before waiting on any, so they genuinely race.
// The env setup mirrors `run_in`, but uses `spawn` instead of `output` to
// keep the children running concurrently.
let mut children = Vec::new();
for _ in 0..4 {
children.push(
Command::new(cortex_bin())
.current_dir(tmp.path())
.env("CORTEX_DATA_DIR", default_data_dir(tmp.path()))
.env("XDG_DATA_HOME", tmp.path().join("xdg"))
.env("HOME", tmp.path())
.env("APPDATA", tmp.path().join("AppData").join("Roaming"))
.args(["backup", "--output", output.to_str().unwrap()])
.spawn()
.expect("spawn concurrent cortex backup"),
);
}
let outputs: Vec<_> = children
.into_iter()
.map(|child| child.wait_with_output().expect("wait for cortex backup"))
.collect();
// Partition the results: one success, the rest exit 7 (fail-closed).
let success_count = outputs.iter().filter(|out| out.status.success()).count();
let precondition_count = outputs
.iter()
.filter(|out| out.status.code() == Some(7))
.count();
assert_eq!(
success_count, 1,
"expected exactly one successful publisher\noutputs: {outputs:#?}"
);
assert_eq!(
precondition_count, 3,
"expected losing publishers to fail closed\noutputs: {outputs:#?}"
);
// The winner's bundle is complete, and no staging directories leaked.
assert_backup_bundle(&output);
assert_no_staging_dir(tmp.path(), "backup-bundle");
}
// Missing SQLite store: backup must fail closed (exit 7) and create nothing.
#[test]
fn backup_rejects_missing_sqlite_store_without_creating_output() {
    let tmp = tempfile::tempdir().unwrap();
    // Data dir holds only the JSONL mirror — no SQLite store.
    let data_dir = default_data_dir(tmp.path());
    fs::create_dir_all(&data_dir).unwrap();
    fs::write(data_dir.join("events.jsonl"), b"").unwrap();
    let output = tmp.path().join("backup-bundle");
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    assert!(!output.exists(), "missing source must not create output");
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(stderr.contains("SQLite store"), "stderr: {stderr}");
}
// Missing JSONL mirror: backup must fail closed (exit 7) and create nothing.
#[test]
fn backup_rejects_missing_jsonl_mirror_without_creating_output() {
    let tmp = tempfile::tempdir().unwrap();
    // Data dir holds only the SQLite store — no JSONL mirror.
    let data_dir = default_data_dir(tmp.path());
    fs::create_dir_all(&data_dir).unwrap();
    fs::write(data_dir.join("cortex.db"), b"sqlite-bytes").unwrap();
    let output = tmp.path().join("backup-bundle");
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 7);
    assert!(!output.exists(), "missing source must not create output");
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(stderr.contains("JSONL mirror"), "stderr: {stderr}");
}
// If the copied JSONL fails its post-copy audit, the bundle must not be
// published (exit 6) and no staging residue may remain.
#[test]
fn backup_rejects_corrupt_copied_jsonl_without_publishing() {
    let tmp = tempfile::tempdir().unwrap();
    init_default(tmp.path());
    // Replace the mirror with a line that is not valid JSON.
    let data_dir = default_data_dir(tmp.path());
    fs::write(data_dir.join("events.jsonl"), b"{not-json}\n").unwrap();
    let output = tmp.path().join("backup-bundle");
    let out = run_in(tmp.path(), &["backup", "--output", output.to_str().unwrap()]);
    assert_exit(&out, 6);
    assert!(
        !output.exists(),
        "corrupt copied JSONL must not publish final output"
    );
    assert_no_staging_dir(tmp.path(), "backup-bundle");
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("copied JSONL audit verification failed")
            && stderr.contains("backup bundle was not published"),
        "stderr: {stderr}"
    );
}
// End-to-end acceptance: a manifest produced by `backup` must be accepted by
// the full `migrate v2` cutover, and the cutover must emit each expected
// stage/audit marker on stderr plus a post-migrate manifest on disk.
#[test]
fn generated_backup_manifest_is_accepted_by_migrate_v2_cutover() {
let tmp = tempfile::tempdir().unwrap();
init_default(tmp.path());
ingest_minimal_session(tmp.path());
// Produce the backup bundle whose manifest feeds the cutover.
let output = tmp.path().join("backup-bundle");
let out = run_in(
tmp.path(),
&["backup", "--output", output.to_str().unwrap()],
);
assert_exit(&out, 0);
let manifest = output.join("BACKUP_MANIFEST");
// Authorize the cutover: signed attestation + seeded operator authority.
let attestation = produce_operator_attestation(tmp.path());
seed_migrate_operator_authority_for_backup(tmp.path());
let out = run_in(
tmp.path(),
&[
"migrate",
"v2",
"--backup-manifest",
manifest.to_str().unwrap(),
"--operator-attestation",
attestation.to_str().unwrap(),
],
);
assert_exit(&out, 0);
let stderr = String::from_utf8_lossy(&out.stderr);
// Preflight readiness marker.
assert!(
stderr.contains("stage=backup-preflight-ready status=ready"),
"stderr: {stderr}"
);
// Audit-chain markers across the v1->v2 boundary.
assert!(
stderr.contains("boundary_audit=ok")
&& stderr.contains("post_migrate_mixed_chain_audit=ok")
&& stderr.contains("post_cutover_audit_dispatch=available"),
"stderr: {stderr}"
);
// Authority / guard / readiness markers for the cutover itself.
assert!(
stderr.contains("cutover_authority=ok")
&& stderr.contains("cutover_approved=true")
&& stderr.contains("cutover_guard=committed")
&& stderr.contains("cutover_readiness=ready"),
"stderr: {stderr}"
);
// Final completion banner announcing the active schema version.
assert!(
stderr.contains("schema cutover complete. SCHEMA_VERSION=2 active"),
"stderr: {stderr}"
);
// The post-migrate manifest lands beside the backup manifest.
assert!(
output.join("POST_V2_MIGRATE_MANIFEST").is_file(),
"post-migrate manifest must be written next to the backup manifest"
);
}
// Tamper with a generated manifest (point `jsonl_mirror` at a missing file):
// `migrate v2` must fail closed before staging (exit 7, no state change), and
// a subsequent boundary-required audit must report the boundary as missing.
#[test]
fn tampered_generated_backup_manifest_fails_closed_before_migrate_staging() {
let tmp = tempfile::tempdir().unwrap();
init_default(tmp.path());
ingest_minimal_session(tmp.path());
let output = tmp.path().join("backup-bundle");
let out = run_in(
tmp.path(),
&["backup", "--output", output.to_str().unwrap()],
);
assert_exit(&out, 0);
// Rewrite the manifest so its mirror entry references a nonexistent file.
let manifest = output.join("BACKUP_MANIFEST");
let mut manifest_json: Value =
serde_json::from_slice(&fs::read(&manifest).unwrap()).expect("parse generated manifest");
manifest_json["jsonl_mirror"] = json!("missing-events.jsonl");
fs::write(
&manifest,
serde_json::to_vec_pretty(&manifest_json).expect("serialize tampered manifest"),
)
.expect("write tampered manifest");
// The cutover must reject the tampered manifest with the precondition code.
let out = run_in(
tmp.path(),
&[
"migrate",
"v2",
"--backup-manifest",
manifest.to_str().unwrap(),
],
);
assert_exit(&out, 7);
let stderr = String::from_utf8_lossy(&out.stderr);
assert!(
stderr.contains("missing `jsonl_mirror` artifact")
&& stderr.contains("missing-events.jsonl")
&& stderr.contains("no state was changed"),
"stderr: {stderr}"
);
// Since the migration never staged, requiring the v1->v2 boundary in an
// audit must fail (exit 4) with the boundary reported missing.
let out = run_in(
tmp.path(),
&["audit", "verify", "--require-v1-to-v2-boundary"],
);
assert_exit(&out, 4);
let stderr = String::from_utf8_lossy(&out.stderr);
assert!(
stderr.contains("schema_migration.v1_to_v2.boundary.missing"),
"stderr: {stderr}"
);
}