pub mod intent;
pub mod intent_build;
pub mod lock;
pub mod policy;
pub mod production;
use std::fs::{self, File, OpenOptions};
use std::io::{ErrorKind, Read, Write};
use std::path::{Path, PathBuf};
use clap::{Args, Subcommand};
use cortex_core::{
effective_ceiling, AuditRecordId, AuthorityClass, ClaimCeiling, ClaimProofState, RuntimeMode,
};
use cortex_ledger::{
anchor::{verify_anchor_history, AnchorHistoryVerifyError},
audit::verify_chain,
parse_anchor, verify_anchor, AnchorVerifyError, JsonlError, Report,
};
use cortex_store::repo::{AuditEntry, AuditRepo};
use cortex_store::semantic_diff::{
semantic_snapshot_from_store, RestoreDecision, SemanticSnapshot,
};
use serde::Deserialize;
use serde_json::json;
/// Build the `truth_ceiling` JSON object attached to restore command reports.
///
/// The runtime mode is pinned to `RuntimeMode::LocalUnsigned`; the effective
/// claim ceiling is derived from that mode plus the caller-supplied proof
/// state and authority class, capped at `ClaimCeiling::LocalUnsigned`.
fn restore_truth_ceiling_object(
    proof_state: ClaimProofState,
    authority_class: AuthorityClass,
) -> serde_json::Value {
    let mode = RuntimeMode::LocalUnsigned;
    let ceiling = effective_ceiling(mode, authority_class, proof_state, ClaimCeiling::LocalUnsigned);
    json!({
        "runtime_mode": mode,
        "proof_state": proof_state,
        "claim_ceiling": ceiling,
        "authority_class": authority_class,
    })
}
/// Fail-closed default truth ceiling: proof state `Unknown`, authority
/// `Observed`. Used by commands that make no verification claim of their own.
fn restore_truth_ceiling_fail_closed() -> serde_json::Value {
    restore_truth_ceiling_object(ClaimProofState::Unknown, AuthorityClass::Observed)
}
use crate::cmd::open_default_store;
use crate::cmd::temporal::{revalidate_operator_temporal_authority, revalidation_failed_invariant};
use crate::exit::Exit;
use crate::paths::DataLayout;
use cortex_core::{Attestor, InMemoryAttestor, TrustTier};
// Directory (relative name) and manifest file used for apply-stage recovery evidence.
const APPLY_RECOVERY_DIR_NAME: &str = ".restore-apply-recovery";
const APPLY_RECOVERY_MANIFEST_NAME: &str = "RECOVERY_MANIFEST.json";
// Audit `operation` strings recorded for the two mutating restore commands.
const RESTORE_APPLY_STAGE_COMMAND_AUDIT_OPERATION: &str = "command.restore.apply_stage";
const RESTORE_RECOVER_APPLY_COMMAND_AUDIT_OPERATION: &str = "command.restore.recover_apply";
/// Subcommands under `cortex restore`, dispatched by [`run`].
#[derive(Debug, Subcommand)]
pub enum RestoreSub {
    /// Print a semantic snapshot of a store as JSON (read-only).
    Snapshot(SnapshotArgs),
    /// Diff two semantic snapshots/stores and report the restore decision (read-only).
    SemanticDiff(SemanticDiffArgs),
    /// Verify the structure of a pre-v2 backup manifest and its artifacts (read-only).
    VerifyBackup(VerifyBackupArgs),
    /// Verify a post-migrate manifest's embedded BLAKE3 digest (read-only).
    VerifyPostMigrateManifest(VerifyPostMigrateManifestArgs),
    /// Run structural + semantic preflight for a restore candidate (read-only).
    Preflight(PreflightArgs),
    /// Stage a verified restore candidate into a new stage directory.
    Stage(StageArgs),
    /// Restore-intent subcommands (see [`IntentSub`]).
    Intent {
        #[command(subcommand)]
        sub: IntentSub,
    },
    /// Production apply path; boxed because the args struct is large.
    Apply(Box<production::ApplyArgs>),
    /// Apply a staged candidate to the temp-test active store (mutating).
    ApplyStage(ApplyStageArgs),
    /// Roll the active store back from apply-stage recovery evidence (mutating).
    RecoverApply(RecoverApplyArgs),
}
/// Subcommands under `cortex restore intent`.
#[derive(Debug, Subcommand)]
pub enum IntentSub {
    /// Build a restore intent (handled by `intent_build::run`).
    Build(intent_build::BuildArgs),
}
/// CLI arguments for `cortex restore snapshot`.
#[derive(Debug, Args)]
pub struct SnapshotArgs {
    /// Store to snapshot; falls back to the default store when omitted.
    #[arg(long)]
    pub store: Option<PathBuf>,
}
/// CLI arguments for `cortex restore semantic-diff`.
///
/// Each side can come from a snapshot file or a live store; precedence
/// between the two is decided by `read_semantic_snapshot_source` (defined
/// elsewhere in this module).
#[derive(Debug, Args)]
pub struct SemanticDiffArgs {
    /// "current" side as a snapshot file.
    #[arg(long)]
    pub current: Option<PathBuf>,
    /// "restored" side as a snapshot file.
    #[arg(long)]
    pub restored: Option<PathBuf>,
    /// "current" side as a store path.
    #[arg(long)]
    pub current_store: Option<PathBuf>,
    /// "restored" side as a store path.
    #[arg(long)]
    pub restored_store: Option<PathBuf>,
    /// Operator acknowledgement flag fed into the policy composition.
    #[arg(long)]
    pub acknowledge_recovery_risk: bool,
}
/// CLI arguments for `cortex restore verify-backup`.
#[derive(Debug, Args)]
pub struct VerifyBackupArgs {
    /// Path to the pre-v2 backup manifest to verify.
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
}
/// CLI arguments for `cortex restore verify-post-migrate-manifest`.
#[derive(Debug, Args)]
pub struct VerifyPostMigrateManifestArgs {
    /// Path to the post-migrate manifest whose `manifest_blake3` is checked.
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
}
/// CLI arguments for `cortex restore preflight`.
#[derive(Debug, Args)]
pub struct PreflightArgs {
    /// Pre-v2 backup manifest to verify structurally.
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
    /// Active/current store used as the semantic-diff baseline.
    #[arg(long)]
    pub current_store: Option<PathBuf>,
    /// Expanded candidate store; required for the semantic diff to execute.
    #[arg(long)]
    pub candidate_store: Option<PathBuf>,
    /// Operator acknowledgement flag fed into the policy composition.
    #[arg(long)]
    pub acknowledge_recovery_risk: bool,
    /// Also emit the (always-blocked) production active-store restore plan.
    #[arg(long)]
    pub production_active_store_plan: bool,
    /// Marker path used by the exclusive-lock acquire/release preflight.
    #[arg(long, value_name = "PATH")]
    pub active_store_lock_marker: Option<PathBuf>,
}
/// CLI arguments for `cortex restore stage`.
#[derive(Debug, Args)]
pub struct StageArgs {
    /// Pre-v2 backup manifest to verify before staging.
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
    /// Stage directory to create; must not already exist.
    #[arg(long, value_name = "DIR")]
    pub stage_dir: PathBuf,
    /// Current store used as the semantic-diff baseline.
    #[arg(long)]
    pub current_store: Option<PathBuf>,
    /// Required acknowledgement; staging refuses to run without it.
    #[arg(long)]
    pub acknowledge_destructive_restore: bool,
    /// Operator acknowledgement flag fed into the policy composition.
    #[arg(long)]
    pub acknowledge_recovery_risk: bool,
}
/// CLI arguments for `cortex restore apply-stage` (mutates the active store).
#[derive(Debug, Args)]
pub struct ApplyStageArgs {
    /// Pre-v2 backup manifest the staged artifacts are verified against.
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
    /// Existing stage directory produced by `restore stage`.
    #[arg(long, value_name = "DIR")]
    pub stage_dir: PathBuf,
    /// Required acknowledgement that the active store will be replaced.
    #[arg(long)]
    pub acknowledge_active_store_replacement: bool,
    /// Required acknowledgement that the data dir is a temp-test scope.
    #[arg(long)]
    pub acknowledge_temp_test_data_dir: bool,
    /// Operator acknowledgement flag fed into the policy composition.
    #[arg(long)]
    pub acknowledge_recovery_risk: bool,
    /// Optional anchor verified after the restore completes.
    #[arg(long = "post-restore-anchor", value_name = "ANCHOR_PATH")]
    pub post_restore_anchor: Option<PathBuf>,
    /// Optional anchor history verified after the restore completes.
    #[arg(
        long = "post-restore-anchor-history",
        value_name = "ANCHOR_HISTORY_PATH"
    )]
    pub post_restore_anchor_history: Option<PathBuf>,
}
/// CLI arguments for `cortex restore recover-apply` (mutates the active store).
#[derive(Debug, Args)]
pub struct RecoverApplyArgs {
    /// Apply-stage recovery manifest (RECOVERY_MANIFEST.json).
    #[arg(long, value_name = "PATH")]
    pub manifest: PathBuf,
    /// Required acknowledgement that current backups will be restored.
    #[arg(long)]
    pub acknowledge_current_backup_restore: bool,
    /// Required acknowledgement that the data dir is a temp-test scope.
    #[arg(long)]
    pub acknowledge_temp_test_data_dir: bool,
    /// Path to a 32-byte Ed25519 seed used for operator-key timeline
    /// revalidation; required by the destructive recover path.
    #[arg(long, value_name = "KEY_PATH")]
    pub attestation: Option<PathBuf>,
}
/// Serde target for a pre-v2 backup manifest.
///
/// Field semantics (sizes/digests per artifact) are consumed by
/// `verify_pre_v2_backup`, which is not visible in this chunk — field-level
/// validation rules should be confirmed there.
#[derive(Debug, Deserialize)]
struct PreV2BackupManifest {
    // Manifest kind tag; reports elsewhere use "cortex_pre_v2_backup".
    kind: String,
    schema_version: u16,
    // Relative or absolute artifact locations — TODO confirm resolution rules.
    sqlite_store: String,
    jsonl_mirror: String,
    tool_version: String,
    backup_timestamp: String,
    // Expected size and BLAKE3 digest for each artifact.
    sqlite_store_size_bytes: u64,
    sqlite_store_blake3: String,
    jsonl_mirror_size_bytes: u64,
    jsonl_mirror_blake3: String,
}
/// Result of successfully verifying a pre-v2 backup: schema version plus the
/// two verified artifacts (SQLite store and JSONL mirror).
#[derive(Debug)]
struct VerifiedBackup {
    schema_version: u16,
    sqlite_store: VerifiedArtifact,
    jsonl_mirror: VerifiedArtifact,
}
/// One verified backup artifact: the manifest field it came from, its
/// resolved path, and its verified size and BLAKE3 digest.
#[derive(Debug)]
struct VerifiedArtifact {
    // Name of the manifest field this artifact was read from (for reports).
    manifest_field: &'static str,
    path: PathBuf,
    size_bytes: u64,
    blake3: String,
}
/// Recovery evidence prepared before apply-stage replaces the active store:
/// backups of the active DB and event log plus their digests, and the path
/// of the recovery manifest describing them.
#[derive(Debug)]
pub(super) struct ApplyRecoveryEvidence {
    pub(super) manifest_path: PathBuf,
    pub(super) active_db_backup: PathBuf,
    pub(super) active_event_log_backup: PathBuf,
    pub(super) active_db_backup_blake3: String,
    pub(super) active_event_log_backup_blake3: String,
}
/// Serde target for the apply-stage recovery manifest read by `recover-apply`.
#[derive(Debug, Deserialize)]
struct ApplyRecoveryManifest {
    // Manifest kind tag and schema version — validated by the verify helpers.
    kind: String,
    schema_version: u16,
    // Scope string; checked by verify_apply_recovery_scope (not in view).
    scope: String,
    status: String,
    // Paths of the active store artifacts to be rolled back.
    active: ApplyRecoveryPaths,
    // Backups (with digests) that will replace the active artifacts.
    active_backups: ApplyRecoveryBackups,
}
/// Active-store artifact locations recorded in a recovery manifest.
#[derive(Debug, Deserialize)]
struct ApplyRecoveryPaths {
    db: PathBuf,
    event_log: PathBuf,
}
/// Backup artifacts (DB and event log) recorded in a recovery manifest.
#[derive(Debug, Deserialize)]
struct ApplyRecoveryBackups {
    db: ApplyRecoveryBackupArtifact,
    event_log: ApplyRecoveryBackupArtifact,
}
/// One backup artifact in a recovery manifest: path plus expected size and
/// BLAKE3 digest, verified before the backup is restored.
#[derive(Debug, Deserialize)]
struct ApplyRecoveryBackupArtifact {
    path: PathBuf,
    size_bytes: u64,
    blake3: String,
}
/// Dispatch a `cortex restore` subcommand to its handler and return its exit.
pub fn run(sub: RestoreSub) -> Exit {
    match sub {
        RestoreSub::Snapshot(args) => run_snapshot(args),
        RestoreSub::SemanticDiff(args) => run_semantic_diff(args),
        RestoreSub::VerifyBackup(args) => run_verify_backup(args),
        RestoreSub::VerifyPostMigrateManifest(args) => run_verify_post_migrate_manifest(args),
        RestoreSub::Preflight(args) => run_preflight(args),
        RestoreSub::Stage(args) => run_stage(args),
        RestoreSub::Intent { sub } => match sub {
            IntentSub::Build(args) => intent_build::run(args),
        },
        RestoreSub::Apply(args) => production::run_apply(*args),
        RestoreSub::ApplyStage(args) => run_apply_stage(args),
        RestoreSub::RecoverApply(args) => run_recover_apply(args),
    }
}
/// Handle `cortex restore snapshot`: open a store read-only, extract a
/// semantic snapshot, and print it as pretty JSON on stdout. Read-only.
fn run_snapshot(args: SnapshotArgs) -> Exit {
    // Resolve the pool from either --store or the default store location.
    let opened = match args.store {
        Some(path) => open_readonly_store(&path, "restore snapshot"),
        None => open_default_store("restore snapshot"),
    };
    let pool = match opened {
        Ok(pool) => pool,
        Err(exit) => return exit,
    };
    let snapshot = match semantic_snapshot_from_store(&pool) {
        Ok(snapshot) => snapshot,
        Err(err) => {
            eprintln!("cortex restore snapshot: failed to extract semantic snapshot: {err}");
            return Exit::PreconditionUnmet;
        }
    };
    match serde_json::to_string_pretty(&snapshot) {
        Ok(output) => {
            println!("{output}");
            Exit::Ok
        }
        Err(err) => {
            eprintln!("cortex restore snapshot: failed to serialize snapshot: {err}");
            Exit::Internal
        }
    }
}
/// Open an existing store at `path` read-only and verify its schema version.
///
/// Fails closed: a missing path, an open failure, or a verification error
/// maps to `Exit::PreconditionUnmet`; a report with failures prints each
/// failed invariant and maps to `Exit::SchemaMismatch`.
fn open_readonly_store(path: &Path, command: &str) -> Result<cortex_store::Pool, Exit> {
    if !path.exists() {
        eprintln!(
            "cortex {command}: precondition unmet: store {} does not exist; no state was changed.",
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    let pool = match cortex_store::open_existing_readonly(path) {
        Ok(pool) => pool,
        Err(err) => {
            eprintln!(
                "cortex {command}: failed to open read-only store {}: {err}",
                path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    let report =
        match cortex_store::verify::verify_schema_version(&pool, cortex_core::SCHEMA_VERSION) {
            Ok(report) => report,
            Err(err) => {
                eprintln!("cortex {command}: failed to verify schema preconditions: {err}");
                return Err(Exit::PreconditionUnmet);
            }
        };
    if report.is_ok() {
        Ok(pool)
    } else {
        for failure in &report.failures {
            eprintln!(
                "cortex {command}: {}: {}",
                failure.invariant(),
                failure.detail()
            );
        }
        Err(Exit::SchemaMismatch)
    }
}
/// Handle `cortex restore semantic-diff`: resolve both snapshot sides, diff
/// them, print a JSON report, and map the legacy restore decision to an exit
/// code. Read-only; never mutates a store.
fn run_semantic_diff(args: SemanticDiffArgs) -> Exit {
    // Resolve the "current" side from --current (snapshot file) and/or
    // --current-store; precedence is decided by read_semantic_snapshot_source.
    let current = match read_semantic_snapshot_source(
        args.current.as_deref(),
        args.current_store.as_deref(),
        "current",
    ) {
        Ok(snapshot) => snapshot,
        Err(exit) => return exit,
    };
    // Same resolution for the "restored" side.
    let restored = match read_semantic_snapshot_source(
        args.restored.as_deref(),
        args.restored_store.as_deref(),
        "restored",
    ) {
        Ok(snapshot) => snapshot,
        Err(exit) => return exit,
    };
    let diff = current.diff_against_restore(&restored);
    // Compose the policy decision; `legacy` carries the RestoreDecision that
    // drives the exit-code mapping below.
    let composed = policy::compose_semantic_diff_decision(
        &diff,
        args.acknowledge_recovery_risk,
        "restore.semantic_diff",
        "snapshot:current_vs_restored",
    );
    let decision = composed.legacy;
    // Diff-only command: report the fail-closed (Unknown/Observed) ceiling.
    let truth_ceiling = restore_truth_ceiling_fail_closed();
    let report = json!({
        "command": "restore.semantic_diff",
        "current_snapshot_id": diff.current_snapshot_id,
        "restored_snapshot_id": diff.restored_snapshot_id,
        "severity": diff.severity(),
        "decision": decision,
        "policy_decision": policy::policy_decision_report(&composed.decision),
        "change_count": diff.changes.len(),
        "changes": diff.changes,
        "mutated_store": false,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => println!("{output}"),
        Err(err) => {
            eprintln!("cortex restore semantic-diff: failed to serialize report: {err}");
            return Exit::Internal;
        }
    }
    // Clean and warning diffs succeed; only a hard precondition failure blocks.
    match decision {
        RestoreDecision::Clean | RestoreDecision::Warning { .. } => Exit::Ok,
        RestoreDecision::PreconditionUnmet { .. } => Exit::PreconditionUnmet,
    }
}
// Invariant identifiers reported when post-migrate manifest verification fails.
const POST_MIGRATE_MANIFEST_MISSING_DIGEST_INVARIANT: &str =
    "restore.post_migrate_manifest.missing_digest_field";
const POST_MIGRATE_MANIFEST_DIGEST_MISMATCH_INVARIANT: &str =
    "restore.post_migrate_manifest.digest_mismatch";
const POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT: &str = "restore.post_migrate_manifest.unreadable";
// `pub` so code outside this module can report the same invariant id — TODO
// confirm external call sites.
pub const RESTORE_RECOVER_ROLLBACK_BACKUP_DIGEST_MISMATCH_INVARIANT: &str =
    "restore.recover.rollback.backup_digest_mismatch";
// Envelope command name and the manifest field that holds the recorded digest.
const VERIFY_POST_MIGRATE_MANIFEST_COMMAND: &str = "cortex.restore.verify_post_migrate_manifest";
const POST_MIGRATE_MANIFEST_DIGEST_FIELD: &str = "manifest_blake3";
/// Handle `cortex restore verify-post-migrate-manifest`: read the manifest,
/// strip its recorded `manifest_blake3`, recompute the canonical BLAKE3 over
/// the remaining body, and compare. Every failure path is fail-closed and
/// emits a structured envelope; nothing is mutated.
fn run_verify_post_migrate_manifest(args: VerifyPostMigrateManifestArgs) -> Exit {
    // Unreadable file → PreconditionUnmet with the "unreadable" invariant.
    let raw = match fs::read_to_string(&args.manifest) {
        Ok(raw) => raw,
        Err(err) => {
            eprintln!(
                "cortex restore verify-post-migrate-manifest: {POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT}: failed to read manifest `{}`: {err}. no state was changed.",
                args.manifest.display()
            );
            return emit_verify_post_migrate_manifest_envelope(
                &args.manifest,
                None,
                None,
                Some(POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT),
                Exit::PreconditionUnmet,
            );
        }
    };
    // Invalid JSON → same invariant, same exit.
    let parsed: serde_json::Value = match serde_json::from_str(&raw) {
        Ok(value) => value,
        Err(err) => {
            eprintln!(
                "cortex restore verify-post-migrate-manifest: {POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT}: manifest `{}` is not valid JSON: {err}. no state was changed.",
                args.manifest.display()
            );
            return emit_verify_post_migrate_manifest_envelope(
                &args.manifest,
                None,
                None,
                Some(POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT),
                Exit::PreconditionUnmet,
            );
        }
    };
    // The manifest must be a JSON object so the digest field can be removed.
    let serde_json::Value::Object(mut object) = parsed else {
        eprintln!(
            "cortex restore verify-post-migrate-manifest: {POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT}: manifest `{}` is not a JSON object. no state was changed.",
            args.manifest.display()
        );
        return emit_verify_post_migrate_manifest_envelope(
            &args.manifest,
            None,
            None,
            Some(POST_MIGRATE_MANIFEST_UNREADABLE_INVARIANT),
            Exit::PreconditionUnmet,
        );
    };
    // Missing or non-string digest → IntegrityFailure (older manifests that
    // predate the digest field are unverifiable by design).
    let recorded_digest = match object.remove(POST_MIGRATE_MANIFEST_DIGEST_FIELD) {
        Some(serde_json::Value::String(value)) => value,
        Some(_) | None => {
            eprintln!(
                "cortex restore verify-post-migrate-manifest: {POST_MIGRATE_MANIFEST_MISSING_DIGEST_INVARIANT}: manifest `{}` is missing required `{POST_MIGRATE_MANIFEST_DIGEST_FIELD}` field (or it is not a string). Pre-Decision-#6 manifests are not verifiable; refusing fail-closed. no state was changed.",
                args.manifest.display()
            );
            return emit_verify_post_migrate_manifest_envelope(
                &args.manifest,
                None,
                None,
                Some(POST_MIGRATE_MANIFEST_MISSING_DIGEST_INVARIANT),
                Exit::IntegrityFailure,
            );
        }
    };
    // Recompute the digest over the body with the digest field removed; this
    // must match how the digest was originally recorded.
    let body_without_digest = serde_json::Value::Object(object);
    let recomputed_digest = canonical_blake3_hex(&body_without_digest);
    if recomputed_digest != recorded_digest {
        eprintln!(
            "cortex restore verify-post-migrate-manifest: {POST_MIGRATE_MANIFEST_DIGEST_MISMATCH_INVARIANT}: manifest `{}` BLAKE3 digest mismatch (recorded={}, recomputed={}). Refusing fail-closed; the manifest body was modified after the cutover. no state was changed.",
            args.manifest.display(),
            recorded_digest,
            recomputed_digest
        );
        return emit_verify_post_migrate_manifest_envelope(
            &args.manifest,
            Some(&recorded_digest),
            Some(&recomputed_digest),
            Some(POST_MIGRATE_MANIFEST_DIGEST_MISMATCH_INVARIANT),
            Exit::IntegrityFailure,
        );
    }
    // Success: digests match; emit the Ok envelope with both digests.
    eprintln!(
        "cortex restore verify-post-migrate-manifest: manifest `{}` BLAKE3 digest verified ({}).",
        args.manifest.display(),
        recorded_digest
    );
    emit_verify_post_migrate_manifest_envelope(
        &args.manifest,
        Some(&recorded_digest),
        Some(&recomputed_digest),
        None,
        Exit::Ok,
    )
}
/// Encode `value` with the ledger's canonical payload encoding and return
/// its BLAKE3 digest formatted as `blake3:<hex>`.
fn canonical_blake3_hex(value: &serde_json::Value) -> String {
    let canonical = cortex_ledger::canonical_payload_bytes(value);
    let digest = blake3::hash(&canonical);
    format!("blake3:{}", digest.to_hex())
}
/// Emit the structured output envelope for verify-post-migrate-manifest and
/// return `exit` unchanged.
///
/// The reported proof state is `FullChainVerified` only on a fully clean run
/// (`Exit::Ok` and no invariant); any failure degrades it to `Unknown`.
fn emit_verify_post_migrate_manifest_envelope(
    manifest: &Path,
    recorded_digest: Option<&str>,
    recomputed_digest: Option<&str>,
    invariant: Option<&'static str>,
    exit: Exit,
) -> Exit {
    let proof_state = match (exit, invariant) {
        (Exit::Ok, None) => ClaimProofState::FullChainVerified,
        _ => ClaimProofState::Unknown,
    };
    let truth_ceiling = restore_truth_ceiling_object(proof_state, AuthorityClass::Observed);
    let payload = json!({
        "manifest": manifest.display().to_string(),
        "manifest_blake3_recorded": recorded_digest,
        "manifest_blake3_recomputed": recomputed_digest,
        "invariant": invariant,
        "mutated_store": false,
        "truth_ceiling": truth_ceiling,
    });
    let envelope = crate::output::Envelope::new(VERIFY_POST_MIGRATE_MANIFEST_COMMAND, exit, payload);
    crate::output::emit(&envelope, exit)
}
/// Handle `cortex restore verify-backup`: structurally verify a pre-v2
/// backup manifest and its artifacts, then print a JSON report. Read-only;
/// the report explicitly records that no restore/cutover was performed.
fn run_verify_backup(args: VerifyBackupArgs) -> Exit {
    let verified = match verify_pre_v2_backup(&args.manifest) {
        Ok(verified) => verified,
        Err(exit) => return exit,
    };
    // Structural verification succeeded, so the chain is fully verified;
    // authority stays Observed because nothing beyond structure was checked.
    let truth_ceiling =
        restore_truth_ceiling_object(ClaimProofState::FullChainVerified, AuthorityClass::Observed);
    let report = json!({
        "command": "restore.verify_backup",
        "manifest": args.manifest,
        "kind": "cortex_pre_v2_backup",
        "schema_version": verified.schema_version,
        "artifacts": [
            {
                "field": verified.sqlite_store.manifest_field,
                "path": verified.sqlite_store.path,
                "size_bytes": verified.sqlite_store.size_bytes,
                "blake3": verified.sqlite_store.blake3,
            },
            {
                "field": verified.jsonl_mirror.manifest_field,
                "path": verified.jsonl_mirror.path,
                "size_bytes": verified.jsonl_mirror.size_bytes,
                "blake3": verified.jsonl_mirror.blake3,
            }
        ],
        "restore_performed": false,
        "cutover_performed": false,
        "destructive_restore_supported": false,
        "mutated_store": false,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => {
            println!("{output}");
            eprintln!(
                "cortex restore verify-backup: backup structure verified; destructive restore/cutover is not implemented. no state was changed."
            );
            Exit::Ok
        }
        Err(err) => {
            eprintln!("cortex restore verify-backup: failed to serialize report: {err}");
            Exit::Internal
        }
    }
}
/// Handle `cortex restore preflight`: structural verification, optional
/// semantic diff (requires --candidate-store), and an optional — always
/// blocked — production active-store restore plan. Read-only.
fn run_preflight(args: PreflightArgs) -> Exit {
    let verified = match verify_pre_v2_backup(&args.manifest) {
        Ok(verified) => verified,
        Err(exit) => return exit,
    };
    let structural = verified_backup_report(&verified);
    // Without --candidate-store the semantic diff cannot run; that is itself
    // a precondition failure, reported in the JSON rather than as an error.
    let (semantic_diff, exit) = match args.candidate_store.as_deref() {
        None => (
            json!({
                "required": true,
                "executed": false,
                "status": "required_not_executed",
                "reason": "pass --candidate-store with an expanded candidate store to run read-only semantic diff",
            }),
            Exit::PreconditionUnmet,
        ),
        Some(candidate_store) => match semantic_preflight_report(
            args.current_store.as_deref(),
            candidate_store,
            args.acknowledge_recovery_risk,
        ) {
            Ok(result) => result,
            Err(exit) => return exit,
        },
    };
    // The production plan is advisory only; it is generated from the
    // semantic-diff exit plus lock-marker preflight state.
    let production_active_store_plan = if args.production_active_store_plan {
        Some(production_active_store_plan_report(
            exit,
            args.current_store.as_deref(),
            args.active_store_lock_marker.as_deref(),
        ))
    } else {
        None
    };
    // Requesting the production plan forces a non-Ok exit even when the
    // semantic diff passed: production restore gates are never satisfied here.
    let final_exit = if args.production_active_store_plan && exit == Exit::Ok {
        Exit::PreconditionUnmet
    } else {
        exit
    };
    let preflight_semantic_decision =
        semantic_decision_for_preflight_composition(&semantic_diff, exit);
    let preflight_decision = policy::compose_preflight_decision(
        preflight_semantic_decision.as_ref(),
        args.production_active_store_plan,
    );
    // Proof state is keyed to the pre-override semantic exit, not final_exit.
    let preflight_proof_state = if exit == Exit::Ok {
        ClaimProofState::FullChainVerified
    } else {
        ClaimProofState::Unknown
    };
    let truth_ceiling =
        restore_truth_ceiling_object(preflight_proof_state, AuthorityClass::Observed);
    let report = json!({
        "command": "restore.preflight",
        "manifest": args.manifest,
        "structural_verification": structural,
        "semantic_diff": semantic_diff,
        "production_active_store_plan": production_active_store_plan.unwrap_or_else(|| json!({
            "requested": false,
        })),
        "policy_decision": policy::policy_decision_report(&preflight_decision),
        "restore_performed": false,
        "cutover_performed": false,
        "destructive_restore_supported": false,
        "mutated_store": false,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => {
            println!("{output}");
            if args.production_active_store_plan {
                eprintln!(
                    "cortex restore preflight: production active-store restore plan is blocked by unsatisfied production gates. no state was changed."
                );
            } else {
                eprintln!(
                    "cortex restore preflight: structural verification completed; semantic diff status is reported above. no state was changed."
                );
            }
            final_exit
        }
        Err(err) => {
            eprintln!("cortex restore preflight: failed to serialize report: {err}");
            Exit::Internal
        }
    }
}
/// Handle `cortex restore stage`: copy verified backup artifacts into a
/// temporary directory, run audit and semantic gates against the copies, and
/// only then atomically rename the temp dir to the final stage dir.
///
/// The active store is never touched; every failure after the temp dir is
/// created cleans it up before returning.
fn run_stage(args: StageArgs) -> Exit {
    // Guard 1: explicit acknowledgement is mandatory.
    if !args.acknowledge_destructive_restore {
        eprintln!(
            "cortex restore stage: --acknowledge-destructive-restore is required; no state was changed."
        );
        return Exit::PreconditionUnmet;
    }
    // Guard 2: never overwrite an existing stage dir.
    if args.stage_dir.exists() {
        eprintln!(
            "cortex restore stage: stage directory `{}` already exists; no state was changed.",
            args.stage_dir.display()
        );
        return Exit::PreconditionUnmet;
    }
    // Guard 3: the manifest and its artifacts must verify structurally.
    let verified = match verify_pre_v2_backup(&args.manifest) {
        Ok(verified) => verified,
        Err(exit) => return exit,
    };
    // All staging work happens in a sibling temp dir so the final stage dir
    // only ever appears fully populated (via the rename below).
    let tmp_stage_dir = temporary_stage_dir(&args.stage_dir);
    if tmp_stage_dir.exists() {
        eprintln!(
            "cortex restore stage: temporary stage directory `{}` already exists; no state was changed.",
            tmp_stage_dir.display()
        );
        return Exit::PreconditionUnmet;
    }
    if let Some(parent) = tmp_stage_dir.parent() {
        if !parent.exists() {
            eprintln!(
                "cortex restore stage: parent directory `{}` does not exist; no state was changed.",
                parent.display()
            );
            return Exit::PreconditionUnmet;
        }
    }
    if let Err(err) = fs::create_dir(&tmp_stage_dir) {
        eprintln!(
            "cortex restore stage: failed to create temporary stage directory `{}`: {err}. no state was changed.",
            tmp_stage_dir.display()
        );
        return Exit::PreconditionUnmet;
    }
    // Copy both artifacts into the temp dir; any failure tears it down.
    let staged_sqlite = tmp_stage_dir.join("cortex.db");
    let staged_jsonl = tmp_stage_dir.join("events.jsonl");
    if let Err(exit) = copy_verified_artifact(&verified.sqlite_store.path, &staged_sqlite) {
        cleanup_temporary_stage(&tmp_stage_dir);
        return exit;
    }
    if let Err(exit) = copy_verified_artifact(&verified.jsonl_mirror.path, &staged_jsonl) {
        cleanup_temporary_stage(&tmp_stage_dir);
        return exit;
    }
    // Audit-chain verification runs on the staged copy, not the original.
    let audit = match audit_verify_staged_jsonl(&staged_jsonl) {
        Ok(report) => report,
        Err(exit) => {
            cleanup_temporary_stage(&tmp_stage_dir);
            return exit;
        }
    };
    // Semantic gate: diff the staged candidate against the current store.
    let (semantic_diff, semantic_exit) = match semantic_preflight_report(
        args.current_store.as_deref(),
        &staged_sqlite,
        args.acknowledge_recovery_risk,
    ) {
        Ok(result) => result,
        Err(exit) => {
            cleanup_temporary_stage(&tmp_stage_dir);
            return exit;
        }
    };
    if semantic_exit != Exit::Ok {
        cleanup_temporary_stage(&tmp_stage_dir);
        return semantic_exit;
    }
    // All gates passed: publish the stage dir with a single rename.
    if let Err(err) = fs::rename(&tmp_stage_dir, &args.stage_dir) {
        cleanup_temporary_stage(&tmp_stage_dir);
        eprintln!(
            "cortex restore stage: failed to finalize stage directory `{}`: {err}. active store was not changed.",
            args.stage_dir.display()
        );
        return Exit::Internal;
    }
    let stage_semantic_decision =
        semantic_decision_for_preflight_composition(&semantic_diff, semantic_exit);
    let stage_decision = policy::compose_stage_decision(
        args.acknowledge_destructive_restore,
        stage_semantic_decision.as_ref(),
        args.acknowledge_recovery_risk,
        &format!("stage_dir:{}", args.stage_dir.display()),
    );
    // The recovery-risk acknowledgement upgrades the reported authority class.
    let stage_authority = if args.acknowledge_recovery_risk {
        AuthorityClass::Verified
    } else {
        AuthorityClass::Observed
    };
    let truth_ceiling =
        restore_truth_ceiling_object(ClaimProofState::FullChainVerified, stage_authority);
    let report = json!({
        "command": "restore.stage",
        "manifest": args.manifest,
        "stage_dir": args.stage_dir,
        "structural_verification": verified_backup_report(&verified),
        "audit_verification": audit_report(&audit),
        "semantic_diff": semantic_diff,
        "policy_decision": policy::policy_decision_report(&stage_decision),
        "restore_performed": false,
        "cutover_performed": false,
        "destructive_restore_supported": false,
        "destructive_restore_staged": true,
        "mutated_store": false,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => {
            println!("{output}");
            eprintln!(
                "cortex restore stage: restore candidate staged after structural, audit, and semantic guards. active store was not changed."
            );
            Exit::Ok
        }
        Err(err) => {
            eprintln!("cortex restore stage: failed to serialize report: {err}");
            Exit::Internal
        }
    }
}
/// Handle `cortex restore apply-stage`: replace the temp-test active store
/// with a previously staged candidate.
///
/// Guard order: acknowledgements → stage dir exists → data dir is temp-test →
/// manifest re-verification → staged-artifact digests → audit chain →
/// semantic gate → pre-apply hash + audit row → recovery evidence → apply →
/// post-restore verification (with rollback from recovery evidence on
/// failure). This is the only path in this chunk that mutates the active
/// store on success.
fn run_apply_stage(args: ApplyStageArgs) -> Exit {
    // Both acknowledgements are hard requirements.
    if !args.acknowledge_active_store_replacement || !args.acknowledge_temp_test_data_dir {
        eprintln!(
            "cortex restore apply-stage: --acknowledge-active-store-replacement and --acknowledge-temp-test-data-dir are required; active store was not changed."
        );
        return Exit::PreconditionUnmet;
    }
    if !args.stage_dir.is_dir() {
        eprintln!(
            "cortex restore apply-stage: stage directory `{}` does not exist; active store was not changed.",
            args.stage_dir.display()
        );
        return Exit::PreconditionUnmet;
    }
    let layout = match DataLayout::resolve(None, None) {
        Ok(layout) => layout,
        Err(exit) => return exit,
    };
    // Scope wall: this command only ever mutates a temp-test data directory.
    if !is_temp_test_data_dir(&layout.data_dir) {
        eprintln!(
            "cortex restore apply-stage: active data directory `{}` is not under the system temp directory; active store was not changed.",
            layout.data_dir.display()
        );
        return Exit::PreconditionUnmet;
    }
    // Re-verify the manifest, then check the staged copies against it so a
    // stage dir modified since `restore stage` is rejected.
    let verified = match verify_pre_v2_backup(&args.manifest) {
        Ok(verified) => verified,
        Err(exit) => return exit,
    };
    let staged_sqlite = args.stage_dir.join("cortex.db");
    let staged_jsonl = args.stage_dir.join("events.jsonl");
    if let Err(exit) = verify_staged_artifact(&staged_sqlite, &verified.sqlite_store) {
        return exit;
    }
    if let Err(exit) = verify_staged_artifact(&staged_jsonl, &verified.jsonl_mirror) {
        return exit;
    }
    let audit = match audit_verify_staged_jsonl(&staged_jsonl) {
        Ok(report) => report,
        Err(exit) => return exit,
    };
    // Semantic gate against the live active DB.
    let (semantic_diff, semantic_exit) = match semantic_preflight_report(
        Some(&layout.db_path),
        &staged_sqlite,
        args.acknowledge_recovery_risk,
    ) {
        Ok(result) => result,
        Err(exit) => return exit,
    };
    if semantic_exit != Exit::Ok {
        eprintln!(
            "cortex restore apply-stage: semantic restore gate rejected the candidate; active store was not changed."
        );
        return semantic_exit;
    }
    // Hash the active DB before mutation so the audit row records it.
    let active_db_before_hash = match blake3_file(&layout.db_path, "active_db_before_restore") {
        Ok(hash) => Some(hash),
        Err(exit) => return exit,
    };
    let audit_entry =
        match restore_apply_stage_audit_entry(&args, &layout, &verified, active_db_before_hash) {
            Ok(entry) => entry,
            Err(exit) => return exit,
        };
    // apply_staged_active_store prepares recovery evidence (backups + manifest)
    // before replacing the active artifacts.
    let recovery_evidence =
        match apply_staged_active_store(&layout, &staged_sqlite, &staged_jsonl, &audit_entry) {
            Ok(recovery_evidence) => recovery_evidence,
            Err(exit) => return exit,
        };
    // Post-restore verification; on failure, roll back from the recovery
    // evidence and report whether the rollback itself succeeded.
    let post_restore_verification = match post_restore_verification_report(
        &verified,
        &layout,
        &staged_sqlite,
        args.acknowledge_recovery_risk,
        args.post_restore_anchor.as_deref(),
        args.post_restore_anchor_history.as_deref(),
    ) {
        Ok(report) => report,
        Err(exit) => {
            return match restore_current_backups(&layout, &recovery_evidence) {
                Ok(()) => {
                    eprintln!(
                        "cortex restore apply-stage: post-restore verification failed after active replacement; active backups were restored from recovery evidence `{}`. active store was returned to its pre-apply state.",
                        recovery_evidence.manifest_path.display()
                    );
                    exit
                }
                Err(err) => {
                    eprintln!(
                        "cortex restore apply-stage: post-restore verification failed after active replacement and rollback from recovery evidence `{}` failed: {err}. active store may require manual recovery before continuing writes.",
                        recovery_evidence.manifest_path.display()
                    );
                    err.to_exit()
                }
            };
        }
    };
    let apply_decision = policy::compose_apply_decision(
        post_restore_anchor_outcome_from_report(&post_restore_verification),
        "post-restore anchor verified against active JSONL (temp-test scope)",
        cortex_core::PolicyOutcome::Allow,
        "operator temporal authority bound by temp-test data dir scope",
        args.acknowledge_recovery_risk,
        "restore.apply_stage",
        &format!("active_db:{}", layout.db_path.display()),
    );
    let truth_ceiling =
        restore_truth_ceiling_object(ClaimProofState::FullChainVerified, AuthorityClass::Verified);
    let report = json!({
        "command": "restore.apply_stage",
        "manifest": args.manifest,
        "stage_dir": args.stage_dir,
        "active_db": layout.db_path,
        "active_event_log": layout.event_log_path,
        "structural_verification": verified_backup_report(&verified),
        "staged_artifacts": "verified",
        "audit_verification": audit_report(&audit),
        "semantic_diff": semantic_diff,
        "post_restore_verification": post_restore_verification,
        "policy_decision": policy::policy_decision_report(&apply_decision),
        "recovery_evidence": {
            "status": "prepared_before_active_replacement",
            "manifest": recovery_evidence.manifest_path,
            "active_db_backup": recovery_evidence.active_db_backup,
            "active_event_log_backup": recovery_evidence.active_event_log_backup,
        },
        "audit_record_id": audit_entry.id,
        "audit_operation": audit_entry.operation,
        "restore_performed": true,
        "cutover_performed": true,
        "destructive_restore_supported": true,
        "mutated_store": true,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => {
            println!("{output}");
            eprintln!(
                "cortex restore apply-stage: staged candidate applied to temp-test active store after manifest, audit, and semantic guards. active store was mutated."
            );
            Exit::Ok
        }
        Err(err) => {
            eprintln!("cortex restore apply-stage: failed to serialize report: {err}");
            Exit::Internal
        }
    }
}
/// Handle `cortex restore recover-apply`: roll the temp-test active store
/// back to the backups recorded in an apply-stage recovery manifest.
///
/// Guard order: acknowledgements → manifest parse → scope check → backup
/// digest checks → target checks → operator temporal-authority revalidation →
/// pre-recovery hash + audit row → restore backups. Mutates the active store
/// on success.
fn run_recover_apply(args: RecoverApplyArgs) -> Exit {
    // Both acknowledgements are hard requirements.
    if !args.acknowledge_current_backup_restore || !args.acknowledge_temp_test_data_dir {
        eprintln!(
            "cortex restore recover-apply: --acknowledge-current-backup-restore and --acknowledge-temp-test-data-dir are required; active store was not changed."
        );
        return Exit::PreconditionUnmet;
    }
    let manifest = match read_apply_recovery_manifest(&args.manifest) {
        Ok(manifest) => manifest,
        Err(exit) => return exit,
    };
    if let Err(exit) = verify_apply_recovery_scope(&manifest) {
        return exit;
    }
    // Verify both backup artifacts against their recorded size/digest before
    // anything is restored from them.
    if let Err(exit) = verify_apply_recovery_backup(
        &args.manifest,
        "active_db_backup",
        &manifest.active_backups.db,
    ) {
        return exit;
    }
    if let Err(exit) = verify_apply_recovery_backup(
        &args.manifest,
        "active_event_log_backup",
        &manifest.active_backups.event_log,
    ) {
        return exit;
    }
    if let Err(exit) = verify_apply_recovery_targets(&manifest) {
        return exit;
    }
    // Destructive doctrine root: requires durable operator-key revalidation.
    let operator_temporal_ok =
        match revalidate_recover_apply_operator_temporal_authority(args.attestation.as_deref()) {
            Ok(ok) => ok,
            Err(exit) => return exit,
        };
    // Hash the active DB before mutation so the audit row records it.
    let active_db_before_hash = match blake3_file(&manifest.active.db, "active_db_before_recovery") {
        Ok(hash) => Some(hash),
        Err(exit) => return exit,
    };
    let audit_entry =
        match restore_recover_apply_audit_entry(&args, &manifest, active_db_before_hash) {
            Ok(entry) => entry,
            Err(exit) => return exit,
        };
    if let Err(exit) = restore_apply_recovery_backups(&manifest, &audit_entry) {
        return exit;
    }
    let recover_decision = policy::compose_recover_apply_decision(
        true,
        true,
        operator_temporal_ok,
        false,
        &format!("recovery_manifest:{}", args.manifest.display()),
    );
    let truth_ceiling =
        restore_truth_ceiling_object(ClaimProofState::FullChainVerified, AuthorityClass::Verified);
    let report = json!({
        "command": "restore.recover_apply",
        "manifest": args.manifest,
        "scope": manifest.scope,
        "active_db": manifest.active.db,
        "active_event_log": manifest.active.event_log,
        "active_db_backup": manifest.active_backups.db.path,
        "active_event_log_backup": manifest.active_backups.event_log.path,
        "recovery_status": "current_backups_restored_with_command_audit_row",
        "policy_decision": policy::policy_decision_report(&recover_decision),
        "audit_record_id": audit_entry.id,
        "audit_operation": audit_entry.operation,
        "restore_performed": true,
        "cutover_performed": true,
        "destructive_restore_supported": true,
        "mutated_store": true,
        "truth_ceiling": truth_ceiling,
    });
    match serde_json::to_string_pretty(&report) {
        Ok(output) => {
            println!("{output}");
            eprintln!(
                "cortex restore recover-apply: current backups restored to temp-test active store from apply-stage recovery manifest, then a command audit row was appended. active store was mutated."
            );
            Exit::Ok
        }
        Err(err) => {
            eprintln!("cortex restore recover-apply: failed to serialize report: {err}");
            Exit::Internal
        }
    }
}
/// Revalidate the operator's temporal authority for recover-apply using a
/// 32-byte Ed25519 seed file.
///
/// Fails closed with `Exit::PreconditionUnmet` when the attestation path is
/// missing, unreadable, wrong length, the timeline cannot be read, or the
/// key is not currently valid. Returns `Ok(true)` only on full success.
fn revalidate_recover_apply_operator_temporal_authority(
    attestation_path: Option<&Path>,
) -> Result<bool, Exit> {
    let invariant = revalidation_failed_invariant("restore.recover_apply");
    // The attestation is mandatory for this destructive path.
    let Some(path) = attestation_path else {
        eprintln!(
            "cortex restore recover-apply: {invariant}: --attestation <PATH> is required; recover-apply is a destructive doctrine root and requires durable operator-key timeline revalidation (ADR 0023 / ADR 0026 §4 hard wall). active store was not changed."
        );
        return Err(Exit::PreconditionUnmet);
    };
    let bytes = fs::read(path).map_err(|err| {
        eprintln!(
            "cortex restore recover-apply: {invariant}: cannot read --attestation key file `{}`: {err}. active store was not changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    // The file must be exactly an Ed25519 seed, no encoding or framing.
    if bytes.len() != 32 {
        eprintln!(
            "cortex restore recover-apply: {invariant}: --attestation key file `{}` must be exactly 32 raw bytes (Ed25519 seed); got {} bytes. active store was not changed.",
            path.display(),
            bytes.len()
        );
        return Err(Exit::PreconditionUnmet);
    }
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&bytes);
    let attestor = InMemoryAttestor::from_seed(&seed);
    let pool = open_default_store("restore recover-apply")?;
    // Revalidate the key's authority timeline "as of now" at Operator tier.
    let now = chrono::Utc::now();
    let contribution = revalidate_operator_temporal_authority(
        &pool,
        policy::RECOVER_APPLY_OPERATOR_TEMPORAL_AUTHORITY_RULE_ID,
        attestor.key_id(),
        now,
        TrustTier::Operator,
    )
    .map_err(|err| {
        eprintln!(
            "cortex restore recover-apply: {invariant}: failed to read authority timeline for key {}: {err}. active store was not changed.",
            attestor.key_id(),
        );
        Exit::PreconditionUnmet
    })?;
    // A readable timeline that is not valid now still blocks, with reasons.
    if !contribution.report.valid_now {
        let reasons = contribution
            .report
            .reasons
            .iter()
            .map(|reason| reason.wire_str())
            .collect::<Vec<_>>()
            .join(",");
        eprintln!(
            "cortex restore recover-apply: {invariant}: operator temporal authority current use blocked for key {} (reasons: {reasons}). active store was not changed.",
            contribution.report.key_id,
        );
        return Err(Exit::PreconditionUnmet);
    }
    Ok(true)
}
fn verified_backup_report(verified: &VerifiedBackup) -> serde_json::Value {
json!({
"status": "verified",
"kind": "cortex_pre_v2_backup",
"schema_version": verified.schema_version,
"artifacts": [
{
"field": verified.sqlite_store.manifest_field,
"path": verified.sqlite_store.path,
"size_bytes": verified.sqlite_store.size_bytes,
"blake3": verified.sqlite_store.blake3,
},
{
"field": verified.jsonl_mirror.manifest_field,
"path": verified.jsonl_mirror.path,
"size_bytes": verified.jsonl_mirror.size_bytes,
"blake3": verified.jsonl_mirror.blake3,
}
],
})
}
/// Builds the JSON plan explaining why a production active-store restore is
/// blocked, enumerating each preflight gate's status and the gates that are
/// still missing entirely.
///
/// `preflight_exit` carries the semantic-diff preflight result; `current_store`
/// and `lock_marker` parameterize the lock-marker probe. The report always has
/// `"status": "blocked"` — production mutation is not implemented.
fn production_active_store_plan_report(
    preflight_exit: Exit,
    current_store: Option<&Path>,
    lock_marker: Option<&Path>,
) -> serde_json::Value {
    // The semantic-diff gate is satisfied only by a clean preflight exit.
    let semantic_diff_gate = if preflight_exit == Exit::Ok {
        "satisfied"
    } else {
        "blocked"
    };
    let active_store_lock = active_store_lock_preflight_report(current_store, lock_marker);
    // A successful acquire/release probe is only preflight evidence: no lock is
    // held across the mutation window, so the gate remains unmet either way —
    // the two branches differ only in how far the preflight got.
    let active_store_lock_gate = if active_store_lock["exclusive_lock_acquire_release_verified"]
        .as_bool()
        .unwrap_or(false)
    {
        json!({
            "gate": "active_store_lock",
            "status": "preflight_only",
            "reason": "exclusive temp marker acquire/release preflight succeeded, but production restore still lacks a held lock and writer-freeze protocol for the mutation window",
        })
    } else {
        json!({
            "gate": "active_store_lock",
            "status": "missing",
            "reason": "production restore must exclude concurrent Cortex writers before active path replacement",
        })
    };
    json!({
        "requested": true,
        "status": "blocked",
        "decision": "precondition_unmet",
        "restore_mode": "production_active_store",
        "mutation_supported": false,
        "mutation_command_available": false,
        "preflight_gates": {
            "manifest_and_artifact_verification": "satisfied",
            "semantic_diff": semantic_diff_gate,
            "active_store_lock": active_store_lock["status"],
            "schema_v2_cutover": "blocked",
            "post_restore_verification": "blocked",
        },
        "active_store_lock": active_store_lock,
        "post_restore_verification_gates": production_post_restore_verification_gates(),
        "required_gates": [
            {
                "gate": "schema_v2_cutover",
                "status": "missing",
                "reason": format!("production active-store restore remains blocked while cortex_core::SCHEMA_VERSION is {} and the schema v2 atomic cutover is incomplete", cortex_core::SCHEMA_VERSION),
            },
            {
                "gate": "production_identity_attestation",
                "status": "missing",
                "reason": "production active-store restore must bind the operator and recovery authority before mutation",
            },
            active_store_lock_gate,
            {
                "gate": "atomic_cutover_protocol",
                "status": "missing",
                "reason": "production restore must define crash-safe SQLite and JSONL replacement with rollback evidence",
            },
            {
                "gate": "external_anchor_revalidation",
                "status": "missing",
                "reason": "production restore must revalidate audit chain authority against the selected anchor strategy",
            },
            {
                "gate": "post_restore_audit_and_semantic_recheck",
                "status": "missing",
                "reason": "production restore must verify chain, schema, and semantic snapshot after cutover before reopening writes",
            },
            {
                "gate": "post_restore_anchor_revalidation",
                "status": "missing",
                "reason": "production restore must revalidate the restored JSONL against the configured external anchor surface after cutover",
            }
        ],
        "safe_next_step": "stage and apply the candidate only in a temp-test data directory; production active-store mutation remains unavailable",
    })
}
/// Static description of the production post-restore verification gates: all
/// remain blocked until a production cutover protocol exists and an external
/// anchor authority is configured. Takes no input; always the same report.
fn production_post_restore_verification_gates() -> serde_json::Value {
    json!({
        "status": "blocked",
        "executed": false,
        "reason": "post-restore gates require a completed production cutover protocol and configured external anchor authority",
        "manifest_artifacts": {
            "required": true,
            "status": "blocked_until_cutover",
        },
        "jsonl_audit": {
            "required": true,
            "status": "blocked_until_cutover",
        },
        "semantic_diff": {
            "required": true,
            "status": "blocked_until_cutover",
        },
        "anchors": {
            "required": true,
            "status": "missing_external_anchor_authority",
        },
    })
}
/// Reports on the active-store lock-marker preflight without ever holding a
/// lock across any mutation: checks marker absence, checks the marker's parent
/// directory, and — only for temp-test data directories — runs an atomic
/// create/write/delete probe of the marker path.
///
/// Returns a JSON object whose `exclusive_lock_acquire_release_verified` flag
/// is consumed by `production_active_store_plan_report`.
fn active_store_lock_preflight_report(
    current_store: Option<&Path>,
    lock_marker: Option<&Path>,
) -> serde_json::Value {
    // Explicit marker path wins; otherwise derive one from the store layout.
    let marker_path = lock_marker
        .map(Path::to_path_buf)
        .unwrap_or_else(|| default_active_store_lock_marker(current_store));
    let marker_exists = marker_path.exists();
    // Gate 1: an existing marker means another writer/restore may hold the
    // store — never attempt the probe in that case.
    if marker_exists {
        return json!({
            "status": "blocked_existing_lock_marker",
            "lock_marker": marker_path,
            "marker_exists": true,
            "absence_required": true,
            "absence_satisfied": false,
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": false,
            "exclusive_lock_acquired_during_probe": false,
            "exclusive_lock_released": false,
            "exclusive_lock_acquire_release_verified": false,
            "probe_scope": "not_attempted_existing_marker",
            "reason": "an active-store lock marker already exists; production restore must not proceed while another writer or restore may hold the active store",
        });
    }
    // Gate 2: the marker's parent directory must exist for any probe to work.
    let marker_parent = marker_path.parent().unwrap_or_else(|| Path::new("."));
    if !marker_parent.exists() {
        return json!({
            "status": "lock_marker_parent_missing",
            "lock_marker": marker_path,
            "marker_exists": false,
            "absence_required": true,
            "absence_satisfied": true,
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": false,
            "exclusive_lock_acquired_during_probe": false,
            "exclusive_lock_released": false,
            "exclusive_lock_acquire_release_verified": false,
            "probe_scope": "not_attempted_missing_parent",
            "reason": "active-store lock marker parent does not exist; production restore still lacks a usable lock path",
        });
    }
    // Gate 3: the destructive create/delete probe is only allowed inside a
    // temp-test data directory; never touch a real store's directory.
    if !is_temp_test_data_dir(marker_parent) {
        return json!({
            "status": "temp_lock_probe_not_executed",
            "lock_marker": marker_path,
            "marker_exists": false,
            "absence_required": true,
            "absence_satisfied": true,
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": false,
            "exclusive_lock_acquired_during_probe": false,
            "exclusive_lock_released": false,
            "exclusive_lock_acquire_release_verified": false,
            "probe_scope": "not_attempted_non_temp_marker",
            "reason": "exclusive lock acquisition is implemented only as a temp-test/report preflight probe; production restore still lacks a held lock protocol",
        });
    }
    // Gate 4: run the actual acquire/release probe and report its outcome.
    match acquire_release_active_store_lock_probe(&marker_path) {
        Ok(()) => json!({
            "status": "temp_lock_acquire_release_verified",
            "lock_marker": marker_path,
            "marker_exists": false,
            "absence_required": true,
            "absence_satisfied": true,
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": true,
            "exclusive_lock_acquired_during_probe": true,
            "exclusive_lock_released": true,
            "exclusive_lock_acquire_release_verified": true,
            "probe_scope": "temp_test_report_only",
            "reason": "exclusive temp marker create_new acquisition and release succeeded; production restore remains blocked because no lock is held across mutation",
        }),
        // A concurrent party created the marker between our existence check
        // and the probe's create_new — treat it like the pre-existing case.
        Err(ActiveStoreLockProbeError::AlreadyExists) => json!({
            "status": "blocked_existing_lock_marker",
            "lock_marker": marker_path,
            "marker_exists": true,
            "absence_required": true,
            "absence_satisfied": false,
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": true,
            "exclusive_lock_acquired_during_probe": false,
            "exclusive_lock_released": false,
            "exclusive_lock_acquire_release_verified": false,
            "probe_scope": "temp_test_report_only",
            "reason": "exclusive lock probe observed an existing active-store marker; production restore must not proceed while another writer or restore may hold the active store",
        }),
        Err(ActiveStoreLockProbeError::Io(message)) => json!({
            "status": "lock_probe_failed",
            "lock_marker": marker_path,
            // Re-check existence: the probe may have failed before or after
            // creating the marker file.
            "marker_exists": marker_path.exists(),
            "absence_required": true,
            "absence_satisfied": !marker_path.exists(),
            "exclusive_lock_acquired": false,
            "exclusive_lock_acquire_attempted": true,
            "exclusive_lock_acquired_during_probe": false,
            "exclusive_lock_released": false,
            "exclusive_lock_acquire_release_verified": false,
            "probe_scope": "temp_test_report_only",
            "reason": message,
        }),
    }
}
/// Chooses the default lock-marker path: next to the current store when its
/// parent is known, else inside the resolved default data directory, else a
/// relative fallback in the working directory.
fn default_active_store_lock_marker(current_store: Option<&Path>) -> PathBuf {
    const MARKER_FILE: &str = ".cortex-restore-active-store.lock";
    match current_store.and_then(Path::parent) {
        Some(store_parent) => store_parent.join(MARKER_FILE),
        None => DataLayout::resolve(None, None)
            .map(|layout| layout.data_dir.join(MARKER_FILE))
            .unwrap_or_else(|_| PathBuf::from(MARKER_FILE)),
    }
}
/// Outcome of the exclusive lock-marker probe: the marker already existed, or
/// a filesystem operation failed (message carries the detail).
#[derive(Debug)]
enum ActiveStoreLockProbeError {
    AlreadyExists,
    Io(String),
}
/// Probes exclusive active-store locking by atomically creating the marker
/// file (`create_new`), writing and syncing a small identifying payload, then
/// deleting it again.
///
/// Preflight evidence only: success proves the marker path supports exclusive
/// acquisition, but no lock remains held afterwards.
fn acquire_release_active_store_lock_probe(
    marker_path: &Path,
) -> Result<(), ActiveStoreLockProbeError> {
    // `create_new` makes acquisition atomic: it fails if the marker exists.
    let created = OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(marker_path);
    let mut file = match created {
        Ok(file) => file,
        Err(err) if err.kind() == ErrorKind::AlreadyExists => {
            return Err(ActiveStoreLockProbeError::AlreadyExists);
        }
        Err(err) => {
            return Err(ActiveStoreLockProbeError::Io(format!(
                "failed to acquire exclusive active-store lock probe `{}`: {err}",
                marker_path.display()
            )));
        }
    };
    let payload = format!(
        "cortex restore active-store lock preflight\npid={}\nscope=temp-test-report-only\n",
        std::process::id()
    );
    let write_result = file
        .write_all(payload.as_bytes())
        .and_then(|()| file.sync_all());
    if let Err(err) = write_result {
        // Best-effort cleanup so a failed probe does not leave a stale marker.
        let _ = fs::remove_file(marker_path);
        return Err(ActiveStoreLockProbeError::Io(format!(
            "failed to write exclusive active-store lock probe `{}`: {err}",
            marker_path.display()
        )));
    }
    drop(file);
    // Deleting the marker is the "release" half of the acquire/release cycle.
    if let Err(err) = fs::remove_file(marker_path) {
        return Err(ActiveStoreLockProbeError::Io(format!(
            "failed to release exclusive active-store lock probe `{}`: {err}",
            marker_path.display()
        )));
    }
    Ok(())
}
/// Folds a post-restore report into a policy outcome: `Allow` only when the
/// `anchors` section records `"executed": true`, otherwise `Warn`.
fn post_restore_anchor_outcome_from_report(
    post_restore: &serde_json::Value,
) -> cortex_core::PolicyOutcome {
    let anchors_executed = matches!(
        post_restore
            .get("anchors")
            .and_then(|anchors| anchors.get("executed"))
            .and_then(serde_json::Value::as_bool),
        Some(true)
    );
    if anchors_executed {
        cortex_core::PolicyOutcome::Allow
    } else {
        cortex_core::PolicyOutcome::Warn
    }
}
/// Synthesizes a policy decision from a semantic-diff preflight report.
///
/// Returns `None` when the report says the diff was never executed; otherwise
/// composes a single contribution whose outcome mirrors `exit` (`Ok` → Allow,
/// anything else → Reject).
fn semantic_decision_for_preflight_composition(
    semantic_diff: &serde_json::Value,
    exit: Exit,
) -> Option<cortex_core::PolicyDecision> {
    use cortex_core::{compose_policy_outcomes, PolicyContribution, PolicyOutcome};
    let executed = semantic_diff
        .get("executed")
        .and_then(serde_json::Value::as_bool)
        .unwrap_or(false);
    if !executed {
        return None;
    }
    let outcome = if exit == Exit::Ok {
        PolicyOutcome::Allow
    } else {
        PolicyOutcome::Reject
    };
    let contribution = PolicyContribution::new(
        policy::PREFLIGHT_SEMANTIC_DIFF_RULE_ID,
        outcome,
        "synthesized from semantic_preflight_report exit code",
    )
    .expect("static rule id is non-empty");
    Some(compose_policy_outcomes(vec![contribution], None))
}
/// Snapshots the current and candidate stores, runs the semantic diff, and
/// composes the policy decision that gates a restore.
///
/// Returns the JSON report together with the exit class it implies; errors
/// are store-read or layout-resolution failures.
fn semantic_preflight_report(
    current_store: Option<&Path>,
    candidate_store: &Path,
    acknowledge_recovery_risk: bool,
) -> Result<(serde_json::Value, Exit), Exit> {
    // Snapshot the current store from the explicit path, or from the resolved
    // default layout when none was supplied.
    let current = if let Some(current_path) = current_store {
        read_store_snapshot(current_path, "current")?
    } else {
        let layout = DataLayout::resolve(None, None)?;
        read_store_snapshot(&layout.db_path, "current")?
    };
    let candidate = read_store_snapshot(candidate_store, "candidate")?;
    let diff = current.diff_against_restore(&candidate);
    let composed = policy::compose_semantic_diff_decision(
        &diff,
        acknowledge_recovery_risk,
        "restore.semantic_diff",
        &format!("candidate_store:{}", candidate_store.display()),
    );
    let decision = composed.legacy;
    // Only a hard precondition failure blocks; clean and warning both pass.
    let exit = if matches!(decision, RestoreDecision::PreconditionUnmet { .. }) {
        Exit::PreconditionUnmet
    } else {
        Exit::Ok
    };
    let report = json!({
        "required": true,
        "executed": true,
        "status": diff.severity(),
        "current_snapshot_id": diff.current_snapshot_id,
        "candidate_snapshot_id": diff.restored_snapshot_id,
        "decision": decision,
        "policy_decision": policy::policy_decision_report(&composed.decision),
        "change_count": diff.changes.len(),
        "changes": diff.changes,
    });
    Ok((report, exit))
}
/// Derives a sibling, process-unique, hidden temp directory name from the
/// stage directory's final path component, falling back to a generic name
/// when the path has no usable final component.
fn temporary_stage_dir(stage_dir: &Path) -> PathBuf {
    let base_name = match stage_dir.file_name().and_then(|name| name.to_str()) {
        Some(name) => name,
        None => "restore-stage",
    };
    let hidden_name = format!(".{base_name}.tmp-{}", std::process::id());
    stage_dir.with_file_name(hidden_name)
}
/// Copies one verified artifact into the stage directory, reporting and
/// returning `Exit::Internal` when the copy fails.
fn copy_verified_artifact(source: &Path, destination: &Path) -> Result<(), Exit> {
    match fs::copy(source, destination) {
        Ok(_) => Ok(()),
        Err(err) => {
            eprintln!(
                "cortex restore stage: failed to copy verified artifact `{}` to `{}`: {err}. active store was not changed.",
                source.display(),
                destination.display()
            );
            Err(Exit::Internal)
        }
    }
}
/// Re-verifies a staged artifact against the manifest before it may be
/// applied: it must exist, be a regular file, and match the expected size and
/// BLAKE3 digest exactly. Size/digest mismatches quarantine the input.
fn verify_staged_artifact(path: &Path, expected: &VerifiedArtifact) -> Result<(), Exit> {
    let metadata = match fs::metadata(path) {
        Ok(metadata) => metadata,
        Err(_) => {
            eprintln!(
                "cortex restore apply-stage: staged `{}` artifact `{}` is missing. active store was not changed.",
                expected.manifest_field,
                path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    if !metadata.is_file() {
        eprintln!(
            "cortex restore apply-stage: staged `{}` artifact `{}` is not a file. active store was not changed.",
            expected.manifest_field,
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Cheap size gate before the full content hash.
    let actual_size = metadata.len();
    if actual_size != expected.size_bytes {
        eprintln!(
            "cortex restore apply-stage: staged `{}` artifact `{}` size mismatch: manifest={}, actual={}. active store was not changed.",
            expected.manifest_field,
            path.display(),
            expected.size_bytes,
            actual_size
        );
        return Err(Exit::QuarantinedInput);
    }
    let actual_blake3 = blake3_file(path, expected.manifest_field)?;
    if actual_blake3 == expected.blake3 {
        return Ok(());
    }
    eprintln!(
        "cortex restore apply-stage: staged `{}` artifact `{}` digest mismatch: manifest={}, actual={}. active store was not changed.",
        expected.manifest_field,
        path.display(),
        expected.blake3,
        actual_blake3
    );
    Err(Exit::QuarantinedInput)
}
/// Verifies the ACTIVE JSONL mirror after cutover against the manifest's
/// expected artifact: existence, regular-file type, exact size, exact BLAKE3.
///
/// Twin of `verify_staged_artifact`, but runs after the active store may
/// already have been mutated — so failure messages point the operator at
/// recovery evidence rather than claiming the store is unchanged.
fn verify_active_jsonl_artifact(path: &Path, expected: &VerifiedArtifact) -> Result<(), Exit> {
    let metadata = fs::metadata(path).map_err(|_| {
        eprintln!(
            "cortex restore apply-stage: post-restore active `{}` artifact `{}` is missing. recovery evidence may be required before continuing writes.",
            expected.manifest_field,
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    if !metadata.is_file() {
        eprintln!(
            "cortex restore apply-stage: post-restore active `{}` artifact `{}` is not a file. recovery evidence may be required before continuing writes.",
            expected.manifest_field,
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Cheap size gate before hashing the full file.
    if metadata.len() != expected.size_bytes {
        eprintln!(
            "cortex restore apply-stage: post-restore active `{}` artifact `{}` size mismatch: manifest={}, actual={}. recovery evidence may be required before continuing writes.",
            expected.manifest_field,
            path.display(),
            expected.size_bytes,
            metadata.len()
        );
        return Err(Exit::QuarantinedInput);
    }
    let actual_blake3 = blake3_file(path, expected.manifest_field)?;
    if actual_blake3 != expected.blake3 {
        eprintln!(
            "cortex restore apply-stage: post-restore active `{}` artifact `{}` digest mismatch: manifest={}, actual={}. recovery evidence may be required before continuing writes.",
            expected.manifest_field,
            path.display(),
            expected.blake3,
            actual_blake3
        );
        return Err(Exit::QuarantinedInput);
    }
    Ok(())
}
/// Runs the post-restore verification suite after a temp-test apply: active
/// JSONL digest, JSONL audit chain, semantic diff between the staged candidate
/// and the restored active store, and the (optional) anchor checks.
///
/// Returns the aggregate report, or the first failing gate's exit. By this
/// point the active store HAS been mutated, so failures direct the operator
/// toward the recovery evidence.
fn post_restore_verification_report(
    verified: &VerifiedBackup,
    layout: &DataLayout,
    staged_sqlite: &Path,
    acknowledge_recovery_risk: bool,
    anchor: Option<&Path>,
    anchor_history: Option<&Path>,
) -> Result<serde_json::Value, Exit> {
    verify_active_jsonl_artifact(&layout.event_log_path, &verified.jsonl_mirror)?;
    let audit = audit_verify_active_jsonl(&layout.event_log_path)?;
    // Diff the staged candidate (as "current") against the restored active
    // store (as "candidate") so the gate catches divergence introduced by the
    // cutover itself.
    let (mut semantic_diff, semantic_exit) = semantic_preflight_report(
        Some(staged_sqlite),
        &layout.db_path,
        acknowledge_recovery_risk,
    )?;
    if semantic_exit != Exit::Ok {
        eprintln!(
            "cortex restore apply-stage: post-restore semantic gate rejected the active store. recovery evidence may be required before continuing writes."
        );
        return Err(semantic_exit);
    }
    // Annotate the report so readers know which direction was compared.
    if let Some(object) = semantic_diff.as_object_mut() {
        object.insert(
            "comparison".to_string(),
            json!("staged_candidate_to_restored_active_store"),
        );
    }
    let anchors = post_restore_anchor_report(&layout.event_log_path, anchor, anchor_history)?;
    let production_anchor_authority = anchors["production_anchor_authority"]
        .as_bool()
        .unwrap_or(false);
    Ok(json!({
        "status": if production_anchor_authority {
            "verified"
        } else {
            "verified_for_temp_test_anchor_authority_unproven"
        },
        "manifest_artifacts": {
            "status": "source_verified_active_jsonl_verified_sqlite_payload_semantically_verified",
            "sqlite_store": {
                // The active SQLite gains a command-audit row during apply, so
                // no exact final digest is claimed — only the source digest.
                "status": "source_verified_before_cutover",
                "restored_payload_blake3": verified.sqlite_store.blake3,
                "active_exact_digest": "not_claimed_final_sqlite_contains_command_audit_row",
                "final_active_digest_claimed": false,
                "path": layout.db_path,
                "manifest_blake3": verified.sqlite_store.blake3,
            },
            "jsonl_mirror": {
                "status": "active_digest_verified",
                "path": layout.event_log_path,
                "manifest_blake3": verified.jsonl_mirror.blake3,
            },
        },
        "jsonl_audit": audit_report(&audit),
        "semantic_diff": semantic_diff,
        "anchors": anchors,
        "production_eligible": false,
        "production_blockers": [
            "schema_v2_cutover_incomplete",
            "production_lock_not_held_across_mutation",
            "external_anchor_authority_unproven",
            "production_identity_attestation_missing",
        ],
    }))
}
/// Builds the post-restore anchors section of the verification report.
///
/// With neither anchor input configured, reports `not_configured` without
/// failing (temp-test applies may proceed). Otherwise verifies whichever of
/// the single anchor and anchor history were supplied, failing on the first
/// verification error. Local anchors never grant production anchor authority.
fn post_restore_anchor_report(
    event_log_path: &Path,
    anchor: Option<&Path>,
    anchor_history: Option<&Path>,
) -> Result<serde_json::Value, Exit> {
    if anchor.is_none() && anchor_history.is_none() {
        return Ok(json!({
            "required_for_production": true,
            "executed": false,
            "status": "not_configured",
            "single_anchor": null,
            "anchor_history": null,
            "production_anchor_authority": false,
            "reason": "no post-restore anchor or anchor history was provided; temp-test apply may continue, but production restore remains blocked",
        }));
    }
    let single_anchor = match anchor {
        Some(anchor_path) => verify_post_restore_anchor(event_log_path, anchor_path)?,
        None => json!({
            "requested": false,
        }),
    };
    let anchor_history = match anchor_history {
        Some(history_path) => verify_post_restore_anchor_history(event_log_path, history_path)?,
        None => json!({
            "requested": false,
        }),
    };
    Ok(json!({
        "required_for_production": true,
        "executed": true,
        "status": "verified_local_weak_anchor_evidence",
        "single_anchor": single_anchor,
        "anchor_history": anchor_history,
        "production_anchor_authority": false,
        "reason": "local anchor verification passed where provided, but local anchors do not prove disjoint external append-only authority for production restore",
    }))
}
/// Reads, parses, and verifies a single post-restore anchor file against the
/// restored JSONL; returns a JSON summary on success, or logs and maps the
/// failure to an exit code.
fn verify_post_restore_anchor(
    event_log_path: &Path,
    anchor_path: &Path,
) -> Result<serde_json::Value, Exit> {
    let text = match fs::read_to_string(anchor_path) {
        Ok(text) => text,
        Err(err) => {
            eprintln!(
                "cortex restore apply-stage: cannot read post-restore anchor `{}`: {err}. recovery evidence may be required before continuing writes.",
                anchor_path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    let anchor = match parse_anchor(&text) {
        Ok(anchor) => anchor,
        Err(err) => {
            eprintln!(
                "cortex restore apply-stage: invalid post-restore anchor `{}`: {err}. recovery evidence may be required before continuing writes.",
                anchor_path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    let verified = match verify_anchor(event_log_path, &anchor) {
        Ok(verified) => verified,
        Err(err) => {
            eprintln!(
                "cortex restore apply-stage: post-restore anchor verification failed for `{}` against `{}`: {err}. recovery evidence may be required before continuing writes.",
                anchor_path.display(),
                event_log_path.display()
            );
            return Err(map_anchor_verify_err(&err));
        }
    };
    Ok(json!({
        "requested": true,
        "status": "verified",
        "path": anchor_path,
        "event_count": verified.anchor.event_count,
        "rows_scanned": verified.db_count,
    }))
}
/// Verifies an anchor-history file against the restored JSONL and reports the
/// verified counts; on failure, logs and maps the error to an exit code.
fn verify_post_restore_anchor_history(
    event_log_path: &Path,
    history_path: &Path,
) -> Result<serde_json::Value, Exit> {
    let verified = match verify_anchor_history(event_log_path, history_path) {
        Ok(verified) => verified,
        Err(err) => {
            eprintln!(
                "cortex restore apply-stage: post-restore anchor history verification failed for `{}` against `{}`: {err}. recovery evidence may be required before continuing writes.",
                history_path.display(),
                event_log_path.display()
            );
            return Err(map_anchor_history_verify_err(&err));
        }
    };
    Ok(json!({
        "requested": true,
        "status": "verified",
        "path": history_path,
        "anchors_verified": verified.anchors_verified,
        "latest_event_count": verified.latest_anchor.event_count,
        "rows_scanned": verified.db_count,
    }))
}
/// Replaces the active SQLite store and JSONL mirror with the staged
/// artifacts, preparing recovery evidence first so a failed cutover can be
/// rolled back.
///
/// Sequence: precondition checks (data dir exists, no stale recovery dir, no
/// stale temp files) → copy staged files to process-unique temp paths →
/// append the command-audit row into the temp SQLite → back up the current
/// active files as recovery evidence → rename temp SQLite over the active DB
/// → rename temp JSONL over the active mirror. The two renames are the only
/// steps that mutate the active store; a failure between them triggers
/// rollback from the recovery backups.
fn apply_staged_active_store(
    layout: &DataLayout,
    staged_sqlite: &Path,
    staged_jsonl: &Path,
    audit_entry: &AuditEntry,
) -> Result<ApplyRecoveryEvidence, Exit> {
    let active_parent = layout.db_path.parent().unwrap_or(&layout.data_dir);
    if !active_parent.exists() {
        eprintln!(
            "cortex restore apply-stage: active data directory `{}` does not exist; active store was not changed.",
            active_parent.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // A leftover recovery dir means a previous apply may not have completed;
    // refuse to proceed until the operator inspects or removes it.
    let recovery_dir = active_parent.join(APPLY_RECOVERY_DIR_NAME);
    if recovery_dir.exists() {
        eprintln!(
            "cortex restore apply-stage: recovery evidence directory `{}` already exists; active store was not changed. inspect or remove it before retrying.",
            recovery_dir.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Process-unique temp names so concurrent processes cannot collide.
    let tmp_db = layout
        .db_path
        .with_file_name(format!(".cortex.db.apply-stage-{}", std::process::id()));
    let tmp_jsonl = layout
        .event_log_path
        .with_file_name(format!(".events.jsonl.apply-stage-{}", std::process::id()));
    if tmp_db.exists() || tmp_jsonl.exists() {
        eprintln!(
            "cortex restore apply-stage: temporary apply files already exist; active store was not changed."
        );
        return Err(Exit::PreconditionUnmet);
    }
    if let Err(err) = fs::copy(staged_sqlite, &tmp_db) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore apply-stage: failed to prepare staged SQLite copy `{}`: {err}. active store was not changed.",
            tmp_db.display()
        );
        return Err(Exit::Internal);
    }
    if let Err(err) = fs::copy(staged_jsonl, &tmp_jsonl) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore apply-stage: failed to prepare staged JSONL copy `{}`: {err}. active store was not changed.",
            tmp_jsonl.display()
        );
        return Err(Exit::Internal);
    }
    // The audit row is written into the TEMP copy, before any active path is
    // touched, so a failure here leaves the active store untouched.
    if let Err(exit) = append_restore_command_audit(&tmp_db, audit_entry, "restore apply-stage") {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        return Err(exit);
    }
    let recovery_evidence = match prepare_apply_recovery_evidence(
        layout,
        staged_sqlite,
        staged_jsonl,
        &tmp_db,
        &tmp_jsonl,
    ) {
        Ok(recovery_evidence) => recovery_evidence,
        Err(exit) => {
            cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
            return Err(exit);
        }
    };
    // First mutation of the active store: replace the SQLite file.
    if let Err(err) = fs::rename(&tmp_db, &layout.db_path) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore apply-stage: failed to replace active SQLite store `{}`: {err}. active store was not changed. recovery evidence is available at `{}`.",
            layout.db_path.display(),
            recovery_evidence.manifest_path.display()
        );
        return Err(Exit::Internal);
    }
    // Second mutation: replace the JSONL mirror. If this fails the store is
    // inconsistent (new SQLite, old JSONL) — attempt rollback from backups.
    if let Err(err) = fs::rename(&tmp_jsonl, &layout.event_log_path) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        let rollback = restore_current_backups(layout, &recovery_evidence);
        match rollback {
            Ok(()) => {
                eprintln!(
                    "cortex restore apply-stage: failed to replace active JSONL mirror `{}` after SQLite replacement: {err}. active backups were restored; recovery evidence is available at `{}`.",
                    layout.event_log_path.display(),
                    recovery_evidence.manifest_path.display()
                );
                return Err(Exit::Internal);
            }
            Err(rollback_err) => {
                eprintln!(
                    "cortex restore apply-stage: failed to replace active JSONL mirror `{}` after SQLite replacement: {err}. rollback from recovery backups failed: {rollback_err}. active store may be partially mutated; recovery evidence is available at `{}`.",
                    layout.event_log_path.display(),
                    recovery_evidence.manifest_path.display()
                );
                return Err(rollback_err.to_exit());
            }
        }
    }
    Ok(recovery_evidence)
}
/// Creates the recovery-evidence directory next to the active store, backs up
/// the current SQLite store and JSONL mirror into it, and writes a manifest
/// describing how to roll back if the subsequent cutover fails.
///
/// Every failure path removes the recovery directory again so a later
/// apply-stage attempt is not blocked by the "recovery evidence directory
/// already exists" precondition. Returns the evidence handles (backup paths
/// and digests) that `restore_current_backups` uses for rollback.
fn prepare_apply_recovery_evidence(
    layout: &DataLayout,
    staged_sqlite: &Path,
    staged_jsonl: &Path,
    tmp_db: &Path,
    tmp_jsonl: &Path,
) -> Result<ApplyRecoveryEvidence, Exit> {
    let active_parent = layout.db_path.parent().unwrap_or(&layout.data_dir);
    let recovery_dir = active_parent.join(APPLY_RECOVERY_DIR_NAME);
    // `create_dir` (not create_dir_all): an existing dir is a precondition
    // failure surfaced by the caller, and creation must be exclusive.
    fs::create_dir(&recovery_dir).map_err(|err| {
        eprintln!(
            "cortex restore apply-stage: failed to create recovery evidence directory `{}`: {err}. active store was not changed.",
            recovery_dir.display()
        );
        Exit::Internal
    })?;
    let active_db_backup = recovery_dir.join("current.cortex.db");
    let active_event_log_backup = recovery_dir.join("current.events.jsonl");
    if let Err(err) = fs::copy(&layout.db_path, &active_db_backup) {
        cleanup_recovery_dir(&recovery_dir);
        eprintln!(
            "cortex restore apply-stage: failed to back up active SQLite store `{}` to `{}`: {err}. active store was not changed.",
            layout.db_path.display(),
            active_db_backup.display()
        );
        return Err(Exit::Internal);
    }
    if let Err(err) = fs::copy(&layout.event_log_path, &active_event_log_backup) {
        cleanup_recovery_dir(&recovery_dir);
        eprintln!(
            "cortex restore apply-stage: failed to back up active JSONL mirror `{}` to `{}`: {err}. active store was not changed.",
            layout.event_log_path.display(),
            active_event_log_backup.display()
        );
        return Err(Exit::Internal);
    }
    let manifest_path = recovery_dir.join(APPLY_RECOVERY_MANIFEST_NAME);
    // Hash the backups now so rollback can verify the copies it restores.
    // Fix: these two digest failures previously propagated with `?` and leaked
    // the recovery directory, which then blocked every retry on the "recovery
    // evidence directory already exists" precondition; clean up like the
    // other failure paths in this function.
    let active_db_backup_blake3 = match blake3_file(&active_db_backup, "active_db_backup") {
        Ok(digest) => digest,
        Err(exit) => {
            cleanup_recovery_dir(&recovery_dir);
            return Err(exit);
        }
    };
    let active_event_log_backup_blake3 =
        match blake3_file(&active_event_log_backup, "active_event_log_backup") {
            Ok(digest) => digest,
            Err(exit) => {
                cleanup_recovery_dir(&recovery_dir);
                return Err(exit);
            }
        };
    let manifest = json!({
        "kind": "cortex_restore_apply_stage_recovery_manifest",
        "schema_version": 1,
        "scope": "temp-test",
        "status": "prepared_before_active_replacement",
        "active": {
            "db": layout.db_path,
            "event_log": layout.event_log_path,
        },
        "active_backups": {
            "db": {
                "path": active_db_backup,
                "size_bytes": fs::metadata(&active_db_backup).map(|m| m.len()).unwrap_or(0),
                "blake3": active_db_backup_blake3,
            },
            "event_log": {
                "path": active_event_log_backup,
                "size_bytes": fs::metadata(&active_event_log_backup).map(|m| m.len()).unwrap_or(0),
                "blake3": active_event_log_backup_blake3,
            },
        },
        "staged": {
            "db": staged_sqlite,
            "event_log": staged_jsonl,
        },
        "temporary_apply": {
            "db": tmp_db,
            "event_log": tmp_jsonl,
        },
        "operator_action": "If apply-stage exits non-zero after active replacement begins, restore active paths from active_backups before continuing temp-test writes.",
    });
    let manifest_bytes = serde_json::to_vec_pretty(&manifest).map_err(|err| {
        cleanup_recovery_dir(&recovery_dir);
        eprintln!(
            "cortex restore apply-stage: failed to serialize recovery manifest `{}`: {err}. active store was not changed.",
            manifest_path.display()
        );
        Exit::Internal
    })?;
    if let Err(err) = fs::write(&manifest_path, manifest_bytes) {
        cleanup_recovery_dir(&recovery_dir);
        eprintln!(
            "cortex restore apply-stage: failed to write recovery manifest `{}`: {err}. active store was not changed.",
            manifest_path.display()
        );
        return Err(Exit::Internal);
    }
    Ok(ApplyRecoveryEvidence {
        manifest_path,
        active_db_backup,
        active_event_log_backup,
        active_db_backup_blake3,
        active_event_log_backup_blake3,
    })
}
/// Rolls the active store back from the recovery backups: copies each backup
/// over its active path, then re-hashes the destination against the digest
/// recorded when the backup was taken (DB first, then the event log).
pub(super) fn restore_current_backups(
    layout: &DataLayout,
    recovery: &ApplyRecoveryEvidence,
) -> Result<(), RestoreCurrentBackupsError> {
    let rollback_steps = [
        (
            &recovery.active_db_backup,
            &layout.db_path,
            &recovery.active_db_backup_blake3,
            "active_db",
        ),
        (
            &recovery.active_event_log_backup,
            &layout.event_log_path,
            &recovery.active_event_log_backup_blake3,
            "active_event_log",
        ),
    ];
    for (backup, destination, expected_blake3, label) in rollback_steps {
        fs::copy(backup, destination).map_err(RestoreCurrentBackupsError::Io)?;
        verify_rollback_backup_digest(destination, expected_blake3, label)?;
    }
    Ok(())
}
/// Re-hashes a rollback destination and compares it with the digest recorded
/// for the backup; any mismatch refuses to mark the rollback complete, since
/// the active store may then be in a partial state.
fn verify_rollback_backup_digest(
    destination: &Path,
    expected_blake3: &str,
    field_label: &str,
) -> Result<(), RestoreCurrentBackupsError> {
    let observed = match blake3_file(destination, field_label) {
        Ok(digest) => digest,
        Err(_) => return Err(RestoreCurrentBackupsError::DigestRead),
    };
    if observed != expected_blake3 {
        eprintln!(
            "cortex restore rollback: {invariant}: {field_label} backup digest mismatch on `{path}`: expected {expected}, observed {observed}. \
            the pre-cutover backup recorded a different BLAKE3 than the file the rollback `fs::copy` produced; \
            refusing to mark the rollback complete. active store may be in a partial state — manual recovery from \
            a verified backup is required before continuing writes.",
            invariant = RESTORE_RECOVER_ROLLBACK_BACKUP_DIGEST_MISMATCH_INVARIANT,
            path = destination.display(),
            expected = expected_blake3,
            observed = observed,
        );
        return Err(RestoreCurrentBackupsError::DigestMismatch {
            field: field_label.to_string(),
            expected: expected_blake3.to_string(),
            observed,
        });
    }
    Ok(())
}
/// Failure modes of rolling the active store back from recovery backups.
#[derive(Debug)]
pub(super) enum RestoreCurrentBackupsError {
    /// A rollback `fs::copy` failed outright.
    Io(std::io::Error),
    /// The rolled-back file's digest differs from the recorded backup digest.
    DigestMismatch {
        field: String,
        expected: String,
        observed: String,
    },
    /// The rollback destination could not be re-hashed for verification.
    DigestRead,
}
impl std::fmt::Display for RestoreCurrentBackupsError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Io(err) => write!(f, "rollback copy failed: {err}"),
            Self::DigestMismatch {
                field,
                expected,
                observed,
            } => write!(
                f,
                "{}: {field} expected {expected}, observed {observed}",
                RESTORE_RECOVER_ROLLBACK_BACKUP_DIGEST_MISMATCH_INVARIANT
            ),
            Self::DigestRead => f.write_str("rollback destination unreadable for digest re-check"),
        }
    }
}
impl std::error::Error for RestoreCurrentBackupsError {}
impl RestoreCurrentBackupsError {
    /// Maps each rollback failure to its process exit class: plain I/O is an
    /// internal error, while digest problems are integrity failures.
    pub(super) fn to_exit(&self) -> Exit {
        match self {
            Self::Io(_) => Exit::Internal,
            Self::DigestRead | Self::DigestMismatch { .. } => Exit::IntegrityFailure,
        }
    }
}
/// Reads and validates a recovery manifest before any recover-apply mutation:
/// the file must parse, and its kind, schema_version (1), scope (temp-test),
/// and status must match the values written by `prepare_apply_recovery_evidence`.
/// All failures leave the active store untouched.
fn read_apply_recovery_manifest(path: &Path) -> Result<ApplyRecoveryManifest, Exit> {
    let raw = fs::read_to_string(path).map_err(|err| {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` could not be read: {err}. active store was not changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    let manifest: ApplyRecoveryManifest = serde_json::from_str(&raw).map_err(|err| {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` is malformed: {err}. active store was not changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    if manifest.kind != "cortex_restore_apply_stage_recovery_manifest" {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` has invalid kind `{}`; expected cortex_restore_apply_stage_recovery_manifest. active store was not changed.",
            path.display(),
            manifest.kind
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Schema-version mismatch gets its own exit class so callers can tell it
    // apart from a generic precondition failure.
    if manifest.schema_version != 1 {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` has schema_version {}; expected schema_version 1. active store was not changed.",
            path.display(),
            manifest.schema_version
        );
        return Err(Exit::SchemaMismatch);
    }
    if manifest.scope != "temp-test" {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` has scope `{}`; expected temp-test. active store was not changed.",
            path.display(),
            manifest.scope
        );
        return Err(Exit::PreconditionUnmet);
    }
    if manifest.status != "prepared_before_active_replacement" {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` has unsupported status `{}`. active store was not changed.",
            path.display(),
            manifest.status
        );
        return Err(Exit::PreconditionUnmet);
    }
    Ok(manifest)
}
/// Restricts recover-apply to the temp-test scope: both active paths must
/// share one parent data directory, and that directory must live under the
/// system temp directory. All failures leave the active store untouched.
fn verify_apply_recovery_scope(manifest: &ApplyRecoveryManifest) -> Result<(), Exit> {
    let active_db_parent = match manifest.active.db.parent() {
        Some(parent) => parent,
        None => {
            eprintln!(
                "cortex restore recover-apply: active SQLite path has no parent directory; active store was not changed."
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    let shares_data_dir = manifest.active.event_log.parent() == Some(active_db_parent);
    if !shares_data_dir {
        eprintln!(
            "cortex restore recover-apply: active SQLite and JSONL paths do not share a data directory; active store was not changed."
        );
        return Err(Exit::PreconditionUnmet);
    }
    if is_temp_test_data_dir(active_db_parent) {
        Ok(())
    } else {
        eprintln!(
            "cortex restore recover-apply: active data directory `{}` is not under the system temp directory; active store was not changed.",
            active_db_parent.display()
        );
        Err(Exit::PreconditionUnmet)
    }
}
/// Verifies one backup artifact referenced by a recovery manifest before it
/// may be restored: it must exist, be a regular file, live inside the
/// manifest's own directory (no path escape), and match the recorded size and
/// BLAKE3. Missing/misplaced artifacts are precondition failures; size or
/// digest mismatches quarantine the input.
fn verify_apply_recovery_backup(
    manifest_path: &Path,
    field: &str,
    backup: &ApplyRecoveryBackupArtifact,
) -> Result<(), Exit> {
    let manifest_dir = manifest_path.parent().unwrap_or_else(|| Path::new("."));
    let metadata = fs::metadata(&backup.path).map_err(|_| {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` references missing `{field}` artifact `{}`. active store was not changed.",
            manifest_path.display(),
            backup.path.display()
        );
        Exit::PreconditionUnmet
    })?;
    if !metadata.is_file() {
        eprintln!(
            "cortex restore recover-apply: recovery manifest `{}` references non-file `{field}` artifact `{}`. active store was not changed.",
            manifest_path.display(),
            backup.path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Containment check: a manifest must only ever point at artifacts inside
    // its own recovery directory.
    if !path_is_within(&backup.path, manifest_dir) {
        eprintln!(
            "cortex restore recover-apply: `{field}` artifact `{}` is outside the recovery manifest directory. active store was not changed.",
            backup.path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Cheap size gate before hashing the full file.
    if metadata.len() != backup.size_bytes {
        eprintln!(
            "cortex restore recover-apply: `{field}` artifact `{}` size mismatch: manifest={}, actual={}. active store was not changed.",
            backup.path.display(),
            backup.size_bytes,
            metadata.len()
        );
        return Err(Exit::QuarantinedInput);
    }
    let actual_blake3 = blake3_file(&backup.path, field)?;
    if backup.blake3 != actual_blake3 {
        eprintln!(
            "cortex restore recover-apply: `{field}` artifact `{}` digest mismatch: manifest={}, actual={}. active store was not changed.",
            backup.path.display(),
            backup.blake3,
            actual_blake3
        );
        return Err(Exit::QuarantinedInput);
    }
    Ok(())
}
/// Ensures both active-store targets are safe to overwrite
/// (each must be a regular file or absent).
fn verify_apply_recovery_targets(manifest: &ApplyRecoveryManifest) -> Result<(), Exit> {
    let active = &manifest.active;
    verify_apply_recovery_target("active SQLite", &active.db)?;
    verify_apply_recovery_target("active JSONL", &active.event_log)?;
    Ok(())
}
/// Checks a single restore target path: absent is fine (the restore will
/// create it), a regular file is fine, a symlink or any other file type is
/// rejected with `Exit::PreconditionUnmet`.
///
/// Uses `symlink_metadata` so a symlink is seen as a symlink instead of
/// being silently followed.
fn verify_apply_recovery_target(label: &str, path: &Path) -> Result<(), Exit> {
    let metadata = match fs::symlink_metadata(path) {
        // Missing target: nothing to protect, the restore may create it.
        Err(_) => return Ok(()),
        Ok(metadata) => metadata,
    };
    let file_type = metadata.file_type();
    if file_type.is_symlink() {
        eprintln!(
            "cortex restore recover-apply: {label} path `{}` is a symlink; active store was not changed.",
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    if file_type.is_file() {
        return Ok(());
    }
    eprintln!(
        "cortex restore recover-apply: {label} path `{}` is not a regular file; active store was not changed.",
        path.display()
    );
    Err(Exit::PreconditionUnmet)
}
/// Restores the active store from the recovery-manifest backups.
///
/// Flow: copy each backup to a process-unique temporary sibling of the
/// active file, re-verify each copy's BLAKE3 against the manifest, append
/// the persisted command-audit row to the temporary SQLite copy, then copy
/// the verified temporaries over the active paths. Any failure before the
/// first overwrite leaves the active store untouched; failures after it are
/// reported as possibly requiring manual recovery from the backups.
fn restore_apply_recovery_backups(
    manifest: &ApplyRecoveryManifest,
    audit_entry: &AuditEntry,
) -> Result<(), Exit> {
    // Temporary names embed the PID so concurrent invocations do not collide.
    let tmp_db = manifest
        .active
        .db
        .with_file_name(format!(".cortex.db.recover-apply-{}", std::process::id()));
    let tmp_jsonl = manifest.active.event_log.with_file_name(format!(
        ".events.jsonl.recover-apply-{}",
        std::process::id()
    ));
    // Refuse to clobber leftovers from a previous (possibly crashed) run.
    if tmp_db.exists() || tmp_jsonl.exists() {
        eprintln!(
            "cortex restore recover-apply: temporary recovery files already exist; active store was not changed."
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Stage the SQLite backup and confirm the fresh copy still matches the
    // manifest digest.
    if let Err(err) = fs::copy(&manifest.active_backups.db.path, &tmp_db) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore recover-apply: failed to prepare active SQLite backup copy `{}`: {err}. active store was not changed.",
            tmp_db.display()
        );
        return Err(Exit::Internal);
    }
    if let Err(err) = verify_rollback_backup_digest(
        &tmp_db,
        &manifest.active_backups.db.blake3,
        "active_db_backup_copy",
    ) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        return Err(err.to_exit());
    }
    // Stage the JSONL backup the same way.
    if let Err(err) = fs::copy(&manifest.active_backups.event_log.path, &tmp_jsonl) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore recover-apply: failed to prepare active JSONL backup copy `{}`: {err}. active store was not changed.",
            tmp_jsonl.display()
        );
        return Err(Exit::Internal);
    }
    if let Err(err) = verify_rollback_backup_digest(
        &tmp_jsonl,
        &manifest.active_backups.event_log.blake3,
        "active_event_log_backup_copy",
    ) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        return Err(err.to_exit());
    }
    // The audit row is appended to the temporary copy, so a failed append
    // cannot leave a half-audited active store.
    if let Err(exit) = append_restore_command_audit(&tmp_db, audit_entry, "restore recover-apply") {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        return Err(exit);
    }
    // Point of no return: the next two copies overwrite the active store.
    if let Err(err) = fs::copy(&tmp_db, &manifest.active.db) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore recover-apply: failed to restore active SQLite backup to `{}`: {err}. active store may require manual recovery from the manifest backups.",
            manifest.active.db.display()
        );
        return Err(Exit::Internal);
    }
    if let Err(err) = fs::copy(&tmp_jsonl, &manifest.active.event_log) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        eprintln!(
            "cortex restore recover-apply: failed to restore active JSONL backup to `{}`: {err}. active store may require manual recovery from the manifest backups.",
            manifest.active.event_log.display()
        );
        return Err(Exit::Internal);
    }
    // Post-restore digest check on the JSONL only: the SQLite temporary was
    // modified by the audit append above, so its digest no longer matches
    // the manifest value.
    if let Err(err) = verify_rollback_backup_digest(
        &manifest.active.event_log,
        &manifest.active_backups.event_log.blake3,
        "active_event_log_post_restore",
    ) {
        cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
        return Err(err.to_exit());
    }
    cleanup_temporary_apply(&tmp_db, &tmp_jsonl);
    Ok(())
}
/// Builds the persisted command-audit row recorded by `restore apply-stage`.
///
/// `before_hash` is the digest of the active SQLite store prior to the
/// replacement (`None` when no prior digest was captured); `after_hash` is
/// the verified digest of the restored SQLite payload.
///
/// NOTE(review): this function currently never returns `Err`; the `Result`
/// return appears to keep call sites uniform with fallible builders.
fn restore_apply_stage_audit_entry(
    args: &ApplyStageArgs,
    layout: &DataLayout,
    verified: &VerifiedBackup,
    before_hash: Option<String>,
) -> Result<AuditEntry, Exit> {
    // Evidence bundle: every input path and digest that fed the apply.
    let source_refs_json = json!({
        "manifest": args.manifest,
        "stage_dir": args.stage_dir,
        "active_db": layout.db_path,
        "active_event_log": layout.event_log_path,
        "sqlite_store_blake3": verified.sqlite_store.blake3,
        "jsonl_mirror_blake3": verified.jsonl_mirror.blake3,
        "after_hash_semantics": "verified restored SQLite payload before command audit row append; final active SQLite digest is not self-claimed by the row",
        "scope": "temp-test",
    });
    Ok(AuditEntry {
        id: AuditRecordId::new(),
        operation: RESTORE_APPLY_STAGE_COMMAND_AUDIT_OPERATION.to_string(),
        target_ref: format!("active_store:{}", layout.db_path.display()),
        before_hash,
        after_hash: verified.sqlite_store.blake3.clone(),
        reason: "temp-test restore apply-stage replaced active store after manifest, audit, and semantic guards".to_string(),
        actor_json: json!({"kind": "cli", "command": "restore apply-stage", "scope": "temp-test"}),
        source_refs_json,
        created_at: chrono::Utc::now(),
    })
}
/// Builds the persisted command-audit row recorded by `restore recover-apply`.
///
/// `before_hash` captures the active SQLite digest prior to the restore
/// (`None` when absent); `after_hash` is the manifest-recorded digest of the
/// SQLite backup being restored. Never fails today; the `Result` keeps the
/// signature parallel with the other audit-entry builders.
fn restore_recover_apply_audit_entry(
    args: &RecoverApplyArgs,
    manifest: &ApplyRecoveryManifest,
    before_hash: Option<String>,
) -> Result<AuditEntry, Exit> {
    let backups = &manifest.active_backups;
    // Evidence bundle: every path and digest involved in the recovery.
    let source_refs_json = json!({
        "manifest": args.manifest,
        "active_db": manifest.active.db,
        "active_event_log": manifest.active.event_log,
        "active_db_backup": backups.db.path,
        "active_event_log_backup": backups.event_log.path,
        "active_db_backup_blake3": backups.db.blake3,
        "active_event_log_backup_blake3": backups.event_log.blake3,
        "after_hash_semantics": "verified recovered SQLite backup payload before command audit row append; final active SQLite digest is not self-claimed by the row",
        "scope": manifest.scope,
    });
    let actor_json =
        json!({"kind": "cli", "command": "restore recover-apply", "scope": manifest.scope});
    let entry = AuditEntry {
        id: AuditRecordId::new(),
        operation: RESTORE_RECOVER_APPLY_COMMAND_AUDIT_OPERATION.to_string(),
        target_ref: format!("active_store:{}", manifest.active.db.display()),
        before_hash,
        after_hash: backups.db.blake3.clone(),
        reason: "temp-test restore recover-apply restored active backups after recovery manifest verification".to_string(),
        actor_json,
        source_refs_json,
        created_at: chrono::Utc::now(),
    };
    Ok(entry)
}
/// Opens the SQLite store at `db_path` and appends the persisted command
/// audit row. Open failures map to `Exit::PreconditionUnmet`, append
/// failures to `Exit::Internal`; both print a `cortex {command}`-prefixed
/// message to stderr.
fn append_restore_command_audit(
    db_path: &Path,
    audit_entry: &AuditEntry,
    command: &str,
) -> Result<(), Exit> {
    let pool = match cortex_store::Pool::open(db_path) {
        Ok(pool) => pool,
        Err(err) => {
            eprintln!(
                "cortex {command}: failed to open SQLite store `{}` for persisted command audit row: {err}. active store was not changed.",
                db_path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    match AuditRepo::new(&pool).append(audit_entry) {
        Ok(()) => Ok(()),
        Err(err) => {
            eprintln!(
                "cortex {command}: failed to append persisted command audit row to `{}`: {err}. active store was not changed.",
                db_path.display()
            );
            Err(Exit::Internal)
        }
    }
}
/// Returns true when `path` canonicalizes to a location under (or equal to)
/// the canonicalized `parent`. Either canonicalization failing (e.g. a path
/// that does not exist) yields false — fail closed.
fn path_is_within(path: &Path, parent: &Path) -> bool {
    match (path.canonicalize(), parent.canonicalize()) {
        (Ok(child), Ok(root)) => child.starts_with(root),
        _ => false,
    }
}
/// Runs the hash-chain audit over the JSONL copied into the stage directory.
///
/// Returns the verification report on success. A report carrying recorded
/// failures prints each failing line and maps to `Exit::IntegrityFailure`;
/// an error from the verifier itself is mapped via `map_jsonl_verify_err`.
///
/// NOTE(review): near-duplicate of `audit_verify_active_jsonl`, differing
/// only in message wording — candidate for one helper taking a label.
fn audit_verify_staged_jsonl(path: &Path) -> Result<Report, Exit> {
    match verify_chain(path) {
        Ok(report) if report.ok() => Ok(report),
        Ok(report) => {
            // Verification ran but flagged rows: surface every failure.
            for failure in &report.failures {
                eprintln!(
                    "cortex restore stage: copied JSONL audit failure at line {}: {:?}",
                    failure.line, failure.reason
                );
            }
            Err(Exit::IntegrityFailure)
        }
        Err(err) => {
            // Verification itself could not complete.
            eprintln!(
                "cortex restore stage: copied JSONL audit verification failed for `{}`: {err}. active store was not changed.",
                path.display()
            );
            Err(map_jsonl_verify_err(&err))
        }
    }
}
/// Runs the hash-chain audit over the active JSONL after a restore.
///
/// Same structure as `audit_verify_staged_jsonl`, but the messages warn
/// that the active store has already been replaced, so a failure here may
/// require recovery evidence before further writes.
fn audit_verify_active_jsonl(path: &Path) -> Result<Report, Exit> {
    match verify_chain(path) {
        Ok(report) if report.ok() => Ok(report),
        Ok(report) => {
            // Verification ran but flagged rows: surface every failure.
            for failure in &report.failures {
                eprintln!(
                    "cortex restore apply-stage: post-restore active JSONL audit failure at line {}: {:?}",
                    failure.line, failure.reason
                );
            }
            Err(Exit::IntegrityFailure)
        }
        Err(err) => {
            // Verification itself could not complete.
            eprintln!(
                "cortex restore apply-stage: post-restore active JSONL audit verification failed for `{}`: {err}. recovery evidence may be required before continuing writes.",
                path.display()
            );
            Err(map_jsonl_verify_err(&err))
        }
    }
}
/// Renders a successful audit report as the JSON object emitted by the CLI.
fn audit_report(report: &Report) -> serde_json::Value {
    let failure_count = report.failures.len();
    json!({
        "status": "verified",
        "path": report.path,
        "rows_scanned": report.rows_scanned,
        "failures": failure_count,
    })
}
/// Maps a JSONL verification error to a process exit class:
/// decode/chain damage is chain corruption, validation problems are unmet
/// preconditions, and I/O or encode failures are internal errors.
fn map_jsonl_verify_err(err: &JsonlError) -> Exit {
    match err {
        JsonlError::Decode { .. } | JsonlError::ChainBroken(_) => Exit::ChainCorruption,
        JsonlError::Validation(_) => Exit::PreconditionUnmet,
        JsonlError::Io { .. } | JsonlError::Encode(_) => Exit::Internal,
    }
}
/// Maps an anchor verification error to a process exit class.
///
/// A wrapped JSONL error defers to `map_jsonl_verify_err`; every
/// anchor-specific failure mode is treated as an integrity failure.
fn map_anchor_verify_err(err: &AnchorVerifyError) -> Exit {
    match err {
        AnchorVerifyError::Jsonl(err) => map_jsonl_verify_err(err),
        AnchorVerifyError::EmptyLedger { .. }
        | AnchorVerifyError::InternalAnchorBuild { .. }
        | AnchorVerifyError::ChainBroken { .. }
        | AnchorVerifyError::Truncated { .. }
        | AnchorVerifyError::MissingPosition { .. }
        | AnchorVerifyError::PositionHashMismatch { .. } => Exit::IntegrityFailure,
    }
}
/// Maps an anchor-history verification error to a process exit class.
///
/// Read/parse problems are unmet preconditions; a wrapped per-anchor error
/// defers to `map_anchor_verify_err`; a non-monotonic history is an
/// integrity failure.
fn map_anchor_history_verify_err(err: &AnchorHistoryVerifyError) -> Exit {
    match err {
        AnchorHistoryVerifyError::ReadHistory { .. } | AnchorHistoryVerifyError::Parse { .. } => {
            Exit::PreconditionUnmet
        }
        AnchorHistoryVerifyError::Anchor { source, .. } => map_anchor_verify_err(source),
        AnchorHistoryVerifyError::NonMonotonic { .. } => Exit::IntegrityFailure,
    }
}
/// Best-effort removal of a temporary staging directory.
///
/// Removal is attempted unconditionally and the result ignored: the former
/// `exists()` guard was racy (TOCTOU) and reports `false` for a broken
/// symlink, leaving the leftover behind — while buying nothing, since the
/// removal error was already being discarded.
fn cleanup_temporary_stage(path: &Path) {
    let _ = fs::remove_dir_all(path);
}
/// Best-effort removal of the temporary recovery copies.
///
/// Removal is attempted unconditionally and errors (including "not found")
/// are ignored: the former `exists()` guards were racy (TOCTOU) and report
/// `false` for a broken symlink, leaving it behind — even though removal
/// errors were already being discarded.
fn cleanup_temporary_apply(tmp_db: &Path, tmp_jsonl: &Path) {
    let _ = fs::remove_file(tmp_db);
    let _ = fs::remove_file(tmp_jsonl);
}
/// Best-effort removal of the apply-recovery directory.
///
/// Removal is attempted unconditionally and the result ignored: the former
/// `exists()` guard was racy (TOCTOU) and reports `false` for a broken
/// symlink, leaving it behind, while the removal error was already being
/// discarded anyway.
fn cleanup_recovery_dir(recovery_dir: &Path) {
    let _ = fs::remove_dir_all(recovery_dir);
}
/// Returns true when `data_dir` canonicalizes to a location under the
/// system temp directory. Any canonicalization failure (either side) yields
/// false — fail closed, since this gates destructive restore operations.
fn is_temp_test_data_dir(data_dir: &Path) -> bool {
    let canonical_data = match data_dir.canonicalize() {
        Ok(dir) => dir,
        Err(_) => return false,
    };
    match std::env::temp_dir().canonicalize() {
        Ok(temp_root) => canonical_data.starts_with(temp_root),
        Err(_) => false,
    }
}
/// Loads and fully verifies a pre-v2 backup manifest and its two artifacts.
///
/// Validation order: manifest file exists, reads, and parses; `kind` and
/// `schema_version` match the pre-v2 format; `tool_version` is non-empty;
/// `backup_timestamp` is RFC3339; then each referenced artifact (SQLite
/// store and JSONL mirror) is checked for size and BLAKE3 digest.
///
/// Manifest-shape failures map to `Exit::PreconditionUnmet` (wrong schema
/// version maps to `Exit::SchemaMismatch`); artifact content mismatches
/// surface from `verify_manifest_artifact` as `Exit::QuarantinedInput`.
fn verify_pre_v2_backup(path: &Path) -> Result<VerifiedBackup, Exit> {
    if !path.is_file() {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` was not found; no state was changed.",
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    let raw = fs::read_to_string(path).map_err(|err| {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` could not be read: {err}. no state was changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    let manifest: PreV2BackupManifest = serde_json::from_str(&raw).map_err(|err| {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` is malformed: {err}. no state was changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    // Reject manifests of the wrong kind before trusting any other field.
    if manifest.kind != "cortex_pre_v2_backup" {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has invalid kind `{}`; expected cortex_pre_v2_backup. no state was changed.",
            path.display(),
            manifest.kind
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Pre-v2 backups are pinned to schema_version 1.
    if manifest.schema_version != 1 {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has schema_version {}; expected pre-v2 schema_version 1. no state was changed.",
            path.display(),
            manifest.schema_version
        );
        return Err(Exit::SchemaMismatch);
    }
    if manifest.tool_version.trim().is_empty() {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has empty `tool_version`. no state was changed.",
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // The timestamp is validated for shape only; its value is not used here.
    if chrono::DateTime::parse_from_rfc3339(&manifest.backup_timestamp).is_err() {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has invalid `backup_timestamp`; expected RFC3339. no state was changed.",
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    let sqlite_store = verify_manifest_artifact(
        path,
        "sqlite_store",
        &manifest.sqlite_store,
        manifest.sqlite_store_size_bytes,
        &manifest.sqlite_store_blake3,
    )?;
    let jsonl_mirror = verify_manifest_artifact(
        path,
        "jsonl_mirror",
        &manifest.jsonl_mirror,
        manifest.jsonl_mirror_size_bytes,
        &manifest.jsonl_mirror_blake3,
    )?;
    Ok(VerifiedBackup {
        schema_version: manifest.schema_version,
        sqlite_store,
        jsonl_mirror,
    })
}
/// Verifies one manifest-referenced artifact by size and BLAKE3 digest.
///
/// The reference is first validated as a safe relative path (non-empty, not
/// absolute, no `..` components), then resolved against the manifest's
/// directory. Missing/non-file artifacts map to `Exit::PreconditionUnmet`;
/// size or digest mismatches map to `Exit::QuarantinedInput`.
///
/// On success returns the artifact's resolved path and confirmed
/// size/digest for downstream reporting.
fn verify_manifest_artifact(
    manifest_path: &Path,
    field: &'static str,
    artifact: &str,
    expected_size_bytes: u64,
    expected_blake3: &str,
) -> Result<VerifiedArtifact, Exit> {
    validate_manifest_artifact_ref(manifest_path, field, artifact)?;
    let path = manifest_artifact_path(manifest_path, artifact);
    // NOTE(review): `fs::metadata` follows symlinks, so a symlinked artifact
    // is judged by its target.
    let metadata = fs::metadata(&path).map_err(|_| {
        eprintln!(
            "cortex restore verify-backup: manifest `{}` references missing `{field}` artifact `{}`. no state was changed.",
            manifest_path.display(),
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    if !metadata.is_file() {
        eprintln!(
            "cortex restore verify-backup: manifest `{}` references non-file `{field}` artifact `{}`. no state was changed.",
            manifest_path.display(),
            path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Cheap size comparison before the streaming digest.
    if metadata.len() != expected_size_bytes {
        eprintln!(
            "cortex restore verify-backup: `{field}` artifact `{}` size mismatch: manifest={}, actual={}. no state was changed.",
            path.display(),
            expected_size_bytes,
            metadata.len()
        );
        return Err(Exit::QuarantinedInput);
    }
    let actual_blake3 = blake3_file(&path, field)?;
    if expected_blake3 != actual_blake3 {
        eprintln!(
            "cortex restore verify-backup: `{field}` artifact `{}` digest mismatch: manifest={}, actual={}. no state was changed.",
            path.display(),
            expected_blake3,
            actual_blake3
        );
        return Err(Exit::QuarantinedInput);
    }
    Ok(VerifiedArtifact {
        manifest_field: field,
        path,
        size_bytes: expected_size_bytes,
        blake3: actual_blake3,
    })
}
/// Rejects unsafe artifact references in a backup manifest.
///
/// An artifact reference must be a non-empty, relative path containing no
/// `..` components, so the resolved artifact can only live under the
/// manifest's own directory. All rejections map to
/// `Exit::PreconditionUnmet` with a message naming `field`.
fn validate_manifest_artifact_ref(
    manifest_path: &Path,
    field: &str,
    artifact: &str,
) -> Result<(), Exit> {
    if artifact.trim().is_empty() {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has empty `{field}`. no state was changed.",
            manifest_path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    let artifact_path = Path::new(artifact);
    if artifact_path.is_absolute() {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has absolute `{field}` artifact reference `{artifact}`. no state was changed.",
            manifest_path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    // Any `..` component could escape the manifest directory.
    let escapes_manifest_dir = artifact_path
        .components()
        .any(|component| component == std::path::Component::ParentDir);
    if escapes_manifest_dir {
        eprintln!(
            "cortex restore verify-backup: backup manifest `{}` has parent-directory `{field}` artifact reference `{artifact}`. no state was changed.",
            manifest_path.display()
        );
        return Err(Exit::PreconditionUnmet);
    }
    Ok(())
}
/// Resolves a (pre-validated, relative) artifact reference against the
/// manifest's directory; a manifest path with no parent falls back to `.`.
fn manifest_artifact_path(manifest_path: &Path, artifact: &str) -> PathBuf {
    let base = manifest_path.parent().unwrap_or_else(|| Path::new("."));
    base.join(artifact)
}
/// Streams `path` through BLAKE3 and returns the digest as `"blake3:<hex>"`.
///
/// Reads in 16 KiB chunks so arbitrarily large artifacts hash in constant
/// memory. Open and read failures map to `Exit::PreconditionUnmet`.
/// `field` is used only in error messages.
///
/// NOTE(review): errors are prefixed `cortex restore verify-backup` even
/// though this helper is also called from the recover-apply path (see
/// `verify_apply_recovery_backup`); consider threading a command label.
fn blake3_file(path: &Path, field: &str) -> Result<String, Exit> {
    let mut file = File::open(path).map_err(|err| {
        eprintln!(
            "cortex restore verify-backup: failed to open `{field}` artifact {}: {err}. no state was changed.",
            path.display()
        );
        Exit::PreconditionUnmet
    })?;
    let mut hasher = blake3::Hasher::new();
    let mut buffer = [0_u8; 16 * 1024];
    loop {
        let read = file.read(&mut buffer).map_err(|err| {
            eprintln!(
                "cortex restore verify-backup: failed to read `{field}` artifact {}: {err}. no state was changed.",
                path.display()
            );
            Exit::PreconditionUnmet
        })?;
        // Zero bytes read means end of file.
        if read == 0 {
            break;
        }
        hasher.update(&buffer[..read]);
    }
    Ok(format!("blake3:{}", hasher.finalize().to_hex()))
}
/// Resolves the semantic-snapshot input for one side of the diff.
///
/// Exactly one of `snapshot_path` (a JSON snapshot file) or `store_path`
/// (a SQLite store) must be given; providing zero or both is rejected with
/// `Exit::PreconditionUnmet`. `label` is the CLI flag stem interpolated
/// into the error messages.
fn read_semantic_snapshot_source(
    snapshot_path: Option<&Path>,
    store_path: Option<&Path>,
    label: &str,
) -> Result<SemanticSnapshot, Exit> {
    if snapshot_path.is_some() && store_path.is_some() {
        eprintln!(
            "cortex restore semantic-diff: ambiguous {label} source; pass only one of --{label} or --{label}-store. no state was changed."
        );
        return Err(Exit::PreconditionUnmet);
    }
    if let Some(path) = snapshot_path {
        return read_snapshot(path, label);
    }
    if let Some(path) = store_path {
        return read_store_snapshot(path, label);
    }
    eprintln!(
        "cortex restore semantic-diff: missing {label} source; pass --{label} or --{label}-store. no state was changed."
    );
    Err(Exit::PreconditionUnmet)
}
/// Extracts a semantic snapshot from a SQLite store opened read-only.
///
/// `label` identifies which diff input failed in the error message.
/// Extraction failures map to `Exit::PreconditionUnmet`.
fn read_store_snapshot(path: &Path, label: &str) -> Result<SemanticSnapshot, Exit> {
    let pool = open_readonly_store(path, "restore semantic-diff")?;
    semantic_snapshot_from_store(&pool).map_err(|err| {
        eprintln!(
            "cortex restore semantic-diff: failed to extract {label} store snapshot {}: {err}",
            path.display()
        );
        Exit::PreconditionUnmet
    })
}
/// Reads a semantic snapshot from a JSON file on disk.
///
/// A file that cannot be opened maps to `Exit::PreconditionUnmet`; a file
/// that opens but does not parse maps to `Exit::QuarantinedInput`.
/// `label` identifies which diff input failed in the error message.
fn read_snapshot(path: &Path, label: &str) -> Result<SemanticSnapshot, Exit> {
    let file = match File::open(path) {
        Ok(file) => file,
        Err(err) => {
            eprintln!(
                "cortex restore semantic-diff: failed to open {label} snapshot {}: {err}",
                path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    match serde_json::from_reader(file) {
        Ok(snapshot) => Ok(snapshot),
        Err(err) => {
            eprintln!(
                "cortex restore semantic-diff: failed to parse {label} snapshot {}: {err}",
                path.display()
            );
            Err(Exit::QuarantinedInput)
        }
    }
}
#[cfg(test)]
mod rollback_digest_tests {
    //! Tests for `verify_rollback_backup_digest` and the
    //! `RestoreCurrentBackupsError` → `Exit` mapping (both defined earlier
    //! in this file, outside this module).
    use super::*;
    use std::io::Write;
    use tempfile::NamedTempFile;
    /// Writes `contents` to a fresh tempfile (auto-deleted when dropped).
    fn write_tempfile(contents: &[u8]) -> NamedTempFile {
        let mut tf = NamedTempFile::new().expect("create tempfile");
        tf.write_all(contents).expect("write tempfile");
        tf.flush().expect("flush tempfile");
        tf
    }
    /// Formats a digest the way manifests record it: `blake3:<hex>`.
    fn blake3_of(bytes: &[u8]) -> String {
        format!("blake3:{}", blake3::hash(bytes).to_hex())
    }
    #[test]
    fn verify_rollback_backup_digest_passes_when_bytes_match_manifest() {
        let bytes = b"genuine-backup-bytes-for-the-rollback-path";
        let tf = write_tempfile(bytes);
        let expected = blake3_of(bytes);
        verify_rollback_backup_digest(tf.path(), &expected, "active_db_backup")
            .expect("matching BLAKE3 must pass");
    }
    #[test]
    fn verify_rollback_backup_digest_refuses_when_file_was_tampered() {
        let genuine = b"the-bytes-the-manifest-recorded";
        let tampered = b"attacker-controlled-bytes-on-rollback-path";
        let tf = write_tempfile(tampered);
        let expected = blake3_of(genuine);
        let err = verify_rollback_backup_digest(tf.path(), &expected, "active_db_backup")
            .expect_err("tampered backup must refuse");
        // The mismatch must carry the field label, the manifest digest, and
        // the digest actually observed on disk.
        match &err {
            RestoreCurrentBackupsError::DigestMismatch {
                field,
                expected: e,
                observed,
            } => {
                assert_eq!(field, "active_db_backup");
                assert_eq!(e, &expected);
                assert_eq!(observed, &blake3_of(tampered));
            }
            other => panic!("expected DigestMismatch, got {other:?}"),
        }
        assert_eq!(err.to_exit(), Exit::IntegrityFailure);
        // The rendered message must include the stable invariant string so
        // operators can grep for it.
        let rendered = err.to_string();
        assert!(
            rendered.contains(RESTORE_RECOVER_ROLLBACK_BACKUP_DIGEST_MISMATCH_INVARIANT),
            "Display must surface the stable invariant; got: {rendered}",
        );
    }
    #[test]
    fn restore_current_backups_error_exits_are_well_typed() {
        let io_err = RestoreCurrentBackupsError::Io(std::io::Error::other("synthetic"));
        assert_eq!(io_err.to_exit(), Exit::Internal);
        let read_err = RestoreCurrentBackupsError::DigestRead;
        assert_eq!(read_err.to_exit(), Exit::IntegrityFailure);
        let mismatch = RestoreCurrentBackupsError::DigestMismatch {
            field: "active_db_backup".to_string(),
            expected: "blake3:aa".to_string(),
            observed: "blake3:bb".to_string(),
        };
        assert_eq!(mismatch.to_exit(), Exit::IntegrityFailure);
    }
}