use std::fs;
use std::path::{Path, PathBuf};
#[cfg(test)]
use chrono::NaiveDate;
use chrono::{DateTime, Utc};
use clap::Args;
use cortex_core::{AuditRecordId, PolicyOutcome, TrustTier, SCHEMA_VERSION};
use cortex_ledger::{
current_anchor, parse_external_receipt, rekor_submit, rekor_verify_receipt, ExternalReceipt,
ExternalSink, LedgerAnchor, RekorError, TrustRootStalenessAnchor, TrustRootStalenessError,
TrustedRoot, DEFAULT_MAX_TRUST_ROOT_AGE, EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE,
REKOR_DEFAULT_ENDPOINT, REKOR_SUBMIT_FAILED_INVARIANT,
};
use cortex_store::repo::AuditEntry;
use ed25519_dalek::VerifyingKey;
use serde_json::json;
use crate::cmd::open_default_store;
use crate::cmd::temporal::{
revalidate_operator_temporal_authority, revalidation_failed_invariant,
TemporalAuthorityContribution,
};
use crate::exit::Exit;
use crate::paths::DataLayout;
use super::intent::{
self, ExpectedIntent, ExpectedTakeover, IntentError, VerifiedRestoreIntent,
VerifiedTakeoverAttestation, RESTORE_INTENT_PRINCIPAL_NOT_BOUND_INVARIANT,
};
use super::lock::{self, ActiveStoreLockGuard, LockError, LockMarkerPayload};
use super::policy::{
compose_apply_decision, compose_semantic_diff_decision, policy_decision_report,
APPLY_STAGE_OPERATOR_TEMPORAL_AUTHORITY_RULE_ID,
};
// Wire names accepted by `--anchor-sink`.
const SINK_KIND_EXTERNAL_APPEND_ONLY: &str = "external-append-only";
const SINK_KIND_REKOR: &str = "rekor";
// Stable invariant / status identifiers emitted on stderr by the production
// restore gates; operators and tests key off these exact strings.
pub const RESTORE_PRODUCTION_REKOR_SUBMIT_FAILED_INVARIANT: &str =
"restore.production.sink.rekor.submit_failed";
pub const RESTORE_PRODUCTION_REKOR_VERIFY_FAILED_INVARIANT: &str =
"restore.production.sink.rekor.verify_failed";
pub const RESTORE_PRODUCTION_REKOR_TRUSTED_ROOT_STALE_INVARIANT: &str =
"restore.production.sink.rekor.trusted_root_stale";
pub const RESTORE_PRODUCTION_REKOR_TRUST_ROOT_SNAPSHOT_STALE_INVARIANT: &str =
"restore.production.sink.rekor.trust_root_snapshot_stale";
pub const RESTORE_PRODUCTION_REKOR_TRUST_ROOT_CACHE_STALE_INVARIANT: &str =
"restore.production.sink.rekor.trust_root_cache_stale";
pub const RESTORE_PRODUCTION_REKOR_PERSISTED_STATUS: &str =
"restore.production.sink.rekor.persisted";
pub const RESTORE_PRODUCTION_SINK_KIND_UNKNOWN_INVARIANT: &str =
"restore.production.sink_kind.unknown";
pub const RESTORE_PRODUCTION_SINK_EXTERNAL_APPEND_ONLY_NOT_AUTHORIZED_INVARIANT: &str =
"restore.production.sink_kind.external_append_only_not_authorized";
pub const RESTORE_PRODUCTION_SINK_EXTERNAL_APPEND_ONLY_PRECONDITION_VIOLATED_INVARIANT: &str =
"restore.production.sink_kind.external_append_only.parser_refusal_precondition_violated";
#[allow(dead_code)]
pub const RESTORE_PRODUCTION_OPERATOR_TEMPORAL_AUTHORITY_REVALIDATION_FAILED_INVARIANT: &str =
"restore.production.operator_temporal_authority.revalidation_failed";
pub const RESTORE_PRODUCTION_REKOR_FIXTURE_RECEIPT_ENV_FORBIDDEN_IN_PRODUCTION_INVARIANT: &str =
"restore.production.rekor_fixture_receipt_env_forbidden_in_production";
pub const RESTORE_PRODUCTION_REKOR_TRUST_ROOT_CACHE_FUTURE_DATED_INVARIANT: &str =
"restore.production.sink.rekor.trust_root_cache_future_dated";
// Test-only override consumed by `acquire_rekor_receipt`; refused outright
// by the production path in `run_apply`.
const REKOR_FIXTURE_RECEIPT_ENV: &str = "CORTEX_REKOR_FIXTURE_RECEIPT";
// Parsed value of `--anchor-sink`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SinkKind {
    ExternalAppendOnly,
    Rekor,
}
impl SinkKind {
    // Map the raw CLI string onto a sink kind; anything unrecognized is `None`
    // so the caller can emit the unknown-sink-kind invariant.
    fn parse(raw: &str) -> Option<Self> {
        if raw == SINK_KIND_EXTERNAL_APPEND_ONLY {
            Some(Self::ExternalAppendOnly)
        } else if raw == SINK_KIND_REKOR {
            Some(Self::Rekor)
        } else {
            None
        }
    }
}
// Audit-row `operation` strings recorded by the production restore path.
pub const RESTORE_APPLY_PRODUCTION_AUDIT_OPERATION: &str = "command.restore.apply.production";
#[allow(dead_code)]
pub const RESTORE_APPLY_LOCK_TAKEOVER_AUDIT_OPERATION: &str = "command.restore.apply.lock_takeover";
// Recorded when a post-verify failure forces an automatic rollback
// (see `append_rolled_back_audit_row`).
pub const RESTORE_APPLY_ROLLED_BACK_AUDIT_OPERATION: &str = "command.restore.apply.rolled_back";
// CLI arguments for `cortex restore apply` (the production destructive
// restore path). Field notes are deliberately `//` comments rather than
// `///` so they do not change the clap-generated help text.
#[derive(Debug, Args)]
pub struct ApplyArgs {
// Must be passed; `run_apply` refuses otherwise.
#[arg(long)]
pub production: bool,
// Pre-v2 backup manifest to verify and restore from.
#[arg(long, value_name = "PATH")]
pub manifest: PathBuf,
// Directory containing the staged `cortex.db` and `events.jsonl`.
#[arg(long, value_name = "DIR")]
pub stage_dir: PathBuf,
// Signed restore-intent JSON plus its detached signature.
#[arg(long, value_name = "RESTORE_INTENT_JSON")]
pub restore_intent: PathBuf,
#[arg(long, value_name = "RESTORE_INTENT_SIG")]
pub restore_intent_signature: PathBuf,
// Ed25519 verification key for the operator intent/attestation.
#[arg(long, value_name = "OPERATOR_VERIFICATION_KEY")]
pub operator_verification_key: PathBuf,
// External anchor and anchor-history files checked before and after cutover.
#[arg(long, value_name = "ANCHOR_PATH")]
pub against: PathBuf,
#[arg(long = "against-history", value_name = "ANCHOR_HISTORY_PATH")]
pub against_history: PathBuf,
// Sink kind string; parsed by `SinkKind::parse` (only `rekor` is authorized).
#[arg(long = "anchor-sink", value_name = "KIND")]
pub anchor_sink: String,
// Destination for the persisted Rekor receipt; must not already exist.
#[arg(long = "sink-path", value_name = "PATH")]
pub sink_path: PathBuf,
// Both acknowledgement flags are mandatory preconditions in `run_apply`.
#[arg(long)]
pub acknowledge_production_destructive_restore: bool,
#[arg(long)]
pub acknowledge_active_store_replacement: bool,
// Lock takeover is only attempted when this is set AND an attestation
// (plus signature) is supplied; see `acquire_production_lock`.
#[arg(long)]
pub force_lock_takeover: bool,
#[arg(long, value_name = "TAKEOVER_ATTESTATION_JSON")]
pub takeover_attestation: Option<PathBuf>,
#[arg(long, value_name = "TAKEOVER_ATTESTATION_SIG")]
pub takeover_attestation_signature: Option<PathBuf>,
// Feeds into `compose_apply_decision` as an explicit risk acknowledgement.
#[arg(long)]
pub acknowledge_recovery_risk: bool,
}
/// Entry point for `cortex restore apply --production`, the destructive
/// production restore.
///
/// Pipeline, in order: (1) CLI precondition gates that refuse before the
/// active store is touched; (2) verification of the backup manifest, staged
/// artifacts, JSONL audit chain, signed restore intent, and operator
/// temporal authority; (3) lock acquisition, pre-cutover semantic diff,
/// audit row, and active-store replacement; (4) post-verify chain and Rekor
/// receipt emission — any failure after cutover rolls back from recovery
/// evidence; (5) JSON report on stdout.
pub fn run_apply(args: ApplyArgs) -> Exit {
// Gate: this subcommand implements ONLY the production destructive path.
if !args.production {
eprintln!(
"cortex restore apply: --production is required; only the production destructive path is supported by `restore apply`. use `restore apply-stage` for temp-test."
);
return Exit::PreconditionUnmet;
}
// Gate: the fixture-receipt env var is test-only; honoring it here would
// produce a Rekor witness not bound to the live JSONL (see message text).
if std::env::var_os(REKOR_FIXTURE_RECEIPT_ENV).is_some() {
let invariant =
RESTORE_PRODUCTION_REKOR_FIXTURE_RECEIPT_ENV_FORBIDDEN_IN_PRODUCTION_INVARIANT;
eprintln!(
"cortex restore apply: invariant={invariant}: production destructive restore refuses CORTEX_REKOR_FIXTURE_RECEIPT. \
the fixture-receipt path is test-only for the non-`--production` apply-stage path; honoring it on production would short-circuit \
live Rekor submission and ship a witness whose anchor is not bound to the active JSONL. unset the env var and retry. active store was not changed.",
);
return Exit::PreconditionUnmet;
}
// Gate: both destructive-operation acknowledgements are mandatory.
if !args.acknowledge_production_destructive_restore
|| !args.acknowledge_active_store_replacement
{
eprintln!(
"cortex restore apply: --acknowledge-production-destructive-restore and --acknowledge-active-store-replacement are required; active store was not changed."
);
return Exit::PreconditionUnmet;
}
// Gate: staged artifacts must already exist on disk.
if !args.stage_dir.is_dir() {
eprintln!(
"cortex restore apply: stage directory `{}` does not exist; active store was not changed.",
args.stage_dir.display()
);
return Exit::PreconditionUnmet;
}
// Gate: --anchor-sink must name a known sink kind.
let sink_kind = match SinkKind::parse(&args.anchor_sink) {
Some(kind) => kind,
None => {
let invariant = RESTORE_PRODUCTION_SINK_KIND_UNKNOWN_INVARIANT;
let external = SINK_KIND_EXTERNAL_APPEND_ONLY;
let rekor = SINK_KIND_REKOR;
let observed = &args.anchor_sink;
eprintln!(
"cortex restore apply: {invariant}: --anchor-sink must be one of `{external}`, `{rekor}`; got `{observed}`. active store was not changed."
);
return Exit::PreconditionUnmet;
}
};
// Sink-specific preconditions, still before any mutation.
match sink_kind {
// `external-append-only` parses but is refused on the production path;
// the message cites the governing council decision / ADR.
SinkKind::ExternalAppendOnly => {
let invariant = RESTORE_PRODUCTION_SINK_EXTERNAL_APPEND_ONLY_NOT_AUTHORIZED_INVARIANT;
let external = SINK_KIND_EXTERNAL_APPEND_ONLY;
let rekor = SINK_KIND_REKOR;
eprintln!(
"cortex restore apply: {invariant}: --anchor-sink `{external}` is not authorized for the production destructive restore path. Council Q1 (2026-05-12) made Rekor the disjoint-authority gate; ADR 0013 §\"Mechanism C / Council Decision #1\" governs the sink kind decision. Use `--anchor-sink {rekor}` instead. active store was not changed."
);
return Exit::PreconditionUnmet;
}
SinkKind::Rekor => {
// The receipt destination must be specified, must not exist yet
// (never overwrite a prior receipt), and its parent must exist.
if args.sink_path.as_os_str().is_empty() {
eprintln!(
"cortex restore apply: --sink-path is required with --anchor-sink rekor (it is the destination for the Rekor receipt JSON). active store was not changed."
);
return Exit::PreconditionUnmet;
}
if args.sink_path.exists() {
eprintln!(
"cortex restore apply: --sink-path `{}` already exists; refusing to overwrite a prior Rekor receipt. active store was not changed.",
args.sink_path.display()
);
return Exit::PreconditionUnmet;
}
// A bare filename has an empty parent; treat that as the cwd.
let parent = args
.sink_path
.parent()
.filter(|p| !p.as_os_str().is_empty())
.unwrap_or(Path::new("."));
if !parent.is_dir() {
eprintln!(
"cortex restore apply: parent of --sink-path `{}` does not exist or is not a directory. active store was not changed.",
args.sink_path.display()
);
return Exit::PreconditionUnmet;
}
// Gate: the Sigstore trust root must be fresh enough to trust the
// upcoming offline verification of the Rekor receipt.
let cache_path = crate::cmd::audit::trust_root_cache_path();
if let Err(exit) = enforce_rekor_trust_root_freshness(Utc::now(), cache_path.as_deref())
{
return exit;
}
}
}
let layout = match DataLayout::resolve(None, None) {
Ok(layout) => layout,
Err(exit) => return exit,
};
// Structural verification of the backup manifest.
let verified_backup = match super::verify_pre_v2_backup(&args.manifest) {
Ok(verified) => verified,
Err(exit) => return exit,
};
let staged_sqlite = args.stage_dir.join("cortex.db");
let staged_jsonl = args.stage_dir.join("events.jsonl");
// Staged artifacts must hash-match the manifest before they may replace
// the active store.
if let Err(exit) = super::verify_staged_artifact(&staged_sqlite, &verified_backup.sqlite_store)
{
return exit;
}
if let Err(exit) = super::verify_staged_artifact(&staged_jsonl, &verified_backup.jsonl_mirror) {
return exit;
}
// Verify the staged JSONL's audit chain end-to-end.
let audit = match super::audit_verify_staged_jsonl(&staged_jsonl) {
Ok(audit) => audit,
Err(exit) => return exit,
};
let manifest_blake3 = match super::blake3_file(&args.manifest, "backup_manifest") {
Ok(hash) => hash,
Err(exit) => return exit,
};
// Returns the key together with its fingerprint; destructured just below.
let verifying_key = match load_operator_verification_key(&args.operator_verification_key) {
Ok((key, fingerprint)) => (key, fingerprint),
Err(exit) => return exit,
};
let (verifying_key, key_fingerprint) = verifying_key;
let deployment_id = derive_deployment_id(&layout);
let now = Utc::now();
// Expected bindings the signed restore intent must match exactly.
let expected_intent = ExpectedIntent {
deployment_id: &deployment_id,
active_db_path: &layout.db_path,
active_event_log_path: &layout.event_log_path,
backup_manifest_blake3: &manifest_blake3,
staged_sqlite_blake3: &verified_backup.sqlite_store.blake3,
staged_jsonl_blake3: &verified_backup.jsonl_mirror.blake3,
now,
verifying_key,
verifying_key_fingerprint: &key_fingerprint,
};
let verified_intent = match intent::verify_restore_intent(
&args.restore_intent,
&args.restore_intent_signature,
&expected_intent,
) {
Ok(verified) => verified,
Err(err) => {
// A key mismatch gets its own invariant tag so principal-binding
// failures are distinguishable from other intent failures.
if matches!(err, IntentError::KeyMismatch { .. }) {
eprintln!(
"cortex restore apply: invariant={RESTORE_INTENT_PRINCIPAL_NOT_BOUND_INVARIANT}: {err}. active store was not changed.",
);
} else {
eprintln!("cortex restore apply: {err}. active store was not changed.");
}
return Exit::QuarantinedInput;
}
};
// Re-check the operator key's temporal authority against the store's
// authority timeline, using the intent's not_before as the event time.
let operator_temporal_contribution = match revalidate_production_operator_temporal_authority(
&key_fingerprint,
verified_intent.payload.not_before,
) {
Ok(contribution) => contribution,
Err(exit) => return exit,
};
// Verify the external anchor + history against the CURRENT JSONL before
// any mutation; the result is embedded in the final report.
let pre_anchor_state = match revalidate_external_anchor_pre(&layout.event_log_path, &args) {
Ok(report) => report,
Err(exit) => return exit,
};
let lock_marker_path = active_store_lock_marker_for(&layout);
let lock_guard = match acquire_production_lock(
&lock_marker_path,
&verified_intent,
&args,
&verifying_key,
&key_fingerprint,
&deployment_id,
now,
) {
Ok(guard) => guard,
Err(exit) => return exit,
};
// Rebound mutable: the rollback path needs `&mut` to leak the marker on
// rollback failure (see `rollback_after_post_verify_failure`).
let mut lock_guard = lock_guard;
// Pre-cutover semantic diff between the current and staged stores.
let semantic_diff_decision = match build_semantic_diff_decision(&staged_sqlite, &layout) {
Ok(decision) => decision,
Err(exit) => return exit,
};
if semantic_diff_decision.final_outcome == PolicyOutcome::Reject {
eprintln!(
"cortex restore apply: semantic diff rejected the candidate; active store was not changed."
);
return Exit::PreconditionUnmet;
}
// Hash the about-to-be-replaced DB for the audit row's before_hash.
let active_db_before_hash =
match super::blake3_file(&layout.db_path, "active_db_before_restore") {
Ok(hash) => Some(hash),
Err(exit) => return exit,
};
let audit_entry = match restore_apply_production_audit_entry(
&args,
&layout,
&verified_backup,
&verified_intent,
active_db_before_hash.clone(),
) {
Ok(entry) => entry,
Err(exit) => return exit,
};
// CUTOVER: replace the active store. From here on, failures roll back
// using the recovery evidence prepared by this call.
let recovery_evidence = match super::apply_staged_active_store(
&layout,
&staged_sqlite,
&staged_jsonl,
&audit_entry,
) {
Ok(evidence) => evidence,
Err(exit) => return exit,
};
let post_verify = match run_post_verify_chain(
&args,
&layout,
&staged_sqlite,
&verified_backup,
&pre_anchor_state,
) {
Ok(report) => report,
Err(exit) => {
return rollback_after_post_verify_failure(
&layout,
&recovery_evidence,
&mut lock_guard,
exit,
&args,
&verified_intent,
);
}
};
let sink_report = match sink_kind {
// Defensive arm: the parser-refusal gate above makes this unreachable;
// reaching it means a refactor broke the precondition, so roll back.
SinkKind::ExternalAppendOnly => {
let invariant =
RESTORE_PRODUCTION_SINK_EXTERNAL_APPEND_ONLY_PRECONDITION_VIOLATED_INVARIANT;
eprintln!(
"cortex restore apply: {invariant}: control reached the legacy `external-append-only` sink arm after cutover; the parser refusal precondition has been violated by a refactor. rolling back to pre-restore state."
);
return rollback_after_post_verify_failure(
&layout,
&recovery_evidence,
&mut lock_guard,
Exit::Internal,
&args,
&verified_intent,
);
}
// Submit/verify/persist the Rekor receipt; failure also rolls back.
SinkKind::Rekor => match emit_rekor_sink_receipt(&layout, &args) {
Ok(report) => report,
Err(exit) => {
return rollback_after_post_verify_failure(
&layout,
&recovery_evidence,
&mut lock_guard,
exit,
&args,
&verified_intent,
);
}
},
};
// Fold anchor outcome, temporal authority, and the operator's risk
// acknowledgement into the final policy decision.
let temporal_outcome = operator_temporal_contribution.outcome();
let temporal_reason = operator_temporal_contribution.reason();
let apply_decision = compose_apply_decision(
post_verify_anchor_outcome(&post_verify),
"post-restore anchor and history extended monotonically",
temporal_outcome,
&temporal_reason,
args.acknowledge_recovery_risk,
"restore.apply.production",
&format!("active_db:{}", layout.db_path.display()),
);
let truth_ceiling = super::restore_truth_ceiling_object(
cortex_core::ClaimProofState::FullChainVerified,
cortex_core::AuthorityClass::Verified,
);
// Machine-readable success report, printed to stdout.
let report = json!({
"command": "restore.apply.production",
"manifest": args.manifest,
"stage_dir": args.stage_dir,
"active_db": layout.db_path,
"active_event_log": layout.event_log_path,
"structural_verification": super::verified_backup_report(&verified_backup),
"staged_artifacts": "verified",
"audit_verification": super::audit_report(&audit),
"semantic_diff": policy_decision_report(&semantic_diff_decision),
"post_restore_verification": post_verify,
"external_anchor_sink": sink_report,
"policy_decision": policy_decision_report(&apply_decision),
"lock": {
"marker": lock_guard.marker_path(),
"scope": "production",
"released": true,
},
"restore_intent": {
"verified": true,
"principal_id": verified_intent.payload.operator_principal_id,
"deployment_id": verified_intent.payload.deployment_id,
"canonical_blake3": verified_intent.canonical_blake3,
},
"recovery_evidence": {
"status": "prepared_before_active_replacement",
"manifest": recovery_evidence.manifest_path,
"active_db_backup": recovery_evidence.active_db_backup,
"active_event_log_backup": recovery_evidence.active_event_log_backup,
},
"audit_record_id": audit_entry.id,
"audit_operation": audit_entry.operation,
"schema_version_match": {
"manifest": verified_backup.schema_version,
"active": SCHEMA_VERSION,
},
"restore_performed": true,
"cutover_performed": true,
"rollback_performed": false,
"destructive_restore_supported": true,
"mutated_store": true,
"production_eligible": true,
"truth_ceiling": truth_ceiling,
});
match serde_json::to_string_pretty(&report) {
Ok(output) => {
println!("{output}");
Exit::Ok
}
Err(err) => {
eprintln!("cortex restore apply: failed to serialize report: {err}");
Exit::Internal
}
}
}
/// Re-validate the operator key's temporal authority against the default
/// store's authority timeline, with the intent's `not_before` as the event
/// time and `TrustTier::Operator` as the required tier.
///
/// Returns `PreconditionUnmet` when the timeline cannot be read or the key
/// is not valid *now*; on success it logs the revalidation and returns the
/// contribution for the final policy composition.
fn revalidate_production_operator_temporal_authority(
key_fingerprint: &str,
event_time: DateTime<Utc>,
) -> Result<TemporalAuthorityContribution, Exit> {
let pool = open_default_store("restore apply")?;
let invariant = revalidation_failed_invariant("restore.production");
let contribution = revalidate_operator_temporal_authority(
&pool,
APPLY_STAGE_OPERATOR_TEMPORAL_AUTHORITY_RULE_ID,
key_fingerprint,
event_time,
TrustTier::Operator,
)
.map_err(|err| {
eprintln!(
"cortex restore apply: {invariant}: failed to read authority timeline for key {key_fingerprint}: {err}. active store was not changed.",
);
Exit::PreconditionUnmet
})?;
// The key may verify the signature yet be expired/revoked on the
// timeline; surface every wire-format reason in the refusal.
if !contribution.report.valid_now {
let reasons = contribution
.report
.reasons
.iter()
.map(|reason| reason.wire_str())
.collect::<Vec<_>>()
.join(",");
let outcome = contribution.outcome();
eprintln!(
"cortex restore apply: {invariant}: operator temporal authority current use blocked for key {} (outcome={outcome:?}; reasons: {reasons}). active store was not changed.",
contribution.report.key_id,
);
return Err(Exit::PreconditionUnmet);
}
eprintln!(
"cortex restore apply: operator_temporal_authority_revalidated=true key_id={} valid_now={}",
contribution.report.key_id, contribution.report.valid_now,
);
Ok(contribution)
}
/// Refuse to proceed to Rekor submission when the Sigstore trust root is
/// older than `DEFAULT_MAX_TRUST_ROOT_AGE`.
///
/// Staleness is anchored on the cached `trusted_root.json` mtime when a
/// cache file exists; when `cache_path` is `None` or the file is absent,
/// the embedded snapshot date is the anchor instead. Staleness-evaluation
/// errors on the cache path are translated by `map_staleness_error`.
fn enforce_rekor_trust_root_freshness(
now: DateTime<Utc>,
cache_path: Option<&Path>,
) -> Result<(), Exit> {
// The embedded root failing to parse is a build defect, not operator error.
let root = TrustedRoot::embedded().map_err(|err| {
eprintln!(
"cortex restore apply: embedded Sigstore trust root failed to parse: {err}. active store was not changed."
);
Exit::Internal
})?;
let max_age = DEFAULT_MAX_TRUST_ROOT_AGE;
// Whole days, only for the human-readable messages below.
let max_age_days = max_age.as_secs() / (24 * 60 * 60);
let now_rfc = now.to_rfc3339();
if let Some(path) = cache_path {
match path.metadata() {
Ok(_) => {
// Cache file exists: its mtime is the freshness anchor and the
// cache check fully decides the outcome (no fallback).
let anchor = TrustRootStalenessAnchor::cache_file_mtime(path);
match root.is_stale_at(now, max_age, anchor) {
Ok(true) => {
let generic = RESTORE_PRODUCTION_REKOR_TRUSTED_ROOT_STALE_INVARIANT;
let specific = RESTORE_PRODUCTION_REKOR_TRUST_ROOT_CACHE_STALE_INVARIANT;
eprintln!(
"cortex restore apply: {specific}: cached trusted_root.json `{}` is older than {max_age_days} days at now={now_rfc}. run `cortex audit refresh-trust` before re-running the production drill. active store was not changed.",
path.display()
);
// Also emit the generic invariant so monitors keyed on
// the umbrella identifier still fire.
eprintln!("cortex restore apply: {generic}");
return Err(Exit::PreconditionUnmet);
}
Ok(false) => return Ok(()),
Err(err) => {
return Err(map_staleness_error(err, path));
}
}
}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
// No cache file: deliberately fall through to the embedded
// snapshot check below.
}
Err(err) => {
eprintln!(
"cortex restore apply: cannot stat cached trusted_root.json `{}`: {err}. active store was not changed.",
path.display()
);
return Err(Exit::PreconditionUnmet);
}
}
}
// No usable cache: anchor staleness on the embedded snapshot date.
let anchor = TrustRootStalenessAnchor::embedded_snapshot();
match root.is_stale_at(now, max_age, anchor) {
Ok(true) => {
let generic = RESTORE_PRODUCTION_REKOR_TRUSTED_ROOT_STALE_INVARIANT;
let specific = RESTORE_PRODUCTION_REKOR_TRUST_ROOT_SNAPSHOT_STALE_INVARIANT;
let snapshot_iso = EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE;
eprintln!(
"cortex restore apply: {specific}: embedded trusted_root.json snapshot captured at {snapshot_iso} is older than {max_age_days} days at now={now_rfc}. refresh the embedded snapshot or stage a cached trusted_root.json via `cortex audit refresh-trust`. active store was not changed."
);
eprintln!("cortex restore apply: {generic}");
Err(Exit::PreconditionUnmet)
}
Ok(false) => Ok(()),
Err(err) => {
eprintln!(
"cortex restore apply: embedded snapshot anchor failed to resolve: {err}. active store was not changed."
);
Err(Exit::Internal)
}
}
}
/// Translate a `TrustRootStalenessError` into an operator-facing stderr
/// message and exit code.
///
/// Cache metadata/mtime problems are operator-recoverable
/// (`PreconditionUnmet`); malformed embedded snapshot data is a build
/// defect (`Internal`); a future-dated cache mtime is treated as a
/// tamper/clock signal and refused (`PreconditionUnmet`).
fn map_staleness_error(err: TrustRootStalenessError, path: &Path) -> Exit {
match err {
TrustRootStalenessError::CacheMetadata { source, .. }
| TrustRootStalenessError::CacheMtime { source, .. } => {
eprintln!(
"cortex restore apply: cannot resolve mtime on cached trusted_root.json `{}`: {source}. active store was not changed.",
path.display()
);
Exit::PreconditionUnmet
}
TrustRootStalenessError::MalformedEmbeddedSnapshotDate { observed, reason } => {
eprintln!(
"cortex restore apply: EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE `{observed}` is not RFC 3339 YYYY-MM-DD: {reason}. active store was not changed."
);
Exit::Internal
}
TrustRootStalenessError::EmbeddedSnapshotMidnightConstruction => {
eprintln!(
"cortex restore apply: EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE midnight construction failed. active store was not changed."
);
Exit::Internal
}
TrustRootStalenessError::CacheFutureDated {
anchor,
anchor_ts,
now,
tolerance_seconds,
} => {
// Emit both the production-scoped invariant and the upstream
// ledger invariant so either identifier can be monitored.
let production_invariant =
RESTORE_PRODUCTION_REKOR_TRUST_ROOT_CACHE_FUTURE_DATED_INVARIANT;
let upstream_invariant =
cortex_ledger::STABLE_INVARIANT_TRUSTED_ROOT_CACHE_FUTURE_DATED;
eprintln!(
"cortex restore apply: {production_invariant}: trusted_root.json cache mtime for anchor `{anchor}` is {anchor_ts}, more than {tolerance_seconds}s ahead of now={now}. \
cached `{path}` may have been touched into the future (deliberately or by a buggy refresh tool); refuse to mark the freshness gate as passed. active store was not changed.",
path = path.display()
);
eprintln!("cortex restore apply: {upstream_invariant}");
Exit::PreconditionUnmet
}
}
}
/// Derive the post-restore anchor from the active JSONL, obtain a Rekor
/// receipt for it, verify the receipt offline against the embedded trust
/// root, and persist it to `--sink-path`.
///
/// This runs AFTER cutover, so every failure message says "rolling back"
/// and the returned `Exit` drives `rollback_after_post_verify_failure` in
/// the caller. On success, returns the JSON fragment embedded in the
/// final report under `external_anchor_sink`.
fn emit_rekor_sink_receipt(
layout: &DataLayout,
args: &ApplyArgs,
) -> Result<serde_json::Value, Exit> {
let now = Utc::now();
// The anchor binds the receipt to the just-restored JSONL.
let anchor = current_anchor(&layout.event_log_path, now).map_err(|err| {
eprintln!(
"cortex restore apply: cannot derive post-restore anchor for Rekor submission from `{}`: {err}. rolling back.",
layout.event_log_path.display()
);
Exit::IntegrityFailure
})?;
let receipt = match acquire_rekor_receipt(&anchor) {
Ok(envelope) => envelope,
Err(err) => {
// Submission-phase errors get the submit invariant; everything
// else is treated as a verify-phase failure.
let invariant = match &err {
RekorError::SubmitHttp { .. } | RekorError::SubmitBody { .. } => {
RESTORE_PRODUCTION_REKOR_SUBMIT_FAILED_INVARIANT
}
_ => RESTORE_PRODUCTION_REKOR_VERIFY_FAILED_INVARIANT,
};
eprintln!(
"cortex restore apply: {invariant}: Rekor sink emission failed: {err}. rolling back."
);
return Err(Exit::IntegrityFailure);
}
};
let trusted_root = TrustedRoot::embedded().map_err(|err| {
eprintln!(
"cortex restore apply: embedded Sigstore trust root failed to parse during verify: {err}. rolling back."
);
Exit::Internal
})?;
// Offline verification: the receipt must check out against the embedded
// trust root before it is persisted.
let verify_invariant = RESTORE_PRODUCTION_REKOR_VERIFY_FAILED_INVARIANT;
let verification = rekor_verify_receipt(&receipt, &trusted_root).map_err(|err| {
eprintln!(
"cortex restore apply: {verify_invariant}: Rekor receipt failed offline verification against the embedded trust root: {err}. rolling back."
);
Exit::IntegrityFailure
})?;
let text = receipt.to_record_text().map_err(|err| {
eprintln!(
"cortex restore apply: {verify_invariant}: failed to render Rekor receipt as v1 record text: {err}. rolling back."
);
Exit::Internal
})?;
let submit_invariant = RESTORE_PRODUCTION_REKOR_SUBMIT_FAILED_INVARIANT;
let sink_path_display = args.sink_path.display();
// Persist only after verification succeeded; run_apply already checked
// the path does not pre-exist.
fs::write(&args.sink_path, text.as_bytes()).map_err(|err| {
eprintln!(
"cortex restore apply: {submit_invariant}: failed to persist Rekor receipt to `{sink_path_display}`: {err}. rolling back."
);
Exit::Internal
})?;
let persisted_status = RESTORE_PRODUCTION_REKOR_PERSISTED_STATUS;
let log_index = verification.log_index;
let uuid = &verification.uuid;
eprintln!(
"cortex restore apply: {persisted_status}: persisted Rekor receipt to `{sink_path_display}` log_index={log_index} uuid={uuid}"
);
Ok(json!({
"kind": SINK_KIND_REKOR,
"status": RESTORE_PRODUCTION_REKOR_PERSISTED_STATUS,
"sink_path": args.sink_path,
"sink_endpoint": receipt.sink_endpoint,
"anchor_event_count": receipt.anchor_event_count,
"anchor_chain_head_hash": receipt.anchor_chain_head_hash,
"anchor_text_sha256": receipt.anchor_text_sha256,
"log_index": verification.log_index,
"uuid": verification.uuid,
"trust_root_status": "embedded_snapshot",
}))
}
/// Obtain an `ExternalReceipt` for `anchor`: either read the test-only
/// fixture named by `CORTEX_REKOR_FIXTURE_RECEIPT`, or submit the anchor
/// live to the default Rekor endpoint.
///
/// The production `run_apply` path refuses the env var up front, so the
/// fixture branch is only reachable from non-production callers. A fixture
/// receipt must parse and must already be a Rekor-sink receipt; read/parse
/// failures and a wrong sink are all reported as `SubmitBody` errors.
fn acquire_rekor_receipt(anchor: &LedgerAnchor) -> Result<ExternalReceipt, RekorError> {
    if let Ok(fixture_path) = std::env::var(REKOR_FIXTURE_RECEIPT_ENV) {
        let text = fs::read_to_string(&fixture_path).map_err(|err| RekorError::SubmitBody {
            invariant: REKOR_SUBMIT_FAILED_INVARIANT,
            reason: format!("fixture receipt at {fixture_path}: read failed: {err}"),
        })?;
        let receipt = parse_external_receipt(&text).map_err(|err| RekorError::SubmitBody {
            invariant: REKOR_SUBMIT_FAILED_INVARIANT,
            reason: format!("fixture receipt at {fixture_path}: parse failed: {err}"),
        })?;
        if receipt.sink != ExternalSink::Rekor {
            return Err(RekorError::SubmitBody {
                invariant: REKOR_SUBMIT_FAILED_INVARIANT,
                reason: format!(
                    "fixture receipt at {fixture_path}: sink is {} not rekor",
                    receipt.sink
                ),
            });
        }
        // NOTE(review): the fixture receipt is NOT cross-checked against
        // `anchor`; only the live path below binds the submission to it.
        // (The previous `let _ = anchor;` suppression was dead code —
        // `anchor` is used by `rekor_submit` below — and has been removed.)
        return Ok(receipt);
    }
    rekor_submit(anchor, REKOR_DEFAULT_ENDPOINT)
}
/// Verify the external anchor and anchor history against the *current*
/// active JSONL before any mutation, returning the JSON fragment that is
/// embedded in the final report under `anchors.pre_mutation`.
fn revalidate_external_anchor_pre(
    event_log_path: &Path,
    args: &ApplyArgs,
) -> Result<serde_json::Value, Exit> {
    // The single-anchor check runs first; a failure there short-circuits
    // before the history check is attempted.
    let single_anchor_report = super::verify_post_restore_anchor(event_log_path, &args.against)?;
    let history_report =
        super::verify_post_restore_anchor_history(event_log_path, &args.against_history)?;
    let fragment = json!({
        "scope": "pre_mutation",
        "single_anchor": single_anchor_report,
        "anchor_history": history_report,
        "anchor_sink": args.anchor_sink,
        "sink_path": args.sink_path,
    });
    Ok(fragment)
}
/// Compute the semantic diff between the current active store and the
/// staged candidate, then fold it into the pre-cutover policy decision.
fn build_semantic_diff_decision(
    staged_sqlite: &Path,
    layout: &DataLayout,
) -> Result<cortex_core::PolicyDecision, Exit> {
    let target_ref = format!("active_db:{}", layout.db_path.display());
    // Snapshot both sides: the live store first, then the staged candidate.
    let current = super::read_store_snapshot(&layout.db_path, "current")?;
    let staged = super::read_store_snapshot(staged_sqlite, "staged")?;
    let semantic_diff = current.diff_against_restore(&staged);
    let composed = compose_semantic_diff_decision(
        &semantic_diff,
        false,
        "restore.apply.production.semantic_diff",
        &target_ref,
    );
    Ok(composed.decision)
}
/// Post-cutover verification chain.
///
/// Confirms the now-active JSONL still matches the backup manifest,
/// re-verifies the audit chain, re-runs the semantic diff (staged vs.
/// now-active store), enforces the schema-version match mandated by
/// ADR 0033 §5, and re-checks the external anchor plus anchor history.
/// Any `Err` here drives `rollback_after_post_verify_failure` in the
/// caller; `Ok` carries the `post_restore_verification` report fragment.
fn run_post_verify_chain(
    args: &ApplyArgs,
    layout: &DataLayout,
    staged_sqlite: &Path,
    verified_backup: &super::VerifiedBackup,
    pre_anchor_state: &serde_json::Value,
) -> Result<serde_json::Value, Exit> {
    super::verify_active_jsonl_artifact(&layout.event_log_path, &verified_backup.jsonl_mirror)?;
    let audit = super::audit_verify_active_jsonl(&layout.event_log_path)?;
    let current_snapshot = super::read_store_snapshot(&layout.db_path, "current_after_restore")?;
    let staged_snapshot = super::read_store_snapshot(staged_sqlite, "staged")?;
    // FIX: the reference operator here was previously mojibake
    // (`¤t_snapshot` — an HTML-entity-mangled `&current_snapshot`),
    // which does not compile.
    let diff = staged_snapshot.diff_against_restore(&current_snapshot);
    let semantic_diff_policy = compose_semantic_diff_decision(
        &diff,
        false,
        "restore.apply.production.post_restore_semantic_diff",
        &format!("active_db:{}", layout.db_path.display()),
    );
    // Either Reject or Quarantine on the post-restore diff forces rollback.
    if matches!(
        semantic_diff_policy.decision.final_outcome,
        PolicyOutcome::Reject | PolicyOutcome::Quarantine
    ) {
        eprintln!(
            "cortex restore apply: post-restore semantic diff rejected the active store; rolling back."
        );
        return Err(Exit::IntegrityFailure);
    }
    if verified_backup.schema_version != SCHEMA_VERSION {
        eprintln!(
            "cortex restore apply: post-restore schema_version mismatch: manifest={}, active={}. ADR 0033 §5 mandates Reject. rolling back.",
            verified_backup.schema_version, SCHEMA_VERSION,
        );
        return Err(Exit::SchemaMismatch);
    }
    let post_anchor = super::verify_post_restore_anchor(&layout.event_log_path, &args.against)?;
    let post_history =
        super::verify_post_restore_anchor_history(&layout.event_log_path, &args.against_history)?;
    let identity = post_restore_identity_gate(layout)?;
    Ok(json!({
        "status": "verified",
        "manifest_artifacts": {
            "status": "active_jsonl_digest_verified",
            "sqlite_store": {
                "manifest_blake3": verified_backup.sqlite_store.blake3,
                "active_exact_digest": "not_claimed_final_sqlite_contains_command_audit_row",
            },
            "jsonl_mirror": {
                "path": layout.event_log_path,
                "manifest_blake3": verified_backup.jsonl_mirror.blake3,
            },
        },
        "jsonl_audit": super::audit_report(&audit),
        "semantic_diff": policy_decision_report(&semantic_diff_policy.decision),
        "schema_version_match": {
            "manifest": verified_backup.schema_version,
            "active": SCHEMA_VERSION,
        },
        "anchors": {
            "pre_mutation": pre_anchor_state,
            "post_mutation_single_anchor": post_anchor,
            "post_mutation_anchor_history": post_history,
            "monotonic_history_extended": true,
        },
        "identity_inconsistent": identity,
        "production_eligible": true,
    }))
}
/// Map the post-verify report onto a policy outcome: `Allow` only when
/// `anchors.post_mutation_single_anchor.status` is exactly "verified";
/// anything else — including a missing or non-string field — is `Reject`.
fn post_verify_anchor_outcome(post_verify: &serde_json::Value) -> PolicyOutcome {
    let status = post_verify
        .get("anchors")
        .and_then(|anchors| anchors.get("post_mutation_single_anchor"))
        .and_then(|anchor| anchor.get("status"))
        .and_then(|value| value.as_str());
    match status {
        Some("verified") => PolicyOutcome::Allow,
        _ => PolicyOutcome::Reject,
    }
}
/// Report whether the restored store carries an active identity row in
/// `authority_key_timeline`.
///
/// Currently informational: both "identity present" and "fresh init, no
/// identity" produce `Ok`; only open/query failures are errors (and force
/// rollback in the caller). Cross-checking the active principal against
/// the verified intent is flagged as follow-on work in the emitted JSON.
fn post_restore_identity_gate(layout: &DataLayout) -> Result<serde_json::Value, Exit> {
use cortex_store::Pool;
use rusqlite::OptionalExtension;
let pool = Pool::open(&layout.db_path).map_err(|err| {
eprintln!(
"cortex restore apply: failed to open active store `{}` for identity gate: {err}. rolling back.",
layout.db_path.display()
);
Exit::Internal
})?;
// Most recently effective 'active' timeline row, if any; `.optional()`
// turns "no rows" into `None` instead of an error.
let active_principal = pool
.query_row(
"SELECT principal_id, key_id FROM authority_key_timeline
WHERE state = 'active'
ORDER BY effective_at DESC LIMIT 1;",
[],
|row| Ok((row.get::<_, String>(0)?, row.get::<_, String>(1)?)),
)
.optional()
.map_err(|err| {
eprintln!(
"cortex restore apply: identity gate query failed against `{}`: {err}. rolling back.",
layout.db_path.display()
);
Exit::Internal
})?;
Ok(json!({
"status": match active_principal.as_ref() {
Some(_) => "verified_active_identity_present",
None => "fresh_init_no_active_identity_recorded",
},
"active_principal_id": active_principal.as_ref().map(|(pid, _)| pid.clone()),
"key_id": active_principal.as_ref().map(|(_, kid)| kid.clone()),
"adr_0028_residual_risk": "rotation-state cross-check against verified_intent.operator_principal_id is a follow-on (DESIGN §residual)",
}))
}
/// Restore the pre-cutover backups after a post-verify (or sink) failure.
///
/// On successful rollback, a rolled-back audit row is appended best-effort
/// (its error is ignored) and the ORIGINAL failure's exit code is returned.
/// If rollback itself fails, the lock marker is deliberately leaked for
/// forensic recovery and the rollback error's exit code wins.
fn rollback_after_post_verify_failure(
layout: &DataLayout,
recovery_evidence: &super::ApplyRecoveryEvidence,
lock_guard: &mut ActiveStoreLockGuard,
failure_exit: Exit,
args: &ApplyArgs,
verified_intent: &VerifiedRestoreIntent,
) -> Exit {
match super::restore_current_backups(layout, recovery_evidence) {
Ok(()) => {
// Best-effort: a failed audit append must not mask the rollback.
let _ = append_rolled_back_audit_row(layout, args, verified_intent, recovery_evidence);
eprintln!(
"cortex restore apply: post-restore verification failed after active replacement; active backups were restored from recovery evidence `{}`. lock released. exit={:?}",
recovery_evidence.manifest_path.display(),
failure_exit,
);
failure_exit
}
Err(err) => {
// Keep the marker on disk so the failed state stays visibly locked.
lock_guard.leak_for_rollback_failure();
eprintln!(
"cortex restore apply: post-restore verification failed AND rollback from recovery evidence `{}` failed: {err}. lock marker preserved for forensic recovery; active store requires manual recovery before continuing writes.",
recovery_evidence.manifest_path.display(),
);
err.to_exit()
}
}
}
/// Append the `command.restore.apply.rolled_back` audit row after an
/// automatic rollback, recording the manifest, stage dir, recovery
/// manifest, and intent hash as source refs.
fn append_rolled_back_audit_row(
layout: &DataLayout,
args: &ApplyArgs,
verified_intent: &VerifiedRestoreIntent,
recovery_evidence: &super::ApplyRecoveryEvidence,
) -> Result<(), Exit> {
let source_refs_json = json!({
"manifest": args.manifest,
"stage_dir": args.stage_dir,
"recovery_manifest": recovery_evidence.manifest_path,
"restore_intent_blake3": verified_intent.canonical_blake3,
"rollback_reason": "post_restore_verification_failed",
});
let audit_entry = AuditEntry {
id: AuditRecordId::new(),
operation: RESTORE_APPLY_ROLLED_BACK_AUDIT_OPERATION.to_string(),
target_ref: format!("active_store:{}", layout.db_path.display()),
// No before-hash is recorded on the rollback row; the intent hash
// stands in as the after-hash binding.
before_hash: None,
after_hash: verified_intent.canonical_blake3.clone(),
reason: "production restore auto-rollback after post-verify failure".to_string(),
actor_json: json!({
"kind": "cli",
"command": "restore apply --production",
"scope": "production",
}),
source_refs_json,
created_at: Utc::now(),
};
super::append_restore_command_audit(&layout.db_path, &audit_entry, "restore apply --production")
}
/// Build the `command.restore.apply.production` audit row that is written
/// as part of the cutover.
///
/// `before_hash` is the blake3 of the active DB taken just before
/// replacement; the after-hash is the staged sqlite's manifest blake3.
/// This constructor is infallible today; the `Result` return keeps its
/// signature aligned with the `match`-based call site in `run_apply`.
fn restore_apply_production_audit_entry(
args: &ApplyArgs,
layout: &DataLayout,
verified: &super::VerifiedBackup,
verified_intent: &VerifiedRestoreIntent,
before_hash: Option<String>,
) -> Result<AuditEntry, Exit> {
let source_refs_json = json!({
"manifest": args.manifest,
"stage_dir": args.stage_dir,
"active_db": layout.db_path,
"active_event_log": layout.event_log_path,
"sqlite_store_blake3": verified.sqlite_store.blake3,
"jsonl_mirror_blake3": verified.jsonl_mirror.blake3,
"restore_intent_blake3": verified_intent.canonical_blake3,
"operator_principal_id": verified_intent.payload.operator_principal_id,
"deployment_id": verified_intent.payload.deployment_id,
"anchor_sink": args.anchor_sink,
"sink_path": args.sink_path,
"scope": "production",
});
Ok(AuditEntry {
id: AuditRecordId::new(),
operation: RESTORE_APPLY_PRODUCTION_AUDIT_OPERATION.to_string(),
target_ref: format!("active_store:{}", layout.db_path.display()),
before_hash,
after_hash: verified.sqlite_store.blake3.clone(),
reason: "production restore apply replaced active store after intent, anchor, audit, and semantic guards".to_string(),
actor_json: json!({
"kind": "cli",
"command": "restore apply --production",
"scope": "production",
"operator_principal_id": verified_intent.payload.operator_principal_id,
}),
source_refs_json,
created_at: Utc::now(),
})
}
/// Acquire the production active-store lock at `marker_path`.
///
/// On a leftover marker or live contention, the call either routes into the
/// attested takeover path (only when `--force-lock-takeover` was given) or
/// reports the condition and refuses. In every refusal branch the active
/// store is left untouched, as the messages state.
fn acquire_production_lock(
    marker_path: &Path,
    verified_intent: &VerifiedRestoreIntent,
    args: &ApplyArgs,
    verifying_key: &VerifyingKey,
    verifying_key_fingerprint: &str,
    deployment_id: &str,
    now: DateTime<Utc>,
) -> Result<ActiveStoreLockGuard, Exit> {
    // The marker payload records who holds the lock and why, so a later
    // forced takeover can be verified against it.
    let payload = LockMarkerPayload {
        deployment_id: verified_intent.payload.deployment_id.clone(),
        operator_principal_id: verified_intent.payload.operator_principal_id.clone(),
        restore_intent_blake3: verified_intent.canonical_blake3.clone(),
        acquired_at: now,
        host: resolve_host(),
    };
    match ActiveStoreLockGuard::acquire(marker_path, payload.clone()) {
        Ok(guard) => Ok(guard),
        // NOTE: this guarded arm must stay ahead of the specific
        // MarkerAlreadyExists/Contended arms below; match arms are tried in
        // order, and the guard diverts both error kinds into the attested
        // takeover only when --force-lock-takeover was passed.
        Err(LockError::MarkerAlreadyExists { .. }) | Err(LockError::Contended { .. })
            if args.force_lock_takeover =>
        {
            attempt_lock_takeover(
                marker_path,
                payload,
                args,
                verifying_key,
                verifying_key_fingerprint,
                deployment_id,
                now,
            )
        }
        // Stale marker without a takeover request: tell the operator how to
        // force the takeover, then refuse.
        Err(err @ LockError::MarkerAlreadyExists { .. }) => {
            eprintln!(
                "cortex restore apply: {err}. pass --force-lock-takeover with an Ed25519-attested --takeover-attestation if the prior process is confirmed dead. active store was not changed."
            );
            Err(Exit::PreconditionUnmet)
        }
        // Another live process holds the lock: refuse outright.
        Err(err @ LockError::Contended { .. }) => {
            eprintln!("cortex restore apply: {err}. active store was not changed.");
            Err(Exit::PreconditionUnmet)
        }
        // Any other lock failure is unexpected and surfaces as internal.
        Err(err) => {
            eprintln!(
                "cortex restore apply: lock acquisition failed: {err}. active store was not changed."
            );
            Err(Exit::Internal)
        }
    }
}
/// Perform an Ed25519-attested takeover of a stale active-store lock.
///
/// The sequence is strict: require both attestation inputs, read and parse
/// the stale marker, verify the attestation against the stale holder's
/// identity, quarantine (never delete) the old marker, record an audit
/// line, and only then re-acquire the lock with `payload`.
fn attempt_lock_takeover(
    marker_path: &Path,
    payload: LockMarkerPayload,
    args: &ApplyArgs,
    verifying_key: &VerifyingKey,
    verifying_key_fingerprint: &str,
    deployment_id: &str,
    now: DateTime<Utc>,
) -> Result<ActiveStoreLockGuard, Exit> {
    // Both the attestation document and its detached signature are
    // mandatory for a forced takeover.
    let attestation_path = match args.takeover_attestation.as_deref() {
        Some(path) => path,
        None => {
            eprintln!(
                "cortex restore apply: --force-lock-takeover requires --takeover-attestation. active store was not changed."
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    let signature_path = match args.takeover_attestation_signature.as_deref() {
        Some(path) => path,
        None => {
            eprintln!(
                "cortex restore apply: --force-lock-takeover requires --takeover-attestation-signature. active store was not changed."
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    // The marker must exist and be readable: taking over nothing, or a
    // marker we cannot inspect, is refused.
    let marker_body = lock::read_marker_file(marker_path)
        .map_err(|err| {
            eprintln!("cortex restore apply: cannot read stale marker for takeover: {err}");
            Exit::PreconditionUnmet
        })?
        .ok_or_else(|| {
            eprintln!(
                "cortex restore apply: --force-lock-takeover requested but marker `{}` does not exist; nothing to take over.",
                marker_path.display()
            );
            Exit::PreconditionUnmet
        })?;
    // A malformed marker leaves no verifiable identity to attest against.
    let stale = parse_marker_fields(&marker_body).ok_or_else(|| {
        eprintln!(
            "cortex restore apply: stale marker `{}` is malformed; refuse to take over without verifiable identity.",
            marker_path.display()
        );
        Exit::PreconditionUnmet
    })?;
    // Bind the attestation to this deployment, the stale holder's
    // pid/acquire time, the current time, and the operator's verifying key.
    let expected_takeover = ExpectedTakeover {
        deployment_id,
        stale_pid: stale.pid,
        stale_acquired_at: stale.acquired_at,
        now,
        verifying_key: *verifying_key,
        verifying_key_fingerprint,
    };
    let verified_takeover =
        intent::verify_takeover_attestation(attestation_path, signature_path, &expected_takeover)
            .map_err(|err| {
                // A key mismatch carries its invariant id so the operator can
                // tell a principal-binding failure from other attestation errors.
                if matches!(err, IntentError::KeyMismatch { .. }) {
                    eprintln!(
                        "cortex restore apply: invariant={RESTORE_INTENT_PRINCIPAL_NOT_BOUND_INVARIANT}: {err}. active store was not changed.",
                    );
                } else {
                    eprintln!("cortex restore apply: {err}. active store was not changed.");
                }
                Exit::QuarantinedInput
            })?;
    // Preserve the stale marker for forensics before claiming the lock.
    let quarantined = lock::quarantine_stale_marker(marker_path).map_err(|err| {
        eprintln!("cortex restore apply: takeover failed: cannot quarantine stale marker: {err}");
        Exit::Internal
    })?;
    eprintln!(
        "cortex restore apply: attested lock takeover: stale marker preserved at `{}`",
        quarantined.display()
    );
    record_takeover_audit_row(&verified_takeover, marker_path, &quarantined);
    // Marker path is now free; a failure here is unexpected (internal).
    ActiveStoreLockGuard::acquire(marker_path, payload).map_err(|err| {
        eprintln!("cortex restore apply: takeover failed after stale-marker quarantine: {err}");
        Exit::Internal
    })
}
/// Emit the stderr line that records a completed, attested lock takeover.
///
/// NOTE(review): despite the name, this only writes to stderr — it does not
/// insert a database audit row; confirm whether a persisted row is expected.
fn record_takeover_audit_row(
    verified: &VerifiedTakeoverAttestation,
    marker_path: &Path,
    quarantined: &Path,
) {
    let line = format!(
        "cortex restore apply: lock_takeover canonical_blake3={} stale_marker=`{}` quarantined_at=`{}` operator={} justification=\"{}\"",
        verified.canonical_blake3,
        marker_path.display(),
        quarantined.display(),
        verified.payload.operator_principal_id,
        verified.payload.justification,
    );
    eprintln!("{line}");
}
/// Identity fields recovered from a stale lock marker, used to bind a
/// takeover attestation to the process being displaced.
#[derive(Debug)]
struct StaleMarkerFields {
    /// PID recorded by the previous lock holder.
    pid: u32,
    /// When the previous holder acquired the lock (RFC 3339 in the marker).
    acquired_at: DateTime<Utc>,
}
/// Extract `pid` and `acquired_at` from a lock-marker body.
///
/// The marker is a sequence of `key=value` lines; unknown keys are ignored.
/// Returns `None` unless both a parseable `pid` and a parseable RFC 3339
/// `acquired_at` are present (a later malformed repeat of a key clears it).
fn parse_marker_fields(body: &str) -> Option<StaleMarkerFields> {
    let mut pid: Option<u32> = None;
    let mut acquired_at: Option<DateTime<Utc>> = None;
    for line in body.lines() {
        match line.split_once('=') {
            Some(("pid", value)) => {
                pid = value.trim().parse::<u32>().ok();
            }
            Some(("acquired_at", value)) => {
                acquired_at = DateTime::parse_from_rfc3339(value.trim())
                    .ok()
                    .map(|dt| dt.with_timezone(&Utc));
            }
            _ => {}
        }
    }
    Some(StaleMarkerFields {
        pid: pid?,
        acquired_at: acquired_at?,
    })
}
/// Load and validate the operator's Ed25519 verifying key from `path`.
///
/// The file must contain exactly the 32 raw public-key bytes. On success
/// returns the parsed key together with its lowercase-hex fingerprint;
/// every failure is reported to stderr and maps to `Exit::PreconditionUnmet`.
fn load_operator_verification_key(path: &Path) -> Result<(VerifyingKey, String), Exit> {
    let bytes = match fs::read(path) {
        Ok(bytes) => bytes,
        Err(err) => {
            eprintln!(
                "cortex restore apply: cannot read --operator-verification-key `{}`: {err}",
                path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    // Ed25519 public keys are exactly 32 bytes; anything else is rejected
    // before attempting curve validation.
    let Ok(key_bytes) = <[u8; 32]>::try_from(bytes.as_slice()) else {
        eprintln!(
            "cortex restore apply: --operator-verification-key `{}` must be exactly 32 bytes; got {}",
            path.display(),
            bytes.len()
        );
        return Err(Exit::PreconditionUnmet);
    };
    // Not every 32-byte string is a valid curve point.
    let key = match VerifyingKey::from_bytes(&key_bytes) {
        Ok(key) => key,
        Err(err) => {
            eprintln!(
                "cortex restore apply: --operator-verification-key `{}` is not a valid Ed25519 verifying key: {err}",
                path.display()
            );
            return Err(Exit::PreconditionUnmet);
        }
    };
    Ok((key, hex_lower(&key_bytes)))
}
/// Render `bytes` as lowercase hexadecimal, two digits per byte.
fn hex_lower(bytes: &[u8]) -> String {
    let mut out = String::with_capacity(bytes.len() * 2);
    for &byte in bytes {
        // `char::from_digit` yields lowercase digits for radix 16, and a
        // nibble is always < 16, so the expects cannot fire.
        out.push(char::from_digit(u32::from(byte >> 4), 16).expect("high nibble < 16"));
        out.push(char::from_digit(u32::from(byte & 0x0f), 16).expect("low nibble < 16"));
    }
    out
}
/// Path of the active-store lock marker inside the layout's data directory.
fn active_store_lock_marker_for(layout: &DataLayout) -> PathBuf {
    let mut marker = layout.data_dir.clone();
    marker.push(".cortex-restore-active-store.lock");
    marker
}
/// Derive a stable deployment identifier from the data directory.
///
/// Canonicalizes the directory when possible (falling back to the raw
/// path, e.g. when it does not exist yet) and BLAKE3-hashes its lossy
/// UTF-8 rendering into a `deployment:<hex>` id.
fn derive_deployment_id(layout: &DataLayout) -> String {
    let dir = match layout.data_dir.canonicalize() {
        Ok(canonical) => canonical,
        Err(_) => layout.data_dir.clone(),
    };
    let digest = blake3::hash(dir.to_string_lossy().as_bytes());
    format!("deployment:{}", digest.to_hex())
}
/// Best-effort host name for lock-marker provenance.
///
/// Checks `HOSTNAME` (common in Unix shells) and then `COMPUTERNAME`
/// (Windows); the first value that is non-blank after trimming is returned
/// as-is (untrimmed). Falls back to "unknown" when neither is usable.
fn resolve_host() -> String {
    for var in ["HOSTNAME", "COMPUTERNAME"] {
        if let Ok(name) = std::env::var(var) {
            if !name.trim().is_empty() {
                return name;
            }
        }
    }
    "unknown".to_string()
}
/// Public wrapper over the module-private marker-path helper.
///
/// Currently has no in-crate callers (hence `allow(dead_code)`); kept as
/// the stable public surface for the lock-marker location.
#[must_use]
#[allow(dead_code)]
pub fn active_store_lock_marker(layout: &DataLayout) -> PathBuf {
    active_store_lock_marker_for(layout)
}
/// Public wrapper exposing the derived deployment id for a data layout.
#[must_use]
pub fn deployment_id_for(layout: &DataLayout) -> String {
    derive_deployment_id(layout)
}
#[cfg(test)]
mod tests {
    use super::*;

    // ---- stale lock-marker parsing -------------------------------------

    #[test]
    fn parse_marker_fields_extracts_pid_and_time() {
        // Full marker body with extra keys that the parser must ignore.
        let body = "cortex-restore-active-store-lock-v1\n\
            pid=4242\n\
            host=test\n\
            deployment_id=dep-1\n\
            operator_principal_id=op-1\n\
            acquired_at=2026-05-12T10:00:00+00:00\n\
            scope=production\n\
            restore_intent_blake3=blake3:00\n";
        let fields = parse_marker_fields(body).unwrap();
        assert_eq!(fields.pid, 4242);
        assert_eq!(fields.acquired_at.to_rfc3339(), "2026-05-12T10:00:00+00:00");
    }

    #[test]
    fn parse_marker_fields_returns_none_when_pid_missing() {
        // Either required field missing must yield None.
        let body = "cortex-restore-active-store-lock-v1\n\
            host=test\n\
            acquired_at=2026-05-12T10:00:00+00:00\n";
        assert!(parse_marker_fields(body).is_none());
    }

    // ---- anchor sink selection -----------------------------------------

    #[test]
    fn sink_kind_parser_accepts_whitelisted_values() {
        assert_eq!(
            SinkKind::parse(SINK_KIND_EXTERNAL_APPEND_ONLY),
            Some(SinkKind::ExternalAppendOnly)
        );
        assert_eq!(SinkKind::parse(SINK_KIND_REKOR), Some(SinkKind::Rekor));
    }

    #[test]
    fn sink_kind_parser_rejects_unknown_values() {
        // The whitelist is closed and matching is case-sensitive.
        assert_eq!(SinkKind::parse(""), None);
        assert_eq!(SinkKind::parse("opentimestamps"), None);
        assert_eq!(SinkKind::parse("Rekor"), None);
        assert_eq!(SinkKind::parse("s3-object-lock"), None);
    }

    // ---- Rekor trust-root freshness ------------------------------------

    // Midnight UTC of the embedded trust-root snapshot date.
    fn embedded_snapshot_date_utc() -> DateTime<Utc> {
        NaiveDate::parse_from_str(EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE, "%Y-%m-%d")
            .expect("EMBEDDED_TRUSTED_ROOT_SNAPSHOT_DATE parses as YYYY-MM-DD")
            .and_hms_opt(0, 0, 0)
            .expect("midnight construction is valid")
            .and_utc()
    }

    #[test]
    fn rekor_trust_root_freshness_embedded_passes_within_30_days_of_snapshot_date() {
        let snapshot = embedded_snapshot_date_utc();
        let now = snapshot + chrono::Duration::days(3);
        enforce_rekor_trust_root_freshness(now, None)
            .expect("embedded snapshot 3 days old must pass");
    }

    #[test]
    fn rekor_trust_root_freshness_embedded_fails_when_snapshot_too_old() {
        let snapshot = embedded_snapshot_date_utc();
        // 34 days is past the freshness window; the guard must refuse.
        let now = snapshot + chrono::Duration::days(34);
        let exit = enforce_rekor_trust_root_freshness(now, None).unwrap_err();
        assert!(matches!(exit, Exit::PreconditionUnmet));
    }

    #[test]
    fn rekor_trust_root_freshness_cached_passes_within_30_days_of_mtime() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let cache_path = tmp.path().join("trusted_root.json");
        let root = TrustedRoot::embedded().expect("embedded root parses");
        root.write_atomic(&cache_path)
            .expect("write cached trust root");
        let now = Utc::now();
        // Backdate the cache's mtime by 5 days — still within the window.
        let mtime = now - chrono::Duration::days(5);
        let mtime_systemtime = std::time::SystemTime::UNIX_EPOCH
            + std::time::Duration::from_secs(mtime.timestamp() as u64);
        std::fs::File::options()
            .write(true)
            .open(&cache_path)
            .expect("open cache for mtime set")
            .set_modified(mtime_systemtime)
            .expect("set mtime");
        enforce_rekor_trust_root_freshness(now, Some(cache_path.as_path()))
            .expect("cached mtime 5 days old must pass");
    }

    #[test]
    fn rekor_trust_root_freshness_cached_fails_when_mtime_too_old() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let cache_path = tmp.path().join("trusted_root.json");
        let root = TrustedRoot::embedded().expect("embedded root parses");
        root.write_atomic(&cache_path)
            .expect("write cached trust root");
        let now = Utc::now();
        // 40 days stale — past the window; the guard must refuse.
        let mtime = now - chrono::Duration::days(40);
        let mtime_systemtime = std::time::SystemTime::UNIX_EPOCH
            + std::time::Duration::from_secs(mtime.timestamp() as u64);
        std::fs::File::options()
            .write(true)
            .open(&cache_path)
            .expect("open cache for mtime set")
            .set_modified(mtime_systemtime)
            .expect("set mtime");
        let exit = enforce_rekor_trust_root_freshness(now, Some(cache_path.as_path())).unwrap_err();
        assert!(matches!(exit, Exit::PreconditionUnmet));
    }

    #[test]
    fn rekor_trust_root_freshness_falls_back_to_embedded_when_cache_missing() {
        let tmp = tempfile::tempdir().expect("tempdir");
        // Cache path points at a file that was never written.
        let cache_path = tmp.path().join("does-not-exist.json");
        let snapshot = embedded_snapshot_date_utc();
        let now = snapshot + chrono::Duration::days(7);
        enforce_rekor_trust_root_freshness(now, Some(cache_path.as_path()))
            .expect("missing cache should fall back to embedded path");
    }

    // Write the embedded trust root into `dir` and force the file's mtime.
    fn write_cache_with_mtime(dir: &Path, mtime: std::time::SystemTime) -> PathBuf {
        let path = dir.join("trusted_root.json");
        TrustedRoot::embedded()
            .expect("embedded trust root parses")
            .write_atomic(&path)
            .expect("write trusted_root.json cache");
        let file = std::fs::File::options()
            .write(true)
            .open(&path)
            .expect("open cache for mtime touch");
        file.set_modified(mtime).expect("set_modified on cache");
        drop(file);
        path
    }

    #[test]
    fn rekor_trust_root_freshness_refuses_future_dated_cache_mtime() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let now = embedded_snapshot_date_utc() + chrono::Duration::days(7);
        // An mtime ~70 years in the future must trip the future-dated guard.
        let future_mtime = std::time::SystemTime::from(now + chrono::Duration::days(365 * 70));
        let cache_path = write_cache_with_mtime(tmp.path(), future_mtime);
        let exit = enforce_rekor_trust_root_freshness(now, Some(cache_path.as_path()))
            .expect_err("future-dated cache mtime must refuse");
        assert_eq!(exit, Exit::PreconditionUnmet);
    }

    #[test]
    fn rekor_trust_root_freshness_tolerates_tiny_clock_skew_on_cache_mtime() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let now = embedded_snapshot_date_utc() + chrono::Duration::days(7);
        // Small positive skew (filesystem vs wall clock) must be tolerated.
        let tiny_skew = std::time::SystemTime::from(now + chrono::Duration::seconds(10));
        let cache_path = write_cache_with_mtime(tmp.path(), tiny_skew);
        enforce_rekor_trust_root_freshness(now, Some(cache_path.as_path()))
            .expect("10-second skew must not trip the future-dated guard");
    }

    // ---- pinned constants / invariant ids ------------------------------

    #[test]
    fn rekor_fixture_receipt_env_constant_is_pinned() {
        assert_eq!(REKOR_FIXTURE_RECEIPT_ENV, "CORTEX_REKOR_FIXTURE_RECEIPT");
    }

    #[test]
    fn rekor_fixture_receipt_env_forbidden_in_production_invariant_is_stable() {
        assert_eq!(
            RESTORE_PRODUCTION_REKOR_FIXTURE_RECEIPT_ENV_FORBIDDEN_IN_PRODUCTION_INVARIANT,
            "restore.production.rekor_fixture_receipt_env_forbidden_in_production"
        );
    }

    #[test]
    fn rekor_trust_root_cache_future_dated_invariant_is_stable() {
        assert_eq!(
            RESTORE_PRODUCTION_REKOR_TRUST_ROOT_CACHE_FUTURE_DATED_INVARIANT,
            "restore.production.sink.rekor.trust_root_cache_future_dated"
        );
    }
}