use std::path::{Path, PathBuf};
use std::process::Command;
use chrono::{Duration, TimeZone, Utc};
use cortex_core::{Event, EventSource, EventType, MemoryId, SCHEMA_VERSION};
use cortex_llm::{
ReplaySummaryFixture, ReplaySummaryFixtureEntry, SummaryRequest, SummaryResponse,
};
use cortex_memory::decay::summary::{
canonical_signing_input, LlmSummaryOperatorAttestationEnvelope,
};
use cortex_memory::decay::{
DECAY_LLM_SUMMARY_ATTESTATION_PURPOSE, DECAY_LLM_SUMMARY_ATTESTATION_SCHEMA_VERSION,
};
use cortex_store::migrate::apply_pending;
use cortex_store::repo::{EventRepo, MemoryCandidate, MemoryRepo};
use ed25519_dalek::{Signer, SigningKey};
use rusqlite::Connection;
use serde_json::json;
fn cortex_bin() -> PathBuf {
PathBuf::from(env!("CARGO_BIN_EXE_cortex"))
}
/// Invoke the `cortex` binary with `args` inside `cwd`, pointing HOME and
/// XDG_DATA_HOME at the temp dir so state never leaks into the real user env.
fn run_in(cwd: &Path, args: &[&str]) -> std::process::Output {
    let mut cmd = Command::new(cortex_bin());
    cmd.current_dir(cwd)
        .env("XDG_DATA_HOME", cwd.join("xdg"))
        .env("HOME", cwd)
        .args(args);
    cmd.output().expect("spawn cortex")
}
/// Assert the process exited with `expected`, dumping captured stdout/stderr
/// in the failure message for easier triage.
fn assert_exit(out: &std::process::Output, expected: i32) {
    let code = out.status.code().expect("process exited via signal");
    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert_eq!(
        code, expected,
        "expected exit {expected}, got {code}\nstdout: {}\nstderr: {}",
        stdout, stderr,
    );
}
/// Parse the `--json` stdout bytes as a JSON envelope, panicking with the raw
/// text on failure so malformed output is visible in the test log.
fn parse_envelope(stdout: &[u8]) -> serde_json::Value {
    let text = std::str::from_utf8(stdout).expect("stdout is utf-8");
    match serde_json::from_str(text) {
        Ok(value) => value,
        Err(err) => {
            panic!("expected --json stdout to parse as JSON envelope; err={err}; stdout={text}")
        }
    }
}
/// Run `cortex init` in `tmp` and return the sqlite db path parsed out of the
/// `cortex init: db = <path> (<status>)` stdout line.
fn init(tmp: &Path) -> PathBuf {
    let out = run_in(tmp, &["init"]);
    assert_exit(&out, 0);
    let stdout = String::from_utf8_lossy(&out.stdout);
    let db_line = stdout
        .lines()
        .find(|line| line.starts_with("cortex init: db"))
        .expect("init stdout includes db path");
    let (_, rest) = db_line.split_once('=').expect("db line has equals");
    let (path, _) = rest
        .trim()
        .split_once(" (")
        .expect("db line has status suffix");
    PathBuf::from(path)
}
/// Deterministic test timestamp: 2026-05-13 12:00:<second> UTC.
fn at(second: u32) -> chrono::DateTime<Utc> {
    let stamp = Utc.with_ymd_and_hms(2026, 5, 13, 12, 0, second);
    stamp.unwrap()
}
/// Make sure the shared source event (fixed ULID) exists so candidate memories
/// can reference it; inserting is idempotent across multiple seed calls.
fn ensure_source_event(pool: &Connection, second: u32) {
    let event_id = "evt_01ARZ3NDEKTSV4RRFFQ69G5FAV".parse().unwrap();
    let events = EventRepo::new(pool);
    let existing = events.get_by_id(&event_id).expect("query source event");
    if existing.is_some() {
        return;
    }
    let event = Event {
        id: event_id,
        schema_version: SCHEMA_VERSION,
        observed_at: at(second),
        recorded_at: at(second),
        source: EventSource::Tool {
            name: "decay-test".into(),
        },
        event_type: EventType::ToolResult,
        trace_id: None,
        session_id: Some("decay-test".into()),
        domain_tags: vec!["test".into()],
        payload: json!({"source": "decay-test", "second": second}),
        payload_hash: format!("payload-decay-{second}"),
        prev_event_hash: None,
        event_hash: format!("event-decay-{second}"),
    };
    events.append(&event).expect("append source event");
}
/// Open the initialised db, apply migrations, and insert one candidate memory
/// (plus the shared source event) with deterministic timestamps.
/// Returns the new memory's id.
fn seed_candidate_memory(db_path: &Path, claim: &str, second: u32) -> MemoryId {
    let conn = Connection::open(db_path).expect("open initialised sqlite db");
    apply_pending(&conn).expect("apply migrations");
    ensure_source_event(&conn, second);
    let memory_id = MemoryId::new();
    let ts = at(second);
    MemoryRepo::new(&conn)
        .insert_candidate(&MemoryCandidate {
            id: memory_id,
            memory_type: "semantic".into(),
            claim: claim.into(),
            source_episodes_json: json!([]),
            source_events_json: json!(["evt_01ARZ3NDEKTSV4RRFFQ69G5FAV"]),
            domains_json: json!(["decay-test"]),
            salience_json: json!({"score": 0.5}),
            confidence: 0.75,
            authority: "candidate".into(),
            applies_when_json: json!({}),
            does_not_apply_when_json: json!([]),
            created_at: ts,
            updated_at: ts,
        })
        .expect("insert candidate memory");
    memory_id
}
/// Schedule a candidate-compression decay job over the comma-separated
/// `memory_ids`, assert success, and return the parsed JSON envelope.
fn schedule_candidate_compression(tmp: &Path, memory_ids: &str) -> serde_json::Value {
    let args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        memory_ids,
    ];
    let out = run_in(tmp, &args);
    assert_exit(&out, 0);
    parse_envelope(&out.stdout)
}
#[test]
fn decay_schedule_candidate_compression_emits_job_id() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let m1 = seed_candidate_memory(&db_path, "alpha", 1);
    let m2 = seed_candidate_memory(&db_path, "beta", 2);
    let envelope = schedule_candidate_compression(tmp.path(), &format!("{m1},{m2}"));
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(envelope["exit_code"], 0);
    assert_eq!(envelope["outcome"], "ok");
    let report = &envelope["report"];
    let job_id = report["job_id"].as_str().expect("job_id");
    assert!(
        job_id.starts_with("dcy_"),
        "decay job ids must carry the `dcy_` prefix; got `{job_id}`"
    );
    assert_eq!(report["kind"], "candidate_compression");
    assert_eq!(report["state"], "pending");
    assert_eq!(report["summary_method"], "deterministic_concatenate");
    // Source ids must round-trip in the order they were scheduled.
    let source_ids = report["source_ids"].as_array().expect("source_ids is array");
    assert_eq!(source_ids.len(), 2);
    assert_eq!(source_ids[0].as_str().unwrap(), m1.to_string());
    assert_eq!(source_ids[1].as_str().unwrap(), m2.to_string());
}
#[test]
fn decay_schedule_episode_compression_emits_job_id() {
    let tmp = tempfile::tempdir().unwrap();
    init(tmp.path());
    let episode_ids = format!(
        "{},{}",
        cortex_core::EpisodeId::new(),
        cortex_core::EpisodeId::new()
    );
    let args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "episode-compression",
        "--episode-ids",
        episode_ids.as_str(),
    ];
    let out = run_in(tmp.path(), &args);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(envelope["report"]["kind"], "episode_compression");
    assert_eq!(envelope["report"]["state"], "pending");
}
#[test]
fn decay_schedule_expired_principle_review_emits_job_id() {
    let tmp = tempfile::tempdir().unwrap();
    init(tmp.path());
    let principle = cortex_core::PrincipleId::new().to_string();
    let args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "expired-principle-review",
        "--principle-id",
        principle.as_str(),
    ];
    let out = run_in(tmp.path(), &args);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(envelope["report"]["kind"], "expired_principle_review");
    // Principle review jobs do not summarise anything.
    assert_eq!(envelope["report"]["summary_method"], "none");
}
#[test]
fn decay_schedule_refuses_past_scheduled_for_without_run_immediately() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "stale", 1).to_string();
    let past = (Utc::now() - Duration::hours(1)).to_rfc3339();
    let base_args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        memory_id.as_str(),
        "--scheduled-for",
        past.as_str(),
    ];
    // A past --scheduled-for without --run-immediately is refused (exit 7).
    let refused = run_in(tmp.path(), &base_args);
    assert_exit(&refused, 7);
    let envelope = parse_envelope(&refused.stdout);
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(envelope["exit_code"], 7);
    assert_eq!(
        envelope["report"]["invariant"],
        "decay.schedule.scheduled_for_must_be_present_or_future"
    );
    // The same past timestamp is accepted once --run-immediately is added.
    let mut retry_args = base_args.to_vec();
    retry_args.push("--run-immediately");
    let accepted = run_in(tmp.path(), &retry_args);
    assert_exit(&accepted, 0);
}
#[test]
fn decay_list_filters_by_state() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let m_alpha = seed_candidate_memory(&db_path, "alpha", 1);
    let m_beta = seed_candidate_memory(&db_path, "beta", 2);
    let first = schedule_candidate_compression(tmp.path(), &m_alpha.to_string());
    schedule_candidate_compression(tmp.path(), &m_beta.to_string());
    let first_id = first["report"]["job_id"].as_str().unwrap().to_string();

    // Unfiltered list sees both jobs.
    let out = run_in(tmp.path(), &["--json", "decay", "list"]);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.list");
    assert_eq!(envelope["report"]["job_count"], 2);

    // Cancel the first job so the two jobs end up in different states.
    let cancel = run_in(
        tmp.path(),
        &["decay", "cancel", &first_id, "--reason", "test cancellation"],
    );
    assert_exit(&cancel, 0);

    // --state pending now matches only the still-pending second job.
    let out = run_in(
        tmp.path(),
        &["--json", "decay", "list", "--state", "pending"],
    );
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    let filter = envelope["report"]["state_filter"]
        .as_array()
        .expect("state_filter array");
    assert_eq!(filter.len(), 1);
    assert_eq!(filter[0].as_str().unwrap(), "pending");
    assert_eq!(envelope["report"]["job_count"], 1);

    // --state cancelled matches only the job we just cancelled.
    let out = run_in(
        tmp.path(),
        &["--json", "decay", "list", "--state", "cancelled"],
    );
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["report"]["job_count"], 1);
    assert_eq!(envelope["report"]["jobs"][0]["id"], first_id);
    assert_eq!(envelope["report"]["jobs"][0]["state"], "cancelled");
}
#[test]
fn decay_run_specific_job_transitions_to_completed() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let m1 = seed_candidate_memory(&db_path, "alpha", 1);
    let m2 = seed_candidate_memory(&db_path, "beta", 2);
    let scheduled = schedule_candidate_compression(tmp.path(), &format!("{m1},{m2}"));
    let job_id = scheduled["report"]["job_id"].as_str().unwrap().to_string();

    // Running by job id completes it and yields a compressed result memory.
    let out = run_in(tmp.path(), &["--json", "decay", "run", "--job-id", &job_id]);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.run");
    assert_eq!(envelope["report"]["job_id"], job_id);
    assert_eq!(envelope["report"]["to_state"], "completed");
    let result = envelope["report"]["result_memory_id"]
        .as_str()
        .expect("result_memory_id is set on completion");
    assert!(
        result.starts_with("mem_"),
        "result memory id should be a `mem_` ULID; got `{result}`"
    );

    // Status now reports completion and lists both inputs as superseded.
    let out = run_in(tmp.path(), &["--json", "decay", "status", &job_id]);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["report"]["state"], "completed");
    let superseded_ids: Vec<&str> = envelope["report"]["superseded_memory_ids"]
        .as_array()
        .expect("superseded_memory_ids is array")
        .iter()
        .map(|v| v.as_str().expect("id is string"))
        .collect();
    assert!(
        superseded_ids.contains(&m1.to_string().as_str()),
        "expected superseded set to include source m1; got {superseded_ids:?}"
    );
    assert!(
        superseded_ids.contains(&m2.to_string().as_str()),
        "expected superseded set to include source m2; got {superseded_ids:?}"
    );
}
#[test]
fn decay_run_next_pending_when_queue_empty_emits_diagnostic() {
    let tmp = tempfile::tempdir().unwrap();
    init(tmp.path());
    // Nothing was scheduled, so --next-pending has no job to pick up.
    let out = run_in(tmp.path(), &["--json", "decay", "run", "--next-pending"]);
    assert_exit(&out, 7);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.run");
    assert_eq!(envelope["exit_code"], 7);
    let report = &envelope["report"];
    assert_eq!(report["invariant"], "decay.run.no_pending_jobs");
}
#[test]
fn decay_cancel_pending_job_transitions_to_cancelled() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1);
    let scheduled = schedule_candidate_compression(tmp.path(), &memory_id.to_string());
    let job_id = scheduled["report"]["job_id"].as_str().unwrap().to_string();

    // Cancel while the job is still pending.
    let cancel_args = [
        "--json",
        "decay",
        "cancel",
        job_id.as_str(),
        "--reason",
        "operator changed mind",
    ];
    let out = run_in(tmp.path(), &cancel_args);
    assert_exit(&out, 0);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.cancel");
    let report = &envelope["report"];
    assert_eq!(report["from_state"], "pending");
    assert_eq!(report["to_state"], "cancelled");
    assert_eq!(report["reason"], "operator changed mind");

    // Status reflects the terminal cancelled state.
    let status = run_in(tmp.path(), &["--json", "decay", "status", &job_id]);
    assert_exit(&status, 0);
    assert_eq!(parse_envelope(&status.stdout)["report"]["state"], "cancelled");
}
#[test]
fn decay_cancel_completed_job_refuses_with_stable_invariant() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1);
    let scheduled = schedule_candidate_compression(tmp.path(), &memory_id.to_string());
    let job_id = scheduled["report"]["job_id"].as_str().unwrap().to_string();

    // Drive the job into the terminal `completed` state first.
    let run = run_in(tmp.path(), &["decay", "run", "--job-id", &job_id]);
    assert_exit(&run, 0);

    // Cancelling a completed job must refuse with the stable invariant id.
    let out = run_in(
        tmp.path(),
        &["--json", "decay", "cancel", &job_id, "--reason", "too late"],
    );
    assert_exit(&out, 7);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.cancel");
    assert_eq!(envelope["exit_code"], 7);
    assert_eq!(
        envelope["report"]["invariant"],
        "decay.cancel.terminal_state"
    );
}
#[test]
fn decay_cancel_cancelled_job_refuses_with_stable_invariant() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1);
    let scheduled = schedule_candidate_compression(tmp.path(), &memory_id.to_string());
    let job_id = scheduled["report"]["job_id"].as_str().unwrap().to_string();

    // First cancellation succeeds and makes the job terminal.
    let first = run_in(
        tmp.path(),
        &["decay", "cancel", &job_id, "--reason", "first cancel"],
    );
    assert_exit(&first, 0);

    // A second cancellation hits the same terminal-state invariant.
    let second_args = [
        "--json",
        "decay",
        "cancel",
        job_id.as_str(),
        "--reason",
        "second cancel",
    ];
    let out = run_in(tmp.path(), &second_args);
    assert_exit(&out, 7);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(
        envelope["report"]["invariant"],
        "decay.cancel.terminal_state"
    );
}
#[test]
fn decay_run_llm_summary_refuses_without_operator_attestation() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1).to_string();
    // Schedule accepts a placeholder attestation file; the signed attestation
    // is only demanded when the job actually runs.
    let attestation_path = tmp.path().join("attestation.json");
    std::fs::write(&attestation_path, "{\"purpose\":\"placeholder\"}").unwrap();
    let schedule_args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        memory_id.as_str(),
        "--summary-method",
        "llm",
        "--operator-attestation",
        attestation_path.to_str().unwrap(),
    ];
    let scheduled = run_in(tmp.path(), &schedule_args);
    assert_exit(&scheduled, 0);
    let job_id = parse_envelope(&scheduled.stdout)["report"]["job_id"]
        .as_str()
        .unwrap()
        .to_string();
    // Running without --operator-attestation must refuse.
    let out = run_in(tmp.path(), &["--json", "decay", "run", "--job-id", &job_id]);
    assert_exit(&out, 7);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.run");
    assert_eq!(envelope["exit_code"], 7);
    assert_eq!(
        envelope["report"]["invariant"],
        "decay.run.operator_attestation_required_for_llm"
    );
}
#[test]
fn decay_schedule_llm_without_attestation_refuses_at_schedule_time() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1).to_string();
    // --summary-method llm with no --operator-attestation at all fails early.
    let args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        memory_id.as_str(),
        "--summary-method",
        "llm",
    ];
    let out = run_in(tmp.path(), &args);
    assert_exit(&out, 7);
    let envelope = parse_envelope(&out.stdout);
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(
        envelope["report"]["invariant"],
        "decay.run.operator_attestation_required_for_llm"
    );
}
#[test]
fn decay_status_for_unknown_job_returns_precondition_unmet() {
    let tmp = tempfile::tempdir().unwrap();
    init(tmp.path());
    // A well-formed but never-scheduled job id exits 7.
    let missing = cortex_core::DecayJobId::new().to_string();
    let out = run_in(tmp.path(), &["--json", "decay", "status", &missing]);
    assert_exit(&out, 7);
}
#[test]
fn decay_status_for_invalid_id_returns_usage() {
    let tmp = tempfile::tempdir().unwrap();
    init(tmp.path());
    // A malformed id is a usage error (exit 2), not a missing-job error.
    let out = run_in(tmp.path(), &["--json", "decay", "status", "not_a_valid_id"]);
    assert_exit(&out, 2);
}
/// Build a signed operator attestation envelope for an LLM-summary decay job
/// and write it as `attestation.json` under `dir`, returning the written path.
///
/// Uses a fixed test signing key ([42u8; 32]) so the verifying key — and
/// therefore the envelope — is deterministic across runs. The envelope is
/// first constructed with an empty `signature_hex` because
/// `canonical_signing_input` must cover everything *except* the signature;
/// the computed signature is then spliced into the serialized JSON.
fn write_signed_attestation(dir: &Path, job_id: &str, model: &str, prompt_digest: &str) -> PathBuf {
    // Lowercase hex encoding; shared by the verifying key and the signature
    // (the original open-coded this loop twice).
    fn hex(bytes: &[u8]) -> String {
        bytes.iter().map(|b| format!("{b:02x}")).collect()
    }
    let signing_key = SigningKey::from_bytes(&[42u8; 32]);
    let verifying_key_hex = hex(signing_key.verifying_key().as_bytes());
    let envelope_no_sig = LlmSummaryOperatorAttestationEnvelope {
        schema_version: DECAY_LLM_SUMMARY_ATTESTATION_SCHEMA_VERSION,
        purpose: DECAY_LLM_SUMMARY_ATTESTATION_PURPOSE.into(),
        operator_verifying_key_hex: verifying_key_hex,
        operator_key_id: "cortex-operator-cli-test".into(),
        signed_at: Utc::now().to_rfc3339(),
        decay_job_id: job_id.to_string(),
        model_name: model.into(),
        prompt_template_blake3: prompt_digest.into(),
        // Left empty on purpose: the canonical signing input excludes it.
        signature_hex: String::new(),
    };
    let input = canonical_signing_input(&envelope_no_sig);
    let sig_hex = hex(&signing_key.sign(&input).to_bytes());
    let envelope_json = json!({
        "schema_version": envelope_no_sig.schema_version,
        "purpose": envelope_no_sig.purpose,
        "operator_verifying_key_hex": envelope_no_sig.operator_verifying_key_hex,
        "operator_key_id": envelope_no_sig.operator_key_id,
        "signed_at": envelope_no_sig.signed_at,
        "decay_job_id": envelope_no_sig.decay_job_id,
        "model_name": envelope_no_sig.model_name,
        "prompt_template_blake3": envelope_no_sig.prompt_template_blake3,
        "signature_hex": sig_hex,
    });
    let path = dir.join("attestation.json");
    std::fs::write(&path, serde_json::to_string(&envelope_json).unwrap()).unwrap();
    path
}
/// Serialize a single-entry replay fixture pairing `request` with `response`
/// and write it to `fixture.json` under `dir`; returns the written path.
fn write_summary_fixture(
    dir: &Path,
    request: SummaryRequest,
    response: SummaryResponse,
) -> PathBuf {
    let entry = ReplaySummaryFixtureEntry { request, response };
    let fixture = ReplaySummaryFixture {
        entries: vec![entry],
    };
    let path = dir.join("fixture.json");
    let body = serde_json::to_string_pretty(&fixture).unwrap();
    std::fs::write(&path, body).unwrap();
    path
}
// Byte cap passed as `max_output_bytes` in the fixture SummaryRequest below.
const SUMMARY_MAX_CLAIM_BYTES: usize = 4096;
#[test]
fn decay_run_llm_summary_with_fixture_backend_completes() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let m1 = seed_candidate_memory(&db_path, "alpha source claim", 1);
    let m2 = seed_candidate_memory(&db_path, "beta source claim", 2);
    let ids = format!("{m1},{m2}");

    // Schedule an llm-summary job; a placeholder attestation file is enough at
    // schedule time (the signed one is verified at run time).
    let placeholder_attest = tmp.path().join("sched-attest.json");
    std::fs::write(&placeholder_attest, "{\"purpose\":\"placeholder\"}").unwrap();
    let schedule_args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        ids.as_str(),
        "--summary-method",
        "llm",
        "--operator-attestation",
        placeholder_attest.to_str().unwrap(),
        "--run-immediately",
    ];
    let scheduled = run_in(tmp.path(), &schedule_args);
    assert_exit(&scheduled, 0);
    let job_id = parse_envelope(&scheduled.stdout)["report"]["job_id"]
        .as_str()
        .unwrap()
        .to_string();

    // Sign an attestation bound to this job and lay down a replay fixture
    // matching the request the run is expected to make.
    let model = "claude-sonnet-4-7@1";
    let prompt_digest = "blake3:0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20";
    let summary_text = "LLM-produced summary covering alpha and beta sources.";
    let attest_path = write_signed_attestation(tmp.path(), &job_id, model, prompt_digest);
    let fixture_path = write_summary_fixture(
        tmp.path(),
        SummaryRequest {
            model_name: model.into(),
            prompt_template_blake3: prompt_digest.into(),
            source_claims: vec![
                "alpha source claim".to_string(),
                "beta source claim".to_string(),
            ],
            max_output_bytes: Some(SUMMARY_MAX_CLAIM_BYTES),
            decay_job_id: Some(job_id.clone()),
        },
        SummaryResponse {
            claim: summary_text.into(),
            token_usage: None,
            model_name_echoed: model.into(),
        },
    );

    // Run with the signed attestation plus the fixture-backed summary backend.
    let run_args = [
        "--json",
        "decay",
        "run",
        "--job-id",
        job_id.as_str(),
        "--operator-attestation",
        attest_path.to_str().unwrap(),
        "--summary-backend-fixture",
        fixture_path.to_str().unwrap(),
    ];
    let out = run_in(tmp.path(), &run_args);
    assert_exit(&out, 0);
    let run_envelope = parse_envelope(&out.stdout);
    assert_eq!(run_envelope["command"], "cortex.decay.run");
    assert_eq!(run_envelope["exit_code"], 0);
    assert_eq!(run_envelope["report"]["to_state"], "completed");
    let result_memory_id = run_envelope["report"]["result_memory_id"]
        .as_str()
        .expect("result_memory_id present on completed LLM-summary run");
    assert!(
        result_memory_id.starts_with("mem_"),
        "result memory id should carry `mem_` prefix; got `{result_memory_id}`"
    );
}
#[test]
fn decay_run_llm_summary_with_invalid_fixture_path_refuses() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1).to_string();
    let placeholder_attest = tmp.path().join("sched-attest.json");
    std::fs::write(&placeholder_attest, "{\"purpose\":\"placeholder\"}").unwrap();
    let schedule_args = [
        "--json",
        "decay",
        "schedule",
        "--kind",
        "candidate-compression",
        "--memory-ids",
        memory_id.as_str(),
        "--summary-method",
        "llm",
        "--operator-attestation",
        placeholder_attest.to_str().unwrap(),
        "--run-immediately",
    ];
    let scheduled = run_in(tmp.path(), &schedule_args);
    assert_exit(&scheduled, 0);
    let job_id = parse_envelope(&scheduled.stdout)["report"]["job_id"]
        .as_str()
        .unwrap()
        .to_string();
    // Attestation is valid and signed; only the fixture path is bad.
    let attest_path = write_signed_attestation(
        tmp.path(),
        &job_id,
        "claude-sonnet-4-7@1",
        "blake3:0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20",
    );
    let missing_fixture = tmp.path().join("does-not-exist.json");
    let run_args = [
        "--json",
        "decay",
        "run",
        "--job-id",
        job_id.as_str(),
        "--operator-attestation",
        attest_path.to_str().unwrap(),
        "--summary-backend-fixture",
        missing_fixture.to_str().unwrap(),
    ];
    let out = run_in(tmp.path(), &run_args);
    assert_exit(&out, 7);
    let run_envelope = parse_envelope(&out.stdout);
    assert_eq!(run_envelope["command"], "cortex.decay.run");
    assert_eq!(run_envelope["exit_code"], 7);
    assert_eq!(
        run_envelope["report"]["invariant"],
        "decay.run.summary_backend_fixture_invalid"
    );
}
#[test]
fn decay_json_envelope_carries_canonical_command_and_exit_code() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = init(tmp.path());
    let memory_id = seed_candidate_memory(&db_path, "alpha", 1);
    let envelope = schedule_candidate_compression(tmp.path(), &memory_id.to_string());
    assert_eq!(envelope["command"], "cortex.decay.schedule");
    assert_eq!(envelope["exit_code"], 0);
    assert_eq!(envelope["outcome"], "ok");
    let report = &envelope["report"];
    assert_eq!(report["kind"], "candidate_compression");
    assert_eq!(report["state"], "pending");
    assert!(report["scheduled_for"].is_string());
}