use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;
use std::process::ExitCode;
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use crate::commands::{
check,
start::{StartRenderSection, START_RENDER_SECTION_ORDER},
};
use crate::extensions;
use crate::handoff::{self, BranchMode, GitState};
use crate::memory::{entries as memory_entries, promote as memory_promote};
use crate::output::CommandReport;
use crate::paths::state::StateLayout;
use crate::profile::{self, DEFAULT_PROFILE};
use crate::repo::marker as repo_marker;
use crate::session_boundary::{SessionBoundaryAction, SessionBoundaryRecommendation};
use crate::state::compiled as compiled_state;
use crate::state::escalation as escalation_state;
use crate::state::projection_metadata;
use crate::state::runtime as runtime_state;
use crate::state::session as session_state;
use crate::state::session_gates;
use crate::telemetry::{cost as telemetry_cost, host as host_telemetry};
use tracing::{debug, info_span};
/// Schema version stamped into every `RadarStateReport`; bump when the
/// serialized shape changes.
const RADAR_STATE_SCHEMA_VERSION: u32 = 21;
/// Schema version stamped into every `ContextCheckReport`.
const CONTEXT_CHECK_SCHEMA_VERSION: u32 = 1;
/// Canonical ordering of the prompt surfaces rendered by `start`.
/// NOTE(review): not referenced in this chunk — presumably consumed later in
/// the file; confirm before removing.
const CANONICAL_PROMPT_SURFACE_ORDER: &[StartRenderSection] = &[
    StartRenderSection::EffectivePolicy,
    StartRenderSection::EffectiveMemory,
    StartRenderSection::Backlog,
    StartRenderSection::ExecutionGates,
    StartRenderSection::Handoff,
];
/// Canonical ordering of compiled-state surface keys.
/// NOTE(review): also not referenced in this chunk — verify usage downstream.
const CANONICAL_COMPILED_SURFACE_ORDER: &[&str] = &[
    "effective_memory",
    "workflow_signal",
    "handoff",
    "execution_gates",
    "escalation",
    "recovery",
    "git_state",
    "session_state",
];
/// Event that prompted a context check. Serialized in snake_case; the
/// variants mirror `ContextCheckTrigger::as_str`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) enum ContextCheckTrigger {
    Interval,
    Manual,
    PreCompaction,
    IdleReset,
    Resume,
    SupervisorPoll,
}
impl ContextCheckTrigger {
    /// Stable string form used in human-readable output; must stay in sync
    /// with the `serde(rename_all = "snake_case")` names on the enum.
    fn as_str(self) -> &'static str {
        match self {
            Self::Interval => "interval",
            Self::Manual => "manual",
            Self::PreCompaction => "pre_compaction",
            Self::IdleReset => "idle_reset",
            Self::Resume => "resume",
            Self::SupervisorPoll => "supervisor_poll",
        }
    }
}
/// Action a context check recommends, roughly ordered by increasing
/// severity (see `build_context_check_decision` for the selection rules).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
enum ContextCheckAction {
    None,
    InjectDelta,
    CheckpointNow,
    FlushForCompaction,
    WrapUpRequired,
    Escalate,
}
impl ContextCheckAction {
    /// Stable string form for text output; must stay in sync with the
    /// `serde(rename_all = "snake_case")` names on the enum.
    fn as_str(self) -> &'static str {
        match self {
            Self::None => "none",
            Self::InjectDelta => "inject_delta",
            Self::CheckpointNow => "checkpoint_now",
            Self::FlushForCompaction => "flush_for_compaction",
            Self::WrapUpRequired => "wrap_up_required",
            Self::Escalate => "escalate",
        }
    }
}
/// Urgency band attached to a context-check action.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
enum ContextCheckUrgency {
    Low,
    Moderate,
    High,
    Critical,
}
impl ContextCheckUrgency {
    /// Stable string form for text output; matches the serde snake_case
    /// names.
    fn as_str(self) -> &'static str {
        match self {
            Self::Low => "low",
            Self::Moderate => "moderate",
            Self::High => "high",
            Self::Critical => "critical",
        }
    }
}
/// Aggregate behavioral-drift verdict across all drift signals.
///
/// `Debug` is derived for consistency with the other report enums in this
/// module (`ContextCheckTrigger`, `ContextCheckAction`, `ContextCheckUrgency`
/// all derive it), so drift statuses can be logged and asserted on.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
enum DriftAggregateStatus {
    Aligned,
    NeedsRecalibration,
    NoSignal,
}
impl DriftAggregateStatus {
    /// Stable string form for text output; matches the serde snake_case
    /// names.
    fn as_str(self) -> &'static str {
        match self {
            Self::Aligned => "aligned",
            Self::NeedsRecalibration => "needs_recalibration",
            Self::NoSignal => "no_signal",
        }
    }
}
/// Per-signal behavioral-drift verdict.
///
/// `Debug` is derived for consistency with the other report enums in this
/// module, so individual signal statuses can be logged and asserted on.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
enum DriftSignalStatus {
    Aligned,
    Drift,
    NoSignal,
}
impl DriftSignalStatus {
    /// Stable string form for text output; matches the serde snake_case
    /// names.
    fn as_str(self) -> &'static str {
        match self {
            Self::Aligned => "aligned",
            Self::Drift => "drift",
            Self::NoSignal => "no_signal",
        }
    }
}
/// Full machine-readable payload of `ccd radar-state` (schema
/// `RADAR_STATE_SCHEMA_VERSION`). Field declaration order is the serialized
/// order — do not reorder.
#[derive(Serialize)]
pub struct RadarStateReport {
    // "radar-state", or "checkpoint" when built in lightweight mode.
    command: &'static str,
    // "radar", or "checkpoint" when built in lightweight mode.
    mode: &'static str,
    schema_version: u32,
    ok: bool,
    // Repo root, rendered for display.
    path: String,
    profile: String,
    // Currently mirrors `locality_id` (see `build_radar_snapshot`).
    project_id: String,
    locality_id: String,
    handoff: HandoffState,
    workflow_signal: WorkflowSignalState,
    execution_gates: session_gates::ExecutionGatesView,
    effective_memory: MemoryState,
    repo_native_checks: RepoNativeChecksState,
    // `None` by design on non-git (directory-substrate) workspaces.
    git: Option<GitState>,
    checkout_state: Option<handoff::CheckoutStateView>,
    session_state: RadarSessionStateView,
    recovery: runtime_state::RuntimeRecoveryView,
    escalation: escalation_state::EscalationView,
    context_health: ContextHealth,
    session_boundary: SessionBoundaryRecommendation,
    behavioral_drift: BehavioralDriftState,
    evaluation: RadarEvaluation,
    workflow_hints: Vec<WorkflowHint>,
    candidate_updates: CandidateUpdates,
    // Answer vocabulary offered to the operator for approval steps.
    approval_intents: Vec<&'static str>,
    approval_steps: Vec<ApprovalStep>,
}
/// Payload of `ccd context-check` (schema `CONTEXT_CHECK_SCHEMA_VERSION`).
/// Built from a full radar snapshot plus a trigger-specific decision; field
/// order is the serialized order.
#[derive(Serialize)]
pub struct ContextCheckReport {
    command: &'static str,
    schema_version: u32,
    ok: bool,
    path: String,
    profile: String,
    project_id: String,
    locality_id: String,
    trigger: ContextCheckTrigger,
    action: ContextCheckAction,
    // Machine-readable reason code for the chosen action.
    reason: &'static str,
    recommendation: String,
    urgency: ContextCheckUrgency,
    throttle: ContextCheckThrottle,
    payload: ContextCheckPayload,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    evidence: Vec<String>,
    session_state: RadarSessionStateView,
    execution_gates: session_gates::ExecutionGatesView,
    recovery: runtime_state::RuntimeRecoveryView,
    escalation: escalation_state::EscalationView,
    context_health: ContextHealth,
    session_boundary: SessionBoundaryRecommendation,
    behavioral_drift: BehavioralDriftState,
}
/// Throttle advice attached to a context-check response.
#[derive(Serialize)]
struct ContextCheckThrottle {
    // True when an interval trigger produced no action (caller may back off).
    suppressed: bool,
    next_interval_hint_seconds: u64,
}
/// Digest strings a context check can inject back into the conversation.
#[derive(Serialize)]
struct ContextCheckPayload {
    policy_digest: String,
    focus_digest: String,
    state_delta: String,
    risk_summary: String,
}
/// Extracted view of the workspace-local handoff document.
#[derive(Serialize)]
struct HandoffState {
    path: String,
    title: String,
    immediate_actions: Vec<String>,
    current_system_state: Vec<String>,
    definition_of_done: Vec<String>,
}
/// Current workflow-signal surface for the locality.
#[derive(Serialize)]
struct WorkflowSignalState {
    path: String,
    status: &'static str,
    items: Vec<String>,
}
/// Session-state summary embedded in radar and context-check reports.
#[derive(Serialize)]
struct RadarSessionStateView {
    path: String,
    status: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    session_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    mode: Option<session_state::SessionMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    start_count: Option<u32>,
    // Flattened so lifecycle fields appear inline in the serialized view.
    #[serde(flatten)]
    lifecycle: session_state::SessionLifecycleProjection,
}
/// Effective memory surface: the scope file paths that feed it plus the
/// merged content.
#[derive(Serialize)]
struct MemoryState {
    status: &'static str,
    profile_path: String,
    project_path: String,
    locality_path: String,
    pod_path: String,
    work_stream_path: String,
    branch_path: String,
    workspace_path: String,
    clone_path: String,
    content: String,
    structured: memory_entries::StructuredMemoryView,
    // Internal bookkeeping only; intentionally kept out of the payload.
    #[serde(skip_serializing)]
    contributing_paths: Vec<String>,
}
/// Results of running the repo's own check commands (empty in lightweight
/// mode — see `RepoNativeChecksState::default_empty`).
#[derive(Serialize)]
struct RepoNativeChecksState {
    surface: check::CheckSurfaceView,
    failures: usize,
    checks: Vec<RepoNativeCheckResult>,
}
/// Outcome of one repo-native check invocation.
#[derive(Serialize)]
struct RepoNativeCheckResult {
    id: String,
    path: String,
    status: &'static str,
    severity: &'static str,
    message: String,
    duration_ms: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    exit_code: Option<i32>,
}
/// Estimated conversation-context health plus the inputs that produced it.
#[derive(Serialize)]
struct ContextHealth {
    source: &'static str,
    context_used_pct: Option<u8>,
    degradation_risk_pct: Option<u8>,
    // Health band; compared elsewhere against "moderate"/"risky".
    band: &'static str,
    confidence: &'static str,
    signals: ContextSignals,
    cost: telemetry_cost::TelemetryCostView,
    // e.g. "wrap_up_soon" / "wrap_up_and_clear" (see context-check decision).
    recommendation: &'static str,
}
/// Raw signals feeding the context-health estimate; all optional because
/// availability depends on the host.
#[derive(Serialize)]
struct ContextSignals {
    compacted: Option<bool>,
    session_minutes: Option<u64>,
    ccd_start_cycles_in_conversation: Option<u32>,
    host_total_tokens: Option<u64>,
    host_context_window_tokens: Option<u64>,
    tool_output_kb: Option<u64>,
    operator_symptoms: Vec<String>,
}
/// Aggregate behavioral-drift report plus the individual signals behind it.
#[derive(Serialize)]
struct BehavioralDriftState {
    status: DriftAggregateStatus,
    summary: String,
    evidence: Vec<String>,
    recommended_corrections: Vec<String>,
    signals: Vec<BehavioralDriftSignal>,
}
/// One drift signal (built-in or extension-provided).
#[derive(Serialize)]
struct BehavioralDriftSignal {
    id: &'static str,
    status: DriftSignalStatus,
    summary: String,
    evidence: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    recommended_correction: Option<String>,
}
/// Projection observations plus any load warnings.
/// NOTE(review): not referenced in this chunk — presumably used later in the
/// file; confirm before removing.
struct ProjectionObservationLoad {
    observations: Vec<projection_metadata::ProjectionObservation>,
    warnings: Vec<String>,
}
/// Borrowed inputs for `build_behavioral_drift` (grouped to keep the call
/// site readable).
struct BehavioralDriftInputs<'a> {
    layout: &'a StateLayout,
    locality_id: &'a str,
    git: Option<&'a GitState>,
    handoff: &'a HandoffState,
    // Raw handoff document text (the parsed view is in `handoff`).
    handoff_contents: &'a str,
    tracked_session: Option<&'a session_state::SessionStateFile>,
    extension_signals: Vec<extensions::RadarBehavioralDriftSignal>,
}
/// Borrowed inputs for `build_evaluation` (grouped to keep the call site
/// readable).
struct EvaluationInputs<'a> {
    git: Option<&'a GitState>,
    candidates: &'a CandidateUpdates,
    execution_gates: &'a session_gates::ExecutionGatesView,
    workflow_guidance: Option<&'a crate::extensions::RadarWorkflowGuidance>,
    effective_memory: &'a MemoryState,
    repo_native_checks: &'a RepoNativeChecksState,
    behavioral_drift: &'a BehavioralDriftState,
    escalation_view: &'a escalation_state::EscalationView,
}
/// Parsed handoff view paired with the raw document text it came from.
struct LoadedHandoffState {
    handoff: HandoffState,
    contents: String,
}
/// Everything `build_radar_snapshot` produces: the report plus digests used
/// by the context-check flow.
struct RadarSnapshot {
    report: RadarStateReport,
    policy_digest: String,
    projection_digests: compiled_state::ProjectionDigests,
}
/// Per-area evaluation verdicts (empty buckets in lightweight mode).
#[derive(Serialize)]
struct RadarEvaluation {
    next_step: EvaluationBucket,
    backlog: EvaluationBucket,
    memory: EvaluationBucket,
    wrap_up: EvaluationBucket,
}
/// One evaluation verdict: status string, summary line, and evidence lines.
#[derive(Serialize)]
struct EvaluationBucket {
    // e.g. "no_change" (default) or "review_required" (checked elsewhere).
    status: &'static str,
    summary: String,
    evidence: Vec<String>,
}
/// A suggested workflow adjustment with the reasons it was raised.
#[derive(Serialize)]
struct WorkflowHint {
    id: &'static str,
    summary: String,
    reasons: Vec<String>,
}
/// Proposed updates grouped by destination surface.
#[derive(Serialize)]
struct CandidateUpdates {
    handoff: Vec<CandidateUpdate>,
    backlog: Vec<CandidateUpdate>,
    memory: Vec<CandidateUpdate>,
}
/// One proposed update; optional fields depend on the candidate kind.
#[derive(Serialize)]
struct CandidateUpdate {
    kind: &'static str,
    summary: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    section: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    replacement_lines: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    entry_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    source_scope: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    suggested_destination: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    action: Option<CandidateAction>,
}
/// Command-line action that would realize a candidate update.
#[derive(Serialize)]
struct CandidateAction {
    kind: &'static str,
    argv: Vec<String>,
    note: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    apply: Option<CandidateActionApply>,
}
/// The concrete "apply" invocation, with any inputs it still requires.
#[derive(Serialize)]
struct CandidateActionApply {
    argv: Vec<String>,
    requirements: Vec<CandidateActionRequirement>,
    note: &'static str,
}
/// One input an apply invocation needs from the operator.
#[derive(Serialize)]
struct CandidateActionRequirement {
    id: &'static str,
    flag: &'static str,
    value_type: &'static str,
    required: bool,
    allowed_values: Vec<&'static str>,
    note: &'static str,
}
/// A yes/no question put to the operator, with the recommended answer and
/// supporting evidence. Answers come from `approval_intents`.
#[derive(Serialize)]
struct ApprovalStep {
    id: &'static str,
    question: &'static str,
    recommended_answer: &'static str,
    recommendation: String,
    evidence: Vec<String>,
}
impl RadarEvaluation {
fn default_empty() -> Self {
let empty_bucket = || EvaluationBucket {
status: "no_change",
summary: String::new(),
evidence: Vec::new(),
};
Self {
next_step: empty_bucket(),
backlog: empty_bucket(),
memory: empty_bucket(),
wrap_up: empty_bucket(),
}
}
}
impl CandidateUpdates {
fn default_empty() -> Self {
Self {
handoff: Vec::new(),
backlog: Vec::new(),
memory: Vec::new(),
}
}
}
impl RepoNativeChecksState {
    /// Placeholder for lightweight mode: the check surface is marked
    /// `skipped` and no checks are executed.
    fn default_empty(repo_root: &Path) -> Self {
        let surface = check::CheckSurfaceView {
            path: repo_root.join(check::COMMANDS_DIR).display().to_string(),
            status: "skipped",
            prefix: check::CHECK_PREFIX,
            entries: Vec::new(),
            note: None,
        };
        Self {
            surface,
            failures: 0,
            checks: Vec::new(),
        }
    }
}
impl CommandReport for RadarStateReport {
    /// Radar reporting is informational; it never fails the process.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }
    /// Renders the human-readable report. Sections appear in a fixed order
    /// and empty sections are omitted entirely.
    fn render_text(&self) {
        println!("Handoff: {}", self.handoff.title);
        if !self.handoff.immediate_actions.is_empty() {
            println!("Immediate actions:");
            // 1-based numbering for the operator.
            for (index, step) in self.handoff.immediate_actions.iter().enumerate() {
                println!("{}. {step}", index + 1);
            }
        }
        if let Some(git) = &self.git {
            let upstream = git.upstream.as_deref().unwrap_or("no upstream");
            println!(
                "Git: {} at {} (tracking {}; ahead {}, behind {}; clean: {})",
                git.branch, git.head, upstream, git.ahead, git.behind, git.clean
            );
        } else {
            println!("Git: unavailable by design for directory-substrate workspaces");
        }
        // Checkout state is only surfaced when it carries an advisory.
        if let Some(checkout_state) = self.checkout_state.as_ref().filter(|state| state.advisory) {
            println!("Checkout state: {}", checkout_state.summary);
        }
        // The default "general" mode is not worth printing.
        if let Some(mode) = self
            .session_state
            .mode
            .filter(|m| *m != session_state::SessionMode::General)
        {
            println!(
                "Session: {} ({}, start_count={})",
                mode.as_str(),
                self.session_state.status,
                self.session_state.start_count.unwrap_or(0)
            );
        }
        if !self.workflow_signal.items.is_empty() {
            println!("Workflow signal:");
            for item in &self.workflow_signal.items {
                println!("- {item}");
            }
        }
        if self.execution_gates.total_count > 0 {
            println!(
                "Execution gates: {} total, {} unfinished",
                self.execution_gates.total_count, self.execution_gates.unfinished_count
            );
            if let Some(anchor) = &self.execution_gates.attention_anchor {
                println!(
                    "- [{} #{}/{}] {}",
                    anchor.status.as_str(),
                    anchor.index,
                    self.execution_gates.total_count,
                    anchor.text
                );
            }
        }
        // Print the section if there are results OR a surface-level note.
        if !self.repo_native_checks.checks.is_empty()
            || self.repo_native_checks.surface.note.is_some()
        {
            println!("Repo-native checks:");
            if let Some(note) = &self.repo_native_checks.surface.note {
                println!("- {note}");
            }
            for check in &self.repo_native_checks.checks {
                println!("- {} [{}]: {}", check.id, check.status, check.message);
            }
        }
        print_context_health(&self.context_health);
        println!(
            "Session boundary: {} ({})",
            self.session_boundary.action.as_str(),
            self.session_boundary.summary
        );
        for evidence in &self.session_boundary.evidence {
            println!("- {evidence}");
        }
        print_behavioral_drift(&self.behavioral_drift);
        println!("Evaluation:");
        print_bucket("Next step", &self.evaluation.next_step);
        print_bucket("Backlog", &self.evaluation.backlog);
        print_bucket("Memory", &self.evaluation.memory);
        print_bucket("Wrap up", &self.evaluation.wrap_up);
        if !self.workflow_hints.is_empty() {
            println!("Workflow hints:");
            for hint in &self.workflow_hints {
                println!("- {}: {}", hint.id, hint.summary);
                for reason in &hint.reasons {
                    println!(" - {reason}");
                }
            }
        }
        if !self.candidate_updates.handoff.is_empty()
            || !self.candidate_updates.backlog.is_empty()
            || !self.candidate_updates.memory.is_empty()
        {
            println!("Candidate updates:");
            print_candidates("Handoff", &self.candidate_updates.handoff);
            print_candidates("Backlog", &self.candidate_updates.backlog);
            print_candidates("Memory", &self.candidate_updates.memory);
        }
    }
}
impl CommandReport for ContextCheckReport {
    /// Context checks are informational; they never fail the process.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }
    /// Renders a compact summary: decision line, risk summary, then the
    /// digests (only when deltas are being injected) and throttle hint.
    fn render_text(&self) {
        println!(
            "Context check: {} ({}, trigger {}, reason {})",
            self.action.as_str(),
            self.urgency.as_str(),
            self.trigger.as_str(),
            self.reason
        );
        println!("{}", self.payload.risk_summary);
        // Digests are only meaningful when the caller should inject them.
        if self.action == ContextCheckAction::InjectDelta {
            println!("Policy digest: {}", self.payload.policy_digest);
            println!("Focus digest: {}", self.payload.focus_digest);
            println!("State delta: {}", self.payload.state_delta);
        }
        if self.throttle.suppressed {
            println!(
                "Throttle: suppressed; next interval hint {}s",
                self.throttle.next_interval_hint_seconds
            );
        }
    }
}
/// Builds a radar-state report for `repo_root`.
///
/// `lightweight` skips the expensive surfaces (checks, candidates,
/// evaluation); see `build_radar_snapshot`.
///
/// # Errors
/// Propagates any failure from snapshot construction (missing profile,
/// unlinked repo, state-load errors).
pub fn run(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    lightweight: bool,
) -> Result<RadarStateReport> {
    let _span = info_span!("radar_state").entered();
    let snapshot = build_radar_snapshot(repo_root, explicit_profile, lightweight)?;
    Ok(snapshot.report)
}
/// Like [`run`], but also returns the projection digests computed alongside
/// the report (used by callers that need change detection).
///
/// # Errors
/// Propagates any failure from snapshot construction.
pub fn run_with_digests(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    lightweight: bool,
) -> Result<(RadarStateReport, compiled_state::ProjectionDigests)> {
    let _span = info_span!("radar_state").entered();
    build_radar_snapshot(repo_root, explicit_profile, lightweight)
        .map(|snapshot| (snapshot.report, snapshot.projection_digests))
}
/// Builds the full radar snapshot: the report plus the policy digest and
/// projection digests consumed by the context-check flow.
///
/// `lightweight` leaves the expensive surfaces empty (repo-native checks,
/// candidate updates, evaluation, workflow hints, approval steps) and
/// relabels the report command/mode as `checkpoint`.
///
/// # Errors
/// Fails when the profile root is missing, the repo marker is absent, or any
/// underlying state load fails.
fn build_radar_snapshot(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    lightweight: bool,
) -> Result<RadarSnapshot> {
    // --- Resolve profile + layout; bail early on an un-bootstrapped profile.
    let profile = profile::resolve(explicit_profile)?;
    let profile_name = profile.to_string();
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    if !layout.profile_root().is_dir() {
        bail!(
            "profile `{}` does not exist at {}; bootstrap it with `ccd attach` before using `ccd radar-state`",
            layout.profile(),
            layout.profile_root().display()
        );
    }
    // The marker links this clone to a locality; without it nothing else
    // can be resolved.
    let marker = repo_marker::load(repo_root)?.ok_or_else(|| {
        anyhow::anyhow!(
            "repo is not linked: {} is missing; bootstrap this clone with `ccd attach` or `ccd link` first",
            repo_root.join(repo_marker::MARKER_FILE).display()
        )
    })?;
    // Git state only exists on git substrates; directory substrates report
    // `None` by design.
    let git = if layout.resolved_substrate().is_git() {
        Some(handoff::read_git_state(
            repo_root,
            BranchMode::AllowDetachedHead,
        )?)
    } else {
        None
    };
    let checkout_state = git
        .as_ref()
        .map(|git| handoff::checkout_state_view(repo_root, git));
    // --- Load runtime + session state (shared by both modes).
    let runtime = runtime_state::load_runtime_state(repo_root, &layout, &marker.locality_id)?;
    debug!("runtime state loaded");
    let tracked_session = session_state::load_for_layout(&layout)?;
    let tracked_activity = session_state::load_activity_for_layout(&layout)?;
    let active_session_id = session_state::load_session_id(&layout)?;
    let session_state_view =
        build_session_state_view(&layout, tracked_session.as_ref(), tracked_activity.as_ref());
    let loaded_handoff = read_handoff(
        repo_root,
        &layout,
        &runtime,
        git.as_ref(),
        active_session_id.as_deref(),
        &session_state_view,
    )?;
    // Extensions may contribute workflow guidance and extra drift signals.
    let workflow_guidance =
        extensions::radar_workflow_guidance(&layout, repo_root, &marker.locality_id, true)?;
    let extension_drift_signals = extensions::radar_behavioral_drift_signals(
        &layout,
        repo_root,
        &marker.locality_id,
        true,
        git.as_ref(),
        &loaded_handoff.handoff.title,
        &loaded_handoff.handoff.immediate_actions,
        active_session_id.as_deref(),
    )?;
    let workflow_signal = read_workflow_signal(&layout, &marker.locality_id)?;
    let execution_gates = read_execution_gates(&runtime);
    let effective_memory = read_effective_memory(&runtime);
    let escalation_entries = escalation_state::load_for_layout(&layout)?;
    let escalation_view = escalation_state::build_view(&layout, &escalation_entries);
    let recovery = runtime_state::recovery_view(&runtime.recovery);
    let context_health = build_context_health(
        repo_root,
        &layout,
        &marker.locality_id,
        git.as_ref(),
        tracked_session.as_ref(),
        active_session_id.as_deref(),
    )?;
    let behavioral_drift = build_behavioral_drift(
        repo_root,
        BehavioralDriftInputs {
            layout: &layout,
            locality_id: &marker.locality_id,
            git: git.as_ref(),
            handoff: &loaded_handoff.handoff,
            handoff_contents: &loaded_handoff.contents,
            tracked_session: tracked_session.as_ref(),
            extension_signals: extension_drift_signals,
        },
    );
    debug!("behavioral drift computed");
    // --- Expensive surfaces: skipped entirely in lightweight mode.
    let (repo_native_checks, candidate_updates, evaluation, workflow_hints, approval_steps) =
        if lightweight {
            let repo_native_checks = RepoNativeChecksState::default_empty(repo_root);
            let candidate_updates = CandidateUpdates::default_empty();
            let evaluation = RadarEvaluation::default_empty();
            let workflow_hints = Vec::new();
            let approval_steps = Vec::new();
            (
                repo_native_checks,
                candidate_updates,
                evaluation,
                workflow_hints,
                approval_steps,
            )
        } else {
            let repo_native_checks = read_repo_native_checks(repo_root);
            let branch_memory_available =
                runtime_state::resolve_active_branch_memory_path(
                    repo_root,
                    &layout,
                    &marker.locality_id,
                )?
                .is_some();
            let candidate_updates = build_candidate_updates(
                repo_root,
                &profile_name,
                &loaded_handoff.handoff,
                &effective_memory,
                tracked_session.as_ref(),
                branch_memory_available,
            );
            let evaluation = build_evaluation(EvaluationInputs {
                git: git.as_ref(),
                candidates: &candidate_updates,
                execution_gates: &execution_gates,
                workflow_guidance: workflow_guidance.as_ref(),
                effective_memory: &effective_memory,
                repo_native_checks: &repo_native_checks,
                behavioral_drift: &behavioral_drift,
                escalation_view: &escalation_view,
            });
            let workflow_hints = build_workflow_hints(
                &loaded_handoff.handoff,
                &session_state_view,
                &execution_gates,
                &context_health,
                &escalation_view,
            );
            let approval_steps = build_approval_steps(
                &evaluation,
                &candidate_updates,
                workflow_guidance.as_ref(),
                &behavioral_drift,
            );
            (
                repo_native_checks,
                candidate_updates,
                evaluation,
                workflow_hints,
                approval_steps,
            )
        };
    let session_boundary = build_session_boundary(
        checkout_state.as_ref(),
        &recovery,
        &context_health,
        &behavioral_drift,
        &evaluation,
        &escalation_view,
    );
    debug!("radar evaluation complete");
    // --- Digests: base projection digests extended with the per-surface
    // JSON snapshots so any view change is detectable.
    let projection_digests = {
        let base = compiled_state::preview_for_target(
            &runtime,
            compiled_state::ProjectionTarget::Session,
        )
        .map(|store| compiled_state::compute_projection_digests(&store))
        .unwrap_or_default();
        // Serialization failures degrade to empty strings rather than
        // aborting the whole report.
        let eg_json = serde_json::to_string(&execution_gates).unwrap_or_default();
        let esc_json = serde_json::to_string(&escalation_view).unwrap_or_default();
        let rec_json = serde_json::to_string(&recovery).unwrap_or_default();
        let git_json = serde_json::to_string(&git).unwrap_or_default();
        let sess_json = serde_json::to_string(&session_state_view).unwrap_or_default();
        compiled_state::compute_extended_digests(
            &base, &eg_json, &esc_json, &rec_json, &git_json, &sess_json,
        )
    };
    let command_name = if lightweight {
        "checkpoint"
    } else {
        "radar-state"
    };
    let mode_name = if lightweight {
        "checkpoint"
    } else {
        "radar"
    };
    Ok(RadarSnapshot {
        report: RadarStateReport {
            command: command_name,
            mode: mode_name,
            schema_version: RADAR_STATE_SCHEMA_VERSION,
            ok: true,
            path: repo_root.display().to_string(),
            profile: profile_name,
            // project_id currently mirrors locality_id.
            project_id: marker.locality_id.clone(),
            locality_id: marker.locality_id,
            handoff: loaded_handoff.handoff,
            workflow_signal,
            execution_gates,
            effective_memory,
            repo_native_checks,
            git,
            checkout_state,
            session_state: session_state_view,
            recovery,
            escalation: escalation_view,
            context_health,
            session_boundary,
            behavioral_drift,
            evaluation,
            workflow_hints,
            candidate_updates,
            approval_intents: vec![
                "approve",
                "approve_and_reuse_when_supported",
                "reject_and_explain",
            ],
            approval_steps,
        },
        policy_digest: build_policy_digest(&runtime),
        projection_digests,
    })
}
/// Runs a context check: builds a full (non-lightweight) radar snapshot,
/// derives the digest payload from it, and applies the decision ladder for
/// the given `trigger`.
///
/// # Errors
/// Propagates any failure from snapshot construction.
pub fn run_context_check(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    trigger: ContextCheckTrigger,
) -> Result<ContextCheckReport> {
    let _span = info_span!("context_check").entered();
    let snapshot = build_radar_snapshot(repo_root, explicit_profile, false)?;
    let radar_report = snapshot.report;
    let payload = ContextCheckPayload {
        policy_digest: snapshot.policy_digest,
        focus_digest: build_focus_digest(&radar_report),
        state_delta: build_state_delta(&radar_report),
        risk_summary: build_risk_summary(trigger, &radar_report),
    };
    let decision = build_context_check_decision(trigger, &radar_report, &payload);
    // The report re-uses the radar snapshot's sub-views by move.
    Ok(ContextCheckReport {
        command: "context-check",
        schema_version: CONTEXT_CHECK_SCHEMA_VERSION,
        ok: true,
        path: radar_report.path,
        profile: radar_report.profile,
        project_id: radar_report.project_id,
        locality_id: radar_report.locality_id,
        trigger,
        action: decision.action,
        reason: decision.reason,
        recommendation: decision.recommendation,
        urgency: decision.urgency,
        throttle: ContextCheckThrottle {
            suppressed: decision.suppressed,
            next_interval_hint_seconds: decision.next_interval_hint_seconds,
        },
        payload,
        evidence: decision.evidence,
        session_state: radar_report.session_state,
        execution_gates: radar_report.execution_gates,
        recovery: radar_report.recovery,
        escalation: radar_report.escalation,
        context_health: radar_report.context_health,
        session_boundary: radar_report.session_boundary,
        behavioral_drift: radar_report.behavioral_drift,
    })
}
/// Internal result of `build_context_check_decision`, flattened into the
/// `ContextCheckReport` by the caller.
struct ContextCheckDecision {
    action: ContextCheckAction,
    // Machine-readable reason code for the chosen action.
    reason: &'static str,
    recommendation: String,
    urgency: ContextCheckUrgency,
    // True only for interval triggers that produced no action.
    suppressed: bool,
    // 0 when an action was chosen; otherwise a polling back-off hint.
    next_interval_hint_seconds: u64,
    evidence: Vec<String>,
}
fn build_context_check_decision(
trigger: ContextCheckTrigger,
report: &RadarStateReport,
payload: &ContextCheckPayload,
) -> ContextCheckDecision {
if let Some(checkout_state) = report
.checkout_state
.as_ref()
.filter(|state| state.advisory)
{
return ContextCheckDecision {
action: ContextCheckAction::Escalate,
reason: "checkout_advisory",
recommendation: "seek_operator_attention".to_owned(),
urgency: ContextCheckUrgency::High,
suppressed: false,
next_interval_hint_seconds: 0,
evidence: vec![checkout_state.summary.clone()],
};
}
if report.escalation.blocking_count > 0 {
return ContextCheckDecision {
action: ContextCheckAction::Escalate,
reason: "blocking_escalation",
recommendation: "seek_operator_attention".to_owned(),
urgency: ContextCheckUrgency::Critical,
suppressed: false,
next_interval_hint_seconds: 0,
evidence: report
.escalation
.entries
.iter()
.filter(|entry| matches!(entry.kind, escalation_state::EscalationKind::Blocking))
.map(|entry| format!("{}: {}", entry.id, entry.reason))
.collect(),
};
}
if report.behavioral_drift.status == DriftAggregateStatus::NeedsRecalibration {
return ContextCheckDecision {
action: ContextCheckAction::Escalate,
reason: "behavioral_drift",
recommendation: "recalibrate_session".to_owned(),
urgency: ContextCheckUrgency::High,
suppressed: false,
next_interval_hint_seconds: 0,
evidence: report.behavioral_drift.evidence.clone(),
};
}
if matches!(
trigger,
ContextCheckTrigger::PreCompaction | ContextCheckTrigger::IdleReset
) {
let reason = match trigger {
ContextCheckTrigger::PreCompaction => "pre_compaction",
ContextCheckTrigger::IdleReset => "idle_reset",
_ => unreachable!("guarded above"),
};
let mut evidence = vec![payload.risk_summary.clone()];
evidence.extend(report.session_boundary.evidence.iter().take(2).cloned());
return ContextCheckDecision {
action: ContextCheckAction::FlushForCompaction,
reason,
recommendation: "capture_before_compaction".to_owned(),
urgency: ContextCheckUrgency::High,
suppressed: false,
next_interval_hint_seconds: 0,
evidence,
};
}
if matches!(
report.context_health.recommendation,
"wrap_up_soon" | "wrap_up_and_clear"
) {
return ContextCheckDecision {
action: ContextCheckAction::WrapUpRequired,
reason: "wrap_up_window",
recommendation: report.context_health.recommendation.to_owned(),
urgency: match report.context_health.recommendation {
"wrap_up_and_clear" => ContextCheckUrgency::Critical,
_ => ContextCheckUrgency::High,
},
suppressed: false,
next_interval_hint_seconds: 0,
evidence: if report.session_boundary.evidence.is_empty() {
vec![payload.risk_summary.clone()]
} else {
report.session_boundary.evidence.clone()
},
};
}
if report.session_boundary.action == SessionBoundaryAction::Refresh
|| report.evaluation.next_step.status == "review_required"
{
return ContextCheckDecision {
action: ContextCheckAction::CheckpointNow,
reason: "handoff_refresh_needed",
recommendation: "checkpoint_now".to_owned(),
urgency: ContextCheckUrgency::Moderate,
suppressed: false,
next_interval_hint_seconds: 0,
evidence: if report.evaluation.next_step.evidence.is_empty() {
vec![payload.state_delta.clone()]
} else {
report.evaluation.next_step.evidence.clone()
},
};
}
let manualish_trigger = matches!(
trigger,
ContextCheckTrigger::Manual
| ContextCheckTrigger::Resume
| ContextCheckTrigger::SupervisorPoll
);
let moderate_pressure = matches!(report.context_health.band, "moderate" | "risky")
|| report.execution_gates.unfinished_count > 0
|| report.recovery.status == "loaded";
if manualish_trigger || moderate_pressure {
return ContextCheckDecision {
action: ContextCheckAction::InjectDelta,
reason: match trigger {
ContextCheckTrigger::Resume => "resume_refresh",
ContextCheckTrigger::SupervisorPoll => "supervisor_poll",
ContextCheckTrigger::Manual => "manual_refresh",
_ => "band_transition",
},
recommendation: match trigger {
ContextCheckTrigger::Resume => "resume_with_digest".to_owned(),
_ => report.context_health.recommendation.to_owned(),
},
urgency: if matches!(report.context_health.band, "risky") {
ContextCheckUrgency::High
} else {
ContextCheckUrgency::Moderate
},
suppressed: false,
next_interval_hint_seconds: if matches!(trigger, ContextCheckTrigger::Resume) {
300
} else {
600
},
evidence: vec![payload.state_delta.clone(), payload.risk_summary.clone()],
};
}
ContextCheckDecision {
action: ContextCheckAction::None,
reason: if trigger == ContextCheckTrigger::Interval {
"interval_suppressed"
} else {
"no_action"
},
recommendation: "continue".to_owned(),
urgency: ContextCheckUrgency::Low,
suppressed: trigger == ContextCheckTrigger::Interval,
next_interval_hint_seconds: 900,
evidence: vec![payload.risk_summary.clone()],
}
}
/// Summarizes the compiled operational guardrails into a short digest
/// (first two entries plus a "+N more" marker).
fn build_policy_digest(runtime: &runtime_state::LoadedRuntimeState) -> String {
    let guardrail_texts: Vec<String> = runtime
        .state
        .handoff
        .operational_guardrails
        .iter()
        .map(|item| item.text.clone())
        .collect();
    if guardrail_texts.is_empty() {
        "No compiled operational guardrails are currently recorded.".to_owned()
    } else {
        summarize_lines(&guardrail_texts, 2)
    }
}
/// Builds a one-line focus digest: handoff title, session mode (when not
/// `general`), then the execution-gate attention anchor — or, absent one,
/// the first immediate action.
fn build_focus_digest(report: &RadarStateReport) -> String {
    let mut segments = Vec::new();
    segments.push(report.handoff.title.clone());
    let special_mode = report
        .session_state
        .mode
        .filter(|mode| *mode != session_state::SessionMode::General);
    if let Some(mode) = special_mode {
        segments.push(format!("mode `{}`", mode.as_str()));
    }
    match &report.execution_gates.attention_anchor {
        Some(anchor) => segments.push(format!(
            "gate [{} #{}/{}] {}",
            anchor.status.as_str(),
            anchor.index,
            report.execution_gates.total_count,
            anchor.text
        )),
        None => {
            if let Some(next_action) = report.handoff.immediate_actions.first() {
                segments.push(format!("next `{next_action}`"));
            }
        }
    }
    segments.join("; ")
}
/// Builds a one-line summary of pending candidate updates and the
/// execution-gate anchor; falls back to the next-step summary when nothing
/// is pending.
fn build_state_delta(report: &RadarStateReport) -> String {
    let candidates = &report.candidate_updates;
    let counted = [
        (candidates.handoff.len(), "handoff update candidate(s)"),
        (candidates.backlog.len(), "backlog update candidate(s)"),
        (candidates.memory.len(), "memory promotion candidate(s)"),
    ];
    let mut segments: Vec<String> = counted
        .iter()
        .filter(|(count, _)| *count > 0)
        .map(|(count, label)| format!("{count} {label}"))
        .collect();
    if let Some(anchor) = &report.execution_gates.attention_anchor {
        segments.push(format!(
            "execution gate [{} #{}/{}] {}",
            anchor.status.as_str(),
            anchor.index,
            report.execution_gates.total_count,
            anchor.text
        ));
    }
    if segments.is_empty() {
        segments.push(report.evaluation.next_step.summary.clone());
    }
    segments.join("; ")
}
/// Builds a one-line risk summary: trigger + context band, then any drift /
/// blocking-escalation findings, then the session-boundary action.
fn build_risk_summary(trigger: ContextCheckTrigger, report: &RadarStateReport) -> String {
    let mut segments = Vec::new();
    segments.push(format!(
        "trigger `{}` with context band `{}` (`{}`)",
        trigger.as_str(),
        report.context_health.band,
        report.context_health.recommendation
    ));
    if report.behavioral_drift.status == DriftAggregateStatus::NeedsRecalibration {
        segments.push("behavioral drift needs recalibration".to_owned());
    }
    let blocking = report.escalation.blocking_count;
    if blocking > 0 {
        segments.push(format!("{blocking} blocking escalation(s) active"));
    }
    segments.push(format!(
        "session boundary is `{}`",
        report.session_boundary.action.as_str()
    ));
    segments.join("; ")
}
/// Joins up to `limit` lines with `"; "`, appending a `+N more` marker when
/// lines were elided.
fn summarize_lines(lines: &[String], limit: usize) -> String {
    let mut rendered: Vec<String> = lines.iter().take(limit).cloned().collect();
    if let Some(hidden) = lines.len().checked_sub(limit).filter(|n| *n > 0) {
        rendered.push(format!("+{hidden} more"));
    }
    rendered.join("; ")
}
fn print_bucket(label: &str, bucket: &EvaluationBucket) {
println!("- {label} [{}]: {}", bucket.status, bucket.summary);
for evidence in &bucket.evidence {
println!(" - {evidence}");
}
}
/// Prints the context-health line (with the risk percentage when known)
/// followed by an optional cost line and any cost alerts.
fn print_context_health(context_health: &ContextHealth) {
    // Two variants of the same line: with or without the risk percentage.
    match context_health.degradation_risk_pct {
        Some(risk) => println!(
            "Context health: {} (risk {}%, confidence {}, source {}, recommendation {})",
            context_health.band,
            risk,
            context_health.confidence,
            context_health.source,
            context_health.recommendation
        ),
        None => println!(
            "Context health: {} (confidence {}, source {}, recommendation {})",
            context_health.band,
            context_health.confidence,
            context_health.source,
            context_health.recommendation
        ),
    }
    match context_health.cost.session_estimate_usd {
        Some(session_cost) => {
            let mut parts = vec![format!(
                "session ${}",
                crate::telemetry::cost::format_usd(session_cost)
            )];
            if let Some(focus_item_cost) = context_health.cost.focus_item_estimate_usd {
                parts.push(format!(
                    "focus item ${}",
                    crate::telemetry::cost::format_usd(focus_item_cost)
                ));
            }
            if let Some(model) = &context_health.cost.model {
                parts.push(format!("model {model}"));
            }
            println!("Cost: {}", parts.join(", "));
            for alert in &context_health.cost.alerts {
                println!("- cost alert: {alert}");
            }
        }
        None => {
            // Without an estimate, only surface the reason (if one exists).
            if let Some(reason) = &context_health.cost.reason {
                println!("Cost: unavailable ({reason})");
            }
        }
    }
}
/// Prints the aggregate behavioral-drift verdict, then one bullet per
/// contributing signal with its id, status, and summary.
fn print_behavioral_drift(behavioral_drift: &BehavioralDriftState) {
    println!(
        "Behavioral drift: {} ({})",
        behavioral_drift.status.as_str(),
        behavioral_drift.summary
    );
    behavioral_drift.signals.iter().for_each(|signal| {
        println!(
            "- {} [{}]: {}",
            signal.id,
            signal.status.as_str(),
            signal.summary
        );
    });
}
/// Prints each candidate update as a labelled bullet; emits nothing when
/// the slice is empty.
fn print_candidates(label: &str, candidates: &[CandidateUpdate]) {
    candidates
        .iter()
        .for_each(|candidate| println!("- {label}: {}", candidate.summary));
}
/// Loads the workspace-local handoff surface and distills it into the
/// radar's `HandoffState` (title, immediate actions, current system state,
/// definition of done) plus the raw handoff text.
///
/// # Errors
/// Fails when the handoff surface is uninitialized (the message carries the
/// exact remediation and rerun commands) or when the title cannot be
/// extracted from the contents.
fn read_handoff(
    repo_root: &Path,
    layout: &StateLayout,
    runtime: &runtime_state::LoadedRuntimeState,
    git: Option<&GitState>,
    active_session_id: Option<&str>,
    session_view: &RadarSessionStateView,
) -> Result<LoadedHandoffState> {
    let path = layout.state_db_path();
    // Without a handoff there is nothing to evaluate; bail with the commands
    // the operator should run to create it and retry.
    if runtime.sources.handoff.is_missing() {
        let remediation = handoff_write_command(repo_root, layout);
        let rerun = radar_state_command(repo_root, layout);
        bail!(
            "ccd radar-state cannot evaluate continuity or wrap-up readiness because the canonical workspace-local handoff is uninitialized at {}. Create it with `{remediation}` and rerun `{rerun}`.",
            path.display(),
        );
    }
    let mut current_system_state = handoff::current_system_state_lines(git, active_session_id);
    // Surface a non-default session mode only while the session is active.
    if let Some(mode) = session_view.mode {
        if session_view.status == "active" && mode != session_state::SessionMode::General {
            current_system_state.push(format!("Session mode: `{}`", mode.as_str()));
        }
    }
    let contents = runtime.sources.handoff.content.clone();
    Ok(LoadedHandoffState {
        handoff: HandoffState {
            path: path.display().to_string(),
            title: handoff::extract_title(&contents)?,
            immediate_actions: handoff::extract_numbered_section(&contents, "Immediate Actions"),
            current_system_state,
            definition_of_done: handoff::extract_numbered_section(&contents, "Definition of Done"),
        },
        contents,
    })
}
/// Builds the `ccd handoff write` invocation for this repo/profile.
fn handoff_write_command(repo_root: &Path, layout: &StateLayout) -> String {
    profiled_ccd_command("handoff write", repo_root, layout)
}

/// Builds the `ccd radar-state` invocation for this repo/profile.
fn radar_state_command(repo_root: &Path, layout: &StateLayout) -> String {
    profiled_ccd_command("radar-state", repo_root, layout)
}

/// Renders `ccd <subcommand> --path <repo>` and appends `--profile <name>`
/// only when the layout uses a non-default profile. Shared by the two
/// command builders above, which previously duplicated this logic.
fn profiled_ccd_command(subcommand: &str, repo_root: &Path, layout: &StateLayout) -> String {
    let base = format!("ccd {subcommand} --path {}", repo_root.display());
    let profile = layout.profile();
    if profile.as_str() == DEFAULT_PROFILE {
        base
    } else {
        format!("{base} --profile {}", profile.as_str())
    }
}
/// Builds the radar view of the tracked session state, or a "missing" view
/// when no session file is tracked.
///
/// Fix: the original sampled `session_state::now_epoch_s()` twice — once
/// (as `Option`) for the staleness check and once (defaulted) for the
/// lifecycle projection — so the two computations could observe different
/// clocks. The clock is now sampled exactly once and shared.
fn build_session_state_view(
    layout: &StateLayout,
    tracked_session: Option<&session_state::SessionStateFile>,
    tracked_activity: Option<&session_state::SessionActivityState>,
) -> RadarSessionStateView {
    let path = layout.state_db_path().display().to_string();
    match tracked_session {
        Some(state) => {
            // Single clock sample keeps status and lifecycle consistent.
            let now = session_state::now_epoch_s().ok();
            let status = session_state_status(state, now);
            RadarSessionStateView {
                path,
                status,
                session_id: state.session_id.clone(),
                mode: Some(state.mode),
                start_count: Some(state.start_count),
                lifecycle: session_state::lifecycle_projection(
                    state,
                    now.unwrap_or_default(),
                    None,
                    tracked_activity,
                ),
            }
        }
        None => RadarSessionStateView {
            path,
            status: "missing",
            session_id: None,
            mode: None,
            start_count: None,
            lifecycle: session_state::SessionLifecycleProjection::missing(),
        },
    }
}
/// Classifies a tracked session as `"active"` or `"stale"`.
///
/// A session without an id is always stale; otherwise it is stale only when
/// the current time is known and the staleness check confirms it.
fn session_state_status(
    state: &session_state::SessionStateFile,
    now_epoch_s: Option<u64>,
) -> &'static str {
    let stale = state.session_id.is_none()
        || now_epoch_s.is_some_and(|now| session_state::is_stale(state, now));
    if stale {
        "stale"
    } else {
        "active"
    }
}
/// Loads the workflow-signal surface for `locality_id` and reduces it to a
/// radar view: the surface path, a coarse status string, and the texts of
/// the currently active items.
fn read_workflow_signal(layout: &StateLayout, locality_id: &str) -> Result<WorkflowSignalState> {
    let loaded = runtime_state::load_workflow_signal_surface(layout, locality_id)?;
    let path = loaded.source.path.display().to_string();
    if loaded.source.is_missing() {
        return Ok(WorkflowSignalState {
            path,
            status: "missing",
            items: Vec::new(),
        });
    }
    let mut items = Vec::new();
    for item in &loaded.items {
        if item.lifecycle.is_active() {
            items.push(item.text.clone());
        }
    }
    // An empty active set falls back to the raw source status; any active
    // item upgrades the surface to "loaded".
    let status = if items.is_empty() {
        loaded.source.status.as_str()
    } else {
        "loaded"
    };
    Ok(WorkflowSignalState {
        path,
        status,
        items,
    })
}
/// Returns a snapshot of the execution-gates view already computed on the
/// loaded runtime state; cloned so the radar report owns its own copy.
fn read_execution_gates(
    runtime: &runtime_state::LoadedRuntimeState,
) -> session_gates::ExecutionGatesView {
    runtime.execution_gates.view.clone()
}
/// Flattens the five memory surfaces (profile -> locality -> pod -> branch
/// -> clone) into a single effective-memory view for the radar report.
///
/// Loaded, non-empty surfaces are concatenated in precedence order and their
/// paths recorded as contributors. The legacy path aliases
/// (`project_path`/`locality_path`, `work_stream_path`/`branch_path`,
/// `workspace_path`/`clone_path`) intentionally point at the same sources.
///
/// Fix: the original repeated an identical "if loaded and non-empty, push
/// segment and path" block once per surface; the five copies are collapsed
/// into one loop over a precedence-ordered surface table.
fn read_effective_memory(runtime: &runtime_state::LoadedRuntimeState) -> MemoryState {
    let sources = &runtime.sources;
    let structured = memory_entries::inspect_sources_with_branch_and_clone(
        some_if_loaded(&sources.profile_memory),
        some_if_loaded(&sources.locality_memory),
        some_if_loaded(&sources.pod_memory),
        some_if_loaded(&sources.branch_memory),
        some_if_loaded(&sources.clone_memory),
    );
    // Surfaces in precedence order; each loaded, non-empty surface
    // contributes its text and path.
    let surfaces = [
        &sources.profile_memory,
        &sources.locality_memory,
        &sources.pod_memory,
        &sources.branch_memory,
        &sources.clone_memory,
    ];
    let mut segments = Vec::new();
    let mut contributing_paths = Vec::new();
    for surface in surfaces {
        if let Some(contents) = some_if_loaded(surface) {
            if !contents.is_empty() {
                segments.push(contents.to_owned());
                contributing_paths.push(surface.path.display().to_string());
            }
        }
    }
    let content = segments.join("\n\n");
    let status = if content.is_empty() { "empty" } else { "loaded" };
    MemoryState {
        status,
        profile_path: sources.profile_memory.path.display().to_string(),
        project_path: sources.locality_memory.path.display().to_string(),
        locality_path: sources.locality_memory.path.display().to_string(),
        pod_path: sources.pod_memory.path.display().to_string(),
        work_stream_path: sources.branch_memory.path.display().to_string(),
        branch_path: sources.branch_memory.path.display().to_string(),
        workspace_path: sources.clone_memory.path.display().to_string(),
        clone_path: sources.clone_memory.path.display().to_string(),
        content,
        structured,
        contributing_paths,
    }
}
/// Runs the repo-native check suite and converts the outcome into the
/// radar's view type.
///
/// When the runner itself errors (as opposed to individual checks failing),
/// a synthetic single-failure result is returned so the report still
/// carries the error instead of aborting radar evaluation.
fn read_repo_native_checks(repo_root: &Path) -> RepoNativeChecksState {
    match check::run(repo_root) {
        Ok(report) => RepoNativeChecksState {
            surface: report.surface,
            failures: report.failures,
            checks: report
                .checks
                .into_iter()
                .map(|check| RepoNativeCheckResult {
                    id: check.id,
                    path: check.path,
                    status: check.status,
                    severity: check.severity,
                    message: check.message,
                    duration_ms: check.duration_ms,
                    exit_code: check.exit_code,
                })
                .collect(),
        },
        Err(error) => {
            // Inspection failed wholesale: report an errored surface plus one
            // synthetic "surface" check so the failure count reads as 1.
            let surface_path = repo_root.join(check::COMMANDS_DIR);
            let message = format!("Failed to inspect repo-native checks: {error:#}");
            RepoNativeChecksState {
                surface: check::CheckSurfaceView {
                    path: surface_path.display().to_string(),
                    status: "error",
                    prefix: check::CHECK_PREFIX,
                    entries: Vec::new(),
                    note: Some(message.clone()),
                },
                failures: 1,
                checks: vec![RepoNativeCheckResult {
                    id: "surface".to_owned(),
                    path: surface_path.display().to_string(),
                    status: "fail",
                    severity: "error",
                    message,
                    duration_ms: 0,
                    exit_code: None,
                }],
            }
        }
    }
}
/// Returns the surface's text when the backing source is present, `None`
/// when it is missing.
fn some_if_loaded(surface: &runtime_state::RuntimeTextSurface) -> Option<&str> {
    (!surface.is_missing()).then(|| surface.content.as_str())
}
/// Aggregates the built-in and extension-provided behavioral-drift signals
/// into a single drift verdict for the radar report.
///
/// Aggregation rules: any `Drift` signal forces `NeedsRecalibration`
/// (collecting all drift evidence and the deduplicated corrections); if
/// every signal is `NoSignal` the aggregate is `NoSignal`; otherwise the
/// aggregate is `Aligned`.
fn build_behavioral_drift(
    repo_root: &Path,
    inputs: BehavioralDriftInputs<'_>,
) -> BehavioralDriftState {
    // Observations recorded since the tracked session started; shared by the
    // churn and tool-surface signals below.
    let prefix_observations = projection_observations_for_session(
        repo_root,
        inputs.layout,
        inputs.locality_id,
        inputs.tracked_session,
    );
    // Built-in signals first, then extension signals appended in order.
    let signals = vec![
        build_handoff_structure_signal(inputs.handoff_contents),
        build_handoff_expectations_signal(inputs.tracked_session, inputs.git, inputs.handoff),
        build_surface_order_signal(),
        build_compiled_state_churn_signal(
            inputs.layout,
            inputs.tracked_session,
            &prefix_observations,
        ),
        build_tool_surface_signal(inputs.tracked_session, &prefix_observations),
    ]
    .into_iter()
    .chain(
        inputs
            .extension_signals
            .into_iter()
            .map(map_extension_signal),
    )
    .collect::<Vec<_>>();
    let drifted = signals
        .iter()
        .filter(|signal| signal.status == DriftSignalStatus::Drift)
        .collect::<Vec<_>>();
    let no_signal_count = signals
        .iter()
        .filter(|signal| signal.status == DriftSignalStatus::NoSignal)
        .count();
    if !drifted.is_empty() {
        // Deduplicate corrections while preserving first-seen order.
        let mut recommended_corrections = Vec::new();
        for correction in drifted
            .iter()
            .filter_map(|signal| signal.recommended_correction.clone())
        {
            if !recommended_corrections.contains(&correction) {
                recommended_corrections.push(correction);
            }
        }
        return BehavioralDriftState {
            status: DriftAggregateStatus::NeedsRecalibration,
            summary: format!(
                "{} behavioral drift signal(s) need explicit recalibration before wrap-up is treated as clean.",
                drifted.len()
            ),
            evidence: drifted
                .iter()
                .flat_map(|signal| signal.evidence.clone())
                .collect(),
            recommended_corrections,
            signals,
        };
    }
    if no_signal_count == signals.len() {
        return BehavioralDriftState {
            status: DriftAggregateStatus::NoSignal,
            summary: "Radar has no deterministic behavioral-drift signal beyond the base evaluation surfaces."
                .to_owned(),
            evidence: Vec::new(),
            recommended_corrections: Vec::new(),
            signals,
        };
    }
    BehavioralDriftState {
        status: DriftAggregateStatus::Aligned,
        summary: "No confirmed behavioral drift was detected from the available CCD-local signals."
            .to_owned(),
        evidence: Vec::new(),
        recommended_corrections: Vec::new(),
        signals,
    }
}
/// Converts an extension-provided drift signal into the radar's own signal
/// type, mapping the status enum one-to-one and carrying everything else
/// across unchanged.
fn map_extension_signal(signal: extensions::RadarBehavioralDriftSignal) -> BehavioralDriftSignal {
    let status = match signal.status {
        extensions::RadarBehavioralDriftStatus::Aligned => DriftSignalStatus::Aligned,
        extensions::RadarBehavioralDriftStatus::Drift => DriftSignalStatus::Drift,
        extensions::RadarBehavioralDriftStatus::NoSignal => DriftSignalStatus::NoSignal,
    };
    BehavioralDriftSignal {
        id: signal.id,
        status,
        summary: signal.summary,
        evidence: signal.evidence,
        recommended_correction: signal.recommended_correction,
    }
}
/// Checks the handoff text for every required CCD section and reports
/// drift listing any that are absent.
fn build_handoff_structure_signal(contents: &str) -> BehavioralDriftSignal {
    let mut missing_sections = Vec::new();
    for section in handoff::REQUIRED_SECTIONS.iter() {
        if !handoff::has_section(contents, section) {
            missing_sections.push(section.trim_start_matches("## ").to_owned());
        }
    }
    if !missing_sections.is_empty() {
        return drift(
            "handoff_structure",
            "The workspace-local handoff is structurally incomplete for reliable next-session guidance.",
            vec![format!("Missing sections: {}", missing_sections.join(", "))],
            "Rewrite the handoff so it includes the required CCD sections before closing the session.",
        );
    }
    aligned(
        "handoff_structure",
        "The workspace-local handoff contains the required CCD session sections.",
        vec![layout_handoff_sections()],
    )
}
/// Judges whether the active session is anchored by a concrete handoff:
/// a real title plus non-empty Immediate Actions and Definition of Done.
/// Without session telemetry the check yields no signal.
fn build_handoff_expectations_signal(
    tracked_session: Option<&session_state::SessionStateFile>,
    git: Option<&GitState>,
    handoff: &HandoffState,
) -> BehavioralDriftSignal {
    if tracked_session.is_none() {
        return no_signal(
            "handoff_expectations",
            "No active session telemetry is available, so handoff-expectation drift cannot be scoped to this conversation.",
            Vec::new(),
        );
    }
    // Branch evidence (when known) is listed ahead of the handoff title.
    let mut evidence = Vec::new();
    if let Some(git) = git {
        evidence.push(format!("Branch: `{}`", git.branch));
    }
    evidence.push(format!("Handoff title: `{}`", handoff.title));
    let placeholder_title = handoff.title == "No active session";
    let anchored = !placeholder_title
        && !handoff.immediate_actions.is_empty()
        && !handoff.definition_of_done.is_empty();
    if anchored {
        return aligned(
            "handoff_expectations",
            "The active session is anchored by a concrete handoff title, actions, and definition of done.",
            evidence,
        );
    }
    // Spell out exactly which anchors are missing.
    if placeholder_title {
        evidence.push("The handoff title still says `No active session`.".to_owned());
    }
    if handoff.immediate_actions.is_empty() {
        evidence.push("`## Immediate Actions` is empty.".to_owned());
    }
    if handoff.definition_of_done.is_empty() {
        evidence.push("`## Definition of Done` is empty.".to_owned());
    }
    drift(
        "handoff_expectations",
        "The active session is not anchored by a concrete next-session intent yet.",
        evidence,
        "Set a concrete next-session title and fill in Immediate Actions and Definition of Done before wrap-up.",
    )
}
fn build_surface_order_signal() -> BehavioralDriftSignal {
let start_order_ok = START_RENDER_SECTION_ORDER == CANONICAL_PROMPT_SURFACE_ORDER;
let compiled_order_ok =
compiled_state::SESSION_RENDER_SURFACE_ORDER == CANONICAL_COMPILED_SURFACE_ORDER;
let mut evidence = Vec::new();
evidence.push(format!(
"Start order: {}",
START_RENDER_SECTION_ORDER
.iter()
.map(|section| section.as_str())
.collect::<Vec<_>>()
.join(" -> ")
));
evidence.push(format!(
"Compiled order: {}",
compiled_state::SESSION_RENDER_SURFACE_ORDER.join(" -> ")
));
if start_order_ok && compiled_order_ok {
return aligned(
"prefix_surface_order",
"Prompt surfaces remain ordered by stability, preserving cache-friendly prefix assembly.",
evidence,
);
}
if !start_order_ok {
evidence.push(format!(
"Expected start order: {}",
CANONICAL_PROMPT_SURFACE_ORDER
.iter()
.map(|section| section.as_str())
.collect::<Vec<_>>()
.join(" -> ")
));
}
if !compiled_order_ok {
evidence.push(format!(
"Expected compiled order: {}",
CANONICAL_COMPILED_SURFACE_ORDER.join(" -> ")
));
}
drift(
"prefix_surface_order",
"Prompt surface ordering drifted away from the canonical stability order.",
evidence,
"Keep prompt surfaces ordered policy -> memory -> backlog -> execution_gates -> handoff, and keep the compiled session subset effective_memory -> workflow_signal -> handoff -> execution_gates -> escalation -> recovery -> git_state -> session_state.",
)
}
/// Flags prompt-cache-hostile churn: three or more distinct compiled-state
/// fingerprints observed within one session count as drift.
fn build_compiled_state_churn_signal(
    layout: &StateLayout,
    tracked_session: Option<&session_state::SessionStateFile>,
    observation_load: &ProjectionObservationLoad,
) -> BehavioralDriftSignal {
    // Without session telemetry the observations cannot be attributed to
    // this conversation, so no judgement is made.
    let Some(_session) = tracked_session else {
        return no_signal(
            "compiled_state_churn",
            "No active session telemetry is available, so derived runtime-view churn cannot be scoped to this conversation.",
            Vec::new(),
        );
    };
    // Blank fingerprints carry no information and are dropped before counting.
    let fingerprints = observation_load
        .observations
        .iter()
        .map(|observation| observation.source_fingerprint.trim())
        .filter(|fingerprint| !fingerprint.is_empty())
        .collect::<Vec<_>>();
    let distinct_fingerprints = distinct_count(&fingerprints);
    if distinct_fingerprints == 0 {
        return no_signal(
            "compiled_state_churn",
            "No derived runtime-view observations were recorded after this session started.",
            observation_load.warnings.clone(),
        );
    }
    let mut evidence = vec![
        format!(
            "{} distinct derived runtime-view fingerprint(s) observed in this session.",
            distinct_fingerprints
        ),
        format!(
            "Observation log: {}",
            layout.clone_projection_metadata_path().display()
        ),
    ];
    // Loader warnings and a per-surface change breakdown enrich the evidence.
    evidence.extend(observation_load.warnings.iter().cloned());
    if let Some(summary) = projection_surface_change_summary(&observation_load.observations) {
        evidence.push(summary);
    }
    // Threshold: >= 3 distinct fingerprints means the session prefix was
    // rebuilt at least twice mid-session.
    if distinct_fingerprints >= 3 {
        return drift(
            "compiled_state_churn",
            "The derived session prefix changed repeatedly during one session, which breaks prompt-cache reuse.",
            evidence,
            "Batch memory, focus, and handoff edits when possible, or use lightweight delta updates instead of repeatedly rebuilding the full session prefix.",
        );
    }
    aligned(
        "compiled_state_churn",
        "Compiled-state churn stayed within the expected session baseline.",
        evidence,
    )
}
/// Detects mid-session MCP tool-surface changes by counting distinct
/// tool-surface fingerprints recorded since the session started; two or
/// more distinct fingerprints count as drift.
fn build_tool_surface_signal(
    tracked_session: Option<&session_state::SessionStateFile>,
    observation_load: &ProjectionObservationLoad,
) -> BehavioralDriftSignal {
    if tracked_session.is_none() {
        return no_signal(
            "tool_surface_mutation",
            "No active session telemetry is available, so tool-surface drift cannot be scoped to this conversation.",
            Vec::new(),
        );
    }
    // Collect non-blank fingerprints; blanks carry no information.
    let mut tool_fingerprints = Vec::new();
    for observation in &observation_load.observations {
        if let Some(fingerprint) = observation.tool_surface_fingerprint.as_deref() {
            let fingerprint = fingerprint.trim();
            if !fingerprint.is_empty() {
                tool_fingerprints.push(fingerprint);
            }
        }
    }
    let distinct_tool_fingerprints = distinct_count(&tool_fingerprints);
    if distinct_tool_fingerprints == 0 {
        return no_signal(
            "tool_surface_mutation",
            "No MCP tool-surface fingerprint was recorded for this session.",
            observation_load.warnings.clone(),
        );
    }
    let mut evidence = vec![format!(
        "{} distinct tool-surface fingerprint(s) observed in this session.",
        distinct_tool_fingerprints
    )];
    evidence.extend(observation_load.warnings.iter().cloned());
    if distinct_tool_fingerprints >= 2 {
        return drift(
            "tool_surface_mutation",
            "The available tool surface changed mid-session, which can invalidate cached prefixes.",
            evidence,
            "Register tool stubs up front and avoid adding or removing MCP tools mid-session when defer-loading can preserve prefix stability.",
        );
    }
    aligned(
        "tool_surface_mutation",
        "The observed tool surface stayed stable during this session.",
        evidence,
    )
}
/// Loads projection observations recorded since the tracked session began
/// and appends a freshly computed observation of the current compiled
/// session state when it differs from the last recorded one.
///
/// All failures are downgraded to warnings — printed to stderr and carried
/// in the returned load — so drift auditing never aborts radar evaluation.
fn projection_observations_for_session(
    repo_root: &Path,
    layout: &StateLayout,
    locality_id: &str,
    tracked_session: Option<&session_state::SessionStateFile>,
) -> ProjectionObservationLoad {
    // No session means no attribution window: return an empty load.
    let Some(session) = tracked_session else {
        return ProjectionObservationLoad {
            observations: Vec::new(),
            warnings: Vec::new(),
        };
    };
    let mut warnings = Vec::new();
    // Keep only observations made at or after the session start.
    let mut observations = match projection_metadata::load_for_layout(layout) {
        Ok(Some(metadata)) => metadata
            .observations
            .into_iter()
            .filter(|observation| observation.observed_at_epoch_s >= session.started_at_epoch_s)
            .collect::<Vec<_>>(),
        Ok(None) => Vec::new(),
        Err(error) => {
            let warning = format!(
                "Failed to load projection observations from {}: {error:#}",
                layout.clone_projection_metadata_path().display()
            );
            eprintln!("Warning: {warning}");
            warnings.push(warning);
            Vec::new()
        }
    };
    // Recompute the current compiled session state so the tail of the
    // observation list reflects "now", not just the last persisted write.
    let current_store = match runtime_state::load_runtime_state(repo_root, layout, locality_id) {
        Ok(loaded) => match compiled_state::preview_for_target(
            &loaded,
            compiled_state::ProjectionTarget::Session,
        ) {
            Ok(store) => Some(store),
            Err(error) => {
                let warning = format!(
                    "Failed to preview the current compiled session state for repo `{locality_id}`: {error:#}"
                );
                eprintln!("Warning: {warning}");
                warnings.push(warning);
                None
            }
        },
        Err(error) => {
            let warning = format!(
                "Failed to load runtime state for repo `{locality_id}` while auditing behavioral drift: {error:#}"
            );
            eprintln!("Warning: {warning}");
            warnings.push(warning);
            None
        }
    };
    if let Some(store) = current_store {
        let current = projection_metadata::ProjectionObservation {
            // Fall back to the session start when the clock is unavailable.
            observed_at_epoch_s: session_state::now_epoch_s()
                .ok()
                .unwrap_or(session.started_at_epoch_s),
            source_fingerprint: store.source_fingerprint.clone(),
            projection_digests: store
                .projection_digests
                .clone()
                .or_else(|| Some(compiled_state::compute_projection_digests(&store))),
            tool_surface_fingerprint: projection_metadata::current_tool_surface_fingerprint(),
            session_id: None,
        };
        // Only append when it adds information: skip if the last recorded
        // observation already has identical source and tool fingerprints.
        if observations.last().is_none_or(|last| {
            last.source_fingerprint != current.source_fingerprint
                || last.tool_surface_fingerprint != current.tool_surface_fingerprint
        }) {
            observations.push(current);
        }
    }
    ProjectionObservationLoad {
        observations,
        warnings,
    }
}
/// Summarizes which compiled surfaces changed between consecutive
/// observations as `"surface xN"` counts; `None` when no change was seen
/// (pairs without digests on both sides are skipped).
fn projection_surface_change_summary(
    observations: &[projection_metadata::ProjectionObservation],
) -> Option<String> {
    let mut counts: BTreeMap<&'static str, usize> = BTreeMap::new();
    // Walk adjacent observation pairs.
    for (previous, current) in observations.iter().zip(observations.iter().skip(1)) {
        let (Some(previous), Some(current)) = (
            previous.projection_digests.as_ref(),
            current.projection_digests.as_ref(),
        ) else {
            continue;
        };
        let changed = [
            (
                "effective_memory",
                previous.effective_memory != current.effective_memory,
            ),
            (
                "workflow_signal",
                previous.workflow_signal != current.workflow_signal,
            ),
            ("handoff", previous.handoff != current.handoff),
        ];
        for (surface, did_change) in changed {
            if did_change {
                *counts.entry(surface).or_insert(0usize) += 1;
            }
        }
    }
    if counts.is_empty() {
        None
    } else {
        let rendered = counts
            .into_iter()
            .map(|(surface, count)| format!("{surface} x{count}"))
            .collect::<Vec<_>>()
            .join(", ");
        Some(format!("Observed changed surfaces: {rendered}"))
    }
}
/// Counts the unique strings in `values`; duplicates collapse via an
/// ordered set.
fn distinct_count(values: &[&str]) -> usize {
    let unique: BTreeSet<&str> = values.iter().copied().collect();
    unique.len()
}
fn aligned(
id: &'static str,
summary: impl Into<String>,
evidence: Vec<String>,
) -> BehavioralDriftSignal {
BehavioralDriftSignal {
id,
status: DriftSignalStatus::Aligned,
summary: summary.into(),
evidence,
recommended_correction: None,
}
}
fn no_signal(
id: &'static str,
summary: impl Into<String>,
evidence: Vec<String>,
) -> BehavioralDriftSignal {
BehavioralDriftSignal {
id,
status: DriftSignalStatus::NoSignal,
summary: summary.into(),
evidence,
recommended_correction: None,
}
}
fn drift(
id: &'static str,
summary: impl Into<String>,
evidence: Vec<String>,
recommended_correction: impl Into<String>,
) -> BehavioralDriftSignal {
BehavioralDriftSignal {
id,
status: DriftSignalStatus::Drift,
summary: summary.into(),
evidence,
recommended_correction: Some(recommended_correction.into()),
}
}
/// Renders the full required-section list as evidence text for an aligned
/// handoff-structure signal.
fn layout_handoff_sections() -> String {
    let sections = handoff::REQUIRED_SECTIONS
        .iter()
        .map(|section| section.trim_start_matches("## "))
        .collect::<Vec<_>>()
        .join(", ");
    format!("Required sections present: {sections}")
}
/// Assembles the candidate-update buckets for the report. Handoff and
/// backlog candidates are always empty here; only memory promotion-review
/// candidates are computed.
fn build_candidate_updates(
    repo_root: &Path,
    profile: &str,
    _handoff: &HandoffState,
    effective_memory: &MemoryState,
    tracked_session: Option<&session_state::SessionStateFile>,
    branch_memory_available: bool,
) -> CandidateUpdates {
    let memory = build_memory_candidates(
        repo_root,
        profile,
        effective_memory,
        tracked_session,
        branch_memory_available,
    );
    CandidateUpdates {
        handoff: Vec::new(),
        backlog: Vec::new(),
        memory,
    }
}
/// Which memory surface a candidate entry currently lives on; determines
/// its label, CLI source-scope flag, and suggested promotion destination.
#[derive(Clone, Copy)]
enum MemoryCandidateScope {
    Clone,
    Branch,
    Pod,
}
impl MemoryCandidateScope {
    /// Human-readable scope label used in candidate summaries.
    fn label(self) -> &'static str {
        match self {
            Self::Clone => "workspace",
            Self::Branch => "work_stream",
            Self::Pod => "pod",
        }
    }
    /// Value passed as `--source-scope` to `ccd memory candidate admit`.
    fn source_scope(self) -> &'static str {
        match self {
            Self::Clone => "workspace-memory",
            Self::Branch => "work-stream-memory",
            Self::Pod => "pod-memory",
        }
    }
    /// Default promotion destination: clone-scope entries move up one level
    /// to work-stream memory when that surface exists; everything else
    /// targets project memory.
    fn suggested_destination(self, branch_memory_available: bool) -> &'static str {
        match self {
            Self::Clone if branch_memory_available => "work-stream-memory",
            Self::Clone | Self::Branch | Self::Pod => "project-memory",
        }
    }
    /// One-sentence review prompt for a touched entry in this scope.
    fn promotion_review_summary(self, entry: &memory_entries::StructuredMemoryEntry) -> String {
        match self {
            Self::Clone => format!(
                "{} memory entry `{}` was touched this session; review whether it should stay workspace-local or be promoted to a higher memory scope.",
                self.label(),
                entry.id
            ),
            Self::Branch | Self::Pod => format!(
                "{} memory entry `{}` was touched this session; review whether it should move into project memory.",
                self.label(),
                entry.id
            ),
        }
    }
}
fn build_memory_candidates(
repo_root: &Path,
profile: &str,
effective_memory: &MemoryState,
tracked_session: Option<&session_state::SessionStateFile>,
branch_memory_available: bool,
) -> Vec<CandidateUpdate> {
let Some(current_session_start_count) = active_session_start_count(tracked_session) else {
return Vec::new();
};
let branch_entry_ids = effective_memory
.structured
.branch_entries
.iter()
.map(|entry| entry.id.clone())
.collect::<BTreeSet<_>>();
let repo_entry_ids = effective_memory
.structured
.repo_entries
.iter()
.map(|entry| entry.id.clone())
.collect::<BTreeSet<_>>();
let context = MemoryCandidateContext {
repo_root,
profile,
branch_entry_ids: &branch_entry_ids,
repo_entry_ids: &repo_entry_ids,
current_session_start_count,
branch_memory_available,
};
let mut memory = Vec::new();
extend_memory_candidates(
&mut memory,
&effective_memory.structured.clone_entries,
MemoryCandidateScope::Clone,
&context,
);
extend_memory_candidates(
&mut memory,
&effective_memory.structured.branch_entries,
MemoryCandidateScope::Branch,
&context,
);
extend_memory_candidates(
&mut memory,
&effective_memory.structured.pod_entries,
MemoryCandidateScope::Pod,
&context,
);
memory
}
/// Returns the tracked session's start count, but only when the session is
/// currently classified as active; `None` otherwise.
fn active_session_start_count(
    tracked_session: Option<&session_state::SessionStateFile>,
) -> Option<u64> {
    let state = tracked_session?;
    match session_state_status(state, session_state::now_epoch_s().ok()) {
        "active" => Some(u64::from(state.start_count)),
        _ => None,
    }
}
/// Shared inputs threaded through memory-candidate construction so the
/// per-entry helpers don't each take six parameters.
struct MemoryCandidateContext<'a> {
    /// Repository root used to render candidate CLI actions.
    repo_root: &'a Path,
    /// Active profile name passed through to `--profile`.
    profile: &'a str,
    /// Ids already present in branch (work-stream) memory.
    branch_entry_ids: &'a BTreeSet<String>,
    /// Ids already present in repo (project) memory.
    repo_entry_ids: &'a BTreeSet<String>,
    /// Start count of the currently active session; entries must have been
    /// touched in this session to qualify.
    current_session_start_count: u64,
    /// Whether a work-stream memory surface exists as a promotion target.
    branch_memory_available: bool,
}
/// Appends a candidate for every entry in `entries` that qualifies under
/// `scope` and `context`.
fn extend_memory_candidates(
    candidates: &mut Vec<CandidateUpdate>,
    entries: &[memory_entries::StructuredMemoryEntry],
    scope: MemoryCandidateScope,
    context: &MemoryCandidateContext<'_>,
) {
    for entry in entries {
        if let Some(candidate) = memory_candidate_from_entry(entry, scope, context) {
            candidates.push(candidate);
        }
    }
}
/// Turns a structured memory entry into a promotion-review candidate, or
/// `None` when the entry is not eligible.
///
/// Eligible entries are active, were touched during the current session,
/// carry no source reference, and have a promotable type; entries whose id
/// already exists at the suggested destination are skipped.
fn memory_candidate_from_entry(
    entry: &memory_entries::StructuredMemoryEntry,
    scope: MemoryCandidateScope,
    context: &MemoryCandidateContext<'_>,
) -> Option<CandidateUpdate> {
    let eligible = entry.state == "active"
        && entry.last_touched_session == context.current_session_start_count
        && entry.source_ref.is_none()
        && memory_promote::is_promotable_entry_type(&entry.entry_type);
    if !eligible {
        return None;
    }
    let suggested_destination = scope.suggested_destination(context.branch_memory_available);
    let duplicate = candidate_already_exists(
        &entry.id,
        suggested_destination,
        context.branch_entry_ids,
        context.repo_entry_ids,
    );
    if duplicate {
        return None;
    }
    let action = memory_candidate_admit_preview_action(
        context.repo_root,
        context.profile,
        entry.id.as_str(),
        scope,
        suggested_destination,
    );
    Some(CandidateUpdate {
        kind: "promotion_review",
        summary: scope.promotion_review_summary(entry),
        section: None,
        replacement_lines: None,
        entry_id: Some(entry.id.clone()),
        source_scope: Some(scope.source_scope()),
        suggested_destination: Some(suggested_destination),
        action: Some(action),
    })
}
/// Returns true when an entry with `entry_id` already exists at the
/// suggested destination scope. A work-stream promotion also collides with
/// the project scope above it; unknown destinations never collide.
fn candidate_already_exists(
    entry_id: &str,
    suggested_destination: &'static str,
    branch_entry_ids: &BTreeSet<String>,
    repo_entry_ids: &BTreeSet<String>,
) -> bool {
    let in_repo = repo_entry_ids.contains(entry_id);
    match suggested_destination {
        "work-stream-memory" => in_repo || branch_entry_ids.contains(entry_id),
        "project-memory" => in_repo,
        _ => false,
    }
}
/// Builds the preview `ccd memory candidate admit` action for a reviewed
/// memory candidate. The preview argv is the apply argv minus the trailing
/// `--write`, and the matching apply action is carried alongside it.
fn memory_candidate_admit_preview_action(
    repo_root: &Path,
    profile: &str,
    entry_id: &str,
    scope: MemoryCandidateScope,
    suggested_destination: &'static str,
) -> CandidateAction {
    CandidateAction {
        kind: "memory_candidate_admit_preview",
        argv: memory_candidate_admit_argv(
            repo_root,
            profile,
            entry_id,
            scope,
            suggested_destination,
            false,
        ),
        note: "Preview only. Add `--write` to stage this reviewed candidate as a higher-scope `promotion_candidate` entry.",
        apply: Some(memory_candidate_admit_apply_action(
            repo_root,
            profile,
            entry_id,
            scope,
            suggested_destination,
        )),
    }
}

/// Builds the mutating apply action (`--write`) for the same candidate.
fn memory_candidate_admit_apply_action(
    repo_root: &Path,
    profile: &str,
    entry_id: &str,
    scope: MemoryCandidateScope,
    suggested_destination: &'static str,
) -> CandidateActionApply {
    CandidateActionApply {
        argv: memory_candidate_admit_argv(
            repo_root,
            profile,
            entry_id,
            scope,
            suggested_destination,
            true,
        ),
        requirements: Vec::new(),
        note: "Mutates memory surfaces. Execute this apply command only after reviewing the staged candidate.",
    }
}

/// Shared argv builder for the preview/apply pair, removing the flag list
/// the two actions previously duplicated verbatim; `write` appends the
/// mutating `--write` flag.
fn memory_candidate_admit_argv(
    repo_root: &Path,
    profile: &str,
    entry_id: &str,
    scope: MemoryCandidateScope,
    suggested_destination: &'static str,
    write: bool,
) -> Vec<String> {
    let mut argv = vec![
        "ccd".to_owned(),
        "memory".to_owned(),
        "candidate".to_owned(),
        "admit".to_owned(),
        "--path".to_owned(),
        repo_root.display().to_string(),
        "--profile".to_owned(),
        profile.to_owned(),
        "--entry".to_owned(),
        entry_id.to_owned(),
        "--source-scope".to_owned(),
        scope.source_scope().to_owned(),
        "--destination".to_owned(),
        suggested_destination.to_owned(),
    ];
    if write {
        argv.push("--write".to_owned());
    }
    argv
}
/// Computes the context-health view: a heuristic degradation-risk score,
/// risk band, confidence, recommendation, and the cost sub-view.
///
/// Signals come from three families: host telemetry, `CCD_*` environment
/// overrides, and tracked-session heuristics (minutes elapsed, start
/// cycles). Each present signal contributes a bounded additive score; the
/// total is capped at 95%.
fn build_context_health(
    repo_root: &Path,
    layout: &StateLayout,
    locality_id: &str,
    git: Option<&GitState>,
    tracked_session: Option<&session_state::SessionStateFile>,
    active_session_id: Option<&str>,
) -> Result<ContextHealth> {
    let now_epoch_s = session_state::now_epoch_s()?;
    // Host telemetry is best-effort: errors and absence both become None.
    let host_snapshot = host_telemetry::current(repo_root).ok().flatten();
    // Host-reported context usage wins; the env var is the fallback.
    let context_used_pct = host_snapshot
        .as_ref()
        .and_then(|snapshot| snapshot.context_used_pct)
        .or_else(|| optional_env_u8("CCD_CONTEXT_USED_PCT"));
    let signals = ContextSignals {
        compacted: host_snapshot
            .as_ref()
            .and_then(|snapshot| snapshot.compacted)
            .or_else(|| optional_env_bool("CCD_CONTEXT_COMPACTED")),
        session_minutes: tracked_session
            .map(|state| session_state::session_minutes(state, now_epoch_s)),
        ccd_start_cycles_in_conversation: tracked_session.map(|state| state.start_count),
        host_total_tokens: host_snapshot
            .as_ref()
            .and_then(|snapshot| snapshot.total_tokens),
        host_context_window_tokens: host_snapshot
            .as_ref()
            .and_then(|snapshot| snapshot.model_context_window),
        tool_output_kb: optional_env_u64("CCD_TOOL_OUTPUT_KB"),
        operator_symptoms: optional_env_list("CCD_OPERATOR_SYMPTOMS"),
    };
    let has_host_signal = host_snapshot.is_some();
    let has_env_signal = optional_env_u8("CCD_CONTEXT_USED_PCT").is_some()
        || signals.compacted.is_some()
        || signals.tool_output_kb.is_some()
        || !signals.operator_symptoms.is_empty();
    let has_session_signal =
        signals.session_minutes.is_some() || signals.ccd_start_cycles_in_conversation.is_some();
    // Label the provenance of whatever signal families were found.
    let source = match (has_host_signal, has_env_signal, has_session_signal) {
        (true, _, true) => "mixed",
        (true, _, false) => "host",
        (false, true, true) => "mixed",
        (false, true, false) => "env",
        (false, false, true) => "heuristic",
        (false, false, false) => "none",
    };
    // Additive scoring; saturating_add keeps the u8 from overflowing.
    let mut risk = 0u8;
    let mut scored_signals = 0usize;
    if let Some(minutes) = signals.session_minutes {
        scored_signals += 1;
        risk = risk.saturating_add(match minutes {
            0..=29 => 0,
            30..=59 => 10,
            60..=89 => 25,
            _ => 40,
        });
    }
    if let Some(cycles) = signals.ccd_start_cycles_in_conversation {
        scored_signals += 1;
        risk = risk.saturating_add(match cycles {
            0..=1 => 0,
            2..=3 => 10,
            _ => 25,
        });
    }
    if let Some(used_pct) = context_used_pct {
        scored_signals += 1;
        risk = risk.saturating_add(match used_pct {
            0..=69 => 0,
            70..=84 => 10,
            85..=94 => 20,
            _ => 30,
        });
    }
    // A compaction event is the strongest single indicator of pressure.
    if let Some(compacted) = signals.compacted {
        scored_signals += 1;
        if compacted {
            risk = risk.saturating_add(35);
        }
    }
    if let Some(tool_output_kb) = signals.tool_output_kb {
        scored_signals += 1;
        risk = risk.saturating_add(match tool_output_kb {
            0..=99 => 0,
            100..=249 => 10,
            250..=499 => 20,
            _ => 30,
        });
    }
    // Operator-reported symptoms: up to three count, 15 points each.
    if !signals.operator_symptoms.is_empty() {
        scored_signals += 1;
        let symptom_score: u8 = signals.operator_symptoms.iter().take(3).map(|_| 15u8).sum();
        risk = risk.saturating_add(symptom_score);
    }
    // No scored signals means risk is unknowable (None), not zero; the cap
    // at 95 keeps the score short of claimed certainty.
    let degradation_risk_pct = if scored_signals == 0 {
        None
    } else {
        Some(risk.min(95))
    };
    let band = match degradation_risk_pct {
        None => "unknown",
        Some(0..=34) => "low",
        Some(35..=59) => "moderate",
        Some(60..=79) => "risky",
        Some(_) => "critical",
    };
    // Confidence reflects how many independent signal families agreed.
    let confidence = if has_host_signal && (signals.compacted.is_some() || has_session_signal) {
        "high"
    } else if has_host_signal || has_session_signal || has_env_signal {
        "medium"
    } else {
        "low"
    };
    let recommendation = match degradation_risk_pct {
        Some(80..=100) => "wrap_up_and_clear",
        Some(60..=79) => "wrap_up_soon",
        Some(35..=59) => "keep_focus",
        Some(_) => "continue",
        None if has_host_signal || has_env_signal || has_session_signal => "continue",
        None => "capture_session_state",
    };
    let cost = telemetry_cost::build_cost_view(
        repo_root,
        layout,
        locality_id,
        git,
        active_session_id,
        host_snapshot.as_ref(),
    )?;
    Ok(ContextHealth {
        source,
        context_used_pct,
        degradation_risk_pct,
        band,
        confidence,
        signals,
        cost,
        recommendation,
    })
}
/// Builds the four-bucket Radar evaluation (next step, backlog, memory,
/// wrap-up) from the deterministic CLI signals gathered in `inputs`.
///
/// Each bucket carries a machine-readable `status`, a one-line `summary`,
/// and supporting `evidence` strings.
fn build_evaluation(inputs: EvaluationInputs<'_>) -> RadarEvaluation {
    // Next step: a repo-state delta relative to the recorded handoff always
    // demands review; otherwise the execution-gate attention anchor decides.
    let next_step = if inputs.candidates.handoff.is_empty() {
        match inputs.execution_gates.attention_anchor.as_ref() {
            Some(anchor) if anchor.status == session_gates::ExecutionGateStatus::Blocked => {
                EvaluationBucket {
                    status: "review_required",
                    summary:
                        "The first unfinished execution gate is blocked; resolve it or rewrite the gate set before clean wrap-up."
                            .to_owned(),
                    evidence: execution_gate_evidence(inputs.execution_gates),
                }
            }
            Some(_) => EvaluationBucket {
                status: "continue",
                summary:
                    "Execution gates are active; keep working through the first unfinished gate before clean wrap-up."
                        .to_owned(),
                evidence: execution_gate_evidence(inputs.execution_gates),
            },
            None => EvaluationBucket {
                status: "no_change_detected",
                summary: "No deterministic CLI signal says the next step changed.".to_owned(),
                evidence: vec![
                    "Current HEAD is already reflected in the workspace-local handoff or no repo-state delta was detected."
                        .to_owned(),
                ],
            },
        }
    } else {
        EvaluationBucket {
            status: "review_required",
            summary:
                "Repo state changed relative to the recorded handoff; refresh the workspace-local handoff before closing."
                    .to_owned(),
            evidence: inputs
                .candidates
                .handoff
                .iter()
                .map(|candidate| candidate.summary.clone())
                .collect(),
        }
    };
    // Backlog coaching is delegated entirely to the active workflow extension.
    let backlog = if let Some(workflow_guidance) = inputs.workflow_guidance {
        EvaluationBucket {
            status: workflow_guidance.evaluation.status,
            summary: workflow_guidance.evaluation.summary.clone(),
            evidence: workflow_guidance.evaluation.evidence.clone(),
        }
    } else {
        EvaluationBucket {
            status: "no_cli_signal",
            summary:
                "No workflow extension is active for this clone; workflow coaching is skipped."
                    .to_owned(),
            evidence: Vec::new(),
        }
    };
    // Memory: touched candidates beat "loaded"; an empty effective memory gets
    // a nudge to persist durable lessons deliberately.
    let memory = if !inputs.candidates.memory.is_empty() {
        EvaluationBucket {
            status: "review_required",
            summary:
                "Structured memory entries touched this session may belong in a higher memory scope; review them before wrap-up."
                    .to_owned(),
            evidence: inputs
                .candidates
                .memory
                .iter()
                .map(|candidate| candidate.summary.clone())
                .collect(),
        }
    } else if inputs.effective_memory.content.is_empty() {
        EvaluationBucket {
            status: "no_cli_signal",
            summary:
                "No effective CCD-local memory is recorded for this profile, linked project, active work stream, or workspace."
                    .to_owned(),
            evidence: vec![
                "Persist durable lessons deliberately in profile, project, work stream, or workspace memory when a pattern repeats."
                    .to_owned(),
            ],
        }
    } else {
        EvaluationBucket {
            status: "loaded",
            summary:
                "Effective CCD-local memory is loaded for this profile, linked project, active work stream, or workspace."
                    .to_owned(),
            evidence: inputs.effective_memory.contributing_paths.clone(),
        }
    };
    // Wrap-up readiness: blocking escalations, failed repo-native checks, and
    // behavioral drift all veto a clean close before git state is consulted.
    // Git branches bind the checked `GitState` via `Option::filter` instead of
    // re-unwrapping with `expect`, removing the latent panic paths.
    let wrap_up = if inputs.escalation_view.blocking_count > 0 {
        EvaluationBucket {
            status: "needs_review",
            summary:
                "Blocking escalation(s) are active; resolve them before using Radar wrap-up as a clean close."
                    .to_owned(),
            evidence: inputs
                .escalation_view
                .entries
                .iter()
                .filter(|e| matches!(e.kind, escalation_state::EscalationKind::Blocking))
                .map(|e| format!("{}: {}", e.id, e.reason))
                .collect(),
        }
    } else if inputs.repo_native_checks.failures > 0 {
        EvaluationBucket {
            status: "needs_review",
            summary:
                "One or more repo-native checks failed; resolve them before using Radar wrap-up as a clean close."
                    .to_owned(),
            evidence: inputs
                .repo_native_checks
                .checks
                .iter()
                .filter(|check| check.severity == "error")
                .map(|check| format!("{}: {}", check.id, check.message))
                .collect(),
        }
    } else if inputs.behavioral_drift.status == DriftAggregateStatus::NeedsRecalibration {
        EvaluationBucket {
            status: "needs_review",
            summary:
                "Behavioral drift was detected; recalibrate the handoff or focus before using Radar wrap-up as a clean close."
                    .to_owned(),
            evidence: inputs.behavioral_drift.evidence.clone(),
        }
    } else if let Some(git) = inputs.git.filter(|git| git.behind > 0) {
        EvaluationBucket {
            status: "needs_review",
            summary:
                "The branch is behind its upstream; reconcile before using Radar wrap-up as a clean close."
                    .to_owned(),
            evidence: vec![format!("Behind upstream by {} commit(s).", git.behind)],
        }
    } else if let Some(git) = inputs.git.filter(|git| !git.clean) {
        EvaluationBucket {
            status: "ready_to_commit",
            summary:
                "Local changes are present; the repo is ready for validation, review, and commit."
                    .to_owned(),
            evidence: wrap_up_evidence(git),
        }
    } else if let Some(git) = inputs.git.filter(|git| git.ahead > 0) {
        EvaluationBucket {
            status: "ready_to_push",
            summary: "No local file changes remain and local commits are ready to push.".to_owned(),
            evidence: vec![format!("Ahead of upstream by {} commit(s).", git.ahead)],
        }
    } else if inputs.git.is_none() {
        // No git context at all: directory-substrate workspaces by design.
        EvaluationBucket {
            status: "clean",
            summary:
                "Git checkout context is unavailable by design for directory-substrate workspaces."
                    .to_owned(),
            evidence: vec![
                "Wrap-up review is limited to continuity, memory, execution gates, and repo-native checks.".to_owned(),
            ],
        }
    } else {
        EvaluationBucket {
            status: "clean",
            summary: "No local changes or unpublished commits were detected.".to_owned(),
            evidence: vec!["Worktree is clean and branch is in sync with upstream.".to_owned()],
        }
    };
    RadarEvaluation {
        next_step,
        backlog,
        memory,
        wrap_up,
    }
}
/// Renders the execution-gate view as human-readable evidence lines:
/// seed source (if any), the current attention anchor (if any), and the
/// unfinished-gate count (if non-zero).
fn execution_gate_evidence(view: &session_gates::ExecutionGatesView) -> Vec<String> {
    let mut lines = Vec::new();
    if let Some(seed) = view.seeded_from.as_ref() {
        lines.push(format!("Gate set source: {seed}."));
    }
    if let Some(anchor) = view.attention_anchor.as_ref() {
        lines.push(format!(
            "Current gate [{}/{} {}]: {}",
            anchor.index, view.total_count, anchor.status.as_str(), anchor.text
        ));
    }
    if view.unfinished_count > 0 {
        lines.push(format!(
            "{} execution gate(s) remain unfinished.",
            view.unfinished_count
        ));
    }
    lines
}
/// Suggests splitting a research session into a fresh implement session.
///
/// The single hint fires only when all of the following hold: the session is
/// active in research mode, context health recommends wrapping up, no
/// blocking escalation is open, and the handoff/gates already carry concrete
/// next-pass material. Otherwise the result is empty.
fn build_workflow_hints(
    handoff: &HandoffState,
    session_view: &RadarSessionStateView,
    execution_gates: &session_gates::ExecutionGatesView,
    context_health: &ContextHealth,
    escalation_view: &escalation_state::EscalationView,
) -> Vec<WorkflowHint> {
    // Evidence that the next implementation pass is already scoped.
    let mut carried_signals = Vec::new();
    if execution_gates.total_count > 0 {
        carried_signals.push(format!(
            "Execution gates already bound the next implementation pass ({} total, {} unfinished).",
            execution_gates.total_count, execution_gates.unfinished_count
        ));
    }
    if !handoff.immediate_actions.is_empty() {
        carried_signals.push(format!(
            "The handoff already carries {} immediate action(s) for the next pass.",
            handoff.immediate_actions.len()
        ));
    }
    if !handoff.definition_of_done.is_empty() {
        carried_signals.push(format!(
            "The handoff already carries {} definition-of-done item(s).",
            handoff.definition_of_done.len()
        ));
    }
    let is_research_session = matches!(
        (session_view.status, session_view.mode),
        ("active", Some(session_state::SessionMode::Research))
    );
    let under_wrap_up_pressure = matches!(
        context_health.recommendation,
        "wrap_up_soon" | "wrap_up_and_clear"
    );
    let blocked = escalation_view.blocking_count > 0;
    // All four conditions must line up before the hint is emitted.
    if !is_research_session || !under_wrap_up_pressure || blocked || carried_signals.is_empty() {
        return Vec::new();
    }
    let mut reasons = vec![
        "The active session is explicitly tagged as research.".to_owned(),
        format!(
            "Context health recommends `{}` (band `{}`).",
            context_health.recommendation, context_health.band
        ),
    ];
    reasons.extend(carried_signals);
    vec![WorkflowHint {
        id: "consider_split_to_implement",
        summary:
            "Consider closing this research pass with a compact handoff and starting a fresh implement session."
                .to_owned(),
        reasons,
    }]
}
/// Derives the single session-boundary recommendation (stop / refresh /
/// wrap-up / continue) from the already-computed Radar views.
///
/// Guards are evaluated in strict priority order: advisory checkout state,
/// blocking escalations, behavioral drift, a next-step needing a handoff
/// refresh, a wrap-up needing review, then positive wrap-up signals, and
/// finally the default "continue".
fn build_session_boundary(
    checkout_state: Option<&handoff::CheckoutStateView>,
    recovery: &runtime_state::RuntimeRecoveryView,
    context_health: &ContextHealth,
    behavioral_drift: &BehavioralDriftState,
    evaluation: &RadarEvaluation,
    escalation_view: &escalation_state::EscalationView,
) -> SessionBoundaryRecommendation {
    // When recovery artifacts are loaded, every recommendation's evidence
    // gets this advisory note so they are not mistaken for authority.
    let recovery_note = (recovery.status == "loaded").then_some(
        "Recovery artifacts are loaded as supporting context only; handoff, workflow guidance, and execution gates remain authoritative."
            .to_owned(),
    );
    // Highest priority: an advisory checkout state means this may not be a
    // live checkout at all, so nothing else here can be trusted.
    if let Some(checkout_state) = checkout_state.filter(|state| state.advisory) {
        let mut evidence = vec![checkout_state.summary.clone()];
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            "Stop here and continue from a live checkout before trusting this session boundary.",
            evidence,
        );
    }
    // Blocking escalations: stop, with one evidence line per blocking entry.
    if escalation_view.blocking_count > 0 {
        let mut evidence = escalation_view
            .entries
            .iter()
            .filter(|entry| matches!(entry.kind, escalation_state::EscalationKind::Blocking))
            .map(|entry| format!("{}: {}", entry.id, entry.reason))
            .collect::<Vec<_>>();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            "Stop and resolve blocking escalations before treating Radar wrap-up as authoritative.",
            evidence,
        );
    }
    // Behavioral drift: stop until the handoff or focus is recalibrated.
    if behavioral_drift.status == DriftAggregateStatus::NeedsRecalibration {
        let mut evidence = behavioral_drift.evidence.clone();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            "Stop and recalibrate the handoff or focus before closing this session.",
            evidence,
        );
    }
    // Stale handoff: refresh rather than stop — the session itself is fine.
    if evaluation.next_step.status == "review_required" {
        let mut evidence = evaluation.next_step.evidence.clone();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Refresh,
            "Refresh the workspace-local handoff before closing this session.",
            evidence,
        );
    }
    // Wrap-up bucket flagged problems (failed checks, behind upstream, ...):
    // stop and surface the wrap-up summary verbatim as the reason.
    if evaluation.wrap_up.status == "needs_review" {
        let mut evidence = evaluation.wrap_up.evidence.clone();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            evaluation.wrap_up.summary.clone(),
            evidence,
        );
    }
    // Positive signals: context-health pressure OR a commit/push-ready repo
    // both mean this is a clean boundary for close-out work.
    if matches!(
        context_health.recommendation,
        "wrap_up_soon" | "wrap_up_and_clear"
    ) || matches!(
        evaluation.wrap_up.status,
        "ready_to_commit" | "ready_to_push"
    ) {
        let mut evidence = evaluation.wrap_up.evidence.clone();
        evidence.push(format!(
            "Context health recommends `{}` (band `{}`).",
            context_health.recommendation, context_health.band
        ));
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::WrapUp,
            "Wrap up now; CCD sees a clean session boundary for close-out work.",
            evidence,
        );
    }
    // Default: continue; fall back to the context-health line when the
    // next-step bucket produced no evidence of its own.
    let mut evidence = evaluation.next_step.evidence.clone();
    if evidence.is_empty() {
        evidence.push(format!(
            "Context health recommends `{}` (band `{}`).",
            context_health.recommendation, context_health.band
        ));
    }
    append_recovery_note(&mut evidence, recovery_note.as_deref());
    SessionBoundaryRecommendation::new(
        SessionBoundaryAction::Continue,
        "Continue the current session; CCD does not yet see a stronger boundary action.",
        evidence,
    )
}
/// Appends the optional recovery advisory note to an evidence list;
/// a `None` note leaves the list untouched.
fn append_recovery_note(evidence: &mut Vec<String>, note: Option<&str>) {
    evidence.extend(note.map(str::to_owned));
}
/// Assembles the ordered operator-approval checklist for Radar wrap-up.
///
/// The `next_step` question always leads and the `wrap_up` question always
/// closes; workflow-extension steps, a recalibration prompt, and a memory
/// prompt are inserted between them only when their signals are present.
fn build_approval_steps(
    evaluation: &RadarEvaluation,
    candidates: &CandidateUpdates,
    workflow_guidance: Option<&crate::extensions::RadarWorkflowGuidance>,
    behavioral_drift: &BehavioralDriftState,
) -> Vec<ApprovalStep> {
    let mut steps = Vec::new();
    let next_step_changed = evaluation.next_step.status == "review_required";
    steps.push(ApprovalStep {
        id: "next_step",
        question: "Did this session change what should be next, or block something?",
        recommended_answer: if next_step_changed { "yes" } else { "no" },
        recommendation: evaluation.next_step.summary.clone(),
        evidence: evaluation.next_step.evidence.clone(),
    });
    // Workflow-extension steps are copied through verbatim, in order.
    if let Some(guidance) = workflow_guidance {
        for step in &guidance.approval_steps {
            steps.push(ApprovalStep {
                id: step.id,
                question: step.question,
                recommended_answer: step.recommended_answer,
                recommendation: step.recommendation.clone(),
                evidence: step.evidence.clone(),
            });
        }
    }
    if behavioral_drift.status == DriftAggregateStatus::NeedsRecalibration {
        steps.push(ApprovalStep {
            id: "recalibrate",
            question:
                "Recent behavior drifted from CCD expectations. Do you want me to recalibrate the handoff or focus before wrap-up?",
            recommended_answer: "yes",
            recommendation: behavioral_drift.summary.clone(),
            evidence: behavioral_drift.evidence.clone(),
        });
    }
    if !candidates.memory.is_empty() {
        let memory_evidence: Vec<String> = candidates
            .memory
            .iter()
            .map(|candidate| candidate.summary.clone())
            .collect();
        steps.push(ApprovalStep {
            id: "memory",
            question:
                "These are things worth persisting in MEMORY.md. Do you want me to persist them?",
            recommended_answer: "yes",
            recommendation: evaluation.memory.summary.clone(),
            evidence: memory_evidence,
        });
    }
    let wrap_up_ready = matches!(
        evaluation.wrap_up.status,
        "ready_to_commit" | "ready_to_push"
    );
    steps.push(ApprovalStep {
        id: "wrap_up",
        question: "Wrap up now? I can validate, show git status/diff, stage explicit paths, commit after approval, and push after approval.",
        recommended_answer: if wrap_up_ready { "yes" } else { "no" },
        recommendation: evaluation.wrap_up.summary.clone(),
        evidence: evaluation.wrap_up.evidence.clone(),
    });
    steps
}
/// Summarizes the dirty parts of a git worktree as evidence lines,
/// one line per non-empty category (staged, unstaged, untracked).
fn wrap_up_evidence(git: &GitState) -> Vec<String> {
    let mut out = Vec::new();
    let mut describe = |label: &str, files: &[String]| {
        if !files.is_empty() {
            out.push(format!("{label}: {}", files.join(", ")));
        }
    };
    describe("Staged files", &git.staged_files);
    describe("Unstaged files", &git.unstaged_files);
    describe("Untracked files", &git.untracked_files);
    out
}
/// Reads `name` from the environment and interprets it as a boolean.
///
/// Accepts (case-insensitively, after trimming) `1`/`true`/`yes` as `true`
/// and `0`/`false`/`no` as `false`; anything else — including an unset
/// variable — yields `None`.
fn optional_env_bool(name: &str) -> Option<bool> {
    let raw = std::env::var(name).ok()?;
    let normalized = raw.trim().to_ascii_lowercase();
    if matches!(normalized.as_str(), "1" | "true" | "yes") {
        Some(true)
    } else if matches!(normalized.as_str(), "0" | "false" | "no") {
        Some(false)
    } else {
        None
    }
}
/// Reads `name` from the environment and parses the trimmed value as `u8`.
/// Unset or unparsable values yield `None`.
fn optional_env_u8(name: &str) -> Option<u8> {
    let raw = std::env::var(name).ok()?;
    raw.trim().parse::<u8>().ok()
}
/// Reads `name` from the environment and parses the trimmed value as `u64`.
/// Unset or unparsable values yield `None`.
fn optional_env_u64(name: &str) -> Option<u64> {
    let raw = std::env::var(name).ok()?;
    raw.trim().parse::<u64>().ok()
}
/// Reads `name` from the environment as a comma-separated list.
///
/// Entries are trimmed and empty entries are dropped; an unset variable
/// yields an empty list.
fn optional_env_list(name: &str) -> Vec<String> {
    let Ok(raw) = std::env::var(name) else {
        return Vec::new();
    };
    raw.split(',')
        .map(str::trim)
        .filter(|entry| !entry.is_empty())
        .map(str::to_owned)
        .collect()
}
pub(crate) fn apply_delta_filter(
mut value: serde_json::Value,
baseline: &compiled_state::ProjectionDigests,
current: &compiled_state::ProjectionDigests,
) -> serde_json::Value {
let pairs: &[(&str, &str, &str)] = &[
("handoff", &baseline.handoff, ¤t.handoff),
("effective_memory", &baseline.effective_memory, ¤t.effective_memory),
("workflow_signal", &baseline.workflow_signal, ¤t.workflow_signal),
("execution_gates", &baseline.execution_gates, ¤t.execution_gates),
("escalation", &baseline.escalation, ¤t.escalation),
("recovery", &baseline.recovery, ¤t.recovery),
("git", &baseline.git_state, ¤t.git_state),
("session_state", &baseline.session_state, ¤t.session_state),
];
if let Some(obj) = value.as_object_mut() {
for &(field, base_digest, curr_digest) in pairs {
if !base_digest.is_empty() && base_digest == curr_digest {
let is_present_and_non_null = obj
.get(field)
.is_some_and(|v| !v.is_null());
if is_present_and_non_null {
obj.insert(
field.to_owned(),
serde_json::json!({
"status": "unchanged",
"digest": curr_digest
}),
);
}
}
}
}
value
}
/// Deserialized request body for `commit_writes`; every field is optional so
/// a caller can apply any subset of the approved wrap-up writes in one call.
#[derive(Deserialize)]
pub(crate) struct CommitPayload {
    /// Handoff document to persist, if the operator approved a refresh.
    #[serde(default)]
    pub(crate) handoff: Option<crate::handoff::write::HandoffWriteInput>,
    /// Recovery artifact to persist, if approved.
    #[serde(default)]
    pub(crate) recovery: Option<crate::recovery::RecoveryWriteInput>,
    /// When true, the active session state is cleared after the writes.
    #[serde(default)]
    pub(crate) clear_session: bool,
}
/// Aggregate outcome of `commit_writes`: `ok` flips to `false` on the first
/// failed write, and `error` carries that first failure's message.
#[derive(Serialize)]
pub(crate) struct CommitResult {
    /// True only when every requested write succeeded.
    pub(crate) ok: bool,
    /// Outcome of the handoff write; omitted when none was requested.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) handoff: Option<CommitWriteOutcome>,
    /// Outcome of the recovery write; omitted when none was requested
    /// or when an earlier write already failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) recovery: Option<CommitWriteOutcome>,
    /// Whether the session clear succeeded; omitted when not requested.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) session_cleared: Option<bool>,
    /// First failure message, if any write failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) error: Option<String>,
}
/// Per-write success flag with an optional error message, used for the
/// `handoff` and `recovery` entries of `CommitResult`.
#[derive(Serialize)]
pub(crate) struct CommitWriteOutcome {
    /// True when this individual write succeeded.
    pub(crate) ok: bool,
    /// Formatted error chain when the write failed; omitted on success.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) error: Option<String>,
}
pub(crate) fn commit_writes(
repo_root: &Path,
explicit_profile: Option<&str>,
payload: CommitPayload,
) -> CommitResult {
let write_options = crate::state::protected_write::ExclusiveWriteOptions::default();
let mut result = CommitResult {
ok: true,
handoff: None,
recovery: None,
session_cleared: None,
error: None,
};
if let Some(handoff_input) = payload.handoff {
match crate::handoff::write::run_with_input(
repo_root,
explicit_profile,
write_options.clone(),
handoff_input,
) {
Ok(_) => {
result.handoff = Some(CommitWriteOutcome {
ok: true,
error: None,
});
}
Err(e) => {
result.ok = false;
result.handoff = Some(CommitWriteOutcome {
ok: false,
error: Some(format!("{e:#}")),
});
result.error = Some(format!("handoff write failed: {e:#}"));
return result;
}
}
}
if let Some(recovery_input) = payload.recovery {
match crate::recovery::write_with_input(
repo_root,
explicit_profile,
recovery_input,
write_options,
) {
Ok(_) => {
result.recovery = Some(CommitWriteOutcome {
ok: true,
error: None,
});
}
Err(e) => {
result.ok = false;
result.recovery = Some(CommitWriteOutcome {
ok: false,
error: Some(format!("{e:#}")),
});
result.error = Some(format!("recovery write failed: {e:#}"));
return result;
}
}
}
if payload.clear_session {
let locality_id = crate::repo::marker::load(repo_root)
.ok()
.flatten()
.map(|m| m.locality_id);
match crate::state::session::clear(
repo_root,
explicit_profile,
locality_id.as_deref(),
crate::state::session::SessionClearOptions::default(),
) {
Ok(_) => {
result.session_cleared = Some(true);
}
Err(e) => {
result.ok = false;
result.session_cleared = Some(false);
result.error = Some(format!("session clear failed: {e:#}"));
return result;
}
}
}
result
}
#[cfg(test)]
mod tests {
    use super::session_state_status;
    use crate::state::session::{SessionMode, SessionOwnerKind, SessionStateFile};
    // A session file that never recorded a `session_id` must be classified
    // as "stale" even when no wall clock (`None`) is supplied.
    #[test]
    fn session_state_status_treats_missing_session_id_as_stale_without_clock() {
        let state = SessionStateFile {
            schema_version: 3,
            started_at_epoch_s: 10,
            last_started_at_epoch_s: 10,
            start_count: 1,
            // The condition under test: no session id was ever recorded.
            session_id: None,
            mode: SessionMode::Research,
            owner_kind: SessionOwnerKind::Interactive,
            owner_id: None,
            supervisor_id: None,
            lease_ttl_secs: None,
            last_heartbeat_at_epoch_s: None,
            revision: 0,
        };
        // `None` clock: the verdict must not depend on the current time.
        assert_eq!(session_state_status(&state, None), "stale");
    }
}