use std::fs;
use std::path::{Path, PathBuf};
use std::process::ExitCode;
use anyhow::{Context, Result, bail};
use serde::Serialize;
use crate::content_trust::ContentTrust;
use crate::extensions;
use crate::extensions::backlog_state;
use crate::handoff::{self, BranchMode, CheckoutContinuityAdvisory};
use crate::memory::{
entries as memory_entries, pod_migration, provider as memory_provider, recall as memory_recall,
};
use crate::output::{self, CommandReport, WorkStreamView};
use crate::paths::git as git_paths;
use crate::paths::state::StateLayout;
use crate::profile;
use crate::repo::marker as repo_marker;
use crate::repo::registry as repo_registry;
use crate::repo::truth as project_truth;
use crate::session_boundary::{SessionBoundaryAction, SessionBoundaryRecommendation};
use crate::state::compiled as compiled_state;
use crate::state::config_migration;
use crate::state::escalation as escalation_state;
use crate::state::issue_refs;
use crate::state::machine_presence;
use crate::state::pod_identity;
use crate::state::policy_projection;
use crate::state::runtime as runtime_state;
use crate::state::session as session_state;
use crate::state::session_gates;
use crate::state::validation_profile;
use tracing::{debug, info_span};
/// High-level classification of the workspace's continuity situation at
/// `ccd start`, produced by `derive_disposition` and serialized in
/// snake_case in the JSON report.
///
/// Variant meanings follow from their names; the exact derivation rules live
/// in `derive_disposition` (not visible in this chunk).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum StartupDisposition {
    ResumeActiveContinuity,
    NeedsContinuityInput,
    NoActiveContinuity,
    WorkflowAttentionRequired,
    ResumeBlockedContinuity,
}
/// Named trailing sections of the `start` text report. Their relative render
/// order is fixed by [`START_RENDER_SECTION_ORDER`].
#[derive(Clone, Copy, PartialEq, Eq)]
pub(crate) enum StartRenderSection {
    EffectivePolicy,
    EffectiveMemory,
    Backlog,
    ExecutionGates,
    Handoff,
}
impl StartRenderSection {
    /// Stable snake_case string identifier for the section.
    pub(crate) fn as_str(self) -> &'static str {
        match self {
            Self::EffectivePolicy => "effective_policy",
            Self::EffectiveMemory => "effective_memory",
            Self::Backlog => "backlog",
            Self::ExecutionGates => "execution_gates",
            Self::Handoff => "handoff",
        }
    }
}
/// Fixed order in which the trailing content sections are rendered by
/// `StartReport::render_text`.
pub(crate) const START_RENDER_SECTION_ORDER: &[StartRenderSection] = &[
    StartRenderSection::EffectivePolicy,
    StartRenderSection::EffectiveMemory,
    StartRenderSection::Backlog,
    StartRenderSection::ExecutionGates,
    StartRenderSection::Handoff,
];
/// Full payload returned by `ccd start` (see `run` / `run_compiled_only`).
///
/// `Option`/`Vec` fields carrying `skip_serializing_if` are omitted from the
/// JSON output when absent or empty.
#[derive(Serialize)]
pub struct StartReport {
    command: &'static str,
    ok: bool,
    disposition: StartupDisposition,
    session_boundary: SessionBoundaryRecommendation,
    // Workspace identity: `path` and `workspace_path` both hold the repo root,
    // and `project_id`/`locality_id` carry the same locality value (see `run`).
    path: String,
    profile: String,
    project_id: String,
    workspace_path: String,
    work_stream: WorkStreamView,
    locality_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    session_id: Option<String>,
    compact_summary: CompactSummary,
    source_order: &'static str,
    manifest: ManifestView,
    // Pod/machine identity and presence projections.
    pod_identity: pod_identity::PodIdentityView,
    machine_identity: pod_identity::MachineIdentityView,
    machine_presence: machine_presence::MachinePresenceView,
    execution_context: machine_presence::MachineExecutionContextView,
    takeover_preconditions: machine_presence::MachineTakeoverPreconditionsView,
    coordination_scope: pod_identity::CoordinationScopeView,
    sources: Vec<SourceView>,
    // Rendered policy/memory surfaces plus recall results.
    effective_policy: RenderedView,
    policy_projection: policy_projection::PolicyProjectionView,
    effective_memory: MemoryView,
    memory_recall: memory_recall::StartRecallView,
    memory_provider: memory_provider::MemoryProviderView,
    startup_recall: memory_provider::StartupRecallView,
    backlog: backlog_state::GitHubBacklogCacheView,
    extension_dispatch: extensions::dispatch::ExtensionStartupPayload,
    execution_gates: session_gates::ExecutionGatesView,
    handoff: HandoffView,
    recovery: runtime_state::RuntimeRecoveryView,
    session_state: SessionStateView,
    // Initialized to None by the builders; attached later via `with_activation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    activation: Option<session_state::SessionStateStartReport>,
    escalation: escalation_state::EscalationView,
    compiled_state: compiled_state::CompiledStateSurfaceView,
    alerts: Vec<StartAlert>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    refresh_actions: Vec<RefreshAction>,
    warnings: Vec<String>,
}
// Process exit codes for `start --check`; 0 (ready) and 1 (command error) are
// implicit. The full mapping is echoed in `StartCheckReport::render_text`.
const START_CHECK_EXIT_NOT_READY: u8 = 3;
const START_CHECK_EXIT_BLOCKED_ESCALATION: u8 = 2;
/// Internal readiness verdict for `start --check`. Serialized through the
/// expanded [`StartReadinessView`] via the custom `Serialize` impl.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum StartCheckReadiness {
    Ready,
    Blocked,
    NotReady,
}
/// Payload returned by `ccd start --check` (see `run_check`): a readiness
/// verdict plus the alerts/escalations that informed it, without the full
/// policy/memory surfaces of [`StartReport`].
#[derive(Serialize)]
pub struct StartCheckReport {
    command: &'static str,
    // Mirrors `readiness.ok()` at construction time (see `run_check`).
    ok: bool,
    disposition: StartupDisposition,
    path: String,
    profile: String,
    project_id: String,
    workspace_path: String,
    work_stream: WorkStreamView,
    locality_id: String,
    // Always "check" (set in `run_check`).
    mode: &'static str,
    readiness: StartCheckReadiness,
    escalation: escalation_state::EscalationView,
    alerts: Vec<StartAlert>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    refresh_actions: Vec<RefreshAction>,
    warnings: Vec<String>,
}
/// JSON projection of [`StartCheckReadiness`]: the status together with its
/// process exit code and explanatory note.
#[derive(Serialize)]
struct StartReadinessView {
    status: StartReadinessStatus,
    exit_code: u8,
    note: &'static str,
}
/// Serializable readiness status discriminant (snake_case in JSON).
#[derive(Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
enum StartReadinessStatus {
    Ready,
    Blocked,
    NotReady,
}
/// Manifest location and load status plus its entry list, as shown in the
/// start report.
#[derive(Serialize)]
struct ManifestView {
    path: String,
    status: &'static str,
    entries: Vec<String>,
}
/// One contributing source file in the report's `sources` list: a kind tag,
/// its path, a load status, and (optionally) its text content.
#[derive(Serialize)]
struct SourceView {
    kind: &'static str,
    path: String,
    status: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<String>,
}
/// A rendered text surface (e.g. effective policy): overall status, the
/// combined content, and the per-part provenance list.
#[derive(Serialize)]
struct RenderedView {
    status: &'static str,
    content: String,
    parts: Vec<RenderedPart>,
}
/// The effective-memory surface: like [`RenderedView`] but with an additional
/// structured projection of the memory entries.
#[derive(Serialize)]
struct MemoryView {
    status: &'static str,
    content: String,
    parts: Vec<RenderedPart>,
    structured: memory_entries::StructuredMemoryView,
}
/// Provenance entry for one part of a rendered surface.
#[derive(Serialize)]
struct RenderedPart {
    kind: &'static str,
    path: String,
    status: &'static str,
}
/// Borrowed runtime text surfaces for the five memory scopes (profile,
/// locality, pod, branch, clone), bundled for `render_effective_memory_view`.
struct EffectiveMemorySurfaces<'a> {
    profile: &'a runtime_state::RuntimeTextSurface,
    locality: &'a runtime_state::RuntimeTextSurface,
    pod: &'a runtime_state::RuntimeTextSurface,
    branch: &'a runtime_state::RuntimeTextSurface,
    clone: &'a runtime_state::RuntimeTextSurface,
}
/// Handoff surface as shown in the report: source path, status, and the
/// rendered handoff content.
#[derive(Serialize)]
struct HandoffView {
    path: String,
    status: &'static str,
    content: String,
}
/// Projection of the persisted session state file.
///
/// The lifecycle projection is `#[serde(flatten)]`ed, so its fields appear
/// directly on this object in JSON.
#[derive(Serialize)]
struct SessionStateView {
    path: String,
    status: &'static str,
    schema_version: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    session_id: Option<String>,
    // Epoch-second timestamps and start counter from the session file.
    started_at_epoch_s: Option<u64>,
    last_started_at_epoch_s: Option<u64>,
    start_count: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    mode: Option<session_state::SessionMode>,
    #[serde(flatten)]
    lifecycle: session_state::SessionLifecycleProjection,
}
/// A single start-time alert: which check raised it, how severe it is, and an
/// optional structured details payload (e.g. used by the codemap renderer).
#[derive(Clone, Serialize)]
struct StartAlert {
    check: &'static str,
    severity: StartAlertSeverity,
    message: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    details: Option<serde_json::Value>,
}
impl StartAlert {
fn warning(check: &'static str, message: String) -> Self {
Self {
check,
severity: StartAlertSeverity::Warning,
message,
details: None,
}
}
fn error(check: &'static str, message: String) -> Self {
Self {
check,
severity: StartAlertSeverity::Error,
message,
details: None,
}
}
}
/// Alert severity; in `start --check` both `Warning` and `Error` block
/// readiness, while `Info` does not (see `StartCheckReport::render_text`).
#[derive(Clone, Copy, Serialize)]
#[serde(rename_all = "snake_case")]
enum StartAlertSeverity {
    Info,
    Warning,
    Error,
}
/// Outcome of one pre-resolution refresh step (`--refresh`).
#[derive(Clone, Serialize)]
pub struct RefreshAction {
    target: &'static str,
    status: &'static str,
    message: String,
    // Not serialized: a git state captured during refresh that `resolve_core`
    // takes and reuses to avoid re-reading git (see the `find_map` there).
    #[serde(skip)]
    cached_git: Option<handoff::GitState>,
}
/// Condensed top-of-report summary: where things stand, what to do next, and
/// which guardrails apply.
#[derive(Serialize)]
struct CompactSummary {
    current_state: CompactCurrentState,
    next_focus: CompactNextFocus,
    active_guardrails: CompactGuardrails,
}
/// "Where things stand" half of the compact summary.
#[derive(Serialize)]
struct CompactCurrentState {
    // Which surface this summary was derived from.
    source: &'static str,
    title: String,
    current_system_state: Vec<String>,
}
/// "What to do next" half of the compact summary.
#[derive(Serialize)]
struct CompactNextFocus {
    continuity_immediate_actions: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    execution_gate_anchor: Option<String>,
    backlog: CompactBacklogSummary,
}
/// Backlog slice of the compact summary; optional fields are omitted from
/// JSON when absent.
#[derive(Serialize)]
struct CompactBacklogSummary {
    status: CompactBacklogStatus,
    #[serde(skip_serializing_if = "Option::is_none")]
    content_trust: Option<ContentTrust>,
    #[serde(skip_serializing_if = "Option::is_none")]
    repo: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    fetched_at_epoch_s: Option<u64>,
    active_items: Vec<backlog_state::GitHubBacklogSummaryItem>,
}
/// Cache state of the summarized backlog (snake_case in JSON).
#[derive(Clone, Copy, Serialize)]
#[serde(rename_all = "snake_case")]
enum CompactBacklogStatus {
    Missing,
    Empty,
    Loaded,
    Stale,
}
/// Guardrails slice of the compact summary: operational guardrail lines plus
/// the compiled effective-memory view.
#[derive(Serialize)]
struct CompactGuardrails {
    operational_guardrails: Vec<String>,
    effective_memory: compiled_state::CompiledMemoryView,
}
/// Result of `resolve_manifest`: the chosen source order, the manifest view,
/// and the project-truth sources it contributed to the report's source list.
struct ManifestResolution {
    source_order: &'static str,
    manifest: ManifestView,
    project_truth_sources: Vec<SourceView>,
}
/// A text source loaded from disk (see `read_text_source` usage in `run`):
/// kind tag, on-disk path, load status, and optional content.
#[derive(Clone)]
struct TextSource {
    kind: &'static str,
    path: PathBuf,
    status: &'static str,
    content: Option<String>,
}
impl StartReport {
    /// Attaches the session-activation report produced after the base report
    /// was built, returning the updated report for builder-style chaining.
    pub(crate) fn with_activation(
        self,
        activation: session_state::SessionStateStartReport,
    ) -> Self {
        Self {
            activation: Some(activation),
            ..self
        }
    }
}
impl CommandReport for StartReport {
    // The interactive `start` report always exits 0; readiness-based exit
    // codes belong to the separate `start --check` report.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }
    /// Renders the human-readable report: identity/status lines first, then
    /// escalations, refresh actions, alerts, and sources, and finally the
    /// content sections in `START_RENDER_SECTION_ORDER`.
    fn render_text(&self) {
        println!(
            "Resolved CCD session context for profile {} and linked project {}.",
            self.profile, self.project_id
        );
        println!("Source order: {}", self.source_order);
        println!(
            "Manifest: {} ({})",
            self.manifest.path, self.manifest.status
        );
        println!("Pod identity: {}", self.pod_identity.status);
        println!("Machine identity: {}", self.machine_identity.status);
        println!("Machine presence: {}", self.machine_presence.status);
        println!("Execution context: {}", self.execution_context.status);
        println!(
            "Takeover preconditions: {}",
            self.takeover_preconditions.status
        );
        println!("Coordination scope: {}", self.coordination_scope.status);
        println!("Handoff: {} ({})", self.handoff.path, self.handoff.status);
        println!(
            "Session state: {} ({})",
            self.session_state.path, self.session_state.status
        );
        // Activation is only present when `with_activation` was called; its
        // warning (if any) is printed before the summary line.
        if let Some(activation) = &self.activation {
            if let Some(warning) = activation.warning_message() {
                println!("Activation warning: {warning}");
            }
            println!("Activation: {}", activation.summary_line());
        }
        println!(
            "Memory provider: {} ({}, health {}, start-recall {})",
            self.memory_provider.effective_recall_provider.name,
            self.memory_provider.status,
            self.memory_provider.health.status,
            self.memory_provider.start_recall_policy.mode
        );
        println!(
            "Startup recall: {} ({} result(s))",
            self.startup_recall.status,
            self.startup_recall.results.len()
        );
        if let Some(message) = &self.startup_recall.message {
            println!("- {message}");
        }
        render_memory_recall_section(&self.memory_recall);
        println!(
            "Session boundary: {} ({})",
            self.session_boundary.action.as_str(),
            self.session_boundary.summary
        );
        for evidence in &self.session_boundary.evidence {
            println!("- {evidence}");
        }
        println!(
            "Compiled state: {} ({}, target: {})",
            self.compiled_state.path, self.compiled_state.status, self.compiled_state.target
        );
        // The backlog cache line is suppressed entirely when no cache exists.
        if self.backlog.status != "missing" {
            println!(
                "Backlog cache: {} ({})",
                self.backlog.path, self.backlog.status
            );
        }
        println!(
            "Escalation: {} ({} blocking, {} non-blocking, status: {}).",
            self.escalation.path,
            self.escalation.blocking_count,
            self.escalation.non_blocking_count,
            self.escalation.status
        );
        // Only blocking-kind entries are listed individually.
        if self.escalation.blocking_count > 0 {
            println!("Blocking escalations:");
            for entry in
                self.escalation.entries.iter().filter(|entry| {
                    matches!(entry.kind, escalation_state::EscalationKind::Blocking)
                })
            {
                println!("- {}: {}", entry.id, entry.reason);
            }
        }
        if !self.refresh_actions.is_empty() {
            println!("Refresh actions:");
            for action in &self.refresh_actions {
                println!("- {} [{}] {}", action.target, action.status, action.message);
            }
        }
        // `render_start_alert` returns true when it fully handled the alert
        // (currently the informational codemap alert); otherwise fall back to
        // the generic "Severity: message" line.
        for alert in &self.alerts {
            if render_start_alert(alert) {
                continue;
            }
            println!("{}: {}", alert.severity.title_label(), alert.message);
        }
        if !self.sources.is_empty() {
            println!();
            println!("Sources:");
            for source in &self.sources {
                println!("- {} [{}] {}", source.kind, source.status, source.path);
            }
        }
        // Trailing content sections in their fixed order.
        for section in START_RENDER_SECTION_ORDER {
            match section {
                StartRenderSection::EffectivePolicy => {
                    render_section("Effective Policy", &self.effective_policy.content)
                }
                StartRenderSection::EffectiveMemory => {
                    render_section("Effective Memory", &self.effective_memory.content)
                }
                StartRenderSection::Backlog => render_backlog_section(&self.backlog),
                StartRenderSection::ExecutionGates => {
                    render_execution_gates_section(&self.execution_gates)
                }
                StartRenderSection::Handoff => render_section("Handoff", &self.handoff.content),
            }
        }
    }
}
impl CommandReport for StartCheckReport {
fn exit_code(&self) -> ExitCode {
ExitCode::from(self.readiness.exit_code())
}
fn render_text(&self) {
println!(
"Start readiness is {} for profile {} and linked project {}.",
self.readiness.status_label(),
self.profile,
self.project_id
);
println!("{}", self.readiness.note());
println!(
"Exit codes: 0 ready, {START_CHECK_EXIT_BLOCKED_ESCALATION} blocked, {START_CHECK_EXIT_NOT_READY} not ready, 1 command error."
);
if !self.refresh_actions.is_empty() {
println!("Refresh actions:");
for action in &self.refresh_actions {
println!("- {} [{}] {}", action.target, action.status, action.message);
}
}
if self.alerts.is_empty() {
println!("No readiness alerts are active.");
if self.readiness.status() == StartReadinessStatus::Blocked {
println!(
"Blocking escalations: {} blocking, {} non-blocking at {}.",
self.escalation.blocking_count,
self.escalation.non_blocking_count,
self.escalation.path
);
for entry in self.escalation.entries.iter().filter(|entry| {
matches!(entry.kind, escalation_state::EscalationKind::Blocking)
}) {
println!("- {}: {}", entry.id, entry.reason);
}
}
return;
}
if self.readiness.status() == StartReadinessStatus::Blocked {
println!(
"Blocking escalations: {} blocking, {} non-blocking at {}.",
self.escalation.blocking_count,
self.escalation.non_blocking_count,
self.escalation.path
);
for entry in
self.escalation.entries.iter().filter(|entry| {
matches!(entry.kind, escalation_state::EscalationKind::Blocking)
})
{
println!("- {}: {}", entry.id, entry.reason);
}
}
let blocking_alerts: Vec<_> = self
.alerts
.iter()
.filter(|a| !matches!(a.severity, StartAlertSeverity::Info))
.collect();
if !blocking_alerts.is_empty() {
println!("Blocking alerts (warnings also block in `--check`):");
for alert in &blocking_alerts {
println!(
"- [{}] {}: {}",
alert.severity.upper_label(),
alert.check,
alert.message
);
}
}
let info_alerts: Vec<_> = self
.alerts
.iter()
.filter(|a| matches!(a.severity, StartAlertSeverity::Info))
.collect();
if !info_alerts.is_empty() {
println!("Informational:");
for alert in &info_alerts {
println!(
"- [{}] {}: {}",
alert.severity.upper_label(),
alert.check,
alert.message
);
}
}
}
}
impl StartCheckReadiness {
    /// `true` only for `Ready`; feeds the check report's top-level `ok` flag.
    fn ok(self) -> bool {
        matches!(self, Self::Ready)
    }
    /// Process exit code for `start --check` (0 ready, 2 blocked, 3 not ready).
    fn exit_code(self) -> u8 {
        match self {
            Self::Ready => 0,
            Self::Blocked => START_CHECK_EXIT_BLOCKED_ESCALATION,
            Self::NotReady => START_CHECK_EXIT_NOT_READY,
        }
    }
    /// Human-readable explanation printed under the status line and included
    /// in the serialized readiness view.
    fn note(self) -> &'static str {
        match self {
            Self::Ready => {
                "No start alerts are active; the workspace-local session context is ready to load."
            }
            Self::Blocked => "Unresolved blocking escalation prevents readiness.",
            Self::NotReady => {
                "One or more start alerts are active; `start --check` is intentionally fail-closed, so warnings also block until the workspace state is reconciled."
            }
        }
    }
    /// Serializable status discriminant.
    fn status(self) -> StartReadinessStatus {
        match self {
            Self::Ready => StartReadinessStatus::Ready,
            Self::Blocked => StartReadinessStatus::Blocked,
            Self::NotReady => StartReadinessStatus::NotReady,
        }
    }
    /// Lowercase label interpolated into the text report's first line.
    fn status_label(self) -> &'static str {
        match self {
            Self::Ready => "ready",
            Self::Blocked => "blocked",
            Self::NotReady => "not_ready",
        }
    }
    /// Expanded projection consumed by the custom `Serialize` impl.
    fn view(self) -> StartReadinessView {
        StartReadinessView {
            status: self.status(),
            exit_code: self.exit_code(),
            note: self.note(),
        }
    }
}
// Serialize as the expanded view (status + exit_code + note) instead of a
// bare enum tag, so JSON consumers get the full readiness context.
impl Serialize for StartCheckReadiness {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.view().serialize(serializer)
    }
}
impl StartAlertSeverity {
    /// Title-case label used in the interactive report's alert lines.
    fn title_label(self) -> &'static str {
        match self {
            Self::Info => "Info",
            Self::Warning => "Warning",
            Self::Error => "Error",
        }
    }
    /// Bracketed upper-case label used in the `--check` alert lists.
    fn upper_label(self) -> &'static str {
        match self {
            Self::Info => "INFO",
            Self::Warning => "WARN",
            Self::Error => "ERROR",
        }
    }
}
/// Specialized renderer for the informational `codemap_db` alert.
///
/// Returns `true` when it handled the alert (so the caller suppresses the
/// generic alert line) and `false` for every other alert.
fn render_start_alert(alert: &StartAlert) -> bool {
    let handles_alert =
        alert.check == "codemap_db" && matches!(alert.severity, StartAlertSeverity::Info);
    if !handles_alert {
        return false;
    }
    // Prefer the structured entry count from the details payload; fall back to
    // the alert's plain message when it is absent or not a u64.
    let maybe_count = alert
        .details
        .as_ref()
        .and_then(|details| details.get("entry_count"))
        .and_then(serde_json::Value::as_u64);
    match maybe_count {
        Some(entry_count) => {
            let entry_label = if entry_count == 1 { "entry" } else { "entries" };
            println!("Codemap: structural code map available ({entry_count} {entry_label}).");
        }
        None => println!("Codemap: {}", alert.message),
    }
    println!(
        "Read project truth first, then use `ccd codemap query --prefix <path>` to narrow unfamiliar areas before opening more files."
    );
    true
}
/// Prints the structured memory-recall summary for the interactive report;
/// prints nothing when the recall view is `missing` or `skipped`.
fn render_memory_recall_section(view: &memory_recall::StartRecallView) {
    if view.status == "missing" || view.status == "skipped" {
        return;
    }
    println!("Memory recall: {}", view.status);
    // Provider lines are only shown when both the name and kind are present.
    if let Some((provider, kind)) = view
        .configured_provider
        .as_deref()
        .zip(view.configured_provider_kind.as_deref())
    {
        println!("- configured provider: {provider} ({kind})");
    }
    if let Some((provider, kind)) = view
        .used_provider
        .as_deref()
        .zip(view.used_provider_kind.as_deref())
    {
        println!("- used provider: {provider} ({kind})");
    }
    println!(
        "- budget: search={}, describe={}, expand={}",
        view.budget.search_limit, view.budget.describe_limit, view.budget.expand_limit
    );
    println!(
        "- results: search={}, describe={}, expand={}",
        view.search_results.len(),
        view.described_results.len(),
        view.expanded_results.len()
    );
    if let Some(error) = view.error.as_deref() {
        println!("- error: {error}");
    }
    for warning in &view.warnings {
        println!("- warning: {warning}");
    }
}
/// Builds the full `ccd start` report: resolves core state, the manifest and
/// policy/memory surfaces, assembles the source list, and returns the
/// populated [`StartReport`] (with `activation` left as `None`).
///
/// `refresh` is forwarded to `resolve_core`, which runs pre-resolution
/// refresh actions when it is set.
pub fn run(repo_root: &Path, explicit_profile: Option<&str>, refresh: bool) -> Result<StartReport> {
    let _span = info_span!("start").entered();
    let core = resolve_core(repo_root, explicit_profile, refresh)?;
    let disposition = derive_disposition(&core);
    debug!(?disposition, "start disposition determined");
    let compact_summary = build_compact_summary(&core);
    let session_boundary = build_session_boundary(&core, disposition);
    // Best-effort overlay migration: the result is deliberately discarded so a
    // migration failure cannot abort `start`.
    let _ = config_migration::migrate_repo_overlay_if_needed(&core.layout, &core.locality_id);
    let manifest = resolve_manifest(repo_root, &core.layout, &core.locality_id)?;
    // The pod manifest source is only read when a pod identity is active.
    let pod_manifest = core
        .active_pod_identity
        .as_ref()
        .map(|identity| read_text_source("pod_manifest", &identity.manifest_path))
        .transpose()?;
    let policy_sources = policy_projection::read_policy_sources(
        &core.layout,
        &core.locality_id,
        core.active_pod_identity.as_ref(),
    )?;
    let effective_policy = render_effective_policy_view(&policy_sources);
    let policy_projection = policy_projection::build_view(
        &policy_sources,
        policy_projection::PolicySessionContext::from_start_view(
            core.loaded_session.view.mode,
            &core.loaded_session.view.lifecycle,
        ),
        &core.escalation,
    );
    let effective_memory = render_effective_memory_view(
        EffectiveMemorySurfaces {
            profile: &core.sources.profile_memory,
            locality: &core.sources.locality_memory,
            pod: &core.sources.pod_memory,
            branch: &core.sources.branch_memory,
            clone: &core.sources.clone_memory,
        },
        runtime_state::structured_memory_view(&core.sources),
        core.compiled_state.store.pod_identity_active,
        &core.compiled_state.store.effective_memory,
    );
    let handoff_view = into_compiled_handoff_view(
        &core.sources.handoff,
        compiled_state::render_handoff_content(&core.compiled_state.store.handoff),
    );
    // Assemble the report's source list: project truth first, then the
    // optional pod manifest, the memory surfaces, validation profile, handoff,
    // gates/session/escalation state, the backlog cache, and compiled state.
    let mut sources = manifest.project_truth_sources;
    if let Some(source) = pod_manifest {
        sources.push(into_source_view(source));
    }
    sources.push(runtime_source_view(&core.sources.profile_memory));
    sources.push(runtime_source_view(&core.sources.locality_memory));
    sources.push(runtime_source_view(&core.sources.pod_memory));
    sources.push(runtime_source_view(&core.sources.branch_memory));
    sources.push(runtime_source_view(&core.sources.clone_memory));
    if core.validation_profile.status != "missing" {
        sources.push(validation_profile_source_view(&core.validation_profile));
    }
    sources.push(runtime_source_view(&core.sources.handoff));
    sources.push(execution_gates_source_view(
        &core.execution_gates.view,
        core.execution_gates.raw.as_ref(),
    )?);
    sources.push(session_state_source_view(
        &core.loaded_session.view,
        core.loaded_session.raw.as_ref(),
    )?);
    sources.push(SourceView {
        kind: "escalation_state",
        path: core.escalation.path.clone(),
        status: core.escalation.status,
        content: None,
    });
    sources.extend(backlog_source_views(&core.backlog));
    sources.push(SourceView {
        kind: "compiled_state",
        path: core.compiled_state.surface.path.clone(),
        status: core.compiled_state.surface.status,
        content: None,
    });
    Ok(StartReport {
        command: "start",
        ok: true,
        disposition,
        session_boundary,
        path: repo_root.display().to_string(),
        profile: core.profile,
        // `project_id` and `locality_id` intentionally carry the same value.
        project_id: core.locality_id.clone(),
        workspace_path: repo_root.display().to_string(),
        work_stream: output::work_stream_view(core.git.as_ref()),
        locality_id: core.locality_id,
        session_id: core.session_id,
        compact_summary,
        source_order: manifest.source_order,
        manifest: manifest.manifest,
        pod_identity: core.pod_identity,
        machine_identity: core.machine_identity,
        machine_presence: core.machine_presence,
        execution_context: core.execution_context,
        takeover_preconditions: core.takeover_preconditions,
        coordination_scope: core.coordination_scope,
        sources,
        effective_policy,
        policy_projection,
        effective_memory,
        memory_recall: core.memory_recall,
        memory_provider: core.memory_provider,
        startup_recall: core.startup_recall,
        backlog: core.backlog,
        extension_dispatch: core.extension_dispatch,
        execution_gates: core.execution_gates.view,
        handoff: handoff_view,
        recovery: runtime_state::recovery_view(&core.recovery),
        session_state: core.loaded_session.view,
        // Attached later (if at all) via `StartReport::with_activation`.
        activation: None,
        escalation: core.escalation,
        compiled_state: core.compiled_state.surface,
        alerts: core.alerts,
        refresh_actions: core.refresh_actions,
        warnings: core.warnings,
    })
}
/// Builds the `start --check` report: resolves the same core state as `run`
/// but derives only a readiness verdict from the collected alerts, without
/// rendering the manifest/policy/memory surfaces.
pub fn run_check(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    refresh: bool,
) -> Result<StartCheckReport> {
    let core = resolve_core(repo_root, explicit_profile, refresh)?;
    let disposition = derive_disposition(&core);
    // Readiness is derived from the alert list (see `start_readiness_view`).
    let readiness = start_readiness_view(&core.alerts);
    Ok(StartCheckReport {
        command: "start",
        ok: readiness.ok(),
        disposition,
        path: repo_root.display().to_string(),
        profile: core.profile,
        // `project_id` is cloned before `locality_id` moves the same value.
        project_id: core.locality_id.clone(),
        workspace_path: repo_root.display().to_string(),
        work_stream: output::work_stream_view(core.git.as_ref()),
        locality_id: core.locality_id,
        mode: "check",
        readiness,
        escalation: core.escalation,
        alerts: core.alerts,
        refresh_actions: core.refresh_actions,
        warnings: core.warnings,
    })
}
/// Variant of [`run`] that serves only the compiled core state: manifest,
/// sources, policy, memory, recall, and handoff surfaces are replaced with
/// empty placeholder views marked `"skipped"`.
pub fn run_compiled_only(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    refresh: bool,
) -> Result<StartReport> {
    let core = resolve_core(repo_root, explicit_profile, refresh)?;
    let disposition = derive_disposition(&core);
    let compact_summary = build_compact_summary(&core);
    let session_boundary = build_session_boundary(&core, disposition);
    Ok(StartReport {
        command: "start",
        ok: true,
        disposition,
        session_boundary,
        path: repo_root.display().to_string(),
        profile: core.profile,
        // `project_id` is cloned before `locality_id` moves the same value.
        project_id: core.locality_id.clone(),
        workspace_path: repo_root.display().to_string(),
        work_stream: output::work_stream_view(core.git.as_ref()),
        locality_id: core.locality_id,
        session_id: core.session_id,
        compact_summary,
        source_order: "manifest",
        // Manifest resolution is skipped in this mode.
        manifest: ManifestView {
            path: String::new(),
            status: "skipped",
            entries: Vec::new(),
        },
        pod_identity: core.pod_identity,
        machine_identity: core.machine_identity,
        machine_presence: core.machine_presence,
        execution_context: core.execution_context,
        takeover_preconditions: core.takeover_preconditions,
        coordination_scope: core.coordination_scope,
        sources: Vec::new(),
        // Placeholder policy/memory surfaces, all marked "skipped".
        effective_policy: RenderedView {
            status: "skipped",
            content: String::new(),
            parts: Vec::new(),
        },
        policy_projection: policy_projection::skipped_view(),
        effective_memory: MemoryView {
            status: "skipped",
            content: String::new(),
            parts: Vec::new(),
            structured: memory_entries::StructuredMemoryView {
                status: "skipped",
                profile_entries: Vec::new(),
                repo_entries: Vec::new(),
                pod_entries: Vec::new(),
                branch_entries: Vec::new(),
                clone_entries: Vec::new(),
                diagnostics: Vec::new(),
            },
        },
        memory_recall: memory_recall::StartRecallView::skipped(),
        memory_provider: core.memory_provider,
        startup_recall: core.startup_recall,
        backlog: core.backlog,
        extension_dispatch: core.extension_dispatch,
        execution_gates: core.execution_gates.view,
        handoff: HandoffView {
            path: String::new(),
            status: "skipped",
            content: String::new(),
        },
        recovery: runtime_state::recovery_view(&core.recovery),
        session_state: core.loaded_session.view,
        // Attached later (if at all) via `StartReport::with_activation`.
        activation: None,
        escalation: core.escalation,
        compiled_state: core.compiled_state.surface,
        alerts: core.alerts,
        refresh_actions: core.refresh_actions,
        warnings: core.warnings,
    })
}
/// State resolved once by `resolve_core` and shared by `run`, `run_check`,
/// and `run_compiled_only`.
struct ResolvedCore {
    profile: String,
    locality_id: String,
    layout: StateLayout,
    // None when the resolved substrate is not git (see `resolve_core`).
    git: Option<handoff::GitState>,
    sources: runtime_state::RuntimeSourceSurfaces,
    recovery: runtime_state::LoadedRuntimeRecoveryState,
    compiled_state: compiled_state::PreparedCompiledState,
    active_pod_identity: Option<pod_identity::LoadedPodIdentity>,
    pod_identity: pod_identity::PodIdentityView,
    machine_identity: pod_identity::MachineIdentityView,
    machine_presence: machine_presence::MachinePresenceView,
    execution_context: machine_presence::MachineExecutionContextView,
    takeover_preconditions: machine_presence::MachineTakeoverPreconditionsView,
    coordination_scope: pod_identity::CoordinationScopeView,
    memory_recall: memory_recall::StartRecallView,
    memory_provider: memory_provider::MemoryProviderView,
    startup_recall: memory_provider::StartupRecallView,
    loaded_session: LoadedSessionState,
    execution_gates: LoadedExecutionGates,
    validation_profile: validation_profile::LoadedValidationProfile,
    backlog: backlog_state::GitHubBacklogCacheView,
    assignment_outcome: extensions::dispatch::AssignmentOutcome,
    extension_dispatch: extensions::dispatch::ExtensionStartupPayload,
    session_id: Option<String>,
    escalation: escalation_state::EscalationView,
    alerts: Vec<StartAlert>,
    refresh_actions: Vec<RefreshAction>,
    warnings: Vec<String>,
}
fn resolve_core(
repo_root: &Path,
explicit_profile: Option<&str>,
refresh: bool,
) -> Result<ResolvedCore> {
let profile = profile::resolve(explicit_profile)?;
let layout = StateLayout::resolve(repo_root, profile.clone())?;
ensure_profile_exists(&layout)?;
debug!(profile = %profile, "profile resolved");
let marker = repo_marker::load(repo_root)?.ok_or_else(|| {
let attach_command = if layout.profile().as_str() == profile::DEFAULT_PROFILE {
format!("ccd attach --path {}", repo_root.display())
} else {
format!(
"ccd attach --path {} --profile {}",
repo_root.display(),
layout.profile()
)
};
let link_command = if layout.profile().as_str() == profile::DEFAULT_PROFILE {
format!("ccd link --path {}", repo_root.display())
} else {
format!(
"ccd link --path {} --profile {}",
repo_root.display(),
layout.profile()
)
};
anyhow::anyhow!(
"ccd start cannot load policy, memory, handoff, or dispatch state because this workspace is not linked: {} is missing. Run `{attach_command}` to bootstrap a new project overlay, or `{link_command}` to reconnect to an existing one.",
repo_root.join(repo_marker::MARKER_FILE).display(),
)
})?;
let locality_id = marker.locality_id;
ensure_repo_registry_exists(&layout, &locality_id)?;
debug!(locality_id = %locality_id, "repo linked");
let clone_profile_root = layout.clone_profile_root();
fs::create_dir_all(&clone_profile_root).with_context(|| {
format!(
"failed to create directory {}",
clone_profile_root.display()
)
})?;
let session_id = session_state::load_session_id(&layout)?;
let mut refresh_actions = if refresh {
refresh_before_resolve(repo_root, &layout)
} else {
Vec::new()
};
let git = if layout.resolved_substrate().is_git() {
Some(
refresh_actions
.iter_mut()
.find_map(|action| action.cached_git.take())
.map(Ok)
.unwrap_or_else(|| {
handoff::read_git_state(repo_root, BranchMode::AllowDetachedHead)
})?,
)
} else {
None
};
let checkout_continuity_advisory = git
.as_ref()
.and_then(|git| handoff::checkout_continuity_advisory(repo_root, git));
let raw_backlog_cache = extensions::load_work_queue_snapshot(&layout)?;
let backlog_cache_status =
backlog_cache_status_for_start(raw_backlog_cache.as_ref(), &refresh_actions)?;
let startup_ctx = extensions::StartupContext {
layout: &layout,
repo_root,
locality_id: &locality_id,
allow_cached_work: backlog_cache_status == "loaded",
raw_backlog_cache: raw_backlog_cache.as_ref(),
};
let pre_assignment_snapshot = match &session_id {
Some(sid) => extensions::dispatch::load_session_assignment(&startup_ctx, sid)?,
None => match git.as_ref() {
Some(git) => extensions::dispatch::load_branch_assignment(&startup_ctx, &git.branch)?,
None => None,
},
};
let worktree = repo_root
.file_name()
.map(|v| v.to_string_lossy().into_owned())
.unwrap_or_else(|| repo_root.display().to_string());
let assignment_outcome = match &session_id {
Some(sid) => {
let branch_opt = git
.as_ref()
.and_then(|git| (git.branch != "HEAD").then_some(git.branch.as_str()));
extensions::dispatch::ensure_assignment(
&startup_ctx,
extensions::dispatch::AssignmentOwner::Session {
session_id: sid,
worktree: &worktree,
branch: branch_opt,
},
)?
}
None => match (&checkout_continuity_advisory, git.as_ref()) {
(Some(CheckoutContinuityAdvisory::LandedBranch { trunk }), Some(git)) => {
extensions::dispatch::AssignmentOutcome {
status: extensions::dispatch::AssignmentStatus::Skipped,
reason: Some(format!(
"branch `{}` is already landed on local `origin/{trunk}`; \
local redispatch is skipped",
git.branch
)),
next_step: None,
assignment: None,
}
}
(_, Some(git)) => extensions::dispatch::ensure_assignment(
&startup_ctx,
extensions::dispatch::AssignmentOwner::PreSessionBranch {
worktree: &worktree,
branch: &git.branch,
},
)?,
(_, None) => extensions::dispatch::AssignmentOutcome {
status: extensions::dispatch::AssignmentStatus::Skipped,
reason: Some(
"directory substrate has no pre-session work stream identity; choose the next item explicitly or start a session first"
.to_owned(),
),
next_step: None,
assignment: None,
},
},
};
let extension_dispatch =
extensions::dispatch::build_startup_payload(&startup_ctx, &assignment_outcome)?;
let active_pod_identity = pod_identity::resolve_active_identity(&layout)?;
let pod_memory_binding = pod_identity::resolve_pod_memory_binding(&layout, &locality_id)?;
let pod_identity_view = pod_identity::resolve_pod_identity_view(&layout)?;
let machine_identity_view = pod_identity::resolve_machine_identity_view(&layout)?;
let coordination_scope = pod_identity::resolve_coordination_scope_view(&layout, &locality_id)?;
let raw = runtime_state::load_raw_start_runtime_sources(
repo_root,
&layout,
&locality_id,
pod_memory_binding
.as_ref()
.map(|binding| binding.name.as_str()),
active_pod_identity.is_some(),
)?;
debug!("runtime state loaded");
let validation_profile = validation_profile::load_for_layout(&layout, &locality_id)?;
let loaded_session = load_session_state(&layout)?;
let execution_gates = load_execution_gates(&layout)?;
let escalation_entries = escalation_state::load_for_layout(&layout)?;
let escalation = escalation_state::build_view(&layout, &escalation_entries);
let machine_presence = machine_presence::resolve_machine_presence_view(&layout)?;
let execution_context = machine_presence::build_execution_context_view(
&layout,
Some(&locality_id),
&machine_identity_view,
&machine_presence,
Some(&loaded_session.view.lifecycle),
);
let takeover_preconditions = machine_presence::build_takeover_preconditions_view(
&execution_context,
Some(&loaded_session.view.lifecycle),
escalation.blocking_count > 0,
escalation.blocking_count,
true,
);
let memory_provider_inspection =
memory_provider::inspect_provider(repo_root, &layout, &locality_id, true)?;
let start_result = compiled_state::ensure_for_start(&layout, raw)?;
debug!("compiled state ready");
let compiled_state = start_result.compiled;
let sources = start_result.sources;
let recovery = start_result.recovery;
let mut backlog = backlog_state::load_cache_view_from(&layout, raw_backlog_cache.clone(), 12)?;
if backlog_cache_status == "stale" && backlog.status != "missing" {
backlog.status = "stale";
}
let startup_recall = match memory_provider::collect_startup_recall(
&memory_provider_inspection,
repo_root,
&layout,
&locality_id,
&compiled_state.store.handoff,
) {
Ok(view) => view,
Err(error) => memory_provider::StartupRecallView {
status: "fallback".to_owned(),
query: None,
provider: memory_provider_inspection
.view
.configured_recall_provider
.clone()
.or_else(|| {
Some(
memory_provider_inspection
.view
.effective_recall_provider
.name
.clone(),
)
}),
results: Vec::new(),
message: Some(error.to_string()),
},
};
let mut recall_setup = memory_provider::prepare_start_recall(&layout, repo_root, &locality_id);
let memory_recall = if let Some(error) = recall_setup.config_error.clone() {
memory_recall::StartRecallView {
status: "error",
configured_provider: None,
configured_provider_kind: None,
used_provider: None,
used_provider_kind: None,
fallback_used: false,
query: None,
budget: recall_setup.budget,
search_results: Vec::new(),
described_results: Vec::new(),
expanded_results: Vec::new(),
error: Some(error),
warnings: Vec::new(),
}
} else if let Some(provider) = recall_setup.provider.as_mut() {
let fallback_provider = if provider.is_builtin_markdown() {
None
} else {
Some(&mut recall_setup.fallback as &mut dyn memory_recall::RecallProvider)
};
memory_recall::run_start_recall(
provider,
fallback_provider,
memory_recall::build_start_query(
&compiled_state.store.handoff.title,
&compiled_state.store.handoff.immediate_actions,
),
recall_setup.budget,
)
} else {
memory_recall::StartRecallView::missing()
};
let mut alerts = refresh_actions
.iter()
.filter(|action| action.status == "failed")
.map(|action| StartAlert::warning(action.target, action.message.clone()))
.collect::<Vec<_>>();
for issue in &memory_provider_inspection.issues {
alerts.push(StartAlert::warning(issue.check, issue.message.clone()));
}
if startup_recall.status == "fallback" {
if let Some(message) = &startup_recall.message {
alerts.push(StartAlert::warning("memory_provider", message.clone()));
}
}
if let Some(error) = memory_recall.error.as_deref() {
alerts.push(StartAlert::warning(
"memory_provider",
format!("memory recall provider failed during start recall: {error}"),
));
}
for warning in &memory_recall.warnings {
alerts.push(StartAlert::warning("memory_provider", warning.clone()));
}
if active_pod_identity.is_some() {
let migration_preview = pod_migration::analyze(&layout, &locality_id)?;
let suggested_memory = migration_preview
.memory_items
.iter()
.filter(|item| {
item.classification == pod_migration::MigrationClassification::PodWideCandidate
})
.count();
let suggested_policy = migration_preview
.policy_items
.iter()
.filter(|item| {
item.classification == pod_migration::MigrationClassification::PodWideCandidate
})
.count();
if suggested_memory > 0 || suggested_policy > 0 {
alerts.push(StartAlert::warning(
"pod_migration",
format!(
"pod identity is active but profile defaults still look pod-wide ({} memory, {} policy); preview them with `ccd pod migrate-defaults --path {}`",
suggested_memory,
suggested_policy,
repo_root.display()
),
));
}
}
if loaded_session.view.status == "active" {
alerts.push(StartAlert::warning(
"session_state",
format!(
"active session already exists for profile `{profile}` in this clone; session-state should be refreshed or cleared deliberately"
),
));
}
if validation_profile.status == "invalid" {
alerts.push(StartAlert::error(
"repo_validation_profile",
format!(
"failed to load {}: {}",
validation_profile.path.display(),
validation_profile
.error
.clone()
.unwrap_or_else(|| "invalid validation profile".to_owned())
),
));
}
let issue_reference_report = issue_refs::resolve_issue_references(
git.as_ref().map(|git| git.branch.as_str()).unwrap_or(""),
&compiled_state.store.handoff.title,
&compiled_state.store.handoff.immediate_actions,
raw_backlog_cache.as_ref(),
);
let ref_check_assignment = assignment_outcome
.assignment
.as_ref()
.or(pre_assignment_snapshot.as_ref());
if let Some(assignment) = ref_check_assignment {
let ext_alerts = extensions::dispatch::resolve_assignment_references(
&startup_ctx,
assignment,
raw_backlog_cache.as_ref(),
)?;
for ext_alert in ext_alerts {
alerts.push(StartAlert {
check: ext_alert.check,
severity: match ext_alert.severity {
extensions::dispatch::StartupAlertSeverity::Warning => {
StartAlertSeverity::Warning
}
extensions::dispatch::StartupAlertSeverity::Error => StartAlertSeverity::Error,
},
message: ext_alert.message,
details: None,
});
}
}
if let Some(advisory) = checkout_continuity_advisory.as_ref() {
alerts.push(StartAlert::warning(
"checkout_state",
advisory.start_alert_message(git.as_ref().expect("checkout advisory requires git")),
));
}
if let (Some(git), Some(_), Some(ref assignment)) =
(git.as_ref(), &session_id, &assignment_outcome.assignment)
{
if let Some(ref recorded_branch) = assignment.branch {
if recorded_branch != &git.branch && git.branch != "HEAD" {
alerts.push(StartAlert::warning(
"session_branch_drift",
format!(
"session assignment was created on branch `{recorded_branch}` \
but current branch is `{}`",
git.branch
),
));
}
}
}
if issue_reference_report.has_closed_matches() {
alerts.push(StartAlert::warning(
"closed_issue_reference",
issue_reference_report.start_alert_message(backlog.status),
));
}
if issue_reference_report.has_out_of_queue_matches() {
alerts.push(StartAlert::warning(
"out_of_queue_continuity",
issue_reference_report.out_of_queue_start_alert_message(),
));
}
if backlog.status == "stale" {
let message = match extensions::work_queue_refresh_hint(
backlog.provider.as_deref(),
backlog.repo.as_deref(),
) {
Some(refresh_command) => format!(
"the extension-provided shared-queue snapshot at {} is stale; refresh it with `{refresh_command}` before relying on it for continuity or focus decisions",
backlog.path
),
None => format!(
"the extension-provided shared-queue snapshot at {} is stale, and this build has no registered refresh command; repair the active backlog binding before relying on it for continuity or focus decisions",
backlog.path
),
};
alerts.push(StartAlert::warning("backlog_cache", message));
}
for ext_diag in extensions::health_diagnostics(&layout, repo_root, &locality_id)? {
alerts.push(StartAlert {
check: ext_diag.check,
severity: match ext_diag.severity {
"error" => StartAlertSeverity::Error,
"info" => StartAlertSeverity::Info,
_ => StartAlertSeverity::Warning,
},
message: ext_diag.message,
details: ext_diag.details,
});
}
if git.is_some()
&& backlog.active_items.is_empty()
&& matches!(backlog.provider.as_deref(), Some("local-markdown") | None)
&& !alerts.iter().any(|a| a.check == "backlog_adapter")
{
if let Some(owner_repo) = git_paths::github_remote_owner_repo(repo_root) {
let hint = match backlog.provider.as_deref() {
Some("local-markdown") => format!(
"backlog adapter is `local-markdown` with no active items, but this repo is hosted on GitHub ({owner_repo})"
),
_ => format!(
"no backlog cache exists, but this repo is hosted on GitHub ({owner_repo})"
),
};
alerts.push(StartAlert::warning(
"backlog_provider_mismatch",
format!(
"{hint}; run `ccd backlog pull --path .` to activate the GitHub backlog from the detected `origin` remote, or pass `--repo {owner_repo}` explicitly if you do not want to rely on origin detection"
),
));
}
}
if let Some(message) =
project_truth::legacy_roadmap_exclusion_warning(repo_root, &layout, &locality_id)?
{
alerts.push(StartAlert::warning("legacy_roadmap", message));
}
if escalation.blocking_count > 0 {
let blocking_reasons = escalation
.entries
.iter()
.filter(|entry| matches!(entry.kind, escalation_state::EscalationKind::Blocking))
.map(|entry| entry.reason.clone())
.collect::<Vec<_>>();
let reason = match blocking_reasons.as_slice() {
[] => "no blocking reasons recorded".to_owned(),
[single] => single.clone(),
reasons => reasons.join("; "),
};
let label = if escalation.blocking_count == 1 {
"blocking escalation"
} else {
"blocking escalations"
};
alerts.push(StartAlert::warning(
"escalation_state",
format!("{} {}: {}", escalation.blocking_count, label, reason),
));
}
let warnings = alerts
.iter()
.filter(|alert| matches!(alert.severity, StartAlertSeverity::Warning))
.map(|alert| alert.message.clone())
.collect::<Vec<_>>();
Ok(ResolvedCore {
profile: profile.to_string(),
locality_id,
layout,
git,
sources,
recovery,
compiled_state,
active_pod_identity,
pod_identity: pod_identity_view,
machine_identity: machine_identity_view,
machine_presence,
execution_context,
takeover_preconditions,
coordination_scope,
memory_recall,
memory_provider: memory_provider_inspection.view,
startup_recall,
loaded_session,
execution_gates,
validation_profile,
backlog,
assignment_outcome,
extension_dispatch,
session_id,
escalation,
alerts,
refresh_actions,
warnings,
})
}
/// Assemble the compact start summary (current state, next focus, active
/// guardrails) from the already-resolved core view.
fn build_compact_summary(core: &ResolvedCore) -> CompactSummary {
    // Distinguish a live git checkout from a plain directory workspace.
    let source = if core.git.is_some() {
        "live_git"
    } else {
        "directory_workspace"
    };
    let mut current_system_state =
        handoff::current_system_state_lines(core.git.as_ref(), core.session_id.as_deref());
    // Surface a non-default session mode as an extra state line.
    if let Some(mode_line) = session_mode_current_state_line(&core.loaded_session.view) {
        current_system_state.push(mode_line);
    }
    let handoff_store = &core.compiled_state.store.handoff;
    let current_state = CompactCurrentState {
        source,
        title: handoff_store.title.clone(),
        current_system_state,
    };
    let execution_gate_anchor = core
        .execution_gates
        .view
        .attention_anchor
        .as_ref()
        .map(|anchor| format!("[{}] {}", anchor.status.as_str(), anchor.text));
    let backlog = CompactBacklogSummary {
        status: compact_backlog_status(core.backlog.status),
        content_trust: core.backlog.content_trust,
        repo: core.backlog.repo.clone(),
        fetched_at_epoch_s: core.backlog.fetched_at_epoch_s,
        active_items: core.backlog.active_items.clone(),
    };
    let next_focus = CompactNextFocus {
        continuity_immediate_actions: handoff_store.immediate_actions.clone(),
        execution_gate_anchor,
        backlog,
    };
    let active_guardrails = CompactGuardrails {
        operational_guardrails: handoff_store.operational_guardrails.clone(),
        effective_memory: core.compiled_state.store.effective_memory.clone(),
    };
    CompactSummary {
        current_state,
        next_focus,
        active_guardrails,
    }
}
/// Print the "Execution Gates" section, skipping it entirely when there is
/// nothing to show.
fn render_execution_gates_section(view: &session_gates::ExecutionGatesView) {
    if view.status == "missing" || view.status == "empty" {
        return;
    }
    println!();
    println!("Execution Gates:");
    println!(
        "- {} total, {} unfinished",
        view.total_count, view.unfinished_count
    );
    if let Some(seed) = view.seeded_from.as_ref() {
        println!("- Seeded from: {seed}");
    }
    if let Some(anchor) = view.attention_anchor.as_ref() {
        println!(
            "- Attention anchor [{} #{}/{}]: {}",
            anchor.status.as_str(),
            anchor.index,
            view.total_count,
            anchor.text
        );
    }
}
/// Warning checks that, on their own, escalate the startup disposition to
/// `WorkflowAttentionRequired` (see `derive_disposition`).
const DISPOSITION_QUALIFYING_WARNINGS: &[&str] = &["session_state", "backlog_cache"];
/// Derive the overall startup disposition from the resolved core.
///
/// The checks form a priority ladder; the first matching rule wins:
/// 1. any error alert — or a warning on a qualifying check — requires
///    workflow attention;
/// 2. blocking escalations mean continuity resumes blocked;
/// 3. a real handoff (non-placeholder title plus non-empty content) resumes
///    the active continuity;
/// 4. an assignment, an active workflow signal, or a backlog adapter asking
///    for input means continuity input is needed;
/// 5. otherwise no continuity is active.
fn derive_disposition(core: &ResolvedCore) -> StartupDisposition {
    if core
        .alerts
        .iter()
        .any(|a| matches!(a.severity, StartAlertSeverity::Error))
    {
        return StartupDisposition::WorkflowAttentionRequired;
    }
    // Only warnings on the qualifying checks escalate the disposition.
    if core.alerts.iter().any(|a| {
        matches!(a.severity, StartAlertSeverity::Warning)
            && DISPOSITION_QUALIFYING_WARNINGS.contains(&a.check)
    }) {
        return StartupDisposition::WorkflowAttentionRequired;
    }
    if core.escalation.blocking_count > 0 {
        return StartupDisposition::ResumeBlockedContinuity;
    }
    let title = &core.compiled_state.store.handoff.title;
    let handoff_content = &core.sources.handoff.content;
    // "No active session" is the placeholder title used for an empty handoff.
    if title != "No active session" && !handoff_content.trim().is_empty() {
        return StartupDisposition::ResumeActiveContinuity;
    }
    if core.assignment_outcome.assignment.is_some()
        || !core.compiled_state.store.workflow_signal_active.is_empty()
        || (core.extension_dispatch.next_step.status
            == extensions::dispatch::NextStepStatus::NeedsInput
            && core.extension_dispatch.next_step.source
                == extensions::dispatch::NextStepSource::BacklogAdapter)
    {
        return StartupDisposition::NeedsContinuityInput;
    }
    StartupDisposition::NoActiveContinuity
}
/// Recommend a session-boundary action (stop / refresh / continue) from the
/// resolved core and the derived startup disposition.
///
/// Rules are ordered by severity; the first match returns: blocking
/// escalations, then stop-class alerts, then refresh-class alerts, then an
/// extension next step still waiting for input, then a missing continuity,
/// and finally a continue recommendation with supporting evidence. The
/// recovery note, when present, is appended to every evidence list.
fn build_session_boundary(
    core: &ResolvedCore,
    disposition: StartupDisposition,
) -> SessionBoundaryRecommendation {
    let recovery_note = recovery_boundary_note(&core.recovery);
    // Blocking escalations always stop the session outright.
    if core.escalation.blocking_count > 0 {
        let mut evidence = core
            .escalation
            .entries
            .iter()
            .filter(|entry| matches!(entry.kind, escalation_state::EscalationKind::Blocking))
            .map(|entry| format!("{}: {}", entry.id, entry.reason))
            .collect::<Vec<_>>();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            "Stop and resolve blocking escalations before continuing this session.",
            evidence,
        );
    }
    // Alerts whose checks indicate the session must stop before continuing.
    let stop_alerts = core
        .alerts
        .iter()
        .filter(|alert| {
            matches!(
                alert.check,
                "checkout_state" | "closed_issue_reference" | "out_of_queue_continuity"
            )
        })
        .collect::<Vec<_>>();
    if !stop_alerts.is_empty() {
        let mut evidence = stop_alerts
            .iter()
            .map(|alert| alert.message.clone())
            .collect::<Vec<_>>();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        // A checkout-state alert gets the more specific summary wording.
        let summary = if stop_alerts
            .iter()
            .any(|alert| alert.check == "checkout_state")
        {
            "Stop here and choose the next step from a live checkout before continuing."
        } else {
            "Stop and refresh continuity or choose the next step explicitly before continuing."
        };
        return SessionBoundaryRecommendation::new(SessionBoundaryAction::Stop, summary, evidence);
    }
    // Alerts that call for refreshing state rather than stopping.
    let refresh_alerts = core
        .alerts
        .iter()
        .filter(|alert| matches!(alert.check, "session_state" | "backlog_cache"))
        .collect::<Vec<_>>();
    if !refresh_alerts.is_empty() {
        let mut evidence = refresh_alerts
            .iter()
            .map(|alert| alert.message.clone())
            .collect::<Vec<_>>();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        // Pick the summary for whichever combination of checks is present.
        let summary = match (
            refresh_alerts
                .iter()
                .any(|alert| alert.check == "session_state"),
            refresh_alerts
                .iter()
                .any(|alert| alert.check == "backlog_cache"),
        ) {
            (true, true) => "Refresh session telemetry and shared-queue context before continuing.",
            (true, false) => {
                "Refresh or clear the current session record before continuing this session."
            }
            (false, true) => "Refresh the shared-queue snapshot before continuing this session.",
            (false, false) => unreachable!("refresh alerts must not be empty"),
        };
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Refresh,
            summary,
            evidence,
        );
    }
    // An extension-owned next step that still needs input stops the session.
    if core.extension_dispatch.next_step.status == extensions::dispatch::NextStepStatus::NeedsInput
    {
        let mut evidence = core
            .extension_dispatch
            .next_step
            .reason
            .clone()
            .into_iter()
            .collect::<Vec<_>>();
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        let summary = "Stop and choose the next step explicitly before continuing this session.";
        return SessionBoundaryRecommendation::new(SessionBoundaryAction::Stop, summary, evidence);
    }
    // No continuity at all: stop and confirm the next step first.
    if matches!(disposition, StartupDisposition::NoActiveContinuity) {
        let mut evidence = vec![
            "No active continuity snapshot or explicit next-step selection is loaded yet."
                .to_owned(),
        ];
        append_recovery_note(&mut evidence, recovery_note.as_deref());
        return SessionBoundaryRecommendation::new(
            SessionBoundaryAction::Stop,
            "Stop here and confirm the next step before continuing this session.",
            evidence,
        );
    }
    // Continue: gather supporting evidence for why continuing is safe.
    let mut evidence = Vec::new();
    if let Some(observation) = &core.extension_dispatch.next_step.observation {
        // Prefer the ccd id, then the GitHub issue number, then the title.
        let observed_ref = if observation.item.ccd_id != 0 {
            format!("ccd#{}", observation.item.ccd_id)
        } else if observation.item.github_issue_number != 0 {
            format!("GH#{}", observation.item.github_issue_number)
        } else {
            observation.item.title.clone()
        };
        evidence.push(format!(
            "Extension-owned next step is currently `{observed_ref}`."
        ));
    } else if core.assignment_outcome.assignment.is_some() {
        evidence.push("A local assignment is already active for this session context.".to_owned());
    } else if !core
        .compiled_state
        .store
        .handoff
        .immediate_actions
        .is_empty()
    {
        evidence.push(format!(
            "The handoff already carries {} immediate action(s).",
            core.compiled_state.store.handoff.immediate_actions.len()
        ));
    }
    append_recovery_note(&mut evidence, recovery_note.as_deref());
    SessionBoundaryRecommendation::new(
        SessionBoundaryAction::Continue,
        "Continue with the current continuity and extension-owned next-step context.",
        evidence,
    )
}
/// Supporting-context note emitted when any recovery artifact (checkpoint or
/// working buffer) is loaded; `None` when there is nothing to note.
fn recovery_boundary_note(recovery: &runtime_state::LoadedRuntimeRecoveryState) -> Option<String> {
    let has_artifacts =
        recovery.state.checkpoint.is_some() || recovery.state.working_buffer.is_some();
    // `then` (not `then_some`): the previous `then_some(….to_owned())` built
    // the String eagerly even when no artifact was loaded.
    has_artifacts.then(|| {
        "Recovery artifacts are loaded as supporting context only; continuity, dispatch, and backlog remain authoritative."
            .to_owned()
    })
}
/// Append the optional recovery note to an evidence list, if present.
fn append_recovery_note(evidence: &mut Vec<String>, note: Option<&str>) {
    evidence.extend(note.map(str::to_owned));
}
/// Summarize alert severities into an overall start readiness bucket:
/// `Ready` with no non-info alerts, `Blocked` when every non-info alert is an
/// escalation, `NotReady` otherwise.
fn start_readiness_view(alerts: &[StartAlert]) -> StartCheckReadiness {
    let mut saw_non_info = false;
    let mut only_escalations = true;
    for alert in alerts {
        if matches!(alert.severity, StartAlertSeverity::Info) {
            continue;
        }
        saw_non_info = true;
        if alert.check != "escalation_state" {
            only_escalations = false;
        }
    }
    if !saw_non_info {
        StartCheckReadiness::Ready
    } else if only_escalations {
        StartCheckReadiness::Blocked
    } else {
        StartCheckReadiness::NotReady
    }
}
/// Map a raw backlog status string onto the compact enum.
///
/// Panics on any status string the backlog loader does not actually produce;
/// the set of inputs is closed by construction.
fn compact_backlog_status(status: &str) -> CompactBacklogStatus {
    match status {
        "loaded" => CompactBacklogStatus::Loaded,
        "stale" => CompactBacklogStatus::Stale,
        "empty" => CompactBacklogStatus::Empty,
        "missing" => CompactBacklogStatus::Missing,
        other => unreachable!("unexpected compact backlog status: {other}"),
    }
}
/// Run the pre-resolve refresh steps; currently only the backlog cache.
fn refresh_before_resolve(repo_root: &Path, layout: &StateLayout) -> Vec<RefreshAction> {
    let mut actions = Vec::with_capacity(1);
    actions.push(try_refresh_backlog(repo_root, layout));
    actions
}
/// Classify the backlog cache for start: `missing`, `stale`, or `loaded`.
///
/// A failed refresh action targeting the cache forces `stale` before the
/// timestamp check is even consulted.
fn backlog_cache_status_for_start(
    cache: Option<&backlog_state::GitHubBacklogCache>,
    refresh_actions: &[RefreshAction],
) -> Result<&'static str> {
    let cache = match cache {
        Some(cache) => cache,
        None => return Ok("missing"),
    };
    let refresh_failed = refresh_actions
        .iter()
        .any(|action| action.target == "backlog_cache" && action.status == "failed");
    if refresh_failed {
        return Ok("stale");
    }
    let now = backlog_state::now_epoch_s()?;
    let status = if backlog_state::is_stale(cache, now) {
        "stale"
    } else {
        "loaded"
    };
    Ok(status)
}
/// Attempt an automatic refresh of the extension-provided shared-queue
/// snapshot, reporting the outcome as a `RefreshAction` rather than failing.
///
/// Every failure path is folded into a `status: "failed"` action so start can
/// continue and surface the problem as an alert instead of aborting.
fn try_refresh_backlog(repo_root: &Path, layout: &StateLayout) -> RefreshAction {
    // Inspect the cache view first so failure/skip messages can cite its path.
    let cache_view = match backlog_state::load_cache_view(layout, 1) {
        Ok(view) => view,
        Err(error) => {
            return RefreshAction {
                target: "backlog_cache",
                status: "failed",
                message: format!(
                    "failed to inspect the extension-provided shared-queue snapshot: {error:#}"
                ),
                cached_git: None,
            };
        }
    };
    let cache = match extensions::load_work_queue_snapshot(layout) {
        Ok(cache) => cache,
        Err(error) => {
            return RefreshAction {
                target: "backlog_cache",
                status: "failed",
                message: format!(
                    "failed to load the extension-provided shared-queue snapshot: {error:#}"
                ),
                cached_git: None,
            };
        }
    };
    // No snapshot at all: nothing to refresh, report a skip.
    let Some(cache) = cache else {
        return RefreshAction {
            target: "backlog_cache",
            status: "skipped",
            message: format!(
                "no extension-provided shared-queue snapshot is available at {}; skipped automatic refresh",
                cache_view.path
            ),
            cached_git: None,
        };
    };
    let now = match backlog_state::now_epoch_s() {
        Ok(now) => now,
        Err(error) => {
            return RefreshAction {
                target: "backlog_cache",
                status: "failed",
                message: format!("failed to read the current time for backlog refresh: {error:#}"),
                cached_git: None,
            };
        }
    };
    // A current snapshot that needs no revalidation is left untouched.
    if !backlog_state::is_stale(&cache, now) && !backlog_state::should_revalidate_on_refresh(&cache)
    {
        return RefreshAction {
            target: "backlog_cache",
            status: "skipped",
            message: format!(
                "the extension-provided shared-queue snapshot at {} is already current",
                cache_view.path
            ),
            cached_git: None,
        };
    }
    // Delegate to the active backlog extension; `None` means this build has
    // no registered refresh handler.
    match extensions::refresh_work_queue_cache(repo_root, layout) {
        Some(Ok((repo, active_count))) => RefreshAction {
            target: "backlog_cache",
            status: "refreshed",
            message: format!(
                "refreshed the shared-queue snapshot for {repo}; {active_count} active item(s)"
            ),
            cached_git: None,
        },
        Some(Err(error)) => RefreshAction {
            target: "backlog_cache",
            status: "failed",
            message: format!(
                "automatic shared-queue snapshot refresh failed for {}: {error:#}",
                cache_view.path
            ),
            cached_git: None,
        },
        None => RefreshAction {
            target: "backlog_cache",
            status: "skipped",
            message: format!(
                "the active backlog extension did not expose a shared-queue refresh handler for {}",
                cache_view.path
            ),
            cached_git: None,
        },
    }
}
/// Print a titled text section, substituting "(empty)" for empty content.
fn render_section(title: &str, content: &str) {
    println!();
    println!("{title}:");
    let body = if content.is_empty() { "(empty)" } else { content };
    println!("{body}");
}
/// Print the "Backlog" section: summary counts, an optional dispatch line,
/// and one line per active item.
fn render_backlog_section(backlog: &backlog_state::GitHubBacklogCacheView) {
    println!();
    println!("Backlog:");
    // A missing cache gets a single status line and nothing else.
    if backlog.status == "missing" {
        println!("- status: missing");
        return;
    }
    println!(
        "- status: {} | open: {} | queue-scoped: {} | candidates: {} | conflicts: {} | auto-selectable: {}",
        backlog.status,
        backlog.queue_summary.open_issues,
        backlog.queue_summary.queue_scoped,
        backlog.queue_summary.queue_candidates,
        backlog.queue_summary.policy_conflicts,
        backlog.queue_summary.auto_selectable
    );
    if let Some(dispatch) = &backlog.dispatch {
        // The dispatch reason, when present, is appended in parentheses.
        println!(
            "- dispatch: {}{}",
            dispatch.status,
            dispatch
                .reason
                .as_deref()
                .map(|reason| format!(" ({reason})"))
                .unwrap_or_default()
        );
    }
    if backlog.active_items.is_empty() {
        return;
    }
    for item in &backlog.active_items {
        // Claimed items carry an extra "claimed by …" segment in the line.
        match &item.claimed_by {
            Some(claimant) => println!(
                "- [{}] {} (#{}, {}, claimed by {}, {}, {})",
                item.display_ref(),
                item.title,
                item.github_issue_number,
                item.status,
                claimant,
                item.queue_state.as_str(),
                item.dispatch_state.as_str()
            ),
            None => println!(
                "- [{}] {} (#{}, {}, {}, {})",
                item.display_ref(),
                item.title,
                item.github_issue_number,
                item.status,
                item.queue_state.as_str(),
                item.dispatch_state.as_str()
            ),
        }
    }
}
/// Fail fast when the profile directory has not been bootstrapped yet.
fn ensure_profile_exists(layout: &StateLayout) -> Result<()> {
    let profile_root = layout.profile_root();
    if !profile_root.is_dir() {
        bail!(
            "profile `{}` does not exist at {}; bootstrap it with `ccd attach` before using `ccd start`",
            layout.profile(),
            profile_root.display()
        );
    }
    Ok(())
}
/// Fail fast when the project is not linked in the repo registry.
///
/// Fix: the registry-load call contained a mojibake `®istry_path` token
/// (garbled `&registry_path`), which does not compile; restored the borrow.
fn ensure_repo_registry_exists(layout: &StateLayout, locality_id: &str) -> Result<()> {
    let registry_path = layout.repo_metadata_path(locality_id)?;
    if repo_registry::load(&registry_path)?.is_some() {
        return Ok(());
    }
    bail!(
        "project ID `{locality_id}` (`locality_id` compatibility) is not linked in the registry: {} is missing; re-run `ccd link --project-id {locality_id}` or `ccd attach`",
        registry_path.display()
    )
}
/// Resolve the project manifest and read every referenced project-truth file.
fn resolve_manifest(
    repo_root: &Path,
    layout: &StateLayout,
    locality_id: &str,
) -> Result<ManifestResolution> {
    let resolved = project_truth::resolve_manifest(repo_root, layout, locality_id)?;
    // Read each referenced project-truth file, failing on the first error.
    let mut project_truth_sources = Vec::with_capacity(resolved.project_truth_paths.len());
    for path in &resolved.project_truth_paths {
        project_truth_sources.push(read_existing_source("project_truth", path)?);
    }
    let entries = resolved
        .entries
        .iter()
        .map(|entry| entry.display().to_string())
        .collect();
    Ok(ManifestResolution {
        source_order: resolved.source_order,
        manifest: ManifestView {
            path: resolved.manifest_path.display().to_string(),
            status: resolved.manifest_status,
            entries,
        },
        project_truth_sources,
    })
}
fn read_existing_source(kind: &'static str, path: &Path) -> Result<SourceView> {
let source = read_text_source(kind, path)?;
Ok(into_source_view(source))
}
/// Read a text file, treating a missing file as a `missing` source rather
/// than an error; any other I/O failure is propagated with path context.
fn read_text_source(kind: &'static str, path: &Path) -> Result<TextSource> {
    // Small constructor so both success shapes share one literal.
    let make = |status: &'static str, content: Option<String>| TextSource {
        kind,
        path: path.to_path_buf(),
        status,
        content,
    };
    match fs::read_to_string(path) {
        Ok(contents) => Ok(make("loaded", Some(contents))),
        Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(make("missing", None)),
        Err(error) => Err(error).with_context(|| format!("failed to read {}", path.display())),
    }
}
/// Build the handoff view for compiled content, reusing the source path and
/// normalizing a natively-loaded status to plain "loaded".
fn into_compiled_handoff_view(
    source: &runtime_state::RuntimeTextSurface,
    compiled_content: String,
) -> HandoffView {
    let status = match source.status.is_loaded_native() {
        true => "loaded",
        false => source.status.as_str(),
    };
    HandoffView {
        path: source.path.display().to_string(),
        status,
        content: compiled_content,
    }
}
/// Session state paired with its raw on-disk file (when one exists).
struct LoadedSessionState {
    // Projection used for rendering and decision making.
    view: SessionStateView,
    // Raw file contents; `None` when no session state file is present.
    raw: Option<session_state::SessionStateFile>,
}
/// Execution gate state paired with its raw on-disk file (when one exists).
struct LoadedExecutionGates {
    // Projection used for rendering and decision making.
    view: session_gates::ExecutionGatesView,
    // Raw file contents; `None` when no gate state file is present.
    raw: Option<session_gates::ExecutionGateStateFile>,
}
/// Load session state from the layout's state database and project it into a
/// render view.
///
/// A missing state file yields a `missing` view with empty fields. An
/// existing file is classified `stale` when its lifecycle projection says so
/// or it carries no session id, otherwise `active`.
fn load_session_state(layout: &StateLayout) -> Result<LoadedSessionState> {
    let path = layout.state_db_path();
    let Some(state) = session_state::load_for_layout(layout)? else {
        return Ok(LoadedSessionState {
            view: SessionStateView {
                path: path.display().to_string(),
                status: "missing",
                schema_version: None,
                session_id: None,
                started_at_epoch_s: None,
                last_started_at_epoch_s: None,
                start_count: None,
                mode: None,
                lifecycle: session_state::SessionLifecycleProjection::missing(),
            },
            raw: None,
        });
    };
    let now = session_state::now_epoch_s()?;
    // Recent activity feeds the staleness computation in the projection.
    let activity = session_state::load_activity_for_layout(layout)?;
    let lifecycle = session_state::lifecycle_projection(&state, now, None, activity.as_ref());
    let status = if lifecycle.stale == Some(true) || state.session_id.is_none() {
        "stale"
    } else {
        "active"
    };
    Ok(LoadedSessionState {
        view: SessionStateView {
            path: path.display().to_string(),
            status,
            schema_version: Some(state.schema_version),
            session_id: state.session_id.clone(),
            started_at_epoch_s: Some(state.started_at_epoch_s),
            last_started_at_epoch_s: Some(state.last_started_at_epoch_s),
            start_count: Some(state.start_count),
            mode: Some(state.mode),
            lifecycle,
        },
        raw: Some(state),
    })
}
/// Render a "Session mode" state line for an active, non-general session;
/// `None` for anything else.
fn session_mode_current_state_line(view: &SessionStateView) -> Option<String> {
    if view.status != "active" {
        return None;
    }
    let mode = view.mode?;
    if mode == session_state::SessionMode::General {
        return None;
    }
    Some(format!("Session mode: `{}`", mode.as_str()))
}
/// Load execution gate state and build its render view.
fn load_execution_gates(layout: &StateLayout) -> Result<LoadedExecutionGates> {
    let raw = session_gates::load_for_layout(layout)?;
    let view = session_gates::build_view(layout, raw.clone());
    Ok(LoadedExecutionGates { view, raw })
}
/// Serialize the raw session state (when present) into a source view.
fn session_state_source_view(
    view: &SessionStateView,
    raw: Option<&session_state::SessionStateFile>,
) -> Result<SourceView> {
    let content = match raw {
        Some(state) => Some(serde_json::to_string_pretty(state)?),
        None => None,
    };
    Ok(SourceView {
        kind: "session_state",
        path: view.path.clone(),
        status: view.status,
        content,
    })
}
/// Serialize the raw execution gate state (when present) into a source view.
fn execution_gates_source_view(
    view: &session_gates::ExecutionGatesView,
    raw: Option<&session_gates::ExecutionGateStateFile>,
) -> Result<SourceView> {
    let content = match raw {
        Some(state) => Some(serde_json::to_string_pretty(state)?),
        None => None,
    };
    Ok(SourceView {
        kind: "execution_gates",
        path: view.path.clone(),
        status: view.status,
        content,
    })
}
fn validation_profile_source_view(
profile: &validation_profile::LoadedValidationProfile,
) -> SourceView {
SourceView {
kind: "repo_validation_profile",
path: profile.path.display().to_string(),
status: profile.status,
content: profile
.contents
.clone()
.and_then(|content| non_empty_content(&content)),
}
}
/// Concatenate policy sources into one rendered view while tracking each
/// contributing part. Non-empty contents are joined with blank-line
/// separators in source order.
fn render_effective_policy_view(
    parts_in_order: &[policy_projection::PolicySource],
) -> RenderedView {
    let parts = parts_in_order
        .iter()
        .map(|source| RenderedPart {
            kind: source.kind,
            path: source.path.display().to_string(),
            status: source.status,
        })
        .collect();
    let content = parts_in_order
        .iter()
        .filter_map(|source| source.content.as_deref())
        .filter(|content| !content.is_empty())
        .collect::<Vec<_>>()
        .join("\n\n");
    let status = if content.is_empty() { "empty" } else { "loaded" };
    RenderedView {
        status,
        content,
        parts,
    }
}
fn render_effective_memory_view(
surfaces: EffectiveMemorySurfaces<'_>,
structured: memory_entries::StructuredMemoryView,
pod_identity_active: bool,
compiled_memory: &compiled_state::CompiledMemoryView,
) -> MemoryView {
let compiled_content =
compiled_state::render_memory_content(compiled_memory, pod_identity_active);
let parts = if pod_identity_active {
vec![
runtime_rendered_part(surfaces.pod),
runtime_rendered_part(surfaces.profile),
runtime_rendered_part(surfaces.locality),
runtime_rendered_part(surfaces.branch),
runtime_rendered_part(surfaces.clone),
]
} else {
vec![
runtime_rendered_part(surfaces.profile),
runtime_rendered_part(surfaces.locality),
runtime_rendered_part(surfaces.pod),
runtime_rendered_part(surfaces.branch),
runtime_rendered_part(surfaces.clone),
]
};
MemoryView {
status: if compiled_content.is_empty() {
"empty"
} else {
"loaded"
},
content: compiled_content,
parts,
structured,
}
}
/// Project a runtime text surface into its rendered-part descriptor.
fn runtime_rendered_part(source: &runtime_state::RuntimeTextSurface) -> RenderedPart {
    let path = source.path.display().to_string();
    let status = source.status.as_str();
    RenderedPart {
        kind: source.kind,
        path,
        status,
    }
}
/// Project a runtime text surface into a source view, omitting empty content.
fn runtime_source_view(source: &runtime_state::RuntimeTextSurface) -> SourceView {
    let path = source.path.display().to_string();
    let content = non_empty_content(&source.content);
    SourceView {
        kind: source.kind,
        path,
        status: source.status.as_str(),
        content,
    }
}
/// Describe the two backlog artifacts (raw cache + rendered view) as source
/// views sharing the backlog's status.
fn backlog_source_views(backlog: &backlog_state::GitHubBacklogCacheView) -> Vec<SourceView> {
    let entries = [
        ("work_queue_cache", backlog.path.clone()),
        ("work_queue_view", backlog.rendered_path.clone()),
    ];
    entries
        .into_iter()
        .map(|(kind, path)| SourceView {
            kind,
            path,
            status: backlog.status,
            content: None,
        })
        .collect()
}
/// Convert a raw text source into a view, downgrading a loaded-but-empty
/// file to the `empty` status.
fn into_source_view(source: TextSource) -> SourceView {
    let content = source.content.unwrap_or_default();
    let mut status = source.status;
    if status == "loaded" && content.is_empty() {
        status = "empty";
    }
    SourceView {
        kind: source.kind,
        path: source.path.display().to_string(),
        status,
        content: non_empty_content(&content),
    }
}
/// `Some(owned copy)` for non-empty text, `None` for the empty string.
fn non_empty_content(content: &str) -> Option<String> {
    (!content.is_empty()).then(|| content.to_owned())
}