use std::collections::HashSet;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::ExitCode;
use anyhow::Result;
use serde::Serialize;
use super::{check, host, skills, sync};
use crate::handoff::{self, BranchMode};
use crate::memory::{entries as memory_entries, provider as memory_provider};
use crate::output::CommandReport;
use crate::paths::state::StateLayout;
use crate::paths::substrate::SubstrateKind;
use crate::profile;
use crate::repo::marker as repo_marker;
use crate::repo::registry as repo_registry;
use crate::repo::truth as project_truth;
use crate::state::config_migration;
use crate::state::consistency;
use crate::state::machine_registry;
use crate::state::runtime as runtime_state;
use crate::state::session as session_state;
use crate::state::validation_profile::{
self, HandoffDoctorValidationProfile, ValidationProfile, ValidationSeverity,
};
// Template placeholder strings whose presence means AGENTS.md was scaffolded
// but never filled in.
const AGENTS_PLACEHOLDERS: &[&str] = &[
    "[One paragraph: what this project is, what it does, and who it is for.]",
    "[List the invariants where a violation cascades.]",
];
// Template placeholder strings for an unfilled MEMORY.md.
const MEMORY_PLACEHOLDERS: &[&str] = &[
    "[project name]",
    "[Facts that stay true across sessions.]",
    "[Start empty. Add an item when a pattern repeats or a rule changes.]",
    "[Optional: commands that repeatedly save time or prevent known failures.]",
];
// Narration phrases that suggest a handoff was written as a play-by-play
// session log instead of a factual state description. Matched ASCII
// case-insensitively by validate_handoff_quality.
const SESSION_LOG_PHRASES: &[&str] = &[
    "i then",
    "i tried",
    "i decided",
    "i ran",
    "i fixed",
    "i added",
    "next i ",
    "after that",
    "we then",
    "we decided",
];
// Project-truth files that are inspected only when present in the repo root.
const OPTIONAL_PROJECT_TRUTH: &[&str] = &["MEMORY.md", "_MANIFEST.md"];
/// A single doctor finding, serialized into the JSON report and rendered as
/// one line of text output.
#[derive(Serialize, Clone)]
pub struct DoctorCheck {
    // Machine-readable check identifier (e.g. "repo_marker", "mirror_sync").
    check: String,
    // File or surface the check applies to.
    file: String,
    // Rendered status: usually "pass" | "warn" | "fail" (ad-hoc values such
    // as "migrated" also occur); see status_rank for the filtering order.
    status: &'static str,
    // Severity bucket driving the failure/warning totals:
    // "info" | "warning" | "error".
    severity: &'static str,
    // Human-readable explanation shown to the operator.
    message: String,
}
/// Aggregated doctor output: every check plus failure/warning totals.
#[derive(Serialize)]
pub struct DoctorReport {
    command: &'static str,
    // True when no check with severity "error" was recorded.
    ok: bool,
    repo_root: String,
    failures: usize,
    warnings: usize,
    // `serde(default)` was removed: this struct only derives `Serialize`,
    // and `default` is a deserialization-only attribute, so it was dead.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    host_integrations: Vec<host::HostIntegrationStatus>,
    checks: Vec<DoctorCheck>,
}
impl DoctorReport {
    /// Drop every check whose status ranks below `min_status`
    /// (ranking: "fail" > "warn" > everything else; see `status_rank`).
    pub fn filter_by_status(&mut self, min_status: &str) {
        let threshold = status_rank(min_status);
        self.checks.retain(|c| status_rank(c.status) >= threshold);
    }
}
/// Numeric ordering for check statuses: "fail" (2) outranks "warn" (1),
/// which outranks every other status (0).
fn status_rank(status: &str) -> u8 {
    if status == "fail" {
        2
    } else if status == "warn" {
        1
    } else {
        0
    }
}
impl CommandReport for DoctorReport {
    /// Exit code 1 when any `severity == "error"` check was recorded,
    /// success otherwise.
    fn exit_code(&self) -> ExitCode {
        if self.failures == 0 {
            ExitCode::SUCCESS
        } else {
            ExitCode::from(1)
        }
    }

    /// Human-readable rendering: one `[LABEL] message` line per check,
    /// followed by a failure/warning summary line.
    fn render_text(&self) {
        for check in &self.checks {
            let label = if check.status == "pass" {
                "PASS"
            } else if check.status == "warn" {
                "WARN"
            } else if check.status == "fail" {
                "FAIL"
            } else {
                "INFO"
            };
            println!("[{label}] {}", check.message);
        }
        println!(
            "Doctor summary: {} failure(s), {} warning(s).",
            self.failures, self.warnings
        );
    }
}
/// Tunables for a doctor run.
#[derive(Clone, Copy)]
pub struct RunOptions {
    /// When true, repo-native `check:` commands are also executed
    /// (potentially slower, since they spawn external processes —
    /// TODO confirm against the `check` module).
    pub include_repo_native_checks: bool,
}

impl Default for RunOptions {
    // Repo-native checks run by default; callers must opt out explicitly.
    fn default() -> Self {
        Self {
            include_repo_native_checks: true,
        }
    }
}
/// Run the full doctor suite for the workspace at `repo_root` and assemble a
/// `DoctorReport`.
///
/// `explicit_profile` overrides profile auto-resolution; `options` controls
/// whether repo-native checks run. The report's `ok` flag (and exit code)
/// reflect only `severity == "error"` checks — warnings are counted but do
/// not fail the run.
pub fn run(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    options: RunOptions,
) -> Result<DoctorReport> {
    let mut checks = Vec::new();
    let mut host_integrations = Vec::new();
    let sync_profile = sync::SyncProfile::default();
    // Phase 1: project-truth files. AGENTS.md is mandatory — its contents
    // are retained for the mirror-sync phase below. The optional files are
    // only inspected when present.
    let InspectedCoreFile {
        checks: agent_checks,
        contents: agents_contents,
    } = inspect_core_file(
        &repo_root.join("AGENTS.md"),
        "AGENTS.md",
        AGENTS_PLACEHOLDERS,
        Some((sync_profile.soft_line_limit, sync_profile.hard_line_limit)),
    )?;
    checks.extend(agent_checks);
    for file in OPTIONAL_PROJECT_TRUTH {
        let path = repo_root.join(file);
        if !path.exists() {
            continue;
        }
        let (placeholders, budget) = known_file_rules(file, &sync_profile);
        checks.extend(inspect_core_file(&path, file, placeholders, budget)?.checks);
    }
    // Phase 2: resolve the workspace-state layout. A failure records one
    // check and disables every layout-dependent phase (layout stays None).
    let layout = match profile::resolve(explicit_profile)
        .and_then(|profile| StateLayout::resolve(repo_root, profile))
    {
        Ok(layout) => Some(layout),
        Err(error) => {
            checks.push(fail_check(
                "clone_state",
                ".git/ccd",
                format!("failed to resolve workspace-state paths: {error:#}"),
            ));
            None
        }
    };
    // Phase 3: the repo marker ties this checkout to a project ID. Missing
    // or unparsable markers fail and disable marker-dependent phases.
    let marker = match repo_marker::load(repo_root) {
        Ok(Some(marker)) => {
            checks.push(pass_check(
                "repo_marker",
                repo_marker::MARKER_FILE,
                format!(
                    "{} contains project ID `{}`",
                    repo_marker::MARKER_FILE,
                    marker.locality_id
                ),
            ));
            Some(marker)
        }
        Ok(None) => {
            checks.push(fail_check(
                "repo_marker",
                repo_marker::MARKER_FILE,
                format!(
                    "{} is missing; bootstrap this workspace with `ccd attach` or `ccd link`",
                    repo_marker::MARKER_FILE
                ),
            ));
            None
        }
        Err(error) => {
            checks.push(fail_check(
                "repo_marker",
                repo_marker::MARKER_FILE,
                format!("failed to parse {}: {error:#}", repo_marker::MARKER_FILE),
            ));
            None
        }
    };
    if let Some(layout) = &layout {
        checks.extend(validate_profile_kernel(layout)?);
    }
    // Phase 4: checks that need both a resolved layout and a project ID.
    if let (Some(layout), Some(marker)) = (layout.as_ref(), marker.as_ref()) {
        // Opportunistic legacy-config migration; errors are deliberately
        // swallowed (best-effort) and only successful migrations reported.
        let mut migrated_from = Vec::new();
        if let Ok(config_migration::MigrationReport::Migrated { from }) =
            config_migration::migrate_repo_overlay_if_needed(layout, &marker.locality_id)
        {
            migrated_from.extend(from);
        }
        if !migrated_from.is_empty() {
            checks.push(DoctorCheck {
                check: "config_migration".to_owned(),
                file: layout
                    .repo_overlay_config_path(&marker.locality_id)
                    .map(|p| p.display().to_string())
                    .unwrap_or_default(),
                status: "migrated",
                severity: "info",
                message: format!(
                    "migrated {} legacy config file(s) to repo overlay config.toml",
                    migrated_from.len()
                ),
            });
        }
        checks.extend(validate_linked_repo(
            repo_root,
            layout,
            &marker.locality_id,
        )?);
        // The validation profile tunes severities of later handoff checks.
        let validation_profile =
            load_repo_validation_profile(layout, &marker.locality_id, &mut checks)?;
        checks.extend(validate_clone_state_with_profile(
            repo_root,
            layout,
            &marker.locality_id,
            &validation_profile,
        )?);
        // Memory recall provider health; distinguishes an explicitly
        // configured provider from the built-in fallback in the message.
        match memory_provider::inspect_provider(repo_root, layout, &marker.locality_id, true) {
            Ok(inspection) => {
                if inspection.issues.is_empty() {
                    let config_path = layout.profile_config_path().display().to_string();
                    let provider = &inspection.view.effective_recall_provider;
                    let message = if inspection.view.configured_recall_provider.is_some() {
                        format!(
                            "memory recall provider `{}` is healthy with capabilities [{}]; startup recall policy is `{}`",
                            provider.name,
                            provider.capabilities.join(", "),
                            inspection.view.start_recall_policy.mode
                        )
                    } else {
                        format!(
                            "memory recall uses built-in `{}` fallback; startup recall policy is `{}`",
                            provider.name,
                            inspection.view.start_recall_policy.mode
                        )
                    };
                    checks.push(pass_check("memory_provider", &config_path, message));
                } else {
                    for issue in inspection.issues {
                        checks.push(fail_check(issue.check, &issue.file, issue.message));
                    }
                }
            }
            Err(error) => checks.push(fail_check(
                "memory_provider",
                &layout.profile_config_path().display().to_string(),
                format!("failed to inspect memory provider configuration: {error:#}"),
            )),
        }
        // Host integrations feed both per-integration checks and the report.
        host_integrations = host::inspect(repo_root, layout, &marker.locality_id)?;
        for integration in &host_integrations {
            checks.extend(host_integration_checks(integration));
        }
        if let Some(check) = context_window_configuration_check(&host_integrations) {
            checks.push(check);
        }
    }
    if options.include_repo_native_checks {
        checks.extend(validate_repo_native_checks(repo_root));
    }
    // Phase 5: mirror sync — only meaningful when at least one generated
    // mirror (or the canonical skills dir) exists in the repo.
    let has_mirror = repo_root.join("CLAUDE.md").exists()
        || repo_root.join("GEMINI.md").exists()
        || repo_root.join(".claude/CLAUDE.md").exists()
        || repo_root.join("skills/canonical").is_dir();
    if has_mirror {
        match agents_contents.as_deref() {
            Some(agents) => {
                // Re-render from AGENTS.md and diff against what is on disk.
                let hash = sync::sha256(agents);
                match sync::render_all(repo_root, agents, &hash, &sync_profile) {
                    Ok(generated) => {
                        let check = sync::check_generated(&generated);
                        if check.ok {
                            checks.push(pass_check(
                                "mirror_sync",
                                "generated mirrors",
                                "Generated mirrors are in sync",
                            ));
                        } else {
                            for issue in sync::with_surface_messages(
                                check.issues,
                                sync::SyncIssueSurface::Doctor,
                                repo_root,
                            ) {
                                checks.push(fail_check(
                                    "mirror_sync",
                                    &issue.label,
                                    &issue.message,
                                ));
                            }
                        }
                    }
                    Err(error) => {
                        checks.push(fail_check(
                            "mirror_sync",
                            "generated mirrors",
                            format!("failed to render generated mirrors: {error:#}"),
                        ));
                    }
                }
            }
            None => {
                // Mirrors without a source of truth is always a failure.
                checks.push(fail_check(
                    "mirror_sync",
                    "AGENTS.md",
                    "Generated mirrors exist but AGENTS.md is missing",
                ));
            }
        }
    } else {
        checks.push(pass_check(
            "mirror_sync",
            "generated mirrors",
            "No generated mirrors present; skipping sync check",
        ));
    }
    checks.extend(validate_installed_skills());
    // Totals come from severity buckets, not from the status strings.
    let failures = checks
        .iter()
        .filter(|check| check.severity == "error")
        .count();
    let warnings = checks
        .iter()
        .filter(|check| check.severity == "warning")
        .count();
    Ok(DoctorReport {
        command: "doctor",
        ok: failures == 0,
        repo_root: repo_root.display().to_string(),
        failures,
        warnings,
        host_integrations,
        checks,
    })
}
/// When a claude host integration is active, report whether any
/// context-window env override is set (pass) or absent (warn with guidance).
/// Returns `None` when no claude integration is declared.
fn context_window_configuration_check(
    integrations: &[host::HostIntegrationStatus],
) -> Option<DoctorCheck> {
    if !integrations.iter().any(|i| i.host == "claude") {
        return None;
    }
    // Any of these, when set to a non-blank value, enables window-relative
    // scoring; adapter-specific names take precedence at runtime.
    const OVERRIDE_VARS: [&str; 3] = [
        "CCD_CLAUDE_CONTEXT_WINDOW_TOKENS",
        "CLAUDE_CONTEXT_WINDOW_TOKENS",
        "CCD_CONTEXT_WINDOW_TOKENS",
    ];
    let has_override = OVERRIDE_VARS
        .iter()
        .any(|name| matches!(std::env::var(name), Ok(value) if !value.trim().is_empty()));
    let check = if has_override {
        pass_check(
            "context_window_config",
            "env",
            "context-window env var is set; radar can compute `context_used_pct`",
        )
    } else {
        warn_check(
            "context_window_config",
            "env",
            "claude runtime is active but no context-window env var is set; \
            `ccd handover` will report `context_used_pct: null` and \
            recommend `capture_session_state` on every turn. Set \
            `CCD_CONTEXT_WINDOW_TOKENS=200000` (or `=1000000` for Opus 4.7 \
            1M mode) to enable window-relative scoring. Adapter-specific \
            overrides (`CCD_CLAUDE_CONTEXT_WINDOW_TOKENS`) take precedence \
            when set.",
        )
    };
    Some(check)
}
/// Tier-two integration modes are the ones that require scaffold and
/// applied runtime artifacts to exist on disk.
fn is_tier_two_mode(mode: &str) -> bool {
    mode == "native_hook" || mode == "reference_adapter"
}
/// Translate one host-integration status into doctor checks.
///
/// Tier-two modes with *both* scaffold and applied artifacts missing emit a
/// single combined failure and skip the per-asset checks. Otherwise the
/// scaffold status and the install status each contribute one pass/warn
/// check (when applicable).
fn host_integration_checks(status: &host::HostIntegrationStatus) -> Vec<DoctorCheck> {
    let mut checks = Vec::new();
    if is_tier_two_mode(&status.mode)
        && status.scaffold_status == host::IntegrationAssetStatus::Missing
        && status.install_status == host::IntegrationAssetStatus::Missing
    {
        // Neither asset exists: one failure reported against the union of
        // all scaffold source paths and applied runtime paths.
        let paths = status
            .source_paths
            .iter()
            .chain(status.applied_paths.iter())
            .cloned()
            .collect::<Vec<_>>()
            .join(", ");
        checks.push(DoctorCheck {
            check: "host_integration_readiness".to_owned(),
            file: paths,
            status: "fail",
            severity: "error",
            message: format!(
                "repo declares {} ({}) but neither the CCD-managed scaffold nor the applied runtime artifacts exist; run `ccd init --path .` to bootstrap and auto-apply (or `ccd host apply --path .` to re-apply after scaffolding)",
                status.host, status.mode,
            ),
        });
        return checks;
    }
    if status.scaffold_status != host::IntegrationAssetStatus::NotApplicable {
        let (check_status, severity, message) = match status.scaffold_status {
            host::IntegrationAssetStatus::Present => (
                "pass",
                "info",
                format!(
                    "repo expects {} ({}) and the CCD-managed scaffold is present",
                    status.host, status.mode
                ),
            ),
            host::IntegrationAssetStatus::Missing => (
                "warn",
                "warning",
                format!(
                    "repo expects {} ({}) but the CCD-managed scaffold is missing; run `ccd init --path .`",
                    status.host, status.mode
                ),
            ),
            host::IntegrationAssetStatus::Drifted => (
                "warn",
                "warning",
                format!(
                    "repo expects {} ({}) but the CCD-managed scaffold has drifted from the generated content",
                    status.host, status.mode
                ),
            ),
            // NOTE(review): NotApplicable is excluded by the guard above;
            // InvalidMode is assumed never to be reported for scaffolds —
            // confirm that invariant in the host module.
            host::IntegrationAssetStatus::InvalidMode => unreachable!(),
            host::IntegrationAssetStatus::NotApplicable => unreachable!(),
        };
        checks.push(DoctorCheck {
            check: "host_scaffold".to_owned(),
            file: status.source_paths.join(", "),
            status: check_status,
            severity,
            message,
        });
    }
    if status.install_status != host::IntegrationAssetStatus::NotApplicable {
        let (check_status, severity, message) = match status.install_status {
            host::IntegrationAssetStatus::Present => (
                "pass",
                "info",
                format!(
                    "{} ({}) is installed at the expected runtime path",
                    status.host, status.mode
                ),
            ),
            host::IntegrationAssetStatus::Missing => (
                "warn",
                "warning",
                format!(
                    "{} ({}) is expected but not applied; run `ccd init --path .` to bootstrap and auto-apply, or `ccd host apply --path .` to re-apply after scaffolding",
                    status.host, status.mode
                ),
            ),
            host::IntegrationAssetStatus::Drifted => (
                "warn",
                "warning",
                format!(
                    "{} ({}) is installed but drifted from the CCD-managed apply output",
                    status.host, status.mode
                ),
            ),
            host::IntegrationAssetStatus::InvalidMode => (
                "warn",
                "warning",
                format!(
                    "{} ({}) is configured with an unsupported install mode; repair the repo overlay before applying runtime assets",
                    status.host, status.mode
                ),
            ),
            // Excluded by the surrounding `if`.
            host::IntegrationAssetStatus::NotApplicable => unreachable!(),
        };
        checks.push(DoctorCheck {
            check: "host_apply".to_owned(),
            file: status.applied_paths.join(", "),
            status: check_status,
            severity,
            message,
        });
    }
    checks
}
/// Check the profile kernel: the profile root directory plus its
/// config/policy/memory files, and — when the memory file exists — its
/// structured CCD entries.
fn validate_profile_kernel(layout: &StateLayout) -> Result<Vec<DoctorCheck>> {
    let profile_root = layout.profile_root();
    let root_label = profile_root.display().to_string();
    if !profile_root.is_dir() {
        // Without the profile root there is nothing else to inspect.
        return Ok(vec![fail_check(
            "profile_kernel",
            &root_label,
            format!(
                "profile `{}` does not exist at {}",
                layout.profile(),
                profile_root.display()
            ),
        )]);
    }
    let mut checks = vec![pass_check(
        "profile_kernel",
        &root_label,
        format!(
            "profile `{}` exists at {}",
            layout.profile(),
            profile_root.display()
        ),
    )];
    let required_files = [
        layout.profile_config_path(),
        layout.profile_policy_path(),
        layout.profile_memory_path(),
    ];
    for path in required_files {
        let label = path.display().to_string();
        let check = if path.is_file() {
            pass_check("profile_file", &label, format!("{label} exists"))
        } else {
            fail_check("profile_file", &label, format!("{label} is missing"))
        };
        checks.push(check);
    }
    // Structured-memory validation only applies when the file is present;
    // its absence was already reported above.
    let memory_path = layout.profile_memory_path();
    if memory_path.is_file() {
        checks.extend(validate_structured_memory_file(&memory_path));
    }
    Ok(checks)
}
fn validate_linked_repo(
repo_root: &Path,
layout: &StateLayout,
locality_id: &str,
) -> Result<Vec<DoctorCheck>> {
let mut checks = Vec::new();
let doctor_command = if layout.profile().as_str() == profile::DEFAULT_PROFILE {
format!("ccd doctor --path {}", repo_root.display())
} else {
format!(
"ccd doctor --path {} --profile {}",
repo_root.display(),
layout.profile()
)
};
let registry_path = layout.repo_metadata_path(locality_id)?;
match repo_registry::load(®istry_path) {
Ok(Some(_)) => checks.push(pass_check(
"repo_registry",
®istry_path.display().to_string(),
format!("project registry exists for project ID `{locality_id}`"),
)),
Ok(None) => checks.push(fail_check(
"repo_registry",
®istry_path.display().to_string(),
format!("project registry is missing for project ID `{locality_id}`"),
)),
Err(error) => checks.push(fail_check(
"repo_registry",
®istry_path.display().to_string(),
format!("failed to read project registry for project ID `{locality_id}`: {error:#}"),
)),
}
let overlay_root = layout.repo_overlay_root(locality_id)?;
if overlay_root.is_dir() {
checks.push(pass_check(
"repo_overlay",
&overlay_root.display().to_string(),
format!("project overlay exists for project ID `{locality_id}`"),
));
} else {
checks.push(fail_check(
"repo_overlay",
&overlay_root.display().to_string(),
format!("project overlay is missing for project ID `{locality_id}`"),
));
}
match machine_registry::resolve_coordination_scope_view(layout, locality_id) {
Ok(scope) if scope.status == "configured" => checks.push(pass_check(
"coordination_scope",
scope
.config_path
.as_deref()
.unwrap_or("<unknown coordination-scope config>"),
format!(
"coordination scope `{}` is configured for project `{locality_id}`",
scope.name.as_deref().unwrap_or("<unknown>")
),
)),
Ok(_) => checks.push(pass_check(
"coordination_scope",
&overlay_root.display().to_string(),
"no coordination scope is configured for this project",
)),
Err(error) => checks.push(fail_check(
"coordination_scope",
&overlay_root.display().to_string(),
format!(
"ccd doctor found invalid coordination scope configuration for project `{locality_id}`; shared dispatch and pod compatibility may resolve incorrectly until `[dispatch].coordination_scope` or legacy pod aliases are fixed. Inspect the active profile and project overlay config, then rerun `{doctor_command}`: {error:#}"
),
)),
}
match machine_registry::resolve_machine_identity_view(layout) {
Ok(machine) if machine.status == "declared" => {
let manifest_path = machine
.manifest_path
.as_deref()
.unwrap_or("<unknown machine manifest>");
checks.push(pass_check(
"machine_identity",
manifest_path,
format!(
"machine identity `{}` is declared with trust class `{}`",
machine.id.as_deref().unwrap_or("<unknown>"),
match machine.trust_class {
Some(machine_registry::MachineTrustClass::Owned) => "owned",
Some(machine_registry::MachineTrustClass::Limited) => "limited",
Some(machine_registry::MachineTrustClass::Observer) => "observer",
None => "unknown",
}
),
));
}
Ok(machine) => {
let path = machine
.manifest_path
.unwrap_or_else(|| layout.ccd_root().join("pods").display().to_string());
let message = machine.reason.unwrap_or_else(|| {
format!(
"no active machine identity is declared for profile `{}`",
layout.profile()
)
});
checks.push(pass_check("machine_identity", &path, message));
}
Err(error) => checks.push(fail_check(
"machine_identity",
&layout.ccd_root().join("pods").display().to_string(),
format!(
"ccd doctor found invalid machine identity state under {}; machine-specific reporting and capability hints may resolve incorrectly until the machine manifest is fixed. Fix `machine.toml` and rerun `{doctor_command}`: {error:#}",
layout.ccd_root().join("pods").display()
),
)),
}
match project_truth::resolve_manifest(repo_root, layout, locality_id) {
Ok(resolution) => {
if resolution.manifest_status == "loaded" {
checks.push(pass_check(
"repo_manifest",
&resolution.manifest_path.display().to_string(),
format!(
"{} loaded with {} source entrie(s)",
resolution.manifest_path.display(),
resolution.entries.len()
),
));
}
if let Some(message) = project_truth::legacy_roadmap_exclusion_warning_from_resolution(
repo_root,
&resolution,
) {
checks.push(warn_check(
"legacy_roadmap",
project_truth::LEGACY_ROADMAP_PATH,
message,
));
}
}
Err(error) => checks.push(fail_check(
"repo_manifest",
&layout
.repo_manifest_path(locality_id)?
.display()
.to_string(),
format!("manifest validation failed: {error:#}"),
)),
}
for path in [
layout.repo_policy_path(locality_id)?,
layout.repo_memory_path(locality_id)?,
] {
if !path.exists() {
continue;
}
match fs::read_to_string(&path) {
Ok(_) => checks.push(pass_check(
"repo_overlay_file",
&path.display().to_string(),
format!("{} is readable", path.display()),
)),
Err(error) => checks.push(fail_check(
"repo_overlay_file",
&path.display().to_string(),
format!("failed to read {}: {error}", path.display()),
)),
}
}
let repo_memory_path = layout.repo_memory_path(locality_id)?;
if repo_memory_path.is_file() {
checks.extend(validate_structured_memory_file(&repo_memory_path));
}
for diagnostic in crate::extensions::health_diagnostics(layout, repo_root, locality_id)? {
checks.push(match diagnostic.severity {
"error" => fail_check(diagnostic.check, &diagnostic.file, diagnostic.message),
"warning" => warn_check(diagnostic.check, &diagnostic.file, diagnostic.message),
_ => pass_check(diagnostic.check, &diagnostic.file, diagnostic.message),
});
}
Ok(checks)
}
/// Validate workspace-local ("clone") state: the per-profile state dir,
/// workspace binding, canonical handoff surface, handoff quality, checkout
/// continuity, session telemetry, the compiled-state artifact, and the
/// artifact consistency axes.
///
/// Severities for handoff findings come from `validation_profile`. When the
/// clone state directory is missing entirely, everything else is skipped.
fn validate_clone_state_with_profile(
    repo_root: &Path,
    layout: &StateLayout,
    locality_id: &str,
    validation_profile: &ValidationProfile,
) -> Result<Vec<DoctorCheck>> {
    let mut checks = Vec::new();
    let clone_profile_root = layout.clone_profile_root();
    if clone_profile_root.is_dir() {
        checks.push(pass_check(
            "clone_state",
            &clone_profile_root.display().to_string(),
            format!(
                "workspace-local state exists for profile `{}`",
                layout.profile()
            ),
        ));
    } else {
        checks.push(fail_check(
            "clone_state",
            &clone_profile_root.display().to_string(),
            format!(
                "workspace-local state is missing for profile `{}`",
                layout.profile()
            ),
        ));
        // No state directory means none of the checks below can apply.
        return Ok(checks);
    }
    // Directory substrates carry an explicit workspace-binding file.
    if layout.resolved_substrate().kind() == SubstrateKind::Directory {
        if let Some(binding_path) = layout.resolved_substrate().workspace_binding_path() {
            checks.push(pass_check(
                "workspace_binding",
                &binding_path.display().to_string(),
                format!(
                    "directory workspace binding is valid for profile `{}`",
                    layout.profile()
                ),
            ));
        }
    }
    let db_label = layout.state_db_path().display().to_string();
    // Load the canonical handoff surface; a failure records one check and
    // skips all handoff-derived checks (surface stays None).
    let handoff_surface = match runtime_state::load_canonical_handoff_surface(layout) {
        Ok(surface) => Some(surface),
        Err(error) => {
            checks.push(fail_check(
                "runtime_state",
                &db_label,
                format!("failed to check {db_label}: {error:#}"),
            ));
            None
        }
    };
    // runtime_state: both "no handoff yet" and "present" count as a pass.
    if let Some(surface) = handoff_surface.as_ref() {
        if surface.is_missing() {
            checks.push(pass_check(
                "runtime_state",
                &db_label,
                format!(
                    "{db_label} has no handoff state; `ccd start` will create it when a session begins"
                ),
            ));
        } else {
            checks.push(pass_check(
                "runtime_state",
                &db_label,
                format!("{db_label} is present and canonical"),
            ));
        }
    }
    if let Some(handoff_surface) = handoff_surface.as_ref() {
        let label = handoff_surface.path.display().to_string();
        if handoff_surface.is_missing() {
            // Severity for a missing handoff is profile-configurable.
            checks.push(configured_check(
                "handoff",
                &label,
                format!(
                    "{label} has no handoff state; run `ccd start` to create the canonical workspace-local handoff"
                ),
                validation_profile.doctor.handoff.missing,
            ));
        } else {
            checks.extend(validate_handoff_quality(
                &label,
                &handoff_surface.content,
                &validation_profile.doctor.handoff,
            ));
            if layout.resolved_substrate().is_git() {
                // Git substrate: surface checkout-continuity advisories.
                let git = handoff::read_git_state(repo_root, BranchMode::AllowDetachedHead)?;
                let checkout_advisory = handoff::checkout_continuity_advisory(repo_root, &git);
                if let Some(advisory) = checkout_advisory.as_ref() {
                    checks.push(warn_check(
                        "checkout_state",
                        &label,
                        advisory.doctor_message(&git),
                    ));
                }
            } else {
                // Non-git substrates cannot offer checkout freshness.
                checks.push(pass_check(
                    "checkout_state",
                    &label,
                    format!(
                        "{label} uses the directory substrate; Git checkout freshness is unavailable by design"
                    ),
                ));
            }
        }
    }
    // Session telemetry: stale data warns, absence passes with guidance.
    match session_state::load_for_layout(layout) {
        Ok(Some(state)) => {
            let now = session_state::now_epoch_s()?;
            if session_state::is_stale(&state, now) {
                checks.push(warn_check(
                    "session_state",
                    &db_label,
                    format!(
                        "{db_label} is stale; refresh session-state before relying on context-health telemetry"
                    ),
                ));
            } else {
                checks.push(pass_check(
                    "session_state",
                    &db_label,
                    format!("{db_label} is present and fresh"),
                ));
            }
        }
        Ok(None) => checks.push(pass_check(
            "session_state",
            &db_label,
            format!(
                "{db_label} has no session telemetry; run `ccd session-state start` to record it"
            ),
        )),
        Err(error) => checks.push(fail_check(
            "session_state",
            &db_label,
            format!("failed to read session state from {db_label}: {error:#}"),
        )),
    }
    // compiled-state is an optional optimization artifact: presence and
    // absence both pass, each with an explanatory message.
    let compiled_state_label = layout.compiled_state_path().display().to_string();
    let compiled_state_message = if layout.compiled_state_path().exists() {
        format!(
            "{compiled_state_label} is present as an implementation-private optimization artifact; default CCD correctness does not require it"
        )
    } else {
        format!(
            "{compiled_state_label} is absent; default CCD correctness computes derived runtime views on demand from canonical state"
        )
    };
    checks.push(pass_check(
        "compiled_state",
        &compiled_state_label,
        compiled_state_message,
    ));
    // Consistency audit is best-effort: a load failure downgrades to a
    // warning rather than failing the run.
    match consistency::evaluate(repo_root, layout, locality_id) {
        Ok(consistency) => checks.extend(consistency_doctor_checks(
            layout,
            locality_id,
            &consistency.axes,
        )?),
        Err(error) => checks.push(warn_check(
            "consistency_audit",
            &layout.state_db_path().display().to_string(),
            format!(
                "artifact consistency audit was skipped because runtime state could not be loaded deterministically: {error:#}"
            ),
        )),
    }
    Ok(checks)
}
/// Load the repo-scoped validation profile, recording a check describing the
/// load outcome. A missing profile is silent (defaults apply); an invalid
/// one fails but still yields the fallback profile.
fn load_repo_validation_profile(
    layout: &StateLayout,
    locality_id: &str,
    checks: &mut Vec<DoctorCheck>,
) -> Result<ValidationProfile> {
    let loaded = validation_profile::load_for_layout(layout, locality_id)?;
    let label = loaded.path.display().to_string();
    if loaded.status == "loaded" {
        checks.push(pass_check(
            "repo_validation_profile",
            &label,
            format!("{label} loaded"),
        ));
    } else if loaded.status != "missing" {
        let reason = loaded
            .error
            .unwrap_or_else(|| "invalid validation profile".to_owned());
        checks.push(fail_check(
            "repo_validation_profile",
            &label,
            format!("failed to load {label}: {reason}"),
        ));
    }
    Ok(loaded.profile)
}
/// Translate repo-native `check:` command results into doctor checks.
///
/// An absent command surface — or a loaded surface with no entries — yields
/// one informational pass. Otherwise each command result maps onto a
/// pass/warn/fail check, with failures pointing at `ccd check` for captured
/// output. Errors during inspection fail outright.
///
/// Fix: the "absent" and "loaded-but-empty" match arms were verbatim
/// duplicates; they are merged into a single guarded arm.
fn validate_repo_native_checks(repo_root: &Path) -> Vec<DoctorCheck> {
    match check::run(repo_root) {
        Ok(report)
            if report.surface.status == "absent"
                || (report.surface.status == "loaded" && report.surface.entries.is_empty()) =>
        {
            vec![pass_check(
                "repo_native_check",
                check::COMMANDS_DIR,
                report.surface.note.unwrap_or_else(|| {
                    format!(
                        "No repo-native `{}` commands registered under {}.",
                        check::CHECK_PREFIX,
                        check::COMMANDS_DIR
                    )
                }),
            )]
        }
        Ok(report) => report
            .checks
            .into_iter()
            .map(|result| match result.severity {
                "info" => pass_check("repo_native_check", &result.path, result.message),
                "warning" => warn_check("repo_native_check", &result.path, result.message),
                _ => fail_check(
                    "repo_native_check",
                    &result.path,
                    format!(
                        "{} Rerun `ccd check --path .` for captured stdout/stderr.",
                        result.message
                    ),
                ),
            })
            .collect(),
        Err(error) => vec![fail_check(
            "repo_native_check",
            check::COMMANDS_DIR,
            format!("failed to inspect repo-native checks: {error:#}"),
        )],
    }
}
/// Placeholder list and optional `(soft, hard)` line budget for a known
/// project-truth file. Unknown files get no placeholders and no budget.
fn known_file_rules<'a>(
    file: &'a str,
    sync: &sync::SyncProfile,
) -> (&'a [&'a str], Option<(usize, usize)>) {
    if file == "AGENTS.md" {
        // Only AGENTS.md carries a line budget, taken from the sync profile.
        return (
            AGENTS_PLACEHOLDERS,
            Some((sync.soft_line_limit, sync.hard_line_limit)),
        );
    }
    if file == "MEMORY.md" {
        return (MEMORY_PLACEHOLDERS, None);
    }
    (&[], None)
}
/// Inspect one core project-truth file: existence, unresolved template
/// placeholders, and (optionally) a soft/hard line budget.
///
/// Returns the collected checks plus the file's contents (None when the
/// file was missing). Read errors other than not-found propagate.
fn inspect_core_file(
    path: &Path,
    label: &str,
    placeholders: &[&str],
    budget: Option<(usize, usize)>,
) -> Result<InspectedCoreFile> {
    let contents = match fs::read_to_string(path) {
        Ok(text) => text,
        Err(error) if error.kind() == std::io::ErrorKind::NotFound => {
            // A missing file is a single failure; nothing more to inspect.
            return Ok(InspectedCoreFile {
                checks: vec![fail_check("exists", label, format!("{label} is missing"))],
                contents: None,
            });
        }
        Err(error) => return Err(error.into()),
    };
    let mut checks = vec![pass_check("exists", label, format!("{label} exists"))];
    let unresolved = unresolved_placeholders(&contents, placeholders);
    if !unresolved.is_empty() {
        checks.push(warn_check(
            "placeholders",
            label,
            format!(
                "{label} still contains template placeholders: {}",
                unresolved.join(", ")
            ),
        ));
    }
    if let Some((soft, hard)) = budget {
        let lines = sync::line_count(&contents);
        // Exceeding the hard limit fails; exceeding only the soft limit
        // warns.
        if lines > hard {
            checks.push(fail_check(
                "line_budget",
                label,
                format!("{label} is {lines} lines (hard limit: {hard})"),
            ));
        } else if lines > soft {
            checks.push(warn_check(
                "line_budget",
                label,
                format!("{label} is {lines} lines (soft limit: {soft})"),
            ));
        }
    }
    Ok(InspectedCoreFile {
        checks,
        contents: Some(contents),
    })
}
/// Parse a memory file's structured CCD entries and report their health.
///
/// Yields a single failure when the file is unreadable or has invalid
/// entries, nothing when it contains no structured blocks at all, and one
/// pass summarizing the entry count otherwise.
fn validate_structured_memory_file(path: &Path) -> Vec<DoctorCheck> {
    let label = path.display().to_string();
    let contents = match fs::read_to_string(path) {
        Ok(text) => text,
        Err(error) => {
            return vec![fail_check(
                "memory_structure",
                &label,
                format!("failed to read {}: {error}", path.display()),
            )];
        }
    };
    let parsed = memory_entries::parse_document(&contents);
    if !parsed.diagnostics.is_empty() {
        return vec![fail_check(
            "memory_structure",
            &label,
            format!(
                "{} has invalid structured CCD memory entries: {}",
                path.display(),
                parsed.diagnostics.join("; ")
            ),
        )];
    }
    if parsed.block_count == 0 {
        // No structured blocks: nothing to report either way.
        return Vec::new();
    }
    let noun = match parsed.entries.len() {
        1 => "entry",
        _ => "entries",
    };
    vec![pass_check(
        "memory_structure",
        &label,
        format!(
            "{} has {} valid structured CCD memory {}",
            path.display(),
            parsed.entries.len(),
            noun
        ),
    )]
}
/// Subset of `placeholders` that still appear verbatim in `contents`,
/// preserving the input ordering.
fn unresolved_placeholders<'a>(contents: &str, placeholders: &[&'a str]) -> Vec<&'a str> {
    let mut found = Vec::new();
    for &placeholder in placeholders {
        if contents.contains(placeholder) {
            found.push(placeholder);
        }
    }
    found
}
/// Assess handoff quality: verify every required section is present, and
/// flag session-log narration phrases (matched ASCII-case-insensitively).
///
/// The severity of each finding (sections / narration) is taken from the
/// repo's handoff validation profile.
///
/// Fix: the section-loop binding had been corrupted by HTML entity mangling
/// (`§ion` from `&sect`); restored to `&section`.
fn validate_handoff_quality(
    label: &str,
    contents: &str,
    handoff_profile: &HandoffDoctorValidationProfile,
) -> Vec<DoctorCheck> {
    let mut checks = Vec::new();
    let mut missing_sections = Vec::new();
    for &section in handoff::REQUIRED_SECTIONS {
        if !handoff::has_section(contents, section) {
            missing_sections.push(section);
        }
    }
    if missing_sections.is_empty() {
        checks.push(pass_check(
            "handoff_quality",
            label,
            format!("{label} has all required sections"),
        ));
    } else {
        checks.push(configured_check(
            "handoff_sections",
            label,
            format!(
                "{label} is missing required sections: {}",
                missing_sections.join(", ")
            ),
            handoff_profile.sections,
        ));
    }
    // Narration phrases suggest a play-by-play session log rather than a
    // statement of current facts.
    let matched: Vec<&&str> = SESSION_LOG_PHRASES
        .iter()
        .filter(|phrase| contains_ascii_case_insensitive(contents, phrase))
        .collect();
    if !matched.is_empty() {
        checks.push(configured_check(
            "handoff_narration",
            label,
            format!(
                "{label} reads like a session log — found narration phrases: {}. \
                Prefer factual state descriptions over action narration.",
                matched
                    .iter()
                    .map(|p| format!("\"{p}\""))
                    .collect::<Vec<_>>()
                    .join(", ")
            ),
            handoff_profile.narration,
        ));
    }
    checks
}
/// Returns true when `needle` occurs anywhere in `contents`, comparing ASCII
/// bytes case-insensitively (non-ASCII bytes must match exactly).
///
/// Fix: guards the empty needle, which previously hit
/// `slice::windows(0)` and panicked. An empty needle now always matches,
/// mirroring `str::contains("")`.
fn contains_ascii_case_insensitive(contents: &str, needle: &str) -> bool {
    if needle.is_empty() {
        return true;
    }
    let haystack = contents.as_bytes();
    let needle = needle.as_bytes();
    haystack
        .windows(needle.len())
        .any(|window| window.eq_ignore_ascii_case(needle))
}
#[cfg(test)]
mod tests {
    use super::contains_ascii_case_insensitive;

    // Matching must ignore ASCII case and must not match a needle that
    // merely shares a prefix with text in the haystack.
    #[test]
    fn contains_ascii_case_insensitive_matches_mixed_case_boundaries() {
        assert!(contains_ascii_case_insensitive("Session Log", "session"));
        assert!(contains_ascii_case_insensitive("Session Log", "LOG"));
        assert!(!contains_ascii_case_insensitive("Session Log", "logout"));
    }
}
/// Cross-check skills installed under `$HOME` against the skills this binary
/// ships: warn about stale `ccd-*` installs that are no longer shipped, and
/// about shipped skills not installed for a runtime.
///
/// Best-effort: when `HOME` is unset (NOTE(review): e.g. on Windows —
/// confirm that skipping is the intended behavior there), or a runtime's
/// skills dir is absent/unreadable, the affected runtime is silently
/// skipped.
fn validate_installed_skills() -> Vec<DoctorCheck> {
    let home = match std::env::var_os("HOME") {
        Some(h) => PathBuf::from(h),
        None => return Vec::new(),
    };
    let embedded_names: HashSet<&str> = crate::shipped_skills::embedded_skill_names()
        .into_iter()
        .collect();
    let mut checks = Vec::new();
    for runtime in skills::RUNTIMES {
        let skills_dir = home.join(runtime.global_dir);
        if !skills_dir.is_dir() {
            continue;
        }
        let entries = match fs::read_dir(&skills_dir) {
            Ok(entries) => entries,
            Err(_) => continue,
        };
        let mut installed_names: HashSet<String> = HashSet::new();
        for entry in entries.flatten() {
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            let dir_name = match path.file_name().and_then(|n| n.to_str()) {
                Some(name) => name.to_owned(),
                None => continue,
            };
            // Only CCD-managed skill directories are considered.
            if !dir_name.starts_with("ccd-") {
                continue;
            }
            installed_names.insert(dir_name.clone());
            // Installed but no longer shipped: stale.
            if !embedded_names.contains(dir_name.as_str()) {
                checks.push(warn_check(
                    "installed_skills",
                    &path.display().to_string(),
                    format!(
                        "stale skill `{dir_name}` installed for {} but no longer shipped; run `ccd skills install` to clean up",
                        runtime.name
                    ),
                ));
            }
        }
        // Second pass: shipped skills that were never installed.
        for &name in &embedded_names {
            if !installed_names.contains(name) {
                checks.push(warn_check(
                    "installed_skills",
                    &skills_dir.join(name).display().to_string(),
                    format!(
                        "skill `{name}` is shipped but not installed for {}; run `ccd skills install` to install",
                        runtime.name
                    ),
                ));
            }
        }
    }
    checks
}
/// Build a passing check (`status: "pass"`, `severity: "info"`).
fn pass_check(check: &str, file: &str, message: impl Into<String>) -> DoctorCheck {
    let message = message.into();
    DoctorCheck {
        status: "pass",
        severity: "info",
        check: check.to_owned(),
        file: file.to_owned(),
        message,
    }
}
/// Build a warning check (`status: "warn"`, `severity: "warning"`).
fn warn_check(check: &str, file: &str, message: impl Into<String>) -> DoctorCheck {
    let message = message.into();
    DoctorCheck {
        status: "warn",
        severity: "warning",
        check: check.to_owned(),
        file: file.to_owned(),
        message,
    }
}
/// Map consistency axes onto doctor checks: `Drift` warns, while `Aligned`
/// and `NoSignal` both pass. The surface label for each axis is resolved
/// via `consistency_axis_surface`.
fn consistency_doctor_checks(
    layout: &StateLayout,
    locality_id: &str,
    axes: &[consistency::ConsistencyAxis],
) -> Result<Vec<DoctorCheck>> {
    let mut checks = Vec::with_capacity(axes.len());
    for axis in axes {
        let label = consistency_axis_surface(layout, locality_id, axis.id)?;
        let summary = axis.summary.clone();
        checks.push(match axis.status {
            consistency::ConsistencyStatus::Drift => warn_check(axis.id, &label, summary),
            consistency::ConsistencyStatus::Aligned
            | consistency::ConsistencyStatus::NoSignal => pass_check(axis.id, &label, summary),
        });
    }
    Ok(checks)
}
/// Human-readable surface label for a consistency axis: the memory-coherence
/// axis spans both runtime-state files; every other axis maps to the state
/// database path.
fn consistency_axis_surface(
    layout: &StateLayout,
    locality_id: &str,
    axis_id: &str,
) -> Result<String> {
    if axis_id == consistency::MEMORY_COHERENCE_AXIS {
        return Ok(format!(
            "{}, {}",
            layout.profile_runtime_state_path().display(),
            layout.locality_runtime_state_path(locality_id)?.display()
        ));
    }
    Ok(layout.state_db_path().display().to_string())
}
// Result of inspecting one core project-truth file: the checks produced,
// plus the raw file contents (None when the file was missing).
struct InspectedCoreFile {
    checks: Vec<DoctorCheck>,
    contents: Option<String>,
}
/// Build a failing check (`status: "fail"`, `severity: "error"`); these are
/// what drive the non-zero exit code.
fn fail_check(check: &str, file: &str, message: impl Into<String>) -> DoctorCheck {
    let message = message.into();
    DoctorCheck {
        status: "fail",
        severity: "error",
        check: check.to_owned(),
        file: file.to_owned(),
        message,
    }
}
/// Build a check whose severity (warn vs. fail) is dictated by the repo's
/// validation profile.
fn configured_check(
    check: &str,
    file: &str,
    message: impl Into<String>,
    severity: ValidationSeverity,
) -> DoctorCheck {
    let message = message.into();
    match severity {
        ValidationSeverity::Error => fail_check(check, file, message),
        ValidationSeverity::Warning => warn_check(check, file, message),
    }
}