1use crate::core::broker::DbBroker;
7use crate::core::capsule_policy::{self, POLICY_SCHEMA_VERSION};
8use crate::core::context_capsule::DeterministicContextCapsule;
9use crate::core::error;
10use crate::core::migration;
11use crate::core::output;
12use crate::core::plan_governance;
13use crate::core::project_specs::{
14 LOCAL_PROJECT_SPECS, LOCAL_PROJECT_SPECS_ARCHITECTURE, LOCAL_PROJECT_SPECS_DIR,
15 LOCAL_PROJECT_SPECS_INTENT, LOCAL_PROJECT_SPECS_INTERFACES, LOCAL_PROJECT_SPECS_MANIFEST,
16 LOCAL_PROJECT_SPECS_MANIFEST_SCHEMA, LOCAL_PROJECT_SPECS_OPERATIONS,
17 LOCAL_PROJECT_SPECS_SECURITY, LOCAL_PROJECT_SPECS_SEMANTICS, LOCAL_PROJECT_SPECS_VALIDATION,
18 hash_text, read_specs_manifest, repo_signal_fingerprint,
19};
20use crate::core::scaffold::DECAPOD_GITIGNORE_RULES;
21use crate::core::store::{Store, StoreKind};
22use crate::core::workunit::{self, WorkUnitManifest, WorkUnitStatus};
23use crate::plugins::aptitude::{SkillCard, SkillResolution};
24use crate::plugins::internalize::{self, DeterminismClass, InternalizationManifest, ReplayClass};
25use crate::{db, primitives, todo};
26use fancy_regex::Regex;
27use serde::Serialize;
28use serde_json;
29use std::collections::HashSet;
30use std::fs;
31use std::path::{Path, PathBuf};
32use std::sync::Mutex;
33use std::sync::atomic::{AtomicU32, Ordering};
34use std::time::{Duration, Instant};
35
/// Runs one validation gate body and records how long it took.
///
/// An `Err` from `$body` is converted into a recorded validation failure
/// (via `fail`) rather than propagated — a broken gate must not abort the
/// whole validation run. The gate's wall-clock duration is appended to the
/// shared `$timings` list under `$name`.
macro_rules! gate {
    ($_scope:expr, $timings:expr, $ctx:expr, $name:literal, $body:expr) => {{
        let start = Instant::now();
        // Gate errors become failures on the context, not early exits.
        if let Err(e) = $body {
            fail(&format!("gate error: {e}"), $ctx);
        }
        $timings.lock().unwrap().push(($name, start.elapsed()));
    }};
}
48
/// Mutable state accumulated while validation gates run.
///
/// Counters are atomics and the message lists are mutex-guarded, so gates
/// may record results from multiple threads.
struct ValidationContext {
    // Count of passing checks; `skip` also increments this (see `skip`).
    pass_count: AtomicU32,
    fail_count: AtomicU32,
    warn_count: AtomicU32,
    // Human-readable failure messages, in the order they were recorded.
    fails: Mutex<Vec<String>>,
    warns: Mutex<Vec<String>>,
    // Memoized directory walks: (walk root, files found under it).
    // Used by `collect_repo_files` so repeated gates don't re-scan disk.
    repo_files_cache: Mutex<Vec<(PathBuf, Vec<PathBuf>)>>,
}
57
/// Serializable timing record for a single validation gate.
#[derive(Debug, Clone, Serialize)]
pub struct ValidationGateTiming {
    pub name: String,
    pub elapsed_ms: u64,
}
63
/// Serializable summary of a complete validation run: overall status,
/// aggregate counters, the recorded failure/warning messages, and
/// per-gate timings.
#[derive(Debug, Clone, Serialize)]
pub struct ValidationReport {
    pub status: String,
    pub elapsed_ms: u64,
    pub pass_count: u32,
    pub fail_count: u32,
    pub warn_count: u32,
    pub failures: Vec<String>,
    pub warnings: Vec<String>,
    pub gate_timings: Vec<ValidationGateTiming>,
}
75
76impl ValidationContext {
77 fn new() -> Self {
78 Self {
79 pass_count: AtomicU32::new(0),
80 fail_count: AtomicU32::new(0),
81 warn_count: AtomicU32::new(0),
82 fails: Mutex::new(Vec::new()),
83 warns: Mutex::new(Vec::new()),
84 repo_files_cache: Mutex::new(Vec::new()),
85 }
86 }
87}
88
89fn collect_repo_files(
90 root: &Path,
91 out: &mut Vec<PathBuf>,
92 ctx: &ValidationContext,
93) -> Result<(), error::DecapodError> {
94 let cached = {
96 let cache = ctx.repo_files_cache.lock().unwrap();
97 cache
98 .iter()
99 .find(|(k, _)| k == root)
100 .map(|(_, v)| v.clone())
101 };
102 if let Some(files) = cached {
103 out.extend(files);
104 return Ok(());
105 }
106
107 fn recurse(dir: &Path, out: &mut Vec<PathBuf>) -> Result<(), error::DecapodError> {
108 if !dir.is_dir() {
109 return Ok(());
110 }
111
112 let name = dir.file_name().and_then(|s| s.to_str()).unwrap_or("");
113 if matches!(
116 name,
117 ".git"
118 | "target"
119 | ".decapod"
120 | "artifacts"
121 | "node_modules"
122 | ".venv"
123 | ".mypy_cache"
124 | ".pytest_cache"
125 ) {
126 return Ok(());
127 }
128
129 for entry in fs::read_dir(dir).map_err(error::DecapodError::IoError)? {
130 let entry = entry.map_err(error::DecapodError::IoError)?;
131 let path = entry.path();
132 if path.is_dir() {
133 recurse(&path, out)?;
134 } else if path.is_file() {
135 out.push(path);
136 }
137 }
138 Ok(())
139 }
140
141 let start = out.len();
142 recurse(root, out)?;
143 ctx.repo_files_cache
145 .lock()
146 .unwrap()
147 .push((root.to_path_buf(), out[start..].to_vec()));
148 Ok(())
149}
150
151fn validate_no_legacy_namespaces(
152 ctx: &ValidationContext,
153 decapod_dir: &Path,
154) -> Result<(), error::DecapodError> {
155 info("Namespace Purge Gate");
156
157 let mut files = Vec::new();
158 collect_repo_files(decapod_dir, &mut files, ctx)?;
159
160 let needles = [
161 [".".to_string(), "globex".to_string()].concat(),
162 [".".to_string(), "codex".to_string()].concat(),
163 ];
164 let mut offenders: Vec<(PathBuf, String)> = Vec::new();
165
166 for path in files {
167 if path.extension().is_some_and(|e| e == "db") {
169 continue;
170 }
171 let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
172 let is_texty = matches!(
173 ext,
174 "md" | "rs" | "toml" | "json" | "jsonl" | "yml" | "yaml" | "sh" | "lock"
175 );
176 if !is_texty {
177 continue;
178 }
179 let content = match fs::read_to_string(&path) {
180 Ok(c) => c,
181 Err(_) => continue,
182 };
183 for n in needles.iter() {
184 if content.contains(n) {
185 offenders.push((path.clone(), n.clone()));
186 }
187 }
188 }
189
190 if offenders.is_empty() {
191 pass(
192 "No legacy namespace references found in repo text sources",
193 ctx,
194 );
195 } else {
196 let mut msg = String::from("Forbidden legacy namespace references found:");
197 for (p, n) in offenders.iter().take(12) {
198 msg.push_str(&format!(" {}({})", p.display(), n));
199 }
200 if offenders.len() > 12 {
201 msg.push_str(&format!(" ... ({} total)", offenders.len()));
202 }
203 fail(&msg, ctx);
204 }
205 Ok(())
206}
207
/// Gate: embedded constitution markdown must be self-contained. `.decapod/`
/// paths may appear only on lines carrying a sanctioned context marker
/// (store locations, overrides, generated/data subtrees, etc.); any surplus
/// reference flags the file.
fn validate_embedded_self_contained(
    ctx: &ValidationContext,
    repo_root: &Path,
) -> Result<(), error::DecapodError> {
    info("Embedded Self-Contained Gate");

    let constitution_dir = repo_root.join("constitution");
    if !constitution_dir.exists() {
        // Repos without an embedded constitution are exempt from this gate.
        skip("No constitution/ directory found (decapod repo)", ctx);
        return Ok(());
    }

    let mut files = Vec::new();
    collect_repo_files(&constitution_dir, &mut files, ctx)?;

    let mut offenders: Vec<PathBuf> = Vec::new();

    for path in files {
        // Only markdown is inspected.
        if path.extension().and_then(|e| e.to_str()) != Some("md") {
            continue;
        }

        // Unreadable files are skipped rather than failed.
        let content = match fs::read_to_string(&path) {
            Ok(c) => c,
            Err(_) => continue,
        };

        if content.contains(".decapod/") {
            // Count references that appear on lines with any sanctioned
            // marker; every reference on such a line is deemed legitimate.
            let mut legitimate_ref_count = 0usize;
            for line in content.lines() {
                let refs_on_line = line.matches(".decapod/").count();
                if refs_on_line == 0 {
                    continue;
                }
                let is_legitimate_line = line.contains("<repo>")
                    || line.contains("store:")
                    || line.contains("directory")
                    || line.contains("override")
                    || line.contains("Override")
                    || line.contains("OVERRIDE.md")
                    || line.contains("Location:")
                    || line.contains("primarily contain")
                    || line.contains(".decapod/context/")
                    || line.contains(".decapod/memory/")
                    || line.contains("intended as")
                    || line.contains(".decapod/knowledge/")
                    || line.contains(".decapod/data/")
                    || line.contains(".decapod/workspaces/")
                    || line.contains(".decapod/generated/")
                    || line.contains(".decapod/generated/specs/")
                    || line.contains(".decapod/generated/policy/")
                    || line.contains(".decapod/policy/")
                    || line.contains("repo-scoped");
                if is_legitimate_line {
                    legitimate_ref_count += refs_on_line;
                }
            }

            // A file fails only when it has more references than the
            // sanctioned lines account for.
            let total_decapod_refs = content.matches(".decapod/").count();
            if total_decapod_refs > legitimate_ref_count {
                offenders.push(path);
            }
        }
    }

    if offenders.is_empty() {
        pass(
            "Embedded constitution files contain no invalid .decapod/ references",
            ctx,
        );
    } else {
        // Cap the report at eight paths to keep the message readable.
        let mut msg =
            String::from("Embedded constitution files contain invalid .decapod/ references:");
        for p in offenders.iter().take(8) {
            msg.push_str(&format!(" {}", p.display()));
        }
        if offenders.len() > 8 {
            msg.push_str(&format!(" ... ({} total)", offenders.len()));
        }
        fail(&msg, ctx);
    }
    Ok(())
}
294
/// Records a passing check. The message is currently unused — validation
/// runs quietly and only counters/failure text are kept.
fn pass(_message: &str, ctx: &ValidationContext) {
    ctx.pass_count.fetch_add(1, Ordering::Relaxed);
}
298
/// Records a failing check: bumps the counter and keeps the message for
/// the final report.
fn fail(message: &str, ctx: &ValidationContext) {
    ctx.fail_count.fetch_add(1, Ordering::Relaxed);
    ctx.fails.lock().unwrap().push(message.to_string());
}
303
/// Records a skipped check. Deliberately counted as a pass: a gate that
/// does not apply must not drag the report toward failure.
fn skip(_message: &str, ctx: &ValidationContext) {
    ctx.pass_count.fetch_add(1, Ordering::Relaxed);
}
307
/// Records a non-fatal warning: bumps the counter and keeps the message
/// for the final report.
fn warn(message: &str, ctx: &ValidationContext) {
    ctx.warn_count.fetch_add(1, Ordering::Relaxed);
    ctx.warns.lock().unwrap().push(message.to_string());
}
312
/// Gate banner hook; intentionally a no-op today. Gates still pass a
/// human-readable name so verbose output could be reinstated here without
/// touching call sites.
fn info(_message: &str) {}
314
315fn count_tasks_in_db(db_path: &Path) -> Result<i64, error::DecapodError> {
316 let conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
317 let count: i64 = conn
318 .query_row("SELECT COUNT(*) FROM tasks", [], |row| row.get(0))
319 .map_err(error::DecapodError::RusqliteError)?;
320 Ok(count)
321}
322
323fn fetch_tasks_fingerprint(db_path: &Path) -> Result<String, error::DecapodError> {
324 let conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
325 let mut stmt = conn
326 .prepare("SELECT id,title,status,updated_at,dir_path,scope,priority FROM tasks ORDER BY id")
327 .map_err(error::DecapodError::RusqliteError)?;
328 let rows = stmt
329 .query_map([], |row| {
330 Ok(serde_json::json!({
331 "id": row.get::<_, String>(0)?,
332 "title": row.get::<_, String>(1)?,
333 "status": row.get::<_, String>(2)?,
334 "updated_at": row.get::<_, String>(3)?,
335 "dir_path": row.get::<_, String>(4)?,
336 "scope": row.get::<_, String>(5)?,
337 "priority": row.get::<_, String>(6)?,
338 }))
339 })
340 .map_err(error::DecapodError::RusqliteError)?;
341
342 let mut out = Vec::new();
343 for r in rows {
344 out.push(r.map_err(error::DecapodError::RusqliteError)?);
345 }
346 Ok(serde_json::to_string(&out).unwrap())
347}
348
349fn validate_user_store_blank_slate(ctx: &ValidationContext) -> Result<(), error::DecapodError> {
350 info("Store: user (blank-slate semantics)");
351 let tmp_root = std::env::temp_dir().join(format!(
352 "decapod_validate_user_{}",
353 crate::core::ulid::new_ulid()
354 ));
355 fs::create_dir_all(&tmp_root).map_err(error::DecapodError::IoError)?;
356
357 todo::initialize_todo_db(&tmp_root)?;
358 let db_path = tmp_root.join("todo.db");
359 let n = count_tasks_in_db(&db_path)?;
360
361 if n == 0 {
362 pass("User store starts empty (no automatic seeding)", ctx);
363 } else {
364 fail(
365 &format!(
366 "User store is not empty on fresh init ({} task(s) found)",
367 n
368 ),
369 ctx,
370 );
371 }
372 Ok(())
373}
374
375fn validate_repo_store_dogfood(
376 store: &Store,
377 ctx: &ValidationContext,
378 _decapod_dir: &Path,
379) -> Result<(), error::DecapodError> {
380 info("Store: repo (dogfood backlog semantics)");
381
382 let events = store.root.join("todo.events.jsonl");
383 if !events.is_file() {
384 fail("Repo store missing todo.events.jsonl", ctx);
385 return Ok(());
386 }
387 let content = fs::read_to_string(&events).map_err(error::DecapodError::IoError)?;
388 let add_count = content
389 .lines()
390 .filter(|l| l.contains("\"event_type\":\"task.add\""))
391 .count();
392
393 pass(
395 &format!(
396 "Repo backlog event log present ({} task.add events)",
397 add_count
398 ),
399 ctx,
400 );
401
402 let db_path = store.root.join("todo.db");
403 if !db_path.is_file() {
404 fail("Repo store missing todo.db", ctx);
405 return Ok(());
406 }
407
408 let broker = DbBroker::new(&store.root);
410 let replay_report = broker.verify_replay()?;
411 if replay_report.divergences.is_empty() {
412 pass("Audit log integrity verified (no pending event gaps)", ctx);
413 } else {
414 warn(
415 &format!(
416 "Audit log contains {} potential crash divergence(s); historical pending entries detected. Run `decapod data broker verify` for details.",
417 replay_report.divergences.len(),
418 ),
419 ctx,
420 );
421 }
422
423 let tmp_root = std::env::temp_dir().join(format!(
424 "decapod_validate_repo_{}",
425 crate::core::ulid::new_ulid()
426 ));
427 fs::create_dir_all(&tmp_root).map_err(error::DecapodError::IoError)?;
428 let tmp_db = tmp_root.join("todo.db");
429 let _events = todo::rebuild_db_from_events(&events, &tmp_db)?;
430
431 let fp_a = fetch_tasks_fingerprint(&db_path)?;
432 let fp_b = fetch_tasks_fingerprint(&tmp_db)?;
433 if fp_a == fp_b {
434 pass(
435 "Repo todo.db matches deterministic rebuild from todo.events.jsonl",
436 ctx,
437 );
438 } else {
439 fail(
440 "Repo todo.db does NOT match rebuild from todo.events.jsonl",
441 ctx,
442 );
443 }
444
445 Ok(())
446}
447
448fn validate_repo_map(
449 ctx: &ValidationContext,
450 _decapod_dir: &Path, ) -> Result<(), error::DecapodError> {
452 info("Repo Map");
453
454 pass(
457 "Methodology constitution checks will verify embedded docs.",
458 ctx,
459 );
460
461 let required_specs = ["specs/INTENT.md", "specs/SYSTEM.md"];
462 let required_methodology = ["methodology/ARCHITECTURE.md"];
463 for r in required_specs {
464 if crate::core::assets::get_doc(r).is_some() {
465 pass(&format!("Constitution doc {} present (embedded)", r), ctx);
466 } else {
467 fail(&format!("Constitution doc {} missing (embedded)", r), ctx);
468 }
469 }
470 for r in required_methodology {
471 if crate::core::assets::get_doc(r).is_some() {
472 pass(&format!("Constitution doc {} present (embedded)", r), ctx);
473 } else {
474 fail(&format!("Constitution doc {} missing (embedded)", r), ctx);
475 }
476 }
477 Ok(())
478}
479
480fn validate_docs_templates_bucket(
481 ctx: &ValidationContext,
482 decapod_dir: &Path,
483) -> Result<(), error::DecapodError> {
484 info("Entrypoint Gate");
485
486 let required = ["AGENTS.md", "CLAUDE.md", "GEMINI.md", "CODEX.md"];
488 for a in required {
489 let p = decapod_dir.join(a);
490 if p.is_file() {
491 pass(&format!("Root entrypoint {} present", a), ctx);
492 } else {
493 fail(
494 &format!("Root entrypoint {} missing from project root", a),
495 ctx,
496 );
497 }
498 }
499
500 if decapod_dir.join(".decapod").join("README.md").is_file() {
501 pass(".decapod/README.md present", ctx);
502 } else {
503 fail(".decapod/README.md missing", ctx);
504 }
505
506 let forbidden_docs = decapod_dir.join(".decapod").join("docs");
508 if forbidden_docs.exists() {
509 fail(
510 "Decapod internal docs were copied into .decapod/docs/ (Forbidden)",
511 ctx,
512 );
513 } else {
514 pass(
515 "Decapod internal docs correctly excluded from project repo",
516 ctx,
517 );
518 }
519
520 let forbidden_projects = decapod_dir.join(".decapod").join("projects");
522 if forbidden_projects.exists() {
523 fail("Legacy .decapod/projects/ directory found (Forbidden)", ctx);
524 } else {
525 pass(".decapod/projects/ correctly absent", ctx);
526 }
527
528 Ok(())
529}
530
/// Gate: AGENTS.md and the per-agent entrypoints (CLAUDE/GEMINI/CODEX.md)
/// must contain the mandated invariant phrases, stay under their line
/// budgets, avoid legacy router references, and not duplicate contract
/// details that belong in the constitution.
fn validate_entrypoint_invariants(
    ctx: &ValidationContext,
    decapod_dir: &Path,
) -> Result<(), error::DecapodError> {
    info("Four Invariants Gate");

    let agents_path = decapod_dir.join("AGENTS.md");
    if !agents_path.is_file() {
        fail("AGENTS.md missing, cannot check invariants", ctx);
        return Ok(());
    }

    let content = fs::read_to_string(&agents_path).map_err(error::DecapodError::IoError)?;
    // Markers are matched case-insensitively against this lowered copy.
    let normalized = content.to_ascii_lowercase();

    // (marker substring, human description) pairs; all must be present.
    let exact_invariants = [
        ("core/decapod.md", "Router pointer to core/DECAPOD.md"),
        ("cargo install decapod", "Version update gate language"),
        ("decapod validate", "Validation gate language"),
        (
            "decapod docs ingest",
            "Core constitution ingestion mandate language",
        ),
        ("stop if", "Stop-if-missing behavior"),
        ("docker git workspaces", "Docker workspace mandate language"),
        (
            "decapod todo claim --id <task-id>",
            "Task claim-before-work mandate language",
        ),
        (
            "request elevated permissions before docker/container workspace commands",
            "Elevated-permissions mandate language",
        ),
        (
            "decapod_session_password",
            "Per-agent session password mandate language",
        ),
        ("via decapod cli", "Jail rule: .decapod access is CLI-only"),
        (
            "interface abstraction boundary",
            "Control-plane opacity language",
        ),
        (
            "strict dependency: you are strictly bound to the decapod control plane",
            "Agent dependency enforcement language",
        ),
        ("✅", "Four invariants checklist format"),
    ];

    let mut all_present = true;
    for (marker, description) in exact_invariants {
        // The checkmark is matched against the raw content; everything
        // else against the lowercased copy.
        let present = if marker == "✅" {
            content.contains(marker)
        } else {
            normalized.contains(marker)
        };
        if present {
            pass(&format!("Invariant present: {}", description), ctx);
        } else {
            fail(&format!("Invariant missing: {}", description), ctx);
            all_present = false;
        }
    }

    // Retired router names must not reappear (case-sensitive match).
    let legacy_routers = ["MAESTRO.md", "GLOBEX.md", "CODEX.md\" as router"];
    for legacy in legacy_routers {
        if content.contains(legacy) {
            fail(
                &format!("AGENTS.md contains legacy router reference: {}", legacy),
                ctx,
            );
            all_present = false;
        }
    }

    // Thin-waist budget for the shared entrypoint.
    let line_count = content.lines().count();
    const MAX_AGENTS_LINES: usize = 100;
    if line_count <= MAX_AGENTS_LINES {
        pass(
            &format!(
                "AGENTS.md is thin ({} lines ≤ {})",
                line_count, MAX_AGENTS_LINES
            ),
            ctx,
        );
    } else {
        fail(
            &format!(
                "AGENTS.md exceeds line limit ({} lines > {})",
                line_count, MAX_AGENTS_LINES
            ),
            ctx,
        );
        all_present = false;
    }

    // Per-agent files get a tighter budget and their own marker checks.
    const MAX_AGENT_SPECIFIC_LINES: usize = 70;
    for agent_file in ["CLAUDE.md", "GEMINI.md", "CODEX.md"] {
        let agent_path = decapod_dir.join(agent_file);
        if !agent_path.is_file() {
            fail(&format!("{} missing from project root", agent_file), ctx);
            all_present = false;
            continue;
        }

        let agent_content =
            fs::read_to_string(&agent_path).map_err(error::DecapodError::IoError)?;

        // NOTE(review): the first pattern is subsumed by the plain
        // "AGENTS.md" check — kept as-is to preserve behavior.
        if agent_content.contains("See `AGENTS.md`") || agent_content.contains("AGENTS.md") {
            pass(&format!("{} defers to AGENTS.md", agent_file), ctx);
        } else {
            fail(&format!("{} does not reference AGENTS.md", agent_file), ctx);
            all_present = false;
        }

        if agent_content.contains("core/DECAPOD.md") {
            pass(&format!("{} references canonical router", agent_file), ctx);
        } else {
            fail(
                &format!("{} missing canonical router reference", agent_file),
                ctx,
            );
            all_present = false;
        }

        // Constitution content must be addressed via embedded doc paths,
        // never via direct filesystem paths under constitution/.
        if agent_content.contains("decapod docs show constitution/")
            || agent_content.contains("(constitution/")
        {
            fail(
                &format!(
                    "{} references direct constitution filesystem paths; use embedded doc paths (e.g. core/*, specs/*, docs/*)",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        } else if agent_content.contains("decapod docs show docs/") {
            pass(
                &format!("{} references embedded docs path convention", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!(
                    "{} missing embedded docs path reference (`decapod docs show docs/...`)",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains(".decapod files are accessed only via decapod CLI") {
            pass(
                &format!("{} includes .decapod CLI-only jail rule", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!("{} missing .decapod CLI-only jail rule marker", agent_file),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains("Docker git workspaces") {
            pass(
                &format!("{} includes Docker workspace mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!("{} missing Docker workspace mandate marker", agent_file),
                ctx,
            );
            all_present = false;
        }

        if agent_content
            .contains("request elevated permissions before Docker/container workspace commands")
        {
            pass(
                &format!("{} includes elevated-permissions mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!("{} missing elevated-permissions mandate marker", agent_file),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains("DECAPOD_SESSION_PASSWORD") {
            pass(
                &format!("{} includes per-agent session password mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!(
                    "{} missing per-agent session password mandate marker",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains("decapod todo claim --id <task-id>") {
            pass(
                &format!("{} includes claim-before-work mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!("{} missing claim-before-work mandate marker", agent_file),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains("decapod todo add \"<task>\"") {
            pass(
                &format!("{} includes task creation mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!("{} missing task creation mandate marker", agent_file),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains(".decapod/workspaces") {
            pass(
                &format!("{} includes canonical workspace path mandate", agent_file),
                ctx,
            );
        } else {
            fail(
                &format!(
                    "{} missing canonical workspace path marker (`.decapod/workspaces`)",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        }

        // `.claude/worktrees` may only be mentioned in a forbidding
        // (negative) context; any positive mention fails.
        if agent_content.contains(".claude/worktrees") {
            let mut has_forbidden_positive_reference = false;
            for line in agent_content.lines() {
                if !line.contains(".claude/worktrees") {
                    continue;
                }
                let lower = line.to_ascii_lowercase();
                let is_negative_context = lower.contains("never")
                    || lower.contains("forbid")
                    || lower.contains("non-canonical")
                    || lower.contains("must not")
                    || lower.contains("do not");
                if !is_negative_context {
                    has_forbidden_positive_reference = true;
                    break;
                }
            }
            if has_forbidden_positive_reference {
                fail(
                    &format!(
                        "{} references forbidden non-canonical worktree path `.claude/worktrees`",
                        agent_file
                    ),
                    ctx,
                );
                all_present = false;
            } else {
                pass(
                    &format!(
                        "{} explicitly forbids `.claude/worktrees` non-canonical path",
                        agent_file
                    ),
                    ctx,
                );
            }
        }

        if agent_content.contains("decapod docs ingest") {
            pass(
                &format!(
                    "{} includes core constitution ingestion mandate",
                    agent_file
                ),
                ctx,
            );
        } else {
            fail(
                &format!(
                    "{} missing core constitution ingestion mandate marker",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        }

        if agent_content.contains("cargo install decapod") {
            pass(&format!("{} includes version update step", agent_file), ctx);
        } else {
            fail(
                &format!(
                    "{} missing version update step (`cargo install decapod`)",
                    agent_file
                ),
                ctx,
            );
            all_present = false;
        }

        let agent_lines = agent_content.lines().count();
        if agent_lines <= MAX_AGENT_SPECIFIC_LINES {
            pass(
                &format!(
                    "{} is thin ({} lines ≤ {})",
                    agent_file, agent_lines, MAX_AGENT_SPECIFIC_LINES
                ),
                ctx,
            );
        } else {
            fail(
                &format!(
                    "{} exceeds line limit ({} lines > {})",
                    agent_file, agent_lines, MAX_AGENT_SPECIFIC_LINES
                ),
                ctx,
            );
            all_present = false;
        }

        // Section headers that indicate contract details were duplicated
        // into an agent file instead of living in the constitution.
        let duplication_markers = [
            "## Lifecycle States",
            "## Validation Rules",
            "### Proof Gates",
            "## Store Model",
        ];
        for marker in duplication_markers {
            if agent_content.contains(marker) {
                fail(
                    &format!(
                        "{} contains duplicated contract details ({})",
                        agent_file, marker
                    ),
                    ctx,
                );
                all_present = false;
            }
        }
    }

    if all_present {
        pass("All entrypoint files follow thin waist architecture", ctx);
    }

    Ok(())
}
916
/// Gate: when a constitution/ tree exists, the interface contract docs
/// (RISK_POLICY_GATE.md, AGENT_CONTEXT_PACK.md) must be present and each
/// must contain its required section markers.
fn validate_interface_contract_bootstrap(
    ctx: &ValidationContext,
    repo_root: &Path,
) -> Result<(), error::DecapodError> {
    info("Interface Contract Bootstrap Gate");

    let constitution_dir = repo_root.join("constitution");
    if !constitution_dir.exists() {
        // Project repos without an embedded constitution are exempt.
        skip(
            "No constitution/ directory found (project repo); skipping interface bootstrap checks",
            ctx,
        );
        return Ok(());
    }

    let risk_policy_doc = repo_root.join("constitution/interfaces/RISK_POLICY_GATE.md");
    let context_pack_doc = repo_root.join("constitution/interfaces/AGENT_CONTEXT_PACK.md");
    for (path, label) in [
        (&risk_policy_doc, "RISK_POLICY_GATE interface"),
        (&context_pack_doc, "AGENT_CONTEXT_PACK interface"),
    ] {
        if path.is_file() {
            pass(&format!("{} present at {}", label, path.display()), ctx);
        } else {
            fail(&format!("{} missing at {}", label, path.display()), ctx);
        }
    }

    // Marker check runs only when the file exists; a missing file was
    // already reported above.
    if risk_policy_doc.is_file() {
        let content = fs::read_to_string(&risk_policy_doc).map_err(error::DecapodError::IoError)?;
        for marker in [
            "**Authority:**",
            "**Layer:** Interfaces",
            "**Binding:** Yes",
            "**Scope:**",
            "**Non-goals:**",
            "## 3. Current-Head SHA Discipline",
            "## 6. Browser Evidence Manifest (UI/Critical Flows)",
            "## 8. Truth Labels and Upgrade Path",
            "## 10. Contract Example (JSON)",
            "## Links",
        ] {
            if content.contains(marker) {
                pass(
                    &format!("RISK_POLICY_GATE includes marker: {}", marker),
                    ctx,
                );
            } else {
                fail(&format!("RISK_POLICY_GATE missing marker: {}", marker), ctx);
            }
        }
    }

    if context_pack_doc.is_file() {
        let content =
            fs::read_to_string(&context_pack_doc).map_err(error::DecapodError::IoError)?;
        for marker in [
            "**Authority:**",
            "**Layer:** Interfaces",
            "**Binding:** Yes",
            "**Scope:**",
            "**Non-goals:**",
            "## 2. Deterministic Load Order",
            "## 3. Mutation Authority",
            "## 4. Memory Distillation Contract",
            "## 8. Truth Labels and Upgrade Path",
            "## Links",
        ] {
            if content.contains(marker) {
                pass(
                    &format!("AGENT_CONTEXT_PACK includes marker: {}", marker),
                    ctx,
                );
            } else {
                fail(
                    &format!("AGENT_CONTEXT_PACK missing marker: {}", marker),
                    ctx,
                );
            }
        }
    }

    Ok(())
}
1003
/// Scans markdown lines for the first entry of the form `- v<version>[: ...]`
/// and returns the `<version>` token: the text between `- v` and the first
/// `:` (or end of line), trimmed. Lines where nothing follows `- v` are
/// skipped. Returns `None` when no such line exists.
fn extract_md_version(content: &str) -> Option<String> {
    content.lines().find_map(|raw| {
        let rest = raw.trim().strip_prefix("- v")?.trim();
        if rest.is_empty() {
            // `- v` with no version: keep scanning later lines.
            return None;
        }
        rest.split(':').next().map(|v| v.trim().to_string())
    })
}
1017
1018fn validate_health_purity(
1019 ctx: &ValidationContext,
1020 decapod_dir: &Path,
1021) -> Result<(), error::DecapodError> {
1022 info("Health Purity Gate");
1023 let mut files = Vec::new();
1024 collect_repo_files(decapod_dir, &mut files, ctx)?;
1025
1026 let forbidden =
1027 Regex::new(r"(?i)\(health:\s*(VERIFIED|ASSERTED|STALE|CONTRADICTED)\)").unwrap();
1028 let mut offenders = Vec::new();
1029
1030 let generated_path = decapod_dir.join(".decapod").join("generated");
1031
1032 for path in files {
1033 if path.extension().is_some_and(|e| e == "md") {
1034 if path.starts_with(&generated_path) {
1036 continue;
1037 }
1038
1039 let content = fs::read_to_string(&path).unwrap_or_default();
1040 if forbidden.is_match(&content).unwrap_or(false) {
1041 offenders.push(path);
1042 }
1043 }
1044 }
1045
1046 if offenders.is_empty() {
1047 pass(
1048 "No manual health status values found in authoritative docs",
1049 ctx,
1050 );
1051 } else {
1052 fail(
1053 &format!(
1054 "Manual health values found in non-generated files: {:?}",
1055 offenders
1056 ),
1057 ctx,
1058 );
1059 }
1060 Ok(())
1061}
1062
1063fn validate_project_scoped_state(
1064 store: &Store,
1065 ctx: &ValidationContext,
1066 decapod_dir: &Path,
1067) -> Result<(), error::DecapodError> {
1068 info("Project-Scoped State Gate");
1069 if store.kind != StoreKind::Repo {
1070 skip("Not in repo mode; skipping state scoping check", ctx);
1071 return Ok(());
1072 }
1073
1074 let mut offenders = Vec::new();
1076 for entry in fs::read_dir(decapod_dir).map_err(error::DecapodError::IoError)? {
1077 let entry = entry.map_err(error::DecapodError::IoError)?;
1078 let path = entry.path();
1079 if path.is_file() {
1080 let ext = path.extension().and_then(|s| s.to_str()).unwrap_or("");
1081 if matches!(ext, "db" | "jsonl") {
1082 offenders.push(path);
1083 }
1084 }
1085 }
1086
1087 if offenders.is_empty() {
1088 pass("All state is correctly scoped within .decapod/", ctx);
1089 } else {
1090 fail(
1091 &format!(
1092 "Found Decapod state files outside .decapod/: {:?}",
1093 offenders
1094 ),
1095 ctx,
1096 );
1097 }
1098 Ok(())
1099}
1100
/// Gate (repo mode only): `.gitignore` must carry the Decapod rules, and
/// git-tracked files under `.decapod/generated` / `.decapod/data` must be
/// limited to an explicit whitelist (exact paths plus a few sanctioned
/// subtree/extension patterns).
fn validate_generated_artifact_whitelist(
    store: &Store,
    ctx: &ValidationContext,
    decapod_dir: &Path,
) -> Result<(), error::DecapodError> {
    info("Generated Artifact Whitelist Gate");

    if store.kind != StoreKind::Repo {
        skip(
            "Not in repo mode; skipping generated artifact whitelist check",
            ctx,
        );
        return Ok(());
    }

    // Every scaffolded gitignore rule must survive verbatim (trimmed).
    let gitignore_path = decapod_dir.join(".gitignore");
    let gitignore = fs::read_to_string(&gitignore_path).map_err(error::DecapodError::IoError)?;
    for rule in DECAPOD_GITIGNORE_RULES {
        if gitignore.lines().any(|line| line.trim() == *rule) {
            pass(&format!("Gitignore contains required rule '{}'", rule), ctx);
        } else {
            fail(
                &format!(
                    "Missing .gitignore rule '{}' for generated/data whitelist enforcement",
                    rule
                ),
                ctx,
            );
        }
    }

    // Ask git which generated/data files are actually tracked.
    let output = std::process::Command::new("git")
        .arg("-C")
        .arg(decapod_dir)
        .args(["ls-files", ".decapod/generated", ".decapod/data"])
        .output();

    // If git is unavailable or errors, downgrade to a warning — this gate
    // cannot be evaluated without a working git checkout.
    let output = match output {
        Ok(o) if o.status.success() => o,
        Ok(_) | Err(_) => {
            warn(
                "Unable to evaluate tracked generated artifacts via git ls-files; skipping tracked whitelist check",
                ctx,
            );
            return Ok(());
        }
    };

    // Exact tracked paths that are always allowed.
    let allowed_tracked = [
        ".decapod/generated/Dockerfile",
        ".decapod/data/knowledge.promotions.jsonl",
        ".decapod/generated/specs/.manifest.json",
        ".decapod/generated/policy/context_capsule_policy.json",
        ".decapod/generated/artifacts/provenance/kcr_trend.jsonl",
    ];
    let mut offenders = Vec::new();
    for line in String::from_utf8_lossy(&output.stdout).lines() {
        let path = line.trim();
        if path.is_empty() {
            continue;
        }
        // Pattern allowances: JSON under context/ and provenance/, markdown
        // under specs/. The `/../` guard rejects traversal-looking paths.
        let is_allowed_exact = allowed_tracked.iter().any(|allowed| allowed == &path);
        let is_allowed_context_json = path.starts_with(".decapod/generated/context/")
            && path.ends_with(".json")
            && !path.contains("/../");
        let is_allowed_provenance_json = path
            .starts_with(".decapod/generated/artifacts/provenance/")
            && path.ends_with(".json")
            && !path.contains("/../");
        let is_allowed_specs_md = path.starts_with(".decapod/generated/specs/")
            && path.ends_with(".md")
            && !path.contains("/../");
        if !is_allowed_exact
            && !is_allowed_context_json
            && !is_allowed_provenance_json
            && !is_allowed_specs_md
        {
            offenders.push(path.to_string());
        }
    }

    if offenders.is_empty() {
        pass(
            "Tracked generated artifacts are restricted to the whitelist",
            ctx,
        );
    } else {
        fail(
            &format!(
                "Tracked non-whitelisted generated artifacts found: {:?}. Keep generated files ignored unless explicitly allowlisted.",
                offenders
            ),
            ctx,
        );
    }

    Ok(())
}
1199
1200fn validate_project_config_toml(
1201 ctx: &ValidationContext,
1202 repo_root: &Path,
1203) -> Result<(), error::DecapodError> {
1204 info("Project Config Gate");
1205 let config_path = repo_root.join(".decapod").join("config.toml");
1206 if !config_path.exists() {
1207 warn(
1208 "Missing .decapod/config.toml; rerun `decapod init` to scaffold repo context configuration.",
1209 ctx,
1210 );
1211 return Ok(());
1212 }
1213 let raw = fs::read_to_string(&config_path).map_err(error::DecapodError::IoError)?;
1214 let value: toml::Value = toml::from_str(&raw).map_err(|e| {
1215 error::DecapodError::ValidationError(format!("Invalid .decapod/config.toml syntax: {}", e))
1216 })?;
1217 let schema_version = value
1218 .get("schema_version")
1219 .and_then(|v| v.as_str())
1220 .unwrap_or("");
1221 if schema_version == "1.0.0" {
1222 pass("Project config schema_version is valid (1.0.0)", ctx);
1223 } else {
1224 fail(
1225 "Project config schema_version must be 1.0.0 in .decapod/config.toml",
1226 ctx,
1227 );
1228 }
1229 if value.get("repo").is_some() && value.get("init").is_some() {
1230 pass(
1231 "Project config contains required [repo] and [init] tables",
1232 ctx,
1233 );
1234 } else {
1235 fail(
1236 "Project config missing required [repo] or [init] table",
1237 ctx,
1238 );
1239 }
1240 let repo_table = value.get("repo").and_then(|v| v.as_table());
1241 let has_intent_anchor = repo_table
1242 .and_then(|t| t.get("product_summary"))
1243 .and_then(|v| v.as_str())
1244 .map(|s| !s.trim().is_empty())
1245 .unwrap_or(false);
1246 if has_intent_anchor {
1247 pass(
1248 "Project config captures repo.product_summary intent anchor",
1249 ctx,
1250 );
1251 } else {
1252 fail(
1253 "Project config missing repo.product_summary (intent anchor).",
1254 ctx,
1255 );
1256 }
1257
1258 let has_architecture_direction = repo_table
1259 .and_then(|t| {
1260 t.get("architecture_direction")
1261 .or_else(|| t.get("architecture_intent"))
1262 })
1263 .and_then(|v| v.as_str())
1264 .map(|s| !s.trim().is_empty())
1265 .unwrap_or(false);
1266 if has_architecture_direction {
1267 pass("Project config captures repo.architecture_direction", ctx);
1268 } else {
1269 fail("Project config missing repo.architecture_direction.", ctx);
1270 }
1271
1272 let has_done_criteria = repo_table
1273 .and_then(|t| t.get("done_criteria"))
1274 .and_then(|v| v.as_str())
1275 .map(|s| !s.trim().is_empty())
1276 .unwrap_or(false);
1277 if has_done_criteria {
1278 pass(
1279 "Project config captures repo.done_criteria proof target",
1280 ctx,
1281 );
1282 } else {
1283 warn(
1284 "Project config missing repo.done_criteria; init should capture explicit done evidence.",
1285 ctx,
1286 );
1287 }
1288 Ok(())
1289}
1290
1291fn validate_project_specs_docs(
1292 ctx: &ValidationContext,
1293 repo_root: &Path,
1294) -> Result<(), error::DecapodError> {
1295 info("Project Specs Architecture Gate");
1296
1297 let specs_dir = repo_root.join(LOCAL_PROJECT_SPECS_DIR);
1298 if !specs_dir.exists() {
1299 warn(
1300 "Project specs directory missing (.decapod/generated/specs/). Run `decapod init --force` to scaffold intent/architecture docs.",
1301 ctx,
1302 );
1303 return Ok(());
1304 }
1305
1306 for spec in LOCAL_PROJECT_SPECS {
1307 let path = repo_root.join(spec.path);
1308 let file = spec.path;
1309 if path.exists() {
1310 pass(&format!("Project specs file present: {}", file), ctx);
1311 } else if matches!(
1312 file,
1313 LOCAL_PROJECT_SPECS_SEMANTICS
1314 | LOCAL_PROJECT_SPECS_OPERATIONS
1315 | LOCAL_PROJECT_SPECS_SECURITY
1316 ) {
1317 warn(
1318 &format!(
1319 "Recommended project spec missing (scaffold-v2+): {}. Run `decapod init --force` to add the expanded spec surface.",
1320 file
1321 ),
1322 ctx,
1323 );
1324 } else {
1325 fail(
1326 &format!("Missing required project specs file: {}", file),
1327 ctx,
1328 );
1329 }
1330 }
1331
1332 let manifest_path = repo_root.join(LOCAL_PROJECT_SPECS_MANIFEST);
1333 let manifest = read_specs_manifest(repo_root)?;
1334 if manifest.is_none() {
1335 warn(
1336 &format!(
1337 "TASK: Project specs manifest missing at {}. Run `decapod init --force` to generate scaffold metadata, then hydrate `.decapod/generated/specs/*.md`.",
1338 manifest_path.display()
1339 ),
1340 ctx,
1341 );
1342 }
1343 if let Some(manifest) = manifest {
1344 if manifest.schema_version == LOCAL_PROJECT_SPECS_MANIFEST_SCHEMA {
1345 pass("Project specs manifest schema is current", ctx);
1346 } else {
1347 warn(
1348 &format!(
1349 "TASK: Project specs manifest schema mismatch (found {}, expected {}). Re-run `decapod init --force` then refresh specs.",
1350 manifest.schema_version, LOCAL_PROJECT_SPECS_MANIFEST_SCHEMA
1351 ),
1352 ctx,
1353 );
1354 }
1355
1356 let mut untouched_templates = Vec::new();
1357 for entry in &manifest.files {
1358 let path = repo_root.join(&entry.path);
1359 if !path.exists() {
1360 continue;
1361 }
1362 let body = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
1363 let current_hash = hash_text(&body);
1364 if current_hash == entry.template_hash {
1365 untouched_templates.push(entry.path.clone());
1366 }
1367 }
1368 if untouched_templates.is_empty() {
1369 pass(
1370 "Project specs are not raw scaffold templates (content evolved)",
1371 ctx,
1372 );
1373 } else {
1374 warn(
1375 &format!(
1376 "TASK: Generated specs still match scaffold template for {:?}. Hydrate these docs with repo-specific details before implementation promotion.",
1377 untouched_templates
1378 ),
1379 ctx,
1380 );
1381 }
1382
1383 let current_repo_fp = repo_signal_fingerprint(repo_root)?;
1384 if current_repo_fp == manifest.repo_signal_fingerprint {
1385 pass(
1386 "Project specs manifest repo-signal fingerprint is current",
1387 ctx,
1388 );
1389 } else {
1390 warn(
1391 "TASK: Significant repo surfaces changed since specs scaffold/hydration. Review and update INTENT/ARCHITECTURE/INTERFACES/VALIDATION accordingly.",
1392 ctx,
1393 );
1394 }
1395 }
1396
1397 let architecture_path = repo_root.join(LOCAL_PROJECT_SPECS_ARCHITECTURE);
1398 if architecture_path.exists() {
1399 let architecture =
1400 fs::read_to_string(&architecture_path).map_err(error::DecapodError::IoError)?;
1401 let required_new = [
1402 "# Architecture",
1403 "## Direction",
1404 "## Current Facts",
1405 "## Topology",
1406 "## Execution Path",
1407 "## Concurrency and Runtime Model",
1408 "## Deployment Topology",
1409 "## Data and Contracts",
1410 "## Delivery Plan",
1411 "## Risks and Mitigations",
1412 ];
1413 let required_legacy = [
1414 "# Architecture",
1415 "## Integrated Surface",
1416 "## Implementation Strategy",
1417 "## System Topology",
1418 "## Service Contracts",
1419 "## Delivery Plan",
1420 "## Risks and Mitigations",
1421 ];
1422 let has_new = required_new.iter().all(|s| architecture.contains(s));
1423 let has_legacy = required_legacy.iter().all(|s| architecture.contains(s));
1424 if has_new || has_legacy {
1425 pass(
1426 "Architecture spec contains required engineering sections",
1427 ctx,
1428 );
1429 } else {
1430 fail(
1431 "Architecture spec missing required section groups (expected new or legacy scaffold structure).",
1432 ctx,
1433 );
1434 }
1435
1436 if architecture.contains("```mermaid") || architecture.contains("```text") {
1437 pass(
1438 "Architecture spec contains required topology diagram block",
1439 ctx,
1440 );
1441 } else {
1442 fail(
1443 "Architecture spec missing topology diagram block (`mermaid` or `text` fenced block)",
1444 ctx,
1445 );
1446 }
1447 if architecture.contains(
1448 "Describe the architecture in 5-8 dense sentences focused on deployment reality, system boundaries, and operational risks.",
1449 ) {
1450 fail(
1451 "Architecture spec still has placeholder executive summary; derive architecture from explicit intent.",
1452 ctx,
1453 );
1454 } else {
1455 pass("Architecture spec has non-placeholder executive summary", ctx);
1456 }
1457
1458 let dense_line_count = architecture
1459 .lines()
1460 .filter(|line| !line.trim().is_empty())
1461 .count();
1462 if dense_line_count >= 35 {
1463 pass("Architecture spec meets minimum density threshold", ctx);
1464 } else {
1465 fail(
1466 "Architecture spec is too sparse (<35 non-empty lines); expand it to an engineer-ready overview",
1467 ctx,
1468 );
1469 }
1470 }
1471
1472 let intent_path = repo_root.join(LOCAL_PROJECT_SPECS_INTENT);
1473 if intent_path.exists() {
1474 let intent = fs::read_to_string(intent_path).map_err(error::DecapodError::IoError)?;
1475 let required_intent_sections = [
1476 "# Intent",
1477 "## Product Outcome",
1478 "## Scope",
1479 "## Constraints",
1480 "## Acceptance Criteria",
1481 ];
1482 let mut missing = Vec::new();
1483 for section in required_intent_sections {
1484 if !intent.contains(section) {
1485 missing.push(section);
1486 }
1487 }
1488 if missing.is_empty() {
1489 pass("Intent spec contains required planning sections", ctx);
1490 } else {
1491 fail(
1492 &format!("Intent spec missing required sections: {:?}", missing),
1493 ctx,
1494 );
1495 }
1496 if intent.contains("Define the user-visible outcome in one paragraph.") {
1497 fail(
1498 "Intent spec still has placeholder product outcome; capture explicit intent before implementation.",
1499 ctx,
1500 );
1501 } else if intent.contains("against explicit user intent with proof-backed completion.") {
1502 warn(
1503 "TASK: Intent outcome still reads as generic scaffold text; replace it with explicit user/problem outcome.",
1504 ctx,
1505 );
1506 } else {
1507 pass("Intent spec has non-placeholder product outcome", ctx);
1508 }
1509 }
1510
1511 let interfaces_path = repo_root.join(LOCAL_PROJECT_SPECS_INTERFACES);
1512 if interfaces_path.exists() {
1513 let interfaces =
1514 fs::read_to_string(&interfaces_path).map_err(error::DecapodError::IoError)?;
1515 for section in [
1516 "# Interfaces",
1517 "## Inbound Contracts",
1518 "## Outbound Dependencies",
1519 "## Data Ownership",
1520 "## Failure Semantics",
1521 ] {
1522 if !interfaces.contains(section) {
1523 fail(
1524 &format!("Interfaces spec missing required section: {}", section),
1525 ctx,
1526 );
1527 }
1528 }
1529 pass("Interfaces spec contains required contract sections", ctx);
1530 }
1531
1532 let validation_path = repo_root.join(LOCAL_PROJECT_SPECS_VALIDATION);
1533 if validation_path.exists() {
1534 let validation =
1535 fs::read_to_string(&validation_path).map_err(error::DecapodError::IoError)?;
1536 for section in [
1537 "# Validation",
1538 "## Proof Surfaces",
1539 "## Promotion Gates",
1540 "## Evidence Artifacts",
1541 "## Regression Guardrails",
1542 ] {
1543 if !validation.contains(section) {
1544 fail(
1545 &format!("Validation spec missing required section: {}", section),
1546 ctx,
1547 );
1548 }
1549 }
1550 pass("Validation spec contains required proof/gate sections", ctx);
1551 if validation.contains("Add repository-specific test command(s) here.") {
1552 warn(
1553 "TASK: Validation spec still has placeholder test command guidance; add concrete test/integration commands.",
1554 ctx,
1555 );
1556 }
1557 }
1558
1559 let semantics_path = repo_root.join(LOCAL_PROJECT_SPECS_SEMANTICS);
1560 if semantics_path.exists() {
1561 let semantics =
1562 fs::read_to_string(&semantics_path).map_err(error::DecapodError::IoError)?;
1563 for section in ["# Semantics", "## State Machines", "## Invariants"] {
1564 if !semantics.contains(section) {
1565 fail(
1566 &format!("Semantics spec missing required section: {}", section),
1567 ctx,
1568 );
1569 }
1570 }
1571 pass("Semantics spec contains required sections", ctx);
1572 }
1573
1574 let operations_path = repo_root.join(LOCAL_PROJECT_SPECS_OPERATIONS);
1575 if operations_path.exists() {
1576 let operations =
1577 fs::read_to_string(&operations_path).map_err(error::DecapodError::IoError)?;
1578 for section in [
1579 "# Operations",
1580 "## Service Level Objectives",
1581 "## Monitoring",
1582 "## Incident Response",
1583 ] {
1584 if !operations.contains(section) {
1585 fail(
1586 &format!("Operations spec missing required section: {}", section),
1587 ctx,
1588 );
1589 }
1590 }
1591 pass("Operations spec contains required sections", ctx);
1592 }
1593
1594 let security_path = repo_root.join(LOCAL_PROJECT_SPECS_SECURITY);
1595 if security_path.exists() {
1596 let security = fs::read_to_string(&security_path).map_err(error::DecapodError::IoError)?;
1597 for section in [
1598 "# Security",
1599 "## Threat Model",
1600 "## Authentication",
1601 "## Authorization",
1602 "## Data Classification",
1603 ] {
1604 if !security.contains(section) {
1605 fail(
1606 &format!("Security spec missing required section: {}", section),
1607 ctx,
1608 );
1609 }
1610 }
1611 pass("Security spec contains required sections", ctx);
1612 }
1613
1614 Ok(())
1615}
1616
1617fn validate_machine_contract(
1618 ctx: &ValidationContext,
1619 repo_root: &Path,
1620) -> Result<(), error::DecapodError> {
1621 info("Machine Contract Drift Detection Gate");
1622
1623 let binary_path =
1624 std::env::current_exe().map_err(|e| error::DecapodError::ValidationError(e.to_string()))?;
1625 let capabilities_output = std::process::Command::new(&binary_path)
1626 .current_dir(repo_root)
1627 .args(["capabilities", "--format", "json"])
1628 .output()
1629 .map_err(|e| {
1630 error::DecapodError::ValidationError(format!("Failed to run capabilities: {}", e))
1631 })?;
1632
1633 if !capabilities_output.status.success() {
1634 pass(
1635 "Could not verify machine contract (capabilities failed)",
1636 ctx,
1637 );
1638 return Ok(());
1639 }
1640
1641 let capabilities_json: serde_json::Value =
1642 serde_json::from_str(&String::from_utf8_lossy(&capabilities_output.stdout)).map_err(
1643 |e| error::DecapodError::ValidationError(format!("Invalid capabilities JSON: {}", e)),
1644 )?;
1645
1646 let interlock_codes = capabilities_json["interlock_codes"]
1647 .as_array()
1648 .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect::<Vec<_>>())
1649 .unwrap_or_default();
1650
1651 let required_interlock = [
1652 "workspace_required",
1653 "verification_required",
1654 "store_boundary_violation",
1655 ];
1656
1657 let mut missing_interlock = Vec::new();
1658 for code in required_interlock {
1659 if !interlock_codes.contains(&code) {
1660 missing_interlock.push(code);
1661 }
1662 }
1663
1664 if missing_interlock.is_empty() {
1665 pass("Machine contract interlock codes match binary", ctx);
1666 } else {
1667 fail(
1668 &format!(
1669 "Binary capabilities missing interlock codes: {:?}. Binary and specs are out of sync.",
1670 missing_interlock
1671 ),
1672 ctx,
1673 );
1674 }
1675
1676 let capabilities_list = capabilities_json["capabilities"]
1677 .as_array()
1678 .map(|arr| {
1679 arr.iter()
1680 .filter_map(|v| v["name"].as_str())
1681 .collect::<Vec<_>>()
1682 })
1683 .unwrap_or_default();
1684
1685 let required_caps = [
1686 "daemonless",
1687 "deterministic",
1688 "context.resolve",
1689 "validate.run",
1690 "workspace.ensure",
1691 "preflight.check",
1692 "impact.predict",
1693 ];
1694 let mut missing_caps = Vec::new();
1695 for cap in required_caps {
1696 if !capabilities_list.contains(&cap) {
1697 missing_caps.push(cap);
1698 }
1699 }
1700
1701 if missing_caps.is_empty() {
1702 pass("Machine contract capabilities match binary", ctx);
1703 } else {
1704 warn(
1705 &format!(
1706 "Binary capabilities missing expected capabilities: {:?}",
1707 missing_caps
1708 ),
1709 ctx,
1710 );
1711 }
1712
1713 Ok(())
1714}
1715
1716fn validate_spec_drift(
1717 ctx: &ValidationContext,
1718 repo_root: &Path,
1719) -> Result<(), error::DecapodError> {
1720 info("Spec Drift Detection Gate (Hygiene)");
1721
1722 let interfaces_path = repo_root.join(LOCAL_PROJECT_SPECS_INTERFACES);
1723 if !interfaces_path.exists() {
1724 pass("No INTERFACES.md to check for hygiene", ctx);
1725 return Ok(());
1726 }
1727
1728 let interfaces = fs::read_to_string(&interfaces_path).map_err(error::DecapodError::IoError)?;
1729
1730 warn(
1731 "Spec markdown drift checks are hygiene-only. Use validate_machine_contract for authoritative governance.",
1732 ctx,
1733 );
1734
1735 let key_sections = ["# Interfaces", "## Inbound Contracts", "## Data Ownership"];
1736
1737 let mut missing_sections = Vec::new();
1738 for section in key_sections {
1739 if !interfaces.contains(section) {
1740 missing_sections.push(section);
1741 }
1742 }
1743
1744 if missing_sections.is_empty() {
1745 pass("INTERFACES.md has structural sections", ctx);
1746 } else {
1747 warn(
1748 &format!("INTERFACES.md missing sections: {:?}", missing_sections),
1749 ctx,
1750 );
1751 }
1752
1753 for (path, name, sections) in [
1754 (
1755 LOCAL_PROJECT_SPECS_SEMANTICS,
1756 "SEMANTICS.md",
1757 vec!["# Semantics", "## State Machines", "## Invariants"],
1758 ),
1759 (
1760 LOCAL_PROJECT_SPECS_OPERATIONS,
1761 "OPERATIONS.md",
1762 vec![
1763 "# Operations",
1764 "## Service Level Objectives",
1765 "## Monitoring",
1766 "## Incident Response",
1767 ],
1768 ),
1769 (
1770 LOCAL_PROJECT_SPECS_SECURITY,
1771 "SECURITY.md",
1772 vec![
1773 "# Security",
1774 "## Threat Model",
1775 "## Authentication",
1776 "## Authorization",
1777 "## Data Classification",
1778 ],
1779 ),
1780 ] {
1781 let path = repo_root.join(path);
1782 if !path.exists() {
1783 warn(
1784 &format!(
1785 "{} missing (hygiene check only). Run `decapod init --force` to scaffold it.",
1786 name
1787 ),
1788 ctx,
1789 );
1790 continue;
1791 }
1792 let body = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
1793 let missing = sections
1794 .iter()
1795 .filter(|section| !body.contains(**section))
1796 .copied()
1797 .collect::<Vec<_>>();
1798 if missing.is_empty() {
1799 pass(&format!("{} has structural sections", name), ctx);
1800 } else {
1801 warn(&format!("{} missing sections: {:?}", name, missing), ctx);
1802 }
1803 }
1804
1805 Ok(())
1806}
1807
1808fn validate_workunit_manifests_if_present(
1809 ctx: &ValidationContext,
1810 repo_root: &Path,
1811) -> Result<(), error::DecapodError> {
1812 info("Work Unit Manifest Gate");
1813
1814 let workunits_dir = repo_root
1815 .join(".decapod")
1816 .join("governance")
1817 .join("workunits");
1818 if !workunits_dir.exists() {
1819 skip("No workunit manifests found; skipping workunit gate", ctx);
1820 return Ok(());
1821 }
1822
1823 let mut files = 0usize;
1824 for entry in fs::read_dir(&workunits_dir).map_err(error::DecapodError::IoError)? {
1825 let entry = entry.map_err(error::DecapodError::IoError)?;
1826 let path = entry.path();
1827 if path.extension().and_then(|s| s.to_str()) != Some("json") {
1828 continue;
1829 }
1830 files += 1;
1831 let raw = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
1832 let parsed: WorkUnitManifest = serde_json::from_str(&raw).map_err(|e| {
1833 error::DecapodError::ValidationError(format!(
1834 "invalid workunit manifest {}: {}",
1835 path.display(),
1836 e
1837 ))
1838 })?;
1839 let _ = parsed.canonical_json_bytes().map_err(|e| {
1840 error::DecapodError::ValidationError(format!(
1841 "workunit canonicalization failed for {}: {}",
1842 path.display(),
1843 e
1844 ))
1845 })?;
1846 if parsed.status == WorkUnitStatus::Verified {
1847 workunit::validate_verified_manifest(&parsed).map_err(|e| {
1848 error::DecapodError::ValidationError(format!(
1849 "invalid VERIFIED workunit manifest: {} ({})",
1850 e,
1851 path.display()
1852 ))
1853 })?;
1854 workunit::verify_capsule_policy_lineage_for_task(repo_root, &parsed).map_err(|e| {
1855 error::DecapodError::ValidationError(format!(
1856 "invalid VERIFIED workunit manifest: {} ({})",
1857 e,
1858 path.display()
1859 ))
1860 })?;
1861 }
1862 }
1863
1864 pass(
1865 &format!(
1866 "Workunit manifest schema check passed for {} file(s)",
1867 files
1868 ),
1869 ctx,
1870 );
1871 Ok(())
1872}
1873
1874fn validate_context_capsules_if_present(
1875 ctx: &ValidationContext,
1876 repo_root: &Path,
1877) -> Result<(), error::DecapodError> {
1878 info("Context Capsule Gate");
1879
1880 let capsules_dir = repo_root.join(".decapod").join("generated").join("context");
1881 if !capsules_dir.exists() {
1882 skip(
1883 "No context capsules found; skipping context capsule gate",
1884 ctx,
1885 );
1886 return Ok(());
1887 }
1888
1889 let mut files = 0usize;
1890 for entry in fs::read_dir(&capsules_dir).map_err(error::DecapodError::IoError)? {
1891 let entry = entry.map_err(error::DecapodError::IoError)?;
1892 let path = entry.path();
1893 if path.extension().and_then(|s| s.to_str()) != Some("json") {
1894 continue;
1895 }
1896 files += 1;
1897 let raw = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
1898 let parsed: DeterministicContextCapsule = serde_json::from_str(&raw).map_err(|e| {
1899 error::DecapodError::ValidationError(format!(
1900 "invalid context capsule {}: {}",
1901 path.display(),
1902 e
1903 ))
1904 })?;
1905 let expected = parsed.computed_hash_hex().map_err(|e| {
1906 error::DecapodError::ValidationError(format!(
1907 "context capsule hash computation failed for {}: {}",
1908 path.display(),
1909 e
1910 ))
1911 })?;
1912 if parsed.capsule_hash != expected {
1913 fail(
1914 &format!(
1915 "Context capsule hash mismatch in {} (expected {}, got {})",
1916 path.display(),
1917 expected,
1918 parsed.capsule_hash
1919 ),
1920 ctx,
1921 );
1922 }
1923 }
1924
1925 pass(
1926 &format!("Context capsule integrity checked for {} file(s)", files),
1927 ctx,
1928 );
1929 Ok(())
1930}
1931
1932fn validate_context_capsule_policy_contract(
1933 ctx: &ValidationContext,
1934 repo_root: &Path,
1935) -> Result<(), error::DecapodError> {
1936 info("Context Capsule Policy Gate");
1937 let (policy, path) = match capsule_policy::load_policy_contract(repo_root) {
1938 Ok(v) => v,
1939 Err(error::DecapodError::ValidationError(msg))
1940 if msg.starts_with("CAPSULE_POLICY_MISSING:") =>
1941 {
1942 warn(
1943 "Context capsule policy contract missing; run `decapod init --force` to scaffold .decapod/generated/policy/context_capsule_policy.json",
1944 ctx,
1945 );
1946 return Ok(());
1947 }
1948 Err(e) => return Err(e),
1949 };
1950 if policy.schema_version != POLICY_SCHEMA_VERSION {
1951 fail(
1952 &format!(
1953 "Context capsule policy schema mismatch at {} (actual={}, expected={})",
1954 path.display(),
1955 policy.schema_version,
1956 POLICY_SCHEMA_VERSION
1957 ),
1958 ctx,
1959 );
1960 }
1961 if !policy.tiers.contains_key(&policy.default_risk_tier) {
1962 fail(
1963 &format!(
1964 "Context capsule policy default_risk_tier '{}' is not declared in tiers",
1965 policy.default_risk_tier
1966 ),
1967 ctx,
1968 );
1969 }
1970 for (tier, rule) in &policy.tiers {
1971 if rule.allowed_scopes.is_empty() {
1972 fail(
1973 &format!(
1974 "Context capsule policy tier '{}' has no allowed_scopes (fail closed)",
1975 tier
1976 ),
1977 ctx,
1978 );
1979 }
1980 if rule.max_limit == 0 {
1981 fail(
1982 &format!(
1983 "Context capsule policy tier '{}' has max_limit=0 (invalid)",
1984 tier
1985 ),
1986 ctx,
1987 );
1988 }
1989 }
1990 pass(
1991 &format!(
1992 "Context capsule policy contract parsed and validated ({})",
1993 path.display()
1994 ),
1995 ctx,
1996 );
1997 Ok(())
1998}
1999
2000fn validate_knowledge_promotions_if_present(
2001 ctx: &ValidationContext,
2002 repo_root: &Path,
2003) -> Result<(), error::DecapodError> {
2004 info("Knowledge Promotion Ledger Gate");
2005
2006 let ledger = repo_root
2007 .join(".decapod")
2008 .join("data")
2009 .join("knowledge.promotions.jsonl");
2010 if !ledger.exists() {
2011 skip(
2012 "No knowledge promotion ledger found; skipping promotion ledger gate",
2013 ctx,
2014 );
2015 return Ok(());
2016 }
2017
2018 let raw = fs::read_to_string(&ledger).map_err(error::DecapodError::IoError)?;
2019 for (idx, line) in raw.lines().enumerate() {
2020 if line.trim().is_empty() {
2021 continue;
2022 }
2023 let v: serde_json::Value = serde_json::from_str(line).map_err(|e| {
2024 error::DecapodError::ValidationError(format!(
2025 "invalid promotion ledger line {} in {}: {}",
2026 idx + 1,
2027 ledger.display(),
2028 e
2029 ))
2030 })?;
2031 for key in [
2032 "event_id",
2033 "ts",
2034 "source_entry_id",
2035 "target_class",
2036 "evidence_refs",
2037 "approved_by",
2038 "actor",
2039 "reason",
2040 ] {
2041 if v.get(key).is_none() {
2042 fail(
2043 &format!(
2044 "Knowledge promotion ledger missing '{}' on line {} ({})",
2045 key,
2046 idx + 1,
2047 ledger.display()
2048 ),
2049 ctx,
2050 );
2051 }
2052 }
2053
2054 if v.get("target_class").and_then(|x| x.as_str()) != Some("procedural") {
2055 fail(
2056 &format!(
2057 "Knowledge promotion ledger requires target_class='procedural' on line {} ({})",
2058 idx + 1,
2059 ledger.display()
2060 ),
2061 ctx,
2062 );
2063 }
2064
2065 let evidence_ok = v
2066 .get("evidence_refs")
2067 .and_then(|x| x.as_array())
2068 .map(|arr| {
2069 !arr.is_empty()
2070 && arr
2071 .iter()
2072 .all(|item| item.as_str().map(|s| !s.trim().is_empty()).unwrap_or(false))
2073 })
2074 .unwrap_or(false);
2075 if !evidence_ok {
2076 fail(
2077 &format!(
2078 "Knowledge promotion ledger evidence_refs must be a non-empty string array on line {} ({})",
2079 idx + 1,
2080 ledger.display()
2081 ),
2082 ctx,
2083 );
2084 }
2085
2086 for key in ["approved_by", "actor", "reason"] {
2087 let non_empty = v
2088 .get(key)
2089 .and_then(|x| x.as_str())
2090 .map(|s| !s.trim().is_empty())
2091 .unwrap_or(false);
2092 if !non_empty {
2093 fail(
2094 &format!(
2095 "Knowledge promotion ledger '{}' must be a non-empty string on line {} ({})",
2096 key,
2097 idx + 1,
2098 ledger.display()
2099 ),
2100 ctx,
2101 );
2102 }
2103 }
2104 }
2105
2106 pass("Knowledge promotion ledger schema check passed", ctx);
2107 Ok(())
2108}
2109
2110fn validate_skill_cards_if_present(
2111 ctx: &ValidationContext,
2112 repo_root: &Path,
2113) -> Result<(), error::DecapodError> {
2114 info("Skill Card Artifact Gate");
2115
2116 let dir = repo_root.join(".decapod").join("skills");
2117 if !dir.exists() {
2118 skip("No skill cards found; skipping skill card gate", ctx);
2119 return Ok(());
2120 }
2121
2122 let mut files = 0usize;
2123 for entry in fs::read_dir(&dir).map_err(error::DecapodError::IoError)? {
2124 let entry = entry.map_err(error::DecapodError::IoError)?;
2125 let path = entry.path();
2126 if path.extension().and_then(|s| s.to_str()) != Some("json") {
2127 continue;
2128 }
2129 files += 1;
2130 let raw = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
2131 let parsed: SkillCard = serde_json::from_str(&raw).map_err(|e| {
2132 error::DecapodError::ValidationError(format!(
2133 "invalid skill card {}: {}",
2134 path.display(),
2135 e
2136 ))
2137 })?;
2138 if parsed.kind != "skill_card" || parsed.schema_version != "1.0.0" {
2139 fail(
2140 &format!(
2141 "skill card {} has invalid kind/schema_version",
2142 path.display()
2143 ),
2144 ctx,
2145 );
2146 continue;
2147 }
2148 let mut normalized = parsed.clone();
2149 let expected = parsed.card_hash.clone();
2150 normalized.card_hash.clear();
2151 normalized.generated_at.clear();
2152 let canonical = serde_json::to_vec(&normalized).map_err(|e| {
2153 error::DecapodError::ValidationError(format!(
2154 "skill card canonicalization failed for {}: {}",
2155 path.display(),
2156 e
2157 ))
2158 })?;
2159 let actual = {
2160 use sha2::{Digest, Sha256};
2161 let mut hasher = Sha256::new();
2162 hasher.update(&canonical);
2163 format!("{:x}", hasher.finalize())
2164 };
2165 if actual != expected {
2166 fail(
2167 &format!(
2168 "skill card hash mismatch in {} (expected {}, got {})",
2169 path.display(),
2170 expected,
2171 actual
2172 ),
2173 ctx,
2174 );
2175 }
2176 }
2177
2178 pass(
2179 &format!("Skill card integrity checked for {} file(s)", files),
2180 ctx,
2181 );
2182 Ok(())
2183}
2184
2185fn validate_skill_resolutions_if_present(
2186 ctx: &ValidationContext,
2187 repo_root: &Path,
2188) -> Result<(), error::DecapodError> {
2189 info("Skill Resolution Artifact Gate");
2190
2191 let dir = repo_root.join(".decapod").join("generated").join("skills");
2192 if !dir.exists() {
2193 skip(
2194 "No skill resolution artifacts found; skipping skill resolution gate",
2195 ctx,
2196 );
2197 return Ok(());
2198 }
2199
2200 let mut files = 0usize;
2201 for entry in fs::read_dir(&dir).map_err(error::DecapodError::IoError)? {
2202 let entry = entry.map_err(error::DecapodError::IoError)?;
2203 let path = entry.path();
2204 if path.extension().and_then(|s| s.to_str()) != Some("json") {
2205 continue;
2206 }
2207 files += 1;
2208 let raw = fs::read_to_string(&path).map_err(error::DecapodError::IoError)?;
2209 let parsed: SkillResolution = serde_json::from_str(&raw).map_err(|e| {
2210 error::DecapodError::ValidationError(format!(
2211 "invalid skill resolution {}: {}",
2212 path.display(),
2213 e
2214 ))
2215 })?;
2216 if parsed.kind != "skill_resolution" || parsed.schema_version != "1.0.0" {
2217 fail(
2218 &format!(
2219 "skill resolution {} has invalid kind/schema_version",
2220 path.display()
2221 ),
2222 ctx,
2223 );
2224 continue;
2225 }
2226 let mut normalized = parsed.clone();
2227 let expected = parsed.resolution_hash.clone();
2228 normalized.resolution_hash.clear();
2229 normalized.generated_at.clear();
2230 let canonical = serde_json::to_vec(&normalized).map_err(|e| {
2231 error::DecapodError::ValidationError(format!(
2232 "skill resolution canonicalization failed for {}: {}",
2233 path.display(),
2234 e
2235 ))
2236 })?;
2237 let actual = {
2238 use sha2::{Digest, Sha256};
2239 let mut hasher = Sha256::new();
2240 hasher.update(&canonical);
2241 format!("{:x}", hasher.finalize())
2242 };
2243 if actual != expected {
2244 fail(
2245 &format!(
2246 "skill resolution hash mismatch in {} (expected {}, got {})",
2247 path.display(),
2248 expected,
2249 actual
2250 ),
2251 ctx,
2252 );
2253 }
2254 }
2255
2256 pass(
2257 &format!("Skill resolution integrity checked for {} file(s)", files),
2258 ctx,
2259 );
2260 Ok(())
2261}
2262
2263fn validate_internalization_artifacts_if_present(
2264 ctx: &ValidationContext,
2265 repo_root: &Path,
2266) -> Result<(), error::DecapodError> {
2267 info("Internalization Artifact Gate");
2268
2269 let artifacts_dir = repo_root
2270 .join(".decapod")
2271 .join("generated")
2272 .join("artifacts")
2273 .join("internalizations");
2274 if !artifacts_dir.exists() {
2275 skip(
2276 "No internalization artifacts found; skipping internalization gate",
2277 ctx,
2278 );
2279 return Ok(());
2280 }
2281
2282 let mut files = 0usize;
2283 for entry in fs::read_dir(&artifacts_dir).map_err(error::DecapodError::IoError)? {
2284 let entry = entry.map_err(error::DecapodError::IoError)?;
2285 let path = entry.path();
2286 if !path.is_dir() {
2287 continue;
2288 }
2289 let manifest_path = path.join("manifest.json");
2290 if !manifest_path.exists() {
2291 fail(
2292 &format!(
2293 "Internalization artifact is missing manifest.json ({})",
2294 path.display()
2295 ),
2296 ctx,
2297 );
2298 continue;
2299 }
2300
2301 files += 1;
2302 let raw = fs::read_to_string(&manifest_path).map_err(error::DecapodError::IoError)?;
2303 let manifest: InternalizationManifest = serde_json::from_str(&raw).map_err(|e| {
2304 error::DecapodError::ValidationError(format!(
2305 "invalid internalization manifest {}: {}",
2306 manifest_path.display(),
2307 e
2308 ))
2309 })?;
2310
2311 if manifest.schema_version != internalize::SCHEMA_VERSION {
2312 fail(
2313 &format!(
2314 "Internalization manifest schema mismatch in {} (actual={}, expected={})",
2315 manifest_path.display(),
2316 manifest.schema_version,
2317 internalize::SCHEMA_VERSION
2318 ),
2319 ctx,
2320 );
2321 }
2322 if manifest.base_model_id.trim().is_empty() {
2323 fail(
2324 &format!(
2325 "Internalization manifest missing base_model_id ({})",
2326 manifest_path.display()
2327 ),
2328 ctx,
2329 );
2330 }
2331 if manifest.capabilities_contract.permitted_tools.is_empty() {
2332 fail(
2333 &format!(
2334 "Internalization manifest must declare permitted_tools ({})",
2335 manifest_path.display()
2336 ),
2337 ctx,
2338 );
2339 }
2340 if manifest.replay_recipe.mode == ReplayClass::Replayable
2341 && manifest.determinism_class != DeterminismClass::Deterministic
2342 {
2343 fail(
2344 &format!(
2345 "Internalization manifest claims replayable despite non-deterministic profile ({})",
2346 manifest_path.display()
2347 ),
2348 ctx,
2349 );
2350 }
2351 if manifest.determinism_class == DeterminismClass::BestEffort
2352 && (manifest.binary_hash.trim().is_empty()
2353 || manifest.runtime_fingerprint.trim().is_empty())
2354 {
2355 fail(
2356 &format!(
2357 "Best-effort internalization manifest must include binary_hash and runtime_fingerprint ({})",
2358 manifest_path.display()
2359 ),
2360 ctx,
2361 );
2362 }
2363
2364 let inspect =
2365 internalize::inspect_internalization(&repo_root.join(".decapod"), &manifest.id)
2366 .map_err(|e| {
2367 error::DecapodError::ValidationError(format!(
2368 "internalization inspect failed for {}: {}",
2369 manifest_path.display(),
2370 e
2371 ))
2372 })?;
2373 if !inspect.integrity.adapter_hash_valid {
2374 fail(
2375 &format!(
2376 "Internalization adapter hash mismatch ({})",
2377 manifest_path.display()
2378 ),
2379 ctx,
2380 );
2381 }
2382 if inspect.integrity.source_verification == "mismatch" {
2383 fail(
2384 &format!(
2385 "Internalization source hash mismatch ({})",
2386 manifest_path.display()
2387 ),
2388 ctx,
2389 );
2390 }
2391 if !inspect.integrity.replayable_claim_valid {
2392 fail(
2393 &format!(
2394 "Internalization replay metadata is inconsistent ({})",
2395 manifest_path.display()
2396 ),
2397 ctx,
2398 );
2399 }
2400 }
2401
2402 let sessions_dir = repo_root
2403 .join(".decapod")
2404 .join("generated")
2405 .join("sessions");
2406 if sessions_dir.exists() {
2407 for session_entry in fs::read_dir(&sessions_dir).map_err(error::DecapodError::IoError)? {
2408 let session_entry = session_entry.map_err(error::DecapodError::IoError)?;
2409 let mounts_dir = session_entry.path().join("internalize_mounts");
2410 if !mounts_dir.exists() {
2411 continue;
2412 }
2413 for mount_entry in fs::read_dir(&mounts_dir).map_err(error::DecapodError::IoError)? {
2414 let mount_entry = mount_entry.map_err(error::DecapodError::IoError)?;
2415 let mount_path = mount_entry.path();
2416 if mount_path.extension().and_then(|s| s.to_str()) != Some("json") {
2417 continue;
2418 }
2419 let raw = fs::read_to_string(&mount_path).map_err(error::DecapodError::IoError)?;
2420 let mount: serde_json::Value = serde_json::from_str(&raw).map_err(|e| {
2421 error::DecapodError::ValidationError(format!(
2422 "invalid internalization mount lease {}: {}",
2423 mount_path.display(),
2424 e
2425 ))
2426 })?;
2427 let lease_expires_at = mount
2428 .get("lease_expires_at")
2429 .and_then(|v| v.as_str())
2430 .unwrap_or("");
2431 if lease_expires_at.is_empty() {
2432 fail(
2433 &format!(
2434 "Internalization mount missing lease_expires_at ({})",
2435 mount_path.display()
2436 ),
2437 ctx,
2438 );
2439 continue;
2440 }
2441 if lease_expires_at < internalize::now_iso8601().as_str() {
2442 fail(
2443 &format!(
2444 "Internalization mount lease expired but still present ({})",
2445 mount_path.display()
2446 ),
2447 ctx,
2448 );
2449 }
2450 }
2451 }
2452 }
2453
2454 pass(
2455 &format!(
2456 "Internalization artifact contract checked for {} artifact(s)",
2457 files
2458 ),
2459 ctx,
2460 );
2461 Ok(())
2462}
2463
2464fn validate_schema_determinism(
2465 ctx: &ValidationContext,
2466 _decapod_dir: &Path,
2467) -> Result<(), error::DecapodError> {
2468 info("Schema Determinism Gate");
2469 let run_schema = || -> Result<String, error::DecapodError> {
2470 let snapshot = crate::deterministic_schema_envelope();
2471 serde_json::to_string(&snapshot).map_err(|e| {
2472 error::DecapodError::ValidationError(format!(
2473 "schema determinism serialization failed: {}",
2474 e
2475 ))
2476 })
2477 };
2478
2479 let s1 = run_schema()?;
2481 let s2 = run_schema()?;
2482
2483 if s1 == s2 && !s1.is_empty() {
2484 pass("Schema output is deterministic", ctx);
2485 } else {
2486 fail("Schema output is non-deterministic or empty", ctx);
2487 }
2488 Ok(())
2489}
2490
2491fn validate_database_schema_versions(
2492 store: &Store,
2493 ctx: &ValidationContext,
2494) -> Result<(), error::DecapodError> {
2495 info("Database Schema Version Gate");
2496 if !matches!(store.kind, StoreKind::Repo) {
2497 skip(
2498 "Database schema version gate applies to repo store only",
2499 ctx,
2500 );
2501 return Ok(());
2502 }
2503 let checks = migration::check_versioned_db_schema_expectations(&store.root)?;
2504 for check in checks {
2505 if !check.exists {
2506 fail(
2507 &format!(
2508 "Versioned database {} is missing (expected schema_version={})",
2509 check.db_name, check.expected_version
2510 ),
2511 ctx,
2512 );
2513 continue;
2514 }
2515 match check.actual_version {
2516 Some(actual) if actual == check.expected_version => {
2517 pass(
2518 &format!(
2519 "{} schema_version matches expected {}",
2520 check.db_name, check.expected_version
2521 ),
2522 ctx,
2523 );
2524 }
2525 Some(actual) => {
2526 fail(
2527 &format!(
2528 "{} schema_version mismatch: actual={}, expected={}",
2529 check.db_name, actual, check.expected_version
2530 ),
2531 ctx,
2532 );
2533 }
2534 None => {
2535 fail(
2536 &format!(
2537 "{} missing readable schema_version in meta table (expected {})",
2538 check.db_name, check.expected_version
2539 ),
2540 ctx,
2541 );
2542 }
2543 }
2544 }
2545 Ok(())
2546}
2547
2548fn validate_eval_gate_if_required(
2549 store: &Store,
2550 ctx: &ValidationContext,
2551) -> Result<(), error::DecapodError> {
2552 info("Eval Gate Requirement");
2553 let failures = crate::plugins::eval::validate_eval_gate_if_required(&store.root)?;
2554 if failures.is_empty() {
2555 pass("Eval gate requirement satisfied or not configured", ctx);
2556 } else {
2557 for failure in failures {
2558 fail(&failure, ctx);
2559 }
2560 }
2561 Ok(())
2562}
2563
2564fn validate_health_cache_integrity(
2565 store: &Store,
2566 ctx: &ValidationContext,
2567) -> Result<(), error::DecapodError> {
2568 info("Health Cache Non-Authoritative Gate");
2569 let db_path = store.root.join("health.db");
2570 if !db_path.exists() {
2571 skip("health.db not found; skipping health integrity check", ctx);
2572 return Ok(());
2573 }
2574
2575 let conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
2576
2577 let orphaned: i64 = conn.query_row(
2579 "SELECT COUNT(*) FROM health_cache hc LEFT JOIN proof_events pe ON hc.claim_id = pe.claim_id WHERE pe.event_id IS NULL",
2580 [],
2581 |row| row.get(0),
2582 ).map_err(error::DecapodError::RusqliteError)?;
2583
2584 if orphaned == 0 {
2585 pass("No orphaned health cache entries (integrity pass)", ctx);
2586 } else {
2587 warn(
2588 &format!(
2589 "Found {} health cache entries without proof events (might be manual writes)",
2590 orphaned
2591 ),
2592 ctx,
2593 );
2594 }
2595 Ok(())
2596}
2597
2598fn validate_risk_map(store: &Store, ctx: &ValidationContext) -> Result<(), error::DecapodError> {
2599 info("Risk Map Gate");
2600 let map_path = store.root.join("RISKMAP.json");
2601 if map_path.exists() {
2602 pass("Risk map (blast-radius) is present", ctx);
2603 } else {
2604 warn("Risk map missing (run `decapod riskmap init`)", ctx);
2605 }
2606 Ok(())
2607}
2608
2609fn validate_risk_map_violations(
2610 store: &Store,
2611 ctx: &ValidationContext,
2612 pre_read_broker: Option<&str>,
2613) -> Result<(), error::DecapodError> {
2614 info("Zone Violation Gate");
2615 let fallback;
2616 let content = match pre_read_broker {
2617 Some(c) => c,
2618 None => {
2619 let audit_log = store.root.join("broker.events.jsonl");
2620 if !audit_log.exists() {
2621 return Ok(());
2622 }
2623 fallback = fs::read_to_string(audit_log)?;
2624 &fallback
2625 }
2626 };
2627 {
2628 let mut offenders = Vec::new();
2629 for line in content.lines() {
2630 if line.contains("\".decapod/\"") && line.contains("\"op\":\"todo.add\"") {
2631 offenders.push(line.to_string());
2632 }
2633 }
2634 if offenders.is_empty() {
2635 pass("No risk zone violations detected in audit log", ctx);
2636 } else {
2637 fail(
2638 &format!("Detected operations in protected zones: {:?}", offenders),
2639 ctx,
2640 );
2641 }
2642 }
2643 Ok(())
2644}
2645
2646fn validate_policy_integrity(
2647 store: &Store,
2648 ctx: &ValidationContext,
2649 pre_read_broker: Option<&str>,
2650) -> Result<(), error::DecapodError> {
2651 info("Policy Integrity Gates");
2652 let db_path = store.root.join("policy.db");
2653 if !db_path.exists() {
2654 skip("policy.db not found; skipping policy check", ctx);
2655 return Ok(());
2656 }
2657
2658 let _conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
2659
2660 let fallback;
2661 let content_opt = match pre_read_broker {
2662 Some(c) => Some(c),
2663 None => {
2664 let audit_log = store.root.join("broker.events.jsonl");
2665 if audit_log.exists() {
2666 fallback = fs::read_to_string(audit_log)?;
2667 Some(fallback.as_str())
2668 } else {
2669 None
2670 }
2671 }
2672 };
2673 if let Some(content) = content_opt {
2674 let mut offenders = Vec::new();
2675 for line in content.lines() {
2676 if line.contains("\"op\":\"policy.approve\"")
2677 && line.contains("\"db_id\":\"health.db\"")
2678 {
2679 offenders.push(line.to_string());
2680 }
2681 }
2682 if offenders.is_empty() {
2683 pass(
2684 "Approval isolation verified (no direct health mutations)",
2685 ctx,
2686 );
2687 } else {
2688 fail(
2689 &format!(
2690 "Policy approval directly mutated health state: {:?}",
2691 offenders
2692 ),
2693 ctx,
2694 );
2695 }
2696 }
2697
2698 Ok(())
2699}
2700
/// Knowledge Integrity Gate: verifies that every knowledge entry carries
/// provenance, that `procedural/` entries are event-backed and linked to the
/// promotion ledger, and that knowledge operations never mutated health
/// state directly through the broker.
fn validate_knowledge_integrity(
    store: &Store,
    ctx: &ValidationContext,
    pre_read_broker: Option<&str>,
) -> Result<(), error::DecapodError> {
    info("Knowledge Integrity Gate");
    let db_path = store.root.join("knowledge.db");
    if !db_path.exists() {
        skip(
            "knowledge.db not found; skipping knowledge integrity check",
            ctx,
        );
        return Ok(());
    }

    // Defined as a closure because the query may be retried once below, after
    // initializing a missing `knowledge` table.
    let query_missing_provenance = |conn: &rusqlite::Connection| -> Result<i64, rusqlite::Error> {
        conn.query_row(
            "SELECT COUNT(*) FROM knowledge WHERE provenance IS NULL OR provenance = ''",
            [],
            |row| row.get(0),
        )
    };

    let mut conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
    let missing_provenance: i64 = match query_missing_provenance(&conn) {
        Ok(v) => v,
        // Self-heal path: if the table does not exist yet, initialize the
        // knowledge DB, reconnect, and retry the query exactly once.
        Err(rusqlite::Error::SqliteFailure(_, Some(msg)))
            if msg.contains("no such table: knowledge") =>
        {
            db::initialize_knowledge_db(&store.root)?;
            conn = db::db_connect_for_validate(&db_path.to_string_lossy())?;
            query_missing_provenance(&conn).map_err(error::DecapodError::RusqliteError)?
        }
        Err(e) => return Err(error::DecapodError::RusqliteError(e)),
    };

    // Gate 1: every knowledge entry must carry a provenance pointer.
    if missing_provenance == 0 {
        pass(
            "Knowledge provenance verified (all entries have pointers)",
            ctx,
        );
    } else {
        fail(
            &format!(
                "Found {} knowledge entries missing mandatory provenance",
                missing_provenance
            ),
            ctx,
        );
    }

    // Gate 2: procedural entries must carry event-backed ("event:…") provenance.
    let procedural_missing_event_provenance: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM knowledge
             WHERE id LIKE 'procedural/%'
             AND (provenance IS NULL OR provenance = '' OR provenance NOT LIKE 'event:%')",
            [],
            |row| row.get(0),
        )
        .map_err(error::DecapodError::RusqliteError)?;
    if procedural_missing_event_provenance == 0 {
        pass(
            "Knowledge promotion firewall verified (procedural entries carry event provenance)",
            ctx,
        );
    } else {
        fail(
            &format!(
                "Found {} procedural knowledge entries without event-backed provenance",
                procedural_missing_event_provenance
            ),
            ctx,
        );
    }

    // Gate 3: each event-backed procedural entry must reference a promotion
    // event that actually exists in the promotion ledger.
    let event_ids = load_knowledge_promotion_event_ids(&store.root)?;
    let mut stmt = conn
        .prepare(
            "SELECT provenance FROM knowledge
             WHERE id LIKE 'procedural/%' AND provenance LIKE 'event:%'",
        )
        .map_err(error::DecapodError::RusqliteError)?;
    let rows = stmt
        .query_map([], |row| row.get::<_, String>(0))
        .map_err(error::DecapodError::RusqliteError)?;
    let mut missing_event_refs = 0usize;
    for row in rows {
        let prov = row.map_err(error::DecapodError::RusqliteError)?;
        let event_id = prov.trim_start_matches("event:");
        if !event_ids.contains(event_id) {
            missing_event_refs += 1;
        }
    }
    if missing_event_refs == 0 {
        pass("Knowledge promotion firewall ledger linkage verified", ctx);
    } else {
        fail(
            &format!(
                "Found {} procedural knowledge entries referencing missing promotion events",
                missing_event_refs
            ),
            ctx,
        );
    }

    // Gate 4: scan the broker audit log (pre-read copy when available) for
    // knowledge.add operations that targeted health.db directly.
    let fallback;
    let content_opt = match pre_read_broker {
        Some(c) => Some(c),
        None => {
            let audit_log = store.root.join("broker.events.jsonl");
            if audit_log.exists() {
                fallback = fs::read_to_string(audit_log)?;
                Some(fallback.as_str())
            } else {
                None
            }
        }
    };
    if let Some(content) = content_opt {
        let mut offenders = Vec::new();
        for line in content.lines() {
            if line.contains("\"op\":\"knowledge.add\"") && line.contains("\"db_id\":\"health.db\"")
            {
                offenders.push(line.to_string());
            }
        }
        if offenders.is_empty() {
            pass("No direct health promotion from knowledge detected", ctx);
        } else {
            fail(
                &format!(
                    "Knowledge system directly mutated health state: {:?}",
                    offenders
                ),
                ctx,
            );
        }
    }

    Ok(())
}
2843
2844fn load_knowledge_promotion_event_ids(
2845 store_root: &Path,
2846) -> Result<HashSet<String>, error::DecapodError> {
2847 let ledger = store_root.join("knowledge.promotions.jsonl");
2848 if !ledger.exists() {
2849 return Ok(HashSet::new());
2850 }
2851
2852 let raw = fs::read_to_string(&ledger).map_err(error::DecapodError::IoError)?;
2853 let mut ids = HashSet::new();
2854 for (idx, line) in raw.lines().enumerate() {
2855 if line.trim().is_empty() {
2856 continue;
2857 }
2858 let v: serde_json::Value = serde_json::from_str(line).map_err(|e| {
2859 error::DecapodError::ValidationError(format!(
2860 "invalid promotion ledger line {} in {}: {}",
2861 idx + 1,
2862 ledger.display(),
2863 e
2864 ))
2865 })?;
2866 if let Some(id) = v.get("event_id").and_then(|x| x.as_str()) {
2867 ids.insert(id.to_string());
2868 }
2869 }
2870 Ok(ids)
2871}
2872
2873fn validate_lineage_hard_gate(
2874 store: &Store,
2875 ctx: &ValidationContext,
2876) -> Result<(), error::DecapodError> {
2877 info("Lineage Hard Gate");
2878 let todo_events = store.root.join("todo.events.jsonl");
2879 let federation_db = store.root.join("federation.db");
2880 let todo_db = store.root.join("todo.db");
2881
2882 if !todo_events.exists() || !federation_db.exists() || !todo_db.exists() {
2884 skip("lineage inputs missing; skipping", ctx);
2885 return Ok(());
2886 }
2887
2888 if let Ok(metadata) = fs::metadata(&todo_events)
2890 && metadata.len() < 100
2891 {
2892 skip("todo.events.jsonl too small; skipping", ctx);
2893 return Ok(());
2894 }
2895
2896 let content = match fs::read_to_string(&todo_events) {
2897 Ok(c) => c,
2898 Err(_) => {
2899 skip("cannot read todo.events.jsonl; skipping", ctx);
2900 return Ok(());
2901 }
2902 };
2903
2904 if !content.contains("intent:") {
2906 pass("no intent-tagged events found; skipping", ctx);
2907 return Ok(());
2908 }
2909
2910 let mut add_candidates = Vec::new();
2911 let mut done_candidates = Vec::new();
2912 for line in content.lines() {
2913 let Ok(v) = serde_json::from_str::<serde_json::Value>(line) else {
2914 continue;
2915 };
2916 let event_type = v.get("event_type").and_then(|x| x.as_str()).unwrap_or("");
2917 let task_id = v.get("task_id").and_then(|x| x.as_str()).unwrap_or("");
2918 if task_id.is_empty() {
2919 continue;
2920 }
2921 let intent_ref = v
2922 .get("payload")
2923 .and_then(|p| p.get("intent_ref"))
2924 .and_then(|x| x.as_str())
2925 .unwrap_or("");
2926 if !intent_ref.starts_with("intent:") {
2928 continue;
2929 }
2930 if event_type == "task.add" {
2931 add_candidates.push(task_id.to_string());
2932 } else if event_type == "task.done" {
2933 done_candidates.push(task_id.to_string());
2934 }
2935 }
2936
2937 if add_candidates.is_empty() && done_candidates.is_empty() {
2939 pass("no intent-tagged task events to validate", ctx);
2940 return Ok(());
2941 }
2942
2943 let conn = db::db_connect_for_validate(&federation_db.to_string_lossy())?;
2944 let todo_conn = db::db_connect_for_validate(&todo_db.to_string_lossy())?;
2945 let mut violations = Vec::new();
2946
2947 for task_id in add_candidates {
2948 let exists: i64 = todo_conn
2949 .query_row(
2950 "SELECT COUNT(*) FROM tasks WHERE id = ?1",
2951 rusqlite::params![task_id.clone()],
2952 |row| row.get(0),
2953 )
2954 .map_err(error::DecapodError::RusqliteError)?;
2955 if exists == 0 {
2956 continue;
2957 }
2958 let source = format!("event:{}", task_id);
2959 let commitment_count: i64 = conn
2960 .query_row(
2961 "SELECT COUNT(*) FROM nodes n JOIN sources s ON s.node_id = n.id WHERE s.source = ?1 AND n.node_type = 'commitment'",
2962 rusqlite::params![source],
2963 |row| row.get(0),
2964 )
2965 .map_err(error::DecapodError::RusqliteError)?;
2966 if commitment_count == 0 {
2967 violations.push(format!(
2968 "task.add {} missing commitment lineage node",
2969 task_id
2970 ));
2971 }
2972 }
2973
2974 for task_id in done_candidates {
2975 let exists: i64 = todo_conn
2976 .query_row(
2977 "SELECT COUNT(*) FROM tasks WHERE id = ?1",
2978 rusqlite::params![task_id.clone()],
2979 |row| row.get(0),
2980 )
2981 .map_err(error::DecapodError::RusqliteError)?;
2982 if exists == 0 {
2983 continue;
2984 }
2985 let source = format!("event:{}", task_id);
2986 let commitment_count: i64 = conn
2987 .query_row(
2988 "SELECT COUNT(*) FROM nodes n JOIN sources s ON s.node_id = n.id WHERE s.source = ?1 AND n.node_type = 'commitment'",
2989 rusqlite::params![source.clone()],
2990 |row| row.get(0),
2991 )
2992 .map_err(error::DecapodError::RusqliteError)?;
2993 let decision_count: i64 = conn
2994 .query_row(
2995 "SELECT COUNT(*) FROM nodes n JOIN sources s ON s.node_id = n.id WHERE s.source = ?1 AND n.node_type = 'decision'",
2996 rusqlite::params![source],
2997 |row| row.get(0),
2998 )
2999 .map_err(error::DecapodError::RusqliteError)?;
3000 if commitment_count == 0 || decision_count == 0 {
3001 violations.push(format!(
3002 "task.done {} missing commitment/decision lineage nodes",
3003 task_id
3004 ));
3005 }
3006 }
3007
3008 if violations.is_empty() {
3009 pass(
3010 "Intent-tagged task.add/task.done events have commitment+proof lineage",
3011 ctx,
3012 );
3013 } else {
3014 fail(&format!("Lineage gate violations: {:?}", violations), ctx);
3015 }
3016 Ok(())
3017}
3018
3019fn validate_repomap_determinism(
3020 ctx: &ValidationContext,
3021 decapod_dir: &Path,
3022) -> Result<(), error::DecapodError> {
3023 info("Repo Map Determinism Gate");
3024 use crate::core::repomap;
3025 let dir1 = decapod_dir.to_path_buf();
3026 let dir2 = decapod_dir.to_path_buf();
3027 let h1 =
3028 std::thread::spawn(move || serde_json::to_string(&repomap::generate_map(&dir1)).unwrap());
3029 let h2 =
3030 std::thread::spawn(move || serde_json::to_string(&repomap::generate_map(&dir2)).unwrap());
3031
3032 let m1 = h1
3033 .join()
3034 .map_err(|_| error::DecapodError::ValidationError("repomap thread panicked".into()))?;
3035 let m2 = h2
3036 .join()
3037 .map_err(|_| error::DecapodError::ValidationError("repomap thread panicked".into()))?;
3038
3039 if m1 == m2 && !m1.is_empty() {
3040 pass("Repo map output is deterministic", ctx);
3041 } else {
3042 fail("Repo map output is non-deterministic or empty", ctx);
3043 }
3044 Ok(())
3045}
3046
3047fn validate_watcher_audit(
3048 store: &Store,
3049 ctx: &ValidationContext,
3050) -> Result<(), error::DecapodError> {
3051 info("Watcher Audit Gate");
3052 let audit_log = store.root.join("watcher.events.jsonl");
3053 if audit_log.exists() {
3054 pass("Watcher audit trail present", ctx);
3055 } else {
3056 warn(
3057 "Watcher audit trail missing (run `decapod govern watcher run`)",
3058 ctx,
3059 );
3060 }
3061 Ok(())
3062}
3063
3064fn validate_watcher_purity(
3065 store: &Store,
3066 ctx: &ValidationContext,
3067 pre_read_broker: Option<&str>,
3068) -> Result<(), error::DecapodError> {
3069 info("Watcher Purity Gate");
3070 let fallback;
3071 let content_opt = match pre_read_broker {
3072 Some(c) => Some(c),
3073 None => {
3074 let audit_log = store.root.join("broker.events.jsonl");
3075 if audit_log.exists() {
3076 fallback = fs::read_to_string(audit_log)?;
3077 Some(fallback.as_str())
3078 } else {
3079 None
3080 }
3081 }
3082 };
3083 if let Some(content) = content_opt {
3084 let mut offenders = Vec::new();
3085 for line in content.lines() {
3086 if line.contains("\"actor\":\"watcher\"") {
3087 offenders.push(line.to_string());
3088 }
3089 }
3090 if offenders.is_empty() {
3091 pass("Watcher purity verified (read-only checks only)", ctx);
3092 } else {
3093 fail(
3094 &format!(
3095 "Watcher subsystem attempted brokered mutations: {:?}",
3096 offenders
3097 ),
3098 ctx,
3099 );
3100 }
3101 }
3102 Ok(())
3103}
3104
3105fn validate_archive_integrity(
3106 store: &Store,
3107 ctx: &ValidationContext,
3108) -> Result<(), error::DecapodError> {
3109 info("Archive Integrity Gate");
3110 let db_path = store.root.join("archive.db");
3111 if !db_path.exists() {
3112 skip("archive.db not found; skipping archive check", ctx);
3113 return Ok(());
3114 }
3115
3116 use crate::archive;
3117 let failures = archive::verify_archives(store)?;
3118 if failures.is_empty() {
3119 pass(
3120 "All session archives verified (content and hash match)",
3121 ctx,
3122 );
3123 } else {
3124 fail(
3125 &format!("Archive integrity failures detected: {:?}", failures),
3126 ctx,
3127 );
3128 }
3129 Ok(())
3130}
3131
/// Control Plane Contract Gate: every durable database must be backed by a
/// matching event journal, and (Linux only, best-effort) no external process
/// should be holding the store's SQLite files open.
fn validate_control_plane_contract(
    store: &Store,
    ctx: &ValidationContext,
) -> Result<(), error::DecapodError> {
    info("Control Plane Contract Gate");

    let data_dir = &store.root;
    let mut violations = Vec::new();

    // No broker log means nothing has been brokered yet; trivially compliant.
    let broker_log = data_dir.join("broker.events.jsonl");
    if !broker_log.exists() {
        pass("No broker events yet (first run)", ctx);
        return Ok(());
    }

    // Materialized databases must have their append-only journals alongside.
    let todo_db = data_dir.join("todo.db");
    if todo_db.exists() {
        let todo_events = data_dir.join("todo.events.jsonl");
        if !todo_events.exists() {
            violations.push("todo.db exists but todo.events.jsonl is missing".to_string());
        }
    }

    let federation_db = data_dir.join("federation.db");
    if federation_db.exists() {
        let federation_events = data_dir.join("federation.events.jsonl");
        if !federation_events.exists() {
            violations
                .push("federation.db exists but federation.events.jsonl is missing".to_string());
        }
    }

    // Best-effort external-access probe (Linux only): run lsof under a 3s
    // timeout over the store directory; errors or a missing lsof/timeout
    // binary are silently ignored rather than failing the gate.
    #[cfg(target_os = "linux")]
    {
        use std::process::Command;
        if let Ok(output) = Command::new("timeout")
            .args(["3s", "lsof", "+D", data_dir.to_string_lossy().as_ref()])
            .output()
            && output.status.success()
        {
            let stdout = String::from_utf8_lossy(&output.stdout);
            for line in stdout.lines() {
                // Flag sqlite handles not owned by a decapod process.
                if line.contains("sqlite") && !line.contains("decapod") {
                    violations.push(format!("External SQLite process accessing store: {}", line));
                }
            }
        }
    }

    if violations.is_empty() {
        pass(
            "Control plane contract honored (all mutations brokered)",
            ctx,
        );
    } else {
        fail(
            &format!(
                "Control plane contract violations detected: {:?}",
                violations
            ),
            ctx,
        );
    }

    Ok(())
}
3205
3206fn validate_canon_mutation(
3207 store: &Store,
3208 ctx: &ValidationContext,
3209 pre_read_broker: Option<&str>,
3210) -> Result<(), error::DecapodError> {
3211 info("Canon Mutation Gate");
3212 let fallback;
3213 let content_opt = match pre_read_broker {
3214 Some(c) => Some(c),
3215 None => {
3216 let audit_log = store.root.join("broker.events.jsonl");
3217 if audit_log.exists() {
3218 fallback = fs::read_to_string(audit_log)?;
3219 Some(fallback.as_str())
3220 } else {
3221 None
3222 }
3223 }
3224 };
3225 if let Some(content) = content_opt {
3226 let mut offenders = Vec::new();
3227 for line in content.lines() {
3228 if line.contains("\"op\":\"write\"")
3229 && (line.contains(".md\"") || line.contains(".json\""))
3230 && !line.contains("\"actor\":\"decapod\"")
3231 && !line.contains("\"actor\":\"scaffold\"")
3232 {
3233 offenders.push(line.to_string());
3234 }
3235 }
3236 if offenders.is_empty() {
3237 pass("No unauthorized canon mutations detected", ctx);
3238 } else {
3239 warn(
3240 &format!(
3241 "Detected direct mutations to canonical documents: {:?}",
3242 offenders
3243 ),
3244 ctx,
3245 );
3246 }
3247 }
3248 Ok(())
3249}
3250
/// Heartbeat Invocation Gate: checks, via literal source/doc markers, that
/// the invocation-heartbeat wiring exists in the decapod sources (when they
/// are present) and that the embedded docs describe the contract.
fn validate_heartbeat_invocation_gate(
    ctx: &ValidationContext,
    decapod_dir: &Path,
) -> Result<(), error::DecapodError> {
    info("Heartbeat Invocation Gate");

    // Code-level checks only apply when the decapod sources themselves are
    // available (e.g. developing decapod); otherwise they are skipped.
    let lib_rs = decapod_dir.join("src").join("lib.rs");
    let todo_rs = decapod_dir.join("src").join("plugins").join("todo.rs");
    if lib_rs.exists() && todo_rs.exists() {
        let lib_content = fs::read_to_string(&lib_rs).unwrap_or_default();
        let todo_content = fs::read_to_string(&todo_rs).unwrap_or_default();

        // Each marker is (check passed, human-readable description); checks
        // are plain substring probes against the source text, so they are
        // sensitive to refactors of the probed snippets.
        let code_markers = [
            (
                lib_content.contains("should_auto_clock_in(&cli.command)")
                    && lib_content.contains("todo::clock_in_agent_presence(&project_store)?"),
                "Top-level command dispatch auto-clocks heartbeat",
            ),
            (
                lib_content
                    .contains("Command::Todo(todo_cli) => !todo::is_heartbeat_command(todo_cli)"),
                "Decorator excludes explicit todo heartbeat to prevent duplicates",
            ),
            (
                todo_content.contains("pub fn clock_in_agent_presence")
                    && todo_content.contains("record_heartbeat"),
                "TODO plugin exposes reusable clock-in helper",
            ),
        ];

        for (ok, msg) in code_markers {
            if ok {
                pass(msg, ctx);
            } else {
                fail(msg, ctx);
            }
        }
    } else {
        skip(
            "Heartbeat wiring source files absent; skipping code-level heartbeat checks",
            ctx,
        );
    }

    // Doc-level checks: embedded doc assets must mention the heartbeat
    // contract; these run regardless of whether the sources are present.
    let doc_markers = [
        (
            crate::core::assets::get_doc("core/DECAPOD.md")
                .unwrap_or_default()
                .contains("invocation heartbeat"),
            "Router documents invocation heartbeat contract",
        ),
        (
            crate::core::assets::get_doc("interfaces/CONTROL_PLANE.md")
                .unwrap_or_default()
                .contains("invocation heartbeat"),
            "Control-plane interface documents invocation heartbeat",
        ),
        (
            crate::core::assets::get_doc("plugins/TODO.md")
                .unwrap_or_default()
                .contains("auto-clocks liveness"),
            "TODO plugin documents automatic liveness clock-in",
        ),
        (
            crate::core::assets::get_doc("plugins/REFLEX.md")
                .unwrap_or_default()
                .contains("todo.heartbeat.autoclaim"),
            "REFLEX plugin documents heartbeat autoclaim action",
        ),
    ];

    for (ok, msg) in doc_markers {
        if ok {
            pass(msg, ctx);
        } else {
            fail(msg, ctx);
        }
    }

    Ok(())
}
3332
3333fn validate_federation_gates(
3334 store: &Store,
3335 ctx: &ValidationContext,
3336) -> Result<(), error::DecapodError> {
3337 info("Federation Gates");
3338
3339 let results = crate::plugins::federation::validate_federation(&store.root)?;
3340
3341 for (gate_name, passed, message) in results {
3342 if passed {
3343 pass(&format!("[{}] {}", gate_name, message), ctx);
3344 } else {
3345 warn(&format!("[{}] {}", gate_name, message), ctx);
3349 }
3350 }
3351
3352 Ok(())
3353}
3354
3355fn validate_markdown_primitives_roundtrip_gate(
3356 store: &Store,
3357 ctx: &ValidationContext,
3358) -> Result<(), error::DecapodError> {
3359 info("Markdown Primitive Round-Trip Gate");
3360 match primitives::validate_roundtrip_gate(store) {
3361 Ok(()) => {
3362 pass(
3363 "Markdown primitives export and round-trip validation pass",
3364 ctx,
3365 );
3366 }
3367 Err(err) => {
3368 fail(
3369 &format!("Markdown primitive round-trip failed: {}", err),
3370 ctx,
3371 );
3372 }
3373 }
3374 Ok(())
3375}
3376
/// Git Workspace Context Gate: requires implementation work to run inside a
/// Docker-isolated container workspace and, ideally, a linked git worktree;
/// finishes by chaining into the commit-often gate.
fn validate_git_workspace_context(
    ctx: &ValidationContext,
    repo_root: &Path,
) -> Result<(), error::DecapodError> {
    info("Git Workspace Context Gate");

    // Explicit escape hatch for environments where these gates cannot apply.
    if std::env::var("DECAPOD_VALIDATE_SKIP_GIT_GATES").is_ok() {
        skip(
            "Git workspace gates skipped (DECAPOD_VALIDATE_SKIP_GIT_GATES set)",
            ctx,
        );
        return Ok(());
    }

    // Read-only schema commands are exempt. The `skip_while(..).nth(1)`
    // pattern inspects the argv token immediately following "lcm"/"map" to
    // detect the `lcm schema` / `map schema` subcommand forms.
    let args: Vec<String> = std::env::args().collect();
    let is_schema_command = args.iter().any(|a| {
        a == "schema"
            || (a == "lcm"
                && args
                    .iter()
                    .skip_while(|x| *x != "lcm")
                    .nth(1)
                    .is_some_and(|x| x == "schema"))
            || (a == "map"
                && args
                    .iter()
                    .skip_while(|x| *x != "map")
                    .nth(1)
                    .is_some_and(|x| x == "schema"))
    });
    if is_schema_command {
        skip(
            "Schema command exempted from workspace requirement (read-only)",
            ctx,
        );
        return Ok(());
    }

    // Container detection signals: (detected?, human-readable name).
    // Any single positive signal is sufficient.
    let signals_container = [
        (
            std::env::var("DECAPOD_CONTAINER").ok().as_deref() == Some("1"),
            "DECAPOD_CONTAINER=1",
        ),
        (repo_root.join(".dockerenv").exists(), ".dockerenv marker"),
        (
            repo_root.join(".devcontainer").exists(),
            ".devcontainer marker",
        ),
        (
            std::env::var("DOCKER_CONTAINER").is_ok(),
            "DOCKER_CONTAINER env",
        ),
    ];

    let in_container = signals_container.iter().any(|(signal, _)| *signal);
    if in_container {
        // Report which signals fired so the pass message is auditable.
        let reasons: Vec<&str> = signals_container
            .iter()
            .filter(|(signal, _)| *signal)
            .map(|(_, name)| *name)
            .collect();
        pass(
            &format!(
                "Running in container workspace (signals: {})",
                reasons.join(", ")
            ),
            ctx,
        );
    } else {
        fail(
            "Not running in container workspace - git-tracked work must execute in Docker-isolated workspace (claim.git.container_workspace_required)",
            ctx,
        );
    }

    // A linked worktree has a `.git` *file* containing a `gitdir:` pointer
    // (a normal checkout has a `.git` directory instead).
    let git_dir = repo_root.join(".git");
    let is_worktree = git_dir.is_file() && {
        let content = fs::read_to_string(&git_dir).unwrap_or_default();
        content.contains("gitdir:")
    };

    if is_worktree {
        pass("Running in git worktree (isolated branch)", ctx);
    } else if in_container {
        // Inside a container the worktree check is informational only.
        pass(
            "Container workspace detected (worktree check informational)",
            ctx,
        );
    } else {
        fail(
            "Not running in isolated git worktree - must use container workspace for implementation work",
            ctx,
        );
    }

    validate_commit_often_gate(ctx, repo_root)?;

    Ok(())
}
3480
3481fn validate_commit_often_gate(
3482 ctx: &ValidationContext,
3483 repo_root: &Path,
3484) -> Result<(), error::DecapodError> {
3485 let max_dirty_files = std::env::var("DECAPOD_COMMIT_OFTEN_MAX_DIRTY_FILES")
3486 .ok()
3487 .and_then(|v| v.parse::<usize>().ok())
3488 .filter(|v| *v > 0)
3489 .unwrap_or(6);
3490
3491 let status_output = std::process::Command::new("git")
3492 .args(["status", "--porcelain"])
3493 .current_dir(repo_root)
3494 .output()
3495 .map_err(error::DecapodError::IoError)?;
3496
3497 if !status_output.status.success() {
3498 warn("Commit-often gate skipped: unable to read git status", ctx);
3499 return Ok(());
3500 }
3501
3502 let dirty_count = String::from_utf8_lossy(&status_output.stdout)
3503 .lines()
3504 .filter(|line| !line.trim().is_empty())
3505 .count();
3506
3507 if dirty_count == 0 {
3508 pass("Commit-often gate: working tree is clean", ctx);
3509 return Ok(());
3510 }
3511
3512 if dirty_count > max_dirty_files {
3513 fail(
3514 &format!(
3515 "Commit-often mandate violation: {} dirty file(s) exceed limit {}. Commit incremental changes before continuing.",
3516 dirty_count, max_dirty_files
3517 ),
3518 ctx,
3519 );
3520 } else {
3521 pass(
3522 &format!(
3523 "Commit-often gate: {} dirty file(s) within limit {}",
3524 dirty_count, max_dirty_files
3525 ),
3526 ctx,
3527 );
3528 }
3529
3530 Ok(())
3531}
3532
/// Plan-Governed Execution Gate: execution/promotion is only allowed under a
/// plan in APPROVED or DONE state with resolved intent and no open unknowns
/// or human questions, and every "done" TODO must be proof-verified.
fn validate_plan_governed_execution_gate(
    store: &Store,
    ctx: &ValidationContext,
    repo_root: &Path,
) -> Result<(), error::DecapodError> {
    info("Plan-Governed Execution Gate");

    // Shares the git-gates escape hatch used by the workspace gates.
    if std::env::var("DECAPOD_VALIDATE_SKIP_GIT_GATES").is_ok() {
        skip(
            "Plan-governed execution gate skipped (DECAPOD_VALIDATE_SKIP_GIT_GATES set)",
            ctx,
        );
        return Ok(());
    }

    let plan = plan_governance::load_plan(repo_root)?;
    if let Some(plan) = plan {
        // A plan exists: it must be APPROVED or DONE to permit execution.
        if plan.state != plan_governance::PlanState::Approved
            && plan.state != plan_governance::PlanState::Done
        {
            fail(
                &format!(
                    "NEEDS_PLAN_APPROVAL: plan state is {:?}; execution/promotion requires APPROVED or DONE",
                    plan.state
                ),
                ctx,
            );
        } else {
            pass("Plan artifact state allows governed execution", ctx);
        }

        // Empty intent, open unknowns, or outstanding human questions all
        // block governed execution until resolved.
        if plan.intent.trim().is_empty()
            || !plan.unknowns.is_empty()
            || !plan.human_questions.is_empty()
        {
            fail(
                "NEEDS_HUMAN_INPUT: governed plan has unresolved intent/unknowns/questions",
                ctx,
            );
        } else {
            pass("Plan intent and unknowns are resolved", ctx);
        }
    } else {
        // No plan artifact: the gate stays advisory until the first TODO has
        // actually been completed.
        let done_count = plan_governance::count_done_todos(&store.root)?;
        if done_count > 0 {
            fail(
                &format!(
                    "NEEDS_PLAN_APPROVAL: {} done TODO(s) exist but governed PLAN artifact is missing",
                    done_count
                ),
                ctx,
            );
        } else {
            pass(
                "No governed plan artifact present; gate is advisory until first done TODO",
                ctx,
            );
        }
    }

    // Independent of plan presence: done TODOs that are CLAIMED but not
    // VERIFIED fail the proof hook (preview truncated to 4 items / 80 chars).
    let unverified = plan_governance::collect_unverified_done_todos(&store.root)?;
    if !unverified.is_empty() {
        fail(
            &format!(
                "PROOF_HOOK_FAILED: {} done TODO(s) are CLAIMED but not VERIFIED: {}",
                unverified.len(),
                output::preview_messages(&unverified, 4, 80)
            ),
            ctx,
        );
    } else {
        pass("Done TODOs are proof-verified", ctx);
    }

    Ok(())
}
3612
3613fn validate_git_protected_branch(
3614 ctx: &ValidationContext,
3615 repo_root: &Path,
3616) -> Result<(), error::DecapodError> {
3617 info("Git Protected Branch Gate");
3618
3619 if std::env::var("DECAPOD_VALIDATE_SKIP_GIT_GATES").is_ok() {
3621 skip(
3622 "Git protected branch gate skipped (DECAPOD_VALIDATE_SKIP_GIT_GATES set)",
3623 ctx,
3624 );
3625 return Ok(());
3626 }
3627
3628 let protected_patterns = ["master", "main", "production", "stable"];
3629
3630 let current_branch = {
3631 let output = std::process::Command::new("git")
3632 .args(["rev-parse", "--abbrev-ref", "HEAD"])
3633 .current_dir(repo_root)
3634 .output();
3635 output
3636 .ok()
3637 .and_then(|o| {
3638 if o.status.success() {
3639 Some(String::from_utf8_lossy(&o.stdout).trim().to_string())
3640 } else {
3641 None
3642 }
3643 })
3644 .unwrap_or_else(|| "unknown".to_string())
3645 };
3646
3647 let is_protected = protected_patterns
3648 .iter()
3649 .any(|p| current_branch == *p || current_branch.starts_with("release/"));
3650
3651 if is_protected {
3652 fail(
3653 &format!(
3654 "Currently on protected branch '{}' - implementation work must happen in working branch, not directly on protected refs (claim.git.no_direct_main_push)",
3655 current_branch
3656 ),
3657 ctx,
3658 );
3659 } else {
3660 pass(
3661 &format!("On working branch '{}' (not protected)", current_branch),
3662 ctx,
3663 );
3664 }
3665
3666 let has_remote = std::process::Command::new("git")
3667 .args(["remote", "get-url", "origin"])
3668 .current_dir(repo_root)
3669 .output()
3670 .map(|o| o.status.success())
3671 .unwrap_or(false);
3672
3673 if has_remote {
3674 let ahead_behind = std::process::Command::new("git")
3675 .args(["rev-list", "--left-right", "--count", "HEAD...origin/HEAD"])
3676 .current_dir(repo_root)
3677 .output();
3678
3679 if let Ok(out) = ahead_behind
3680 && out.status.success()
3681 {
3682 let counts = String::from_utf8_lossy(&out.stdout);
3683 let parts: Vec<&str> = counts.split_whitespace().collect();
3684 if parts.len() >= 2 {
3685 let ahead: u32 = parts[0].parse().unwrap_or(0);
3686 if ahead > 0 {
3687 let output = std::process::Command::new("git")
3688 .args(["rev-list", "--format=%s", "-n1", "HEAD"])
3689 .current_dir(repo_root)
3690 .output();
3691 let commit_msg = output
3692 .ok()
3693 .and_then(|o| {
3694 if o.status.success() {
3695 Some(String::from_utf8_lossy(&o.stdout).trim().to_string())
3696 } else {
3697 None
3698 }
3699 })
3700 .unwrap_or_else(|| "unknown".to_string());
3701
3702 fail(
3703 &format!(
3704 "Protected branch has {} unpushed commit(s) - direct push to protected branch detected (commit: {})",
3705 ahead, commit_msg
3706 ),
3707 ctx,
3708 );
3709 } else {
3710 pass("No unpushed commits to protected branches", ctx);
3711 }
3712 }
3713 }
3714 }
3715
3716 Ok(())
3717}
3718
3719fn validate_tooling_gate(
3720 ctx: &ValidationContext,
3721 repo_root: &Path,
3722) -> Result<(), error::DecapodError> {
3723 info("Tooling Validation Gate");
3724
3725 let tooling_enabled = std::env::var("DECAPOD_VALIDATE_ENABLE_TOOLING_GATES")
3726 .ok()
3727 .map(|v| matches!(v.as_str(), "1" | "true" | "TRUE" | "yes" | "YES"))
3728 .unwrap_or(false);
3729 if !tooling_enabled {
3730 skip(
3731 "Tooling validation gates disabled by default (set DECAPOD_VALIDATE_ENABLE_TOOLING_GATES=1 to enable)",
3732 ctx,
3733 );
3734 return Ok(());
3735 }
3736
3737 if std::env::var("DECAPOD_VALIDATE_SKIP_TOOLING_GATES").is_ok() {
3738 skip(
3739 "Tooling validation gates skipped (DECAPOD_VALIDATE_SKIP_TOOLING_GATES set)",
3740 ctx,
3741 );
3742 return Ok(());
3743 }
3744
3745 let mut has_failures = false;
3746 let mut has_tooling = false;
3747
3748 let cargo_toml = repo_root.join("Cargo.toml");
3749 if cargo_toml.exists() {
3750 has_tooling = true;
3751 let root_fmt = repo_root.to_path_buf();
3752 let root_clippy = repo_root.to_path_buf();
3753
3754 let fmt_handle = std::thread::spawn(move || {
3755 std::process::Command::new("cargo")
3756 .args(["fmt", "--all", "--", "--check"])
3757 .current_dir(&root_fmt)
3758 .output()
3759 });
3760
3761 let clippy_handle = std::thread::spawn(move || {
3762 std::process::Command::new("cargo")
3763 .args([
3764 "clippy",
3765 "--all-targets",
3766 "--all-features",
3767 "--",
3768 "-D",
3769 "warnings",
3770 ])
3771 .current_dir(&root_clippy)
3772 .output()
3773 });
3774
3775 match fmt_handle.join().expect("fmt thread panicked") {
3776 Ok(output) => {
3777 if output.status.success() {
3778 pass("Rust code formatting passes (cargo fmt)", ctx);
3779 } else {
3780 fail("Rust code formatting failed - run `cargo fmt --all`", ctx);
3781 has_failures = true;
3782 }
3783 }
3784 Err(e) => {
3785 fail(&format!("Failed to run cargo fmt: {}", e), ctx);
3786 has_failures = true;
3787 }
3788 }
3789
3790 match clippy_handle.join().expect("clippy thread panicked") {
3791 Ok(output) => {
3792 if output.status.success() {
3793 pass("Rust linting passes (cargo clippy)", ctx);
3794 } else {
3795 fail(
3796 "Rust linting failed - run `cargo clippy --all-targets --all-features`",
3797 ctx,
3798 );
3799 has_failures = true;
3800 }
3801 }
3802 Err(e) => {
3803 fail(&format!("Failed to run cargo clippy: {}", e), ctx);
3804 has_failures = true;
3805 }
3806 }
3807 }
3808
3809 let pyproject = repo_root.join("pyproject.toml");
3810 let requirements = repo_root.join("requirements.txt");
3811 if pyproject.exists() || requirements.exists() {
3812 has_tooling = true;
3813
3814 if std::process::Command::new("which")
3815 .arg("ruff")
3816 .output()
3817 .map(|o| o.status.success())
3818 .unwrap_or(false)
3819 {
3820 let root_ruff = repo_root.to_path_buf();
3821 let ruff_handle = std::thread::spawn(move || {
3822 std::process::Command::new("ruff")
3823 .args(["check", ".", "--output-format=concise"])
3824 .current_dir(&root_ruff)
3825 .output()
3826 });
3827
3828 match ruff_handle.join().expect("ruff thread panicked") {
3829 Ok(output) => {
3830 if output.status.success() {
3831 pass("Python linting passes (ruff)", ctx);
3832 } else {
3833 fail("Python linting failed - fix ruff violations", ctx);
3834 has_failures = true;
3835 }
3836 }
3837 Err(e) => {
3838 warn(&format!("ruff not available: {}", e), ctx);
3839 }
3840 }
3841 } else {
3842 skip("ruff not installed; skipping Python linting", ctx);
3843 }
3844 }
3845
3846 let shell_check = repo_root.join(".shellcheckrc");
3847 let shell_files_exist = std::fs::read_dir(repo_root)
3848 .into_iter()
3849 .flatten()
3850 .filter_map(|e| e.ok())
3851 .any(|e| {
3852 let p = e.path();
3853 p.is_file() && p.extension().map(|s| s == "sh").unwrap_or(false)
3854 });
3855
3856 if shell_check.exists() || shell_files_exist {
3857 has_tooling = true;
3858
3859 if std::process::Command::new("which")
3860 .arg("shellcheck")
3861 .output()
3862 .map(|o| o.status.success())
3863 .unwrap_or(false)
3864 {
3865 let repo_root_clone = repo_root.to_path_buf();
3866 let shellcheck_handle = std::thread::spawn(move || {
3867 std::process::Command::new("shellcheck")
3868 .args(["--enable=all"])
3869 .current_dir(repo_root_clone)
3870 .output()
3871 });
3872
3873 match shellcheck_handle
3874 .join()
3875 .expect("shellcheck thread panicked")
3876 {
3877 Ok(output) => {
3878 if output.status.success() {
3879 pass("Shell script linting passes (shellcheck)", ctx);
3880 } else {
3881 fail(
3882 "Shell script linting failed - fix shellcheck violations",
3883 ctx,
3884 );
3885 has_failures = true;
3886 }
3887 }
3888 Err(e) => {
3889 warn(&format!("shellcheck failed: {}", e), ctx);
3890 }
3891 }
3892 } else {
3893 skip("shellcheck not installed; skipping shell linting", ctx);
3894 }
3895 }
3896
3897 let yaml_check = repo_root.join(".yamllint");
3898 let yaml_files_exist = std::fs::read_dir(repo_root)
3899 .into_iter()
3900 .flatten()
3901 .filter_map(|e| e.ok())
3902 .any(|e| {
3903 let p = e.path();
3904 p.is_file()
3905 && p.extension()
3906 .map(|s| s == "yaml" || s == "yml")
3907 .unwrap_or(false)
3908 });
3909
3910 if yaml_check.exists() || yaml_files_exist {
3911 has_tooling = true;
3912
3913 if std::process::Command::new("which")
3914 .arg("yamllint")
3915 .output()
3916 .map(|o| o.status.success())
3917 .unwrap_or(false)
3918 {
3919 let repo_root_clone = repo_root.to_path_buf();
3920 let yamllint_handle = std::thread::spawn(move || {
3921 std::process::Command::new("yamllint")
3922 .arg(".")
3923 .current_dir(repo_root_clone)
3924 .output()
3925 });
3926
3927 match yamllint_handle.join().expect("yamllint thread panicked") {
3928 Ok(output) => {
3929 if output.status.success() {
3930 pass("YAML linting passes (yamllint)", ctx);
3931 } else {
3932 fail("YAML linting failed - fix yamllint violations", ctx);
3933 has_failures = true;
3934 }
3935 }
3936 Err(e) => {
3937 warn(&format!("yamllint failed: {}", e), ctx);
3938 }
3939 }
3940 } else {
3941 skip("yamllint not installed; skipping YAML linting", ctx);
3942 }
3943 }
3944
3945 let dockerfile_exists = std::fs::read_dir(repo_root)
3946 .into_iter()
3947 .flatten()
3948 .filter_map(|e| e.ok())
3949 .any(|e| {
3950 e.path()
3951 .file_name()
3952 .and_then(|n| n.to_str())
3953 .map(|n| n.to_lowercase() == "dockerfile")
3954 .unwrap_or(false)
3955 });
3956
3957 if dockerfile_exists {
3958 has_tooling = true;
3959
3960 if std::process::Command::new("which")
3961 .arg("hadolint")
3962 .output()
3963 .map(|o| o.status.success())
3964 .unwrap_or(false)
3965 {
3966 let repo_root_clone = repo_root.to_path_buf();
3967 let hadolint_handle = std::thread::spawn(move || {
3968 std::process::Command::new("hadolint")
3969 .args(["Dockerfile"])
3970 .current_dir(repo_root_clone)
3971 .output()
3972 });
3973
3974 match hadolint_handle.join().expect("hadolint thread panicked") {
3975 Ok(output) => {
3976 if output.status.success() {
3977 pass("Dockerfile linting passes (hadolint)", ctx);
3978 } else {
3979 fail("Dockerfile linting failed - fix hadolint violations", ctx);
3980 has_failures = true;
3981 }
3982 }
3983 Err(e) => {
3984 warn(&format!("hadolint failed: {}", e), ctx);
3985 }
3986 }
3987 } else {
3988 skip("hadolint not installed; skipping Dockerfile linting", ctx);
3989 }
3990 }
3991
3992 if !has_tooling {
3993 skip(
3994 "No recognized project files found; skipping tooling validation",
3995 ctx,
3996 );
3997 } else if !has_failures {
3998 pass(
3999 "All toolchain validations pass - project is ready for promotion",
4000 ctx,
4001 );
4002 }
4003
4004 Ok(())
4005}
4006
4007fn validate_state_commit_gate(
4008 ctx: &ValidationContext,
4009 repo_root: &Path,
4010) -> Result<(), error::DecapodError> {
4011 info("STATE_COMMIT Validation Gate");
4012
4013 let required_ci_job = std::env::var("DECAPOD_STATE_COMMIT_CI_JOB")
4015 .unwrap_or_else(|_| "state_commit_golden_vectors".to_string());
4016
4017 info(&format!(
4018 "STATE_COMMIT: required_ci_job = {}",
4019 required_ci_job
4020 ));
4021
4022 let golden_v1_dir = repo_root
4024 .join("tests")
4025 .join("golden")
4026 .join("state_commit")
4027 .join("v1");
4028 if !golden_v1_dir.exists() {
4029 skip(
4030 "No tests/golden/state_commit/v1 directory found; skipping STATE_COMMIT validation",
4031 ctx,
4032 );
4033 return Ok(());
4034 }
4035
4036 let required_files = ["scope_record_hash.txt", "state_commit_root.txt"];
4038 let mut has_golden = true;
4039 for file in &required_files {
4040 if !golden_v1_dir.join(file).exists() {
4041 fail(
4042 &format!("Missing golden file: tests/golden/state_commit/v1/{}", file),
4043 ctx,
4044 );
4045 has_golden = false;
4046 }
4047 }
4048
4049 if has_golden {
4052 pass("STATE_COMMIT v1 golden vectors present", ctx);
4053
4054 let expected_scope_hash =
4056 "41d7e3729b6f4512887fb3cb6f10140942b600041e0d88308b0177e06ebb4b93";
4057 let expected_root = "28591ac86e52ffac76d5fc3aceeceda5d8592708a8d7fcb75371567fdc481492";
4058
4059 if let Ok(actual_hash) =
4060 std::fs::read_to_string(golden_v1_dir.join("scope_record_hash.txt"))
4061 && actual_hash.trim() != expected_scope_hash
4062 {
4063 fail(
4064 &format!(
4065 "STATE_COMMIT v1 scope_record_hash changed! Expected {}, got {}. This requires a SPEC_VERSION bump to v2.",
4066 expected_scope_hash,
4067 actual_hash.trim()
4068 ),
4069 ctx,
4070 );
4071 }
4072
4073 if let Ok(actual_root) =
4074 std::fs::read_to_string(golden_v1_dir.join("state_commit_root.txt"))
4075 && actual_root.trim() != expected_root
4076 {
4077 fail(
4078 &format!(
4079 "STATE_COMMIT v1 state_commit_root changed! Expected {}, got {}. This requires a SPEC_VERSION bump to v2.",
4080 expected_root,
4081 actual_root.trim()
4082 ),
4083 ctx,
4084 );
4085 }
4086 }
4087
4088 Ok(())
4089}
4090
4091fn validate_obligations(store: &Store, ctx: &ValidationContext) -> Result<(), error::DecapodError> {
4092 crate::core::obligation::initialize_obligation_db(&store.root)?;
4094
4095 let obligations = crate::core::obligation::list_obligations(store)?;
4096 let mut met_count = 0;
4097 for ob in obligations {
4098 if ob.status == crate::core::obligation::ObligationStatus::Met {
4100 let (status, reason) = crate::core::obligation::verify_obligation(store, &ob.id)?;
4101 if status != crate::core::obligation::ObligationStatus::Met {
4102 fail(
4103 &format!("Obligation {} failed verification: {}", ob.id, reason),
4104 ctx,
4105 );
4106 } else {
4107 met_count += 1;
4108 }
4109 }
4110 }
4111 pass(
4112 &format!(
4113 "Obligation Graph Validation Gate ({} met nodes verified)",
4114 met_count
4115 ),
4116 ctx,
4117 );
4118 Ok(())
4119}
4120
4121fn validate_lcm_immutability(
4122 store: &Store,
4123 ctx: &ValidationContext,
4124) -> Result<(), error::DecapodError> {
4125 info("LCM Immutability Gate");
4126 let ledger_path = store.root.join(crate::core::schemas::LCM_EVENTS_NAME);
4127 if !ledger_path.exists() {
4128 pass("No LCM ledger yet; gate trivially passes", ctx);
4129 return Ok(());
4130 }
4131
4132 let failures = crate::plugins::lcm::validate_ledger_integrity(&store.root)?;
4133 if failures.is_empty() {
4134 pass("LCM ledger integrity verified", ctx);
4135 } else {
4136 for f in &failures {
4137 fail(&format!("LCM immutability: {}", f), ctx);
4138 }
4139 }
4140 Ok(())
4141}
4142
4143fn validate_lcm_rebuild_gate(
4144 store: &Store,
4145 ctx: &ValidationContext,
4146) -> Result<(), error::DecapodError> {
4147 info("LCM Rebuild Gate");
4148 let ledger_path = store.root.join(crate::core::schemas::LCM_EVENTS_NAME);
4149 if !ledger_path.exists() {
4150 pass("No LCM ledger yet; rebuild gate trivially passes", ctx);
4151 return Ok(());
4152 }
4153
4154 let result = crate::plugins::lcm::rebuild_index(store, true)?;
4155 if result.get("status").and_then(|v| v.as_str()) == Some("success") {
4156 pass("LCM index rebuild successful", ctx);
4157 } else {
4158 let errors = result
4159 .get("errors")
4160 .and_then(|v| v.as_array())
4161 .map(|a| {
4162 a.iter()
4163 .filter_map(|e| e.as_str())
4164 .collect::<Vec<_>>()
4165 .join(", ")
4166 })
4167 .unwrap_or_default();
4168 fail(&format!("LCM rebuild failed: {}", errors), ctx);
4169 }
4170 Ok(())
4171}
4172
4173fn validate_gatekeeper_gate(
4174 ctx: &ValidationContext,
4175 decapod_dir: &Path,
4176) -> Result<(), error::DecapodError> {
4177 info("Gatekeeper Safety Gate");
4178
4179 let output = std::process::Command::new("git")
4181 .args(["diff", "--cached", "--name-only"])
4182 .current_dir(decapod_dir)
4183 .output();
4184
4185 let staged_paths: Vec<PathBuf> = match output {
4186 Ok(o) if o.status.success() => String::from_utf8_lossy(&o.stdout)
4187 .lines()
4188 .filter(|l| !l.is_empty())
4189 .map(PathBuf::from)
4190 .collect(),
4191 _ => {
4192 skip(
4193 "Git not available or not in a repo; skipping gatekeeper gate",
4194 ctx,
4195 );
4196 return Ok(());
4197 }
4198 };
4199
4200 if staged_paths.is_empty() {
4201 pass("No staged files; gatekeeper gate trivially passes", ctx);
4202 return Ok(());
4203 }
4204
4205 let config = crate::core::gatekeeper::GatekeeperConfig::default();
4206 let result = crate::core::gatekeeper::run_gatekeeper(decapod_dir, &staged_paths, 0, &config)?;
4207
4208 if result.passed {
4209 pass(
4210 &format!(
4211 "Gatekeeper: {} staged file(s) passed safety checks",
4212 staged_paths.len()
4213 ),
4214 ctx,
4215 );
4216 } else {
4217 let secret_count = result
4218 .violations
4219 .iter()
4220 .filter(|v| v.kind == crate::core::gatekeeper::ViolationKind::SecretDetected)
4221 .count();
4222 let blocked_count = result
4223 .violations
4224 .iter()
4225 .filter(|v| v.kind == crate::core::gatekeeper::ViolationKind::PathBlocked)
4226 .count();
4227 let dangerous_count = result
4228 .violations
4229 .iter()
4230 .filter(|v| v.kind == crate::core::gatekeeper::ViolationKind::DangerousPattern)
4231 .count();
4232
4233 let mut parts = Vec::new();
4234 if secret_count > 0 {
4235 parts.push(format!("{} secret(s)", secret_count));
4236 }
4237 if blocked_count > 0 {
4238 parts.push(format!("{} blocked path(s)", blocked_count));
4239 }
4240 if dangerous_count > 0 {
4241 parts.push(format!("{} dangerous pattern(s)", dangerous_count));
4242 }
4243 fail(&format!("Gatekeeper violations: {}", parts.join(", ")), ctx);
4244 }
4245
4246 Ok(())
4247}
4248
4249pub fn evaluate_mandates(
4251 project_root: &Path,
4252 store: &Store,
4253 mandates: &[crate::core::docs::Mandate],
4254) -> Vec<crate::core::rpc::Blocker> {
4255 use crate::core::rpc::{Blocker, BlockerKind};
4256 let mut blockers = Vec::new();
4257
4258 for mandate in mandates {
4259 match mandate.check_tag.as_str() {
4260 "gate.worktree.no_master" => {
4261 let status = crate::core::workspace::get_workspace_status(project_root);
4262 if let Ok(s) = status
4263 && s.git.is_protected
4264 {
4265 blockers.push(Blocker {
4266 kind: BlockerKind::ProtectedBranch,
4267 message: format!("Mandate Violation: {}", mandate.fragment.title),
4268 resolve_hint: "Run `decapod workspace ensure` to create a working branch."
4269 .to_string(),
4270 });
4271 }
4272 }
4273 "gate.worktree.isolated" => {
4274 let status = crate::core::workspace::get_workspace_status(project_root);
4275 if let Ok(s) = status
4276 && !s.git.in_worktree
4277 {
4278 blockers.push(Blocker {
4279 kind: BlockerKind::WorkspaceRequired,
4280 message: format!("Mandate Violation: {}", mandate.fragment.title),
4281 resolve_hint:
4282 "Run `decapod workspace ensure` to create an isolated git worktree."
4283 .to_string(),
4284 });
4285 }
4286 }
4287 "gate.session.active" => {
4288 }
4291 "gate.todo.active_task" => {
4292 let agent_id =
4293 std::env::var("DECAPOD_AGENT_ID").unwrap_or_else(|_| "unknown".to_string());
4294 if agent_id != "unknown" {
4295 let mut active_tasks = crate::core::todo::list_tasks(
4296 &store.root,
4297 Some("open".to_string()),
4298 None,
4299 None,
4300 None,
4301 None,
4302 );
4303 if let Ok(ref mut tasks) = active_tasks {
4304 let pre_filter_count = tasks.len();
4305 let debug_info = if !tasks.is_empty() {
4306 format!(
4307 "First task assigned to: '{}', My ID: '{}'",
4308 tasks[0].assigned_to, agent_id
4309 )
4310 } else {
4311 format!(
4312 "No tasks found. My ID: '{}', Root: '{}'",
4313 agent_id,
4314 project_root.display()
4315 )
4316 };
4317
4318 tasks.retain(|t| t.assigned_to == agent_id);
4319 if tasks.is_empty() {
4320 blockers.push(Blocker {
4321 kind: BlockerKind::MissingProof,
4322 message: format!("Mandate Violation: {} (Pre-filter: {}, {})", mandate.fragment.title, pre_filter_count, debug_info),
4323 resolve_hint: "You MUST create and claim a `todo` before starting work. Run `decapod todo add \"...\"` then `decapod todo claim --id <id>`.".to_string(),
4324 });
4325 }
4326 }
4327 }
4328 }
4329 "gate.validation.pass" => {
4330 }
4332 _ => {}
4333 }
4334 }
4335
4336 blockers
4337}
4338
4339fn validate_coplayer_policy_tightening(
4345 ctx: &ValidationContext,
4346 _decapod_dir: &Path,
4347) -> Result<(), error::DecapodError> {
4348 info("Co-Player Policy Tightening Gate");
4349
4350 use crate::core::coplayer::{CoPlayerSnapshot, derive_policy};
4351
4352 let profiles = vec![
4355 ("unknown", 0.0, 0),
4356 ("high", 0.5, 20),
4357 ("medium", 0.8, 20),
4358 ("low", 0.95, 100),
4359 ];
4360
4361 let mut prev_policy = None;
4362 let mut all_valid = true;
4363
4364 for (risk, reliability, total) in &profiles {
4365 let snap = CoPlayerSnapshot {
4366 agent_id: format!("gate-test-{}", risk),
4367 reliability_score: *reliability,
4368 total_ops: *total,
4369 successful_ops: (*total as f64 * reliability) as usize,
4370 failed_ops: *total - (*total as f64 * reliability) as usize,
4371 last_active: "gate-test".to_string(),
4372 common_ops: vec![],
4373 risk_profile: risk.to_string(),
4374 };
4375
4376 let policy = derive_policy(&snap);
4377
4378 if !policy.require_validation {
4380 fail(
4381 &format!(
4382 "Co-player policy for '{}' does not require validation (MUST always be true)",
4383 risk
4384 ),
4385 ctx,
4386 );
4387 all_valid = false;
4388 }
4389
4390 if let Some(prev) = &prev_policy {
4392 let prev: &crate::core::coplayer::CoPlayerPolicy = prev;
4393 if policy.max_diff_lines < prev.max_diff_lines {
4395 }
4399 }
4400
4401 prev_policy = Some(policy);
4402 }
4403
4404 if all_valid {
4405 pass("Co-player policies only tighten constraints", ctx);
4406 }
4407
4408 Ok(())
4409}
4410
4411pub fn run_validation(
4412 store: &Store,
4413 decapod_dir: &Path,
4414 _home_dir: &Path,
4415 _verbose: bool,
4416) -> Result<ValidationReport, error::DecapodError> {
4417 let total_start = Instant::now();
4418
4419 let ctx = ValidationContext::new();
4420
4421 let broker_events_path = store.root.join("broker.events.jsonl");
4423 let broker_content: Option<String> = if broker_events_path.exists() {
4424 fs::read_to_string(&broker_events_path).ok()
4425 } else {
4426 None
4427 };
4428
4429 match store.kind {
4431 StoreKind::User => {
4432 let start = Instant::now();
4433 validate_user_store_blank_slate(&ctx)?;
4434 let _ = start;
4435 }
4436 StoreKind::Repo => {
4437 let start = Instant::now();
4438 validate_repo_store_dogfood(store, &ctx, decapod_dir)?;
4439 let _ = start;
4440 }
4441 }
4442
4443 let timings: Mutex<Vec<(&str, Duration)>> = Mutex::new(Vec::new());
4445 {
4446 let _s = ();
4447 let ctx = &ctx;
4448 let timings = &timings;
4449 let broker = broker_content.as_deref();
4450
4451 gate!(
4452 s,
4453 timings,
4454 ctx,
4455 "validate_repo_map",
4456 validate_repo_map(ctx, decapod_dir)
4457 );
4458 gate!(
4459 s,
4460 timings,
4461 ctx,
4462 "validate_no_legacy_namespaces",
4463 validate_no_legacy_namespaces(ctx, decapod_dir)
4464 );
4465 gate!(
4466 s,
4467 timings,
4468 ctx,
4469 "validate_embedded_self_contained",
4470 validate_embedded_self_contained(ctx, decapod_dir)
4471 );
4472 gate!(
4473 s,
4474 timings,
4475 ctx,
4476 "validate_docs_templates_bucket",
4477 validate_docs_templates_bucket(ctx, decapod_dir)
4478 );
4479 gate!(
4480 s,
4481 timings,
4482 ctx,
4483 "validate_entrypoint_invariants",
4484 validate_entrypoint_invariants(ctx, decapod_dir)
4485 );
4486 gate!(
4487 s,
4488 timings,
4489 ctx,
4490 "validate_interface_contract_bootstrap",
4491 validate_interface_contract_bootstrap(ctx, decapod_dir)
4492 );
4493 gate!(
4494 s,
4495 timings,
4496 ctx,
4497 "validate_health_purity",
4498 validate_health_purity(ctx, decapod_dir)
4499 );
4500 gate!(
4501 s,
4502 timings,
4503 ctx,
4504 "validate_project_scoped_state",
4505 validate_project_scoped_state(store, ctx, decapod_dir)
4506 );
4507 gate!(
4508 s,
4509 timings,
4510 ctx,
4511 "validate_generated_artifact_whitelist",
4512 validate_generated_artifact_whitelist(store, ctx, decapod_dir)
4513 );
4514 gate!(
4515 s,
4516 timings,
4517 ctx,
4518 "validate_project_config_toml",
4519 validate_project_config_toml(ctx, decapod_dir)
4520 );
4521 gate!(
4522 s,
4523 timings,
4524 ctx,
4525 "validate_project_specs_docs",
4526 validate_project_specs_docs(ctx, decapod_dir)
4527 );
4528 gate!(
4529 s,
4530 timings,
4531 ctx,
4532 "validate_spec_drift",
4533 validate_spec_drift(ctx, decapod_dir)
4534 );
4535 gate!(
4536 s,
4537 timings,
4538 ctx,
4539 "validate_machine_contract",
4540 validate_machine_contract(ctx, decapod_dir)
4541 );
4542 gate!(
4543 s,
4544 timings,
4545 ctx,
4546 "validate_workunit_manifests_if_present",
4547 validate_workunit_manifests_if_present(ctx, decapod_dir)
4548 );
4549 gate!(
4550 s,
4551 timings,
4552 ctx,
4553 "validate_context_capsule_policy_contract",
4554 validate_context_capsule_policy_contract(ctx, decapod_dir)
4555 );
4556 gate!(
4557 s,
4558 timings,
4559 ctx,
4560 "validate_context_capsules_if_present",
4561 validate_context_capsules_if_present(ctx, decapod_dir)
4562 );
4563 gate!(
4564 s,
4565 timings,
4566 ctx,
4567 "validate_knowledge_promotions_if_present",
4568 validate_knowledge_promotions_if_present(ctx, decapod_dir)
4569 );
4570 gate!(
4571 s,
4572 timings,
4573 ctx,
4574 "validate_skill_cards_if_present",
4575 validate_skill_cards_if_present(ctx, decapod_dir)
4576 );
4577 gate!(
4578 s,
4579 timings,
4580 ctx,
4581 "validate_skill_resolutions_if_present",
4582 validate_skill_resolutions_if_present(ctx, decapod_dir)
4583 );
4584 gate!(
4585 s,
4586 timings,
4587 ctx,
4588 "validate_internalization_artifacts_if_present",
4589 validate_internalization_artifacts_if_present(ctx, decapod_dir)
4590 );
4591 gate!(
4592 s,
4593 timings,
4594 ctx,
4595 "validate_eval_gate_if_required",
4596 validate_eval_gate_if_required(store, ctx)
4597 );
4598 gate!(
4599 s,
4600 timings,
4601 ctx,
4602 "validate_schema_determinism",
4603 validate_schema_determinism(ctx, decapod_dir)
4604 );
4605 gate!(
4606 s,
4607 timings,
4608 ctx,
4609 "validate_database_schema_versions",
4610 validate_database_schema_versions(store, ctx)
4611 );
4612 gate!(
4613 s,
4614 timings,
4615 ctx,
4616 "validate_health_cache_integrity",
4617 validate_health_cache_integrity(store, ctx)
4618 );
4619 gate!(
4620 s,
4621 timings,
4622 ctx,
4623 "validate_risk_map",
4624 validate_risk_map(store, ctx)
4625 );
4626 gate!(
4627 s,
4628 timings,
4629 ctx,
4630 "validate_risk_map_violations",
4631 validate_risk_map_violations(store, ctx, broker)
4632 );
4633 gate!(
4634 s,
4635 timings,
4636 ctx,
4637 "validate_policy_integrity",
4638 validate_policy_integrity(store, ctx, broker)
4639 );
4640 gate!(
4641 s,
4642 timings,
4643 ctx,
4644 "validate_knowledge_integrity",
4645 validate_knowledge_integrity(store, ctx, broker)
4646 );
4647 gate!(
4648 s,
4649 timings,
4650 ctx,
4651 "validate_lineage_hard_gate",
4652 validate_lineage_hard_gate(store, ctx)
4653 );
4654 gate!(
4655 s,
4656 timings,
4657 ctx,
4658 "validate_repomap_determinism",
4659 validate_repomap_determinism(ctx, decapod_dir)
4660 );
4661 gate!(
4662 s,
4663 timings,
4664 ctx,
4665 "validate_watcher_audit",
4666 validate_watcher_audit(store, ctx)
4667 );
4668 gate!(
4669 s,
4670 timings,
4671 ctx,
4672 "validate_watcher_purity",
4673 validate_watcher_purity(store, ctx, broker)
4674 );
4675 gate!(
4676 s,
4677 timings,
4678 ctx,
4679 "validate_archive_integrity",
4680 validate_archive_integrity(store, ctx)
4681 );
4682 gate!(
4683 s,
4684 timings,
4685 ctx,
4686 "validate_control_plane_contract",
4687 validate_control_plane_contract(store, ctx)
4688 );
4689 gate!(
4690 s,
4691 timings,
4692 ctx,
4693 "validate_canon_mutation",
4694 validate_canon_mutation(store, ctx, broker)
4695 );
4696 gate!(
4697 s,
4698 timings,
4699 ctx,
4700 "validate_heartbeat_invocation_gate",
4701 validate_heartbeat_invocation_gate(ctx, decapod_dir)
4702 );
4703 gate!(
4704 s,
4705 timings,
4706 ctx,
4707 "validate_markdown_primitives_roundtrip_gate",
4708 validate_markdown_primitives_roundtrip_gate(store, ctx)
4709 );
4710 gate!(
4711 s,
4712 timings,
4713 ctx,
4714 "validate_federation_gates",
4715 validate_federation_gates(store, ctx)
4716 );
4717 gate!(
4718 s,
4719 timings,
4720 ctx,
4721 "validate_git_workspace_context",
4722 validate_git_workspace_context(ctx, decapod_dir)
4723 );
4724 gate!(
4725 s,
4726 timings,
4727 ctx,
4728 "validate_git_protected_branch",
4729 validate_git_protected_branch(ctx, decapod_dir)
4730 );
4731 gate!(
4732 s,
4733 timings,
4734 ctx,
4735 "validate_tooling_gate",
4736 validate_tooling_gate(ctx, decapod_dir)
4737 );
4738 gate!(
4739 s,
4740 timings,
4741 ctx,
4742 "validate_state_commit_gate",
4743 validate_state_commit_gate(ctx, decapod_dir)
4744 );
4745 gate!(
4746 s,
4747 timings,
4748 ctx,
4749 "validate_obligations",
4750 validate_obligations(store, ctx)
4751 );
4752
4753 gate!(
4754 s,
4755 timings,
4756 ctx,
4757 "validate_gatekeeper_gate",
4758 validate_gatekeeper_gate(ctx, decapod_dir)
4759 );
4760 gate!(
4761 s,
4762 timings,
4763 ctx,
4764 "validate_coplayer_policy_tightening",
4765 validate_coplayer_policy_tightening(ctx, decapod_dir)
4766 );
4767 gate!(
4768 s,
4769 timings,
4770 ctx,
4771 "validate_lcm_immutability",
4772 validate_lcm_immutability(store, ctx)
4773 );
4774 gate!(
4775 s,
4776 timings,
4777 ctx,
4778 "validate_lcm_rebuild_gate",
4779 validate_lcm_rebuild_gate(store, ctx)
4780 );
4781 gate!(
4782 s,
4783 timings,
4784 ctx,
4785 "validate_plan_governed_execution_gate",
4786 validate_plan_governed_execution_gate(store, ctx, decapod_dir)
4787 );
4788 }
4789
4790 let elapsed = total_start.elapsed();
4791 let pass_count = ctx.pass_count.load(Ordering::Relaxed);
4792 let fail_count = ctx.fail_count.load(Ordering::Relaxed);
4793 let warn_count = ctx.warn_count.load(Ordering::Relaxed);
4794 let fails = ctx.fails.lock().unwrap().clone();
4795 let warns = ctx.warns.lock().unwrap().clone();
4796 let fail_total = (fails.len() as u32).max(fail_count);
4797 let warn_total = (warns.len() as u32).max(warn_count);
4798 let mut gate_timings = timings.into_inner().unwrap();
4799 gate_timings.sort_by(|a, b| b.1.cmp(&a.1));
4800
4801 Ok(ValidationReport {
4802 status: if fail_total > 0 { "fail" } else { "ok" }.to_string(),
4803 elapsed_ms: elapsed.as_millis() as u64,
4804 pass_count,
4805 fail_count: fail_total,
4806 warn_count: warn_total,
4807 failures: fails,
4808 warnings: warns,
4809 gate_timings: gate_timings
4810 .into_iter()
4811 .map(|(name, elapsed)| ValidationGateTiming {
4812 name: name.to_string(),
4813 elapsed_ms: elapsed.as_millis() as u64,
4814 })
4815 .collect(),
4816 })
4817}
4818
/// Renders a [`ValidationReport`] to stdout in the tool's ANSI-colored style.
///
/// Prints, in order: a `validate` banner with the INTENT.md spec version, the
/// gate header, optional per-gate timings (`verbose` only), the pass/fail/warn
/// summary with total wall time, truncated failure/warning previews, and a
/// final success line when nothing failed. Output-only; never mutates state.
pub fn render_validation_report(report: &ValidationReport, verbose: bool) {
    use crate::core::ansi::AnsiExt;

    // Spec version is parsed from the embedded INTENT.md; "unknown" when the
    // asset is missing or carries no version marker.
    let intent_content = crate::core::assets::get_doc("specs/INTENT.md").unwrap_or_default();
    let intent_version =
        extract_md_version(&intent_content).unwrap_or_else(|| "unknown".to_string());

    println!(
        "{} {}",
        "â–¶".bright_green().bold(),
        "validate".bright_cyan().bold()
    );
    println!(
        " {} intent_version={}",
        "spec".bright_cyan(),
        intent_version.bright_white()
    );
    println!(
        " {} {}",
        "gate".bright_magenta().bold(),
        "Four Invariants Gate".bright_white()
    );

    // Per-gate timings (already sorted slowest-first by the producer).
    if verbose {
        println!(
            " {} {}",
            "gates".bright_magenta().bold(),
            "timings".bright_white()
        );
        for gate in &report.gate_timings {
            println!(
                " {} [{}] {}ms",
                "✓".bright_green(),
                gate.name.bright_cyan(),
                gate.elapsed_ms
            );
        }
    }

    println!(
        " {} pass={} fail={} warn={} ({:.2}s)",
        "summary".bright_cyan().bold(),
        report.pass_count.to_string().bright_green(),
        report.fail_count.to_string().bright_red(),
        report.warn_count.to_string().bright_yellow(),
        report.elapsed_ms as f64 / 1000.0
    );

    // Previews are truncated (3 messages, 120 chars) to keep output compact.
    if !report.failures.is_empty() {
        println!(
            " {} {}",
            "failures".bright_red().bold(),
            output::preview_messages(&report.failures, 3, 120)
        );
    }

    if !report.warnings.is_empty() {
        println!(
            " {} {}",
            "warnings".bright_yellow().bold(),
            output::preview_messages(&report.warnings, 3, 120)
        );
    }

    if report.fail_count == 0 {
        println!(
            "{} {}",
            "✓".bright_green().bold(),
            "validation passed".bright_green().bold()
        );
    }
}