// hivemind/core/registry.rs
//! Project registry for managing projects via events.
//!
//! The registry derives project state from events and provides
//! operations that emit new events.

use std::collections::{HashMap, HashSet};
use std::env;
use std::fmt::Write as _;
use std::fs;
use std::fs::OpenOptions;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::sync::Arc;
use std::time::{Duration, Instant};

use chrono::Utc;
use fs2::FileExt;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

use crate::adapters::claude_code::{ClaudeCodeAdapter, ClaudeCodeConfig};
use crate::adapters::codex::{CodexAdapter, CodexConfig};
use crate::adapters::kilo::{KiloAdapter, KiloConfig};
use crate::adapters::opencode::OpenCodeConfig;
use crate::adapters::runtime::{
    AttemptSummary, ExecutionInput, InteractiveAdapterEvent, InteractiveExecutionResult,
    RuntimeAdapter, RuntimeError,
};
use crate::adapters::{runtime_descriptors, SUPPORTED_ADAPTERS};
use crate::core::diff::{unified_diff, Baseline, ChangeType, Diff, FileChange};
use crate::core::enforcement::{ScopeEnforcer, VerificationResult};
use crate::core::error::{ErrorCategory, HivemindError, Result};
use crate::core::events::{CorrelationIds, Event, EventPayload, RuntimeOutputStream};
use crate::core::flow::{FlowState, RetryMode, TaskExecState, TaskFlow};
use crate::core::graph::{GraphState, GraphTask, RetryPolicy, SuccessCriteria, TaskGraph};
use crate::core::runtime_event_projection::{ProjectedRuntimeObservation, RuntimeEventProjector};
use crate::core::scope::{check_compatibility, RepoAccessMode, Scope, ScopeCompatibility};
use crate::core::state::{
    AppState, AttemptCheckpointState, AttemptState, Project, ProjectRuntimeConfig, Task, TaskState,
};
use crate::core::worktree::{WorktreeConfig, WorktreeError, WorktreeManager, WorktreeStatus};
use crate::storage::event_store::{EventFilter, EventStore, IndexedEventStore};
44/// Configuration for the registry.
45#[derive(Debug, Clone)]
46pub struct RegistryConfig {
47    /// Base directory for hivemind data.
48    pub data_dir: PathBuf,
49}
50
51impl RegistryConfig {
52    /// Creates a new config with default data directory.
53    #[must_use]
54    pub fn default_dir() -> Self {
55        if let Ok(data_dir) = env::var("HIVEMIND_DATA_DIR") {
56            return Self {
57                data_dir: PathBuf::from(data_dir),
58            };
59        }
60
61        let data_dir =
62            dirs::home_dir().map_or_else(|| PathBuf::from(".hivemind"), |h| h.join(".hivemind"));
63        Self { data_dir }
64    }
65
66    /// Creates a config with custom data directory.
67    #[must_use]
68    pub fn with_dir(data_dir: PathBuf) -> Self {
69        Self { data_dir }
70    }
71
72    /// Returns the path to the global events file.
73    #[must_use]
74    pub fn events_path(&self) -> PathBuf {
75        self.data_dir.join("events.jsonl")
76    }
77}
78
/// The project registry manages projects via event sourcing.
///
/// State is derived by replaying events from the underlying store; mutating
/// operations append new events rather than updating state in place.
pub struct Registry {
    // Append-only event store all reads and writes go through.
    store: Arc<dyn EventStore>,
    // Filesystem layout configuration (data directory, lock files live under it).
    config: RegistryConfig,
}
84
/// One row in a listing of known runtime adapters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RuntimeListEntry {
    // Adapter name (presumably one of SUPPORTED_ADAPTERS — confirm at call site).
    pub adapter_name: String,
    // Default binary the adapter would invoke.
    pub default_binary: String,
    // Whether the adapter was detected as available on this machine.
    pub available: bool,
    // Whether the adapter is OpenCode-compatible.
    pub opencode_compatible: bool,
}
92
/// Health-check result for a single runtime adapter binary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RuntimeHealthStatus {
    // Name of the adapter that was checked.
    pub adapter_name: String,
    // Path of the binary that was probed.
    pub binary_path: String,
    // Whether the health check passed.
    pub healthy: bool,
    // Optional target of the check; defaults to None when missing on deserialization.
    #[serde(default)]
    pub target: Option<String>,
    // Optional extra diagnostic detail; defaults to None when missing on deserialization.
    #[serde(default)]
    pub details: Option<String>,
}
103
/// A computed diff stored together with its rendered text form.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DiffArtifact {
    // Structured diff (per-file changes; see `Diff`).
    diff: Diff,
    // Unified-diff text corresponding to `diff` (presumably produced by
    // the `unified_diff` helper imported above — confirm at the write site).
    unified: String,
}
109
/// Artifacts gathered when an attempt completes, passed as a bundle to
/// completion handling.
struct CompletionArtifacts<'a> {
    // Id of the baseline the diff was computed against — assumption from
    // naming; confirm at the construction site.
    baseline_id: Uuid,
    // Borrowed diff artifact (structured diff plus unified text).
    artifact: &'a DiffArtifact,
    // SHA of the checkpoint commit, when one was created.
    checkpoint_commit_sha: Option<String>,
}
115
/// Inputs for formatting and creating a checkpoint commit; consumed by
/// `format_checkpoint_commit_message` and `create_checkpoint_commit`.
struct CheckpointCommitSpec<'a> {
    flow_id: Uuid,
    task_id: Uuid,
    attempt_id: Uuid,
    // Checkpoint identifier; becomes the commit subject line.
    checkpoint_id: &'a str,
    // 1-based position of this checkpoint (rendered as "Checkpoint: order/total").
    order: u32,
    // Total number of checkpoints for the task.
    total: u32,
    // Optional free-form summary appended to the commit message body.
    summary: Option<&'a str>,
}
125
/// Outcome of validating a task graph.
#[derive(Debug, Clone, Serialize)]
pub struct GraphValidationResult {
    // Graph that was validated.
    pub graph_id: Uuid,
    // True when validation found no issues.
    pub valid: bool,
    // Human-readable descriptions of the problems found.
    pub issues: Vec<String>,
}
132
/// Result returned after marking a checkpoint as completed.
///
/// NOTE(review): `#[serde(default)]` only affects deserialization, but this
/// struct derives `Serialize` only, so the attribute is currently inert —
/// confirm whether `Deserialize` was intended here.
#[derive(Debug, Clone, Serialize)]
pub struct CheckpointCompletionResult {
    pub flow_id: Uuid,
    pub task_id: Uuid,
    pub attempt_id: Uuid,
    // Checkpoint that was just completed.
    pub checkpoint_id: String,
    // 1-based position of the completed checkpoint.
    pub order: u32,
    // Total number of checkpoints for the task.
    pub total: u32,
    // Next checkpoint to work on, if any remain.
    #[serde(default)]
    pub next_checkpoint_id: Option<String>,
    // True when every checkpoint for the task is now complete.
    pub all_completed: bool,
    // SHA of the git commit created for this checkpoint.
    pub commit_hash: String,
}
146
/// The concrete runtime adapter chosen for an execution, wrapped behind a
/// single dispatch type (see the delegating methods in its impl block).
enum SelectedRuntimeAdapter {
    OpenCode(crate::adapters::opencode::OpenCodeAdapter),
    Codex(CodexAdapter),
    ClaudeCode(ClaudeCodeAdapter),
    Kilo(KiloAdapter),
}
153
154impl SelectedRuntimeAdapter {
155    fn initialize(&mut self) -> std::result::Result<(), RuntimeError> {
156        match self {
157            Self::OpenCode(a) => a.initialize(),
158            Self::Codex(a) => a.initialize(),
159            Self::ClaudeCode(a) => a.initialize(),
160            Self::Kilo(a) => a.initialize(),
161        }
162    }
163
164    fn prepare(
165        &mut self,
166        task_id: Uuid,
167        worktree: &std::path::Path,
168    ) -> std::result::Result<(), RuntimeError> {
169        match self {
170            Self::OpenCode(a) => a.prepare(task_id, worktree),
171            Self::Codex(a) => a.prepare(task_id, worktree),
172            Self::ClaudeCode(a) => a.prepare(task_id, worktree),
173            Self::Kilo(a) => a.prepare(task_id, worktree),
174        }
175    }
176
177    fn execute(
178        &mut self,
179        input: ExecutionInput,
180    ) -> std::result::Result<crate::adapters::runtime::ExecutionReport, RuntimeError> {
181        match self {
182            Self::OpenCode(a) => a.execute(input),
183            Self::Codex(a) => a.execute(input),
184            Self::ClaudeCode(a) => a.execute(input),
185            Self::Kilo(a) => a.execute(input),
186        }
187    }
188
189    fn execute_interactive<F>(
190        &mut self,
191        input: &ExecutionInput,
192        on_event: F,
193    ) -> std::result::Result<InteractiveExecutionResult, RuntimeError>
194    where
195        F: FnMut(InteractiveAdapterEvent) -> std::result::Result<(), String>,
196    {
197        match self {
198            Self::OpenCode(a) => a.execute_interactive(input, on_event),
199            Self::Codex(a) => a.execute_interactive(input, on_event),
200            Self::ClaudeCode(a) => a.execute_interactive(input, on_event),
201            Self::Kilo(a) => a.execute_interactive(input, on_event),
202        }
203    }
204}
205
206impl Registry {
207    fn format_checkpoint_commit_message(spec: &CheckpointCommitSpec<'_>) -> String {
208        let mut message = String::new();
209        let _ = writeln!(message, "hivemind(checkpoint): {}", spec.checkpoint_id);
210        let _ = writeln!(message);
211        let _ = writeln!(message, "Flow: {}", spec.flow_id);
212        let _ = writeln!(message, "Task: {}", spec.task_id);
213        let _ = writeln!(message, "Attempt: {}", spec.attempt_id);
214        let _ = writeln!(message, "Checkpoint: {}/{}", spec.order, spec.total);
215        let _ = writeln!(message, "Schema: checkpoint-v1");
216        if let Some(summary) = spec.summary {
217            let _ = writeln!(message);
218            let _ = writeln!(message, "Summary:");
219            let _ = writeln!(message, "{summary}");
220        }
221        let _ = writeln!(message);
222        let _ = writeln!(message, "---");
223        let _ = writeln!(
224            message,
225            "Generated-by: Hivemind {}",
226            env!("CARGO_PKG_VERSION")
227        );
228        message
229    }
230
231    fn create_checkpoint_commit(
232        worktree_path: &Path,
233        spec: &CheckpointCommitSpec<'_>,
234        origin: &'static str,
235    ) -> Result<String> {
236        let add = std::process::Command::new("git")
237            .current_dir(worktree_path)
238            .args(["add", "-A"])
239            .output()
240            .map_err(|e| HivemindError::git("git_add_failed", e.to_string(), origin))?;
241        if !add.status.success() {
242            return Err(HivemindError::git(
243                "git_add_failed",
244                String::from_utf8_lossy(&add.stderr).to_string(),
245                origin,
246            ));
247        }
248
249        let message = Self::format_checkpoint_commit_message(spec);
250        let commit = std::process::Command::new("git")
251            .current_dir(worktree_path)
252            .args([
253                "-c",
254                "user.name=Hivemind",
255                "-c",
256                "user.email=hivemind@example.com",
257                "commit",
258                "--allow-empty",
259                "-m",
260                &message,
261            ])
262            .output()
263            .map_err(|e| HivemindError::git("git_commit_failed", e.to_string(), origin))?;
264        if !commit.status.success() {
265            return Err(HivemindError::git(
266                "git_commit_failed",
267                String::from_utf8_lossy(&commit.stderr).to_string(),
268                origin,
269            ));
270        }
271
272        let head = std::process::Command::new("git")
273            .current_dir(worktree_path)
274            .args(["rev-parse", "HEAD"])
275            .output()
276            .map_err(|e| HivemindError::git("git_rev_parse_failed", e.to_string(), origin))?;
277        if !head.status.success() {
278            return Err(HivemindError::git(
279                "git_rev_parse_failed",
280                String::from_utf8_lossy(&head.stderr).to_string(),
281                origin,
282            ));
283        }
284        Ok(String::from_utf8_lossy(&head.stdout).trim().to_string())
285    }
286
287    fn checkout_and_clean_worktree(
288        worktree_path: &Path,
289        branch: &str,
290        base: &str,
291        origin: &'static str,
292    ) -> Result<()> {
293        let checkout = std::process::Command::new("git")
294            .current_dir(worktree_path)
295            .args(["checkout", "-f", "-B", branch, base])
296            .output();
297        if !checkout.as_ref().is_ok_and(|o| o.status.success()) {
298            return Err(HivemindError::git(
299                "git_checkout_failed",
300                checkout.map_or_else(
301                    |e| e.to_string(),
302                    |o| String::from_utf8_lossy(&o.stderr).to_string(),
303                ),
304                origin,
305            ));
306        }
307
308        let clean = std::process::Command::new("git")
309            .current_dir(worktree_path)
310            .args(["clean", "-fdx"])
311            .output();
312        if !clean.as_ref().is_ok_and(|o| o.status.success()) {
313            return Err(HivemindError::git(
314                "git_clean_failed",
315                clean.map_or_else(
316                    |e| e.to_string(),
317                    |o| String::from_utf8_lossy(&o.stderr).to_string(),
318                ),
319                origin,
320            ));
321        }
322
323        Ok(())
324    }
325
326    fn append_event(&self, event: Event, origin: &'static str) -> Result<()> {
327        self.store
328            .append(event)
329            .map(|_| ())
330            .map_err(|e| HivemindError::system("event_append_failed", e.to_string(), origin))
331    }
332
333    fn projected_runtime_event_payload(
334        attempt_id: Uuid,
335        observation: ProjectedRuntimeObservation,
336    ) -> EventPayload {
337        match observation {
338            ProjectedRuntimeObservation::CommandObserved { stream, command } => {
339                EventPayload::RuntimeCommandObserved {
340                    attempt_id,
341                    stream,
342                    command,
343                }
344            }
345            ProjectedRuntimeObservation::ToolCallObserved {
346                stream,
347                tool_name,
348                details,
349            } => EventPayload::RuntimeToolCallObserved {
350                attempt_id,
351                stream,
352                tool_name,
353                details,
354            },
355            ProjectedRuntimeObservation::TodoSnapshotUpdated { stream, items } => {
356                EventPayload::RuntimeTodoSnapshotUpdated {
357                    attempt_id,
358                    stream,
359                    items,
360                }
361            }
362            ProjectedRuntimeObservation::NarrativeOutputObserved { stream, content } => {
363                EventPayload::RuntimeNarrativeOutputObserved {
364                    attempt_id,
365                    stream,
366                    content,
367                }
368            }
369        }
370    }
371
372    fn append_projected_runtime_observations(
373        &self,
374        attempt_id: Uuid,
375        correlation: &CorrelationIds,
376        observations: Vec<ProjectedRuntimeObservation>,
377        origin: &'static str,
378    ) -> Result<()> {
379        for observation in observations {
380            self.append_event(
381                Event::new(
382                    Self::projected_runtime_event_payload(attempt_id, observation),
383                    correlation.clone(),
384                ),
385                origin,
386            )?;
387        }
388        Ok(())
389    }
390
391    fn acquire_flow_integration_lock(
392        &self,
393        flow_id: Uuid,
394        origin: &'static str,
395    ) -> Result<std::fs::File> {
396        let lock_dir = self.config.data_dir.join("locks");
397        fs::create_dir_all(&lock_dir)
398            .map_err(|e| HivemindError::system("create_dir_failed", e.to_string(), origin))?;
399
400        let lock_path = lock_dir.join(format!("flow_integration_{flow_id}.lock"));
401        let file = OpenOptions::new()
402            .create(true)
403            .read(true)
404            .write(true)
405            .truncate(false)
406            .open(&lock_path)
407            .map_err(|e| HivemindError::system("lock_open_failed", e.to_string(), origin))?;
408
409        file.try_lock_exclusive().map_err(|e| {
410            HivemindError::user(
411                "integration_in_progress",
412                "Another integration operation is already in progress for this flow",
413                origin,
414            )
415            .with_context("flow_id", flow_id.to_string())
416            .with_context("lock_error", e.to_string())
417        })?;
418
419        Ok(file)
420    }
421
422    fn resolve_git_ref(repo_path: &Path, reference: &str) -> Option<String> {
423        let output = std::process::Command::new("git")
424            .current_dir(repo_path)
425            .args(["rev-parse", reference])
426            .output()
427            .ok()?;
428        if !output.status.success() {
429            return None;
430        }
431
432        let sha = String::from_utf8_lossy(&output.stdout).trim().to_string();
433        if sha.is_empty() {
434            return None;
435        }
436        Some(sha)
437    }
438
    /// Best-effort resolution of the commit a task's execution is frozen at.
    ///
    /// Tries, in order:
    /// 1. the per-task execution branch `refs/heads/exec/<flow_id>/<task_id>`
    ///    in the flow's main repository;
    /// 2. the worktree's recorded `head_commit` from inspection;
    /// 3. resolving `HEAD` directly inside the worktree path.
    ///
    /// Returns `None` when no worktree manager, worktree, or commit can be
    /// found at any step.
    fn resolve_task_frozen_commit_sha(
        flow: &TaskFlow,
        state: &AppState,
        task_id: Uuid,
    ) -> Option<String> {
        let manager = Self::worktree_manager_for_flow(flow, state).ok()?;
        let branch_ref = format!("refs/heads/exec/{}/{task_id}", flow.id);
        Self::resolve_git_ref(manager.repo_path(), &branch_ref).or_else(|| {
            // Branch ref missing: fall back to inspecting the worktree itself.
            let status = manager.inspect(flow.id, task_id).ok()?;
            if !status.is_worktree {
                return None;
            }
            status
                .head_commit
                .or_else(|| Self::resolve_git_ref(&status.path, "HEAD"))
        })
    }
456
457    pub fn list_graphs(&self, project_id_or_name: Option<&str>) -> Result<Vec<TaskGraph>> {
458        let project_filter = match project_id_or_name {
459            Some(id_or_name) => Some(self.get_project(id_or_name)?.id),
460            None => None,
461        };
462
463        let state = self.state()?;
464        let mut graphs: Vec<_> = state
465            .graphs
466            .into_values()
467            .filter(|g| project_filter.is_none_or(|pid| g.project_id == pid))
468            .collect();
469        graphs.sort_by(|a, b| a.updated_at.cmp(&b.updated_at));
470        graphs.reverse();
471        Ok(graphs)
472    }
473
474    fn emit_task_execution_frozen(
475        &self,
476        flow: &TaskFlow,
477        task_id: Uuid,
478        commit_sha: Option<String>,
479        origin: &'static str,
480    ) -> Result<()> {
481        self.append_event(
482            Event::new(
483                EventPayload::TaskExecutionFrozen {
484                    flow_id: flow.id,
485                    task_id,
486                    commit_sha,
487                },
488                CorrelationIds::for_graph_flow_task(
489                    flow.project_id,
490                    flow.graph_id,
491                    flow.id,
492                    task_id,
493                ),
494            ),
495            origin,
496        )
497    }
498
499    fn emit_integration_lock_acquired(
500        &self,
501        flow: &TaskFlow,
502        operation: &str,
503        origin: &'static str,
504    ) -> Result<()> {
505        self.append_event(
506            Event::new(
507                EventPayload::FlowIntegrationLockAcquired {
508                    flow_id: flow.id,
509                    operation: operation.to_string(),
510                },
511                CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
512            ),
513            origin,
514        )
515    }
516
517    fn emit_merge_conflict(
518        &self,
519        flow: &TaskFlow,
520        task_id: Option<Uuid>,
521        details: String,
522        origin: &'static str,
523    ) -> Result<()> {
524        self.append_event(
525            Event::new(
526                EventPayload::MergeConflictDetected {
527                    flow_id: flow.id,
528                    task_id,
529                    details,
530                },
531                CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
532            ),
533            origin,
534        )
535    }
536
537    fn attempt_runtime_outcome(&self, attempt_id: Uuid) -> Result<(Option<i32>, Option<String>)> {
538        let filter = EventFilter {
539            attempt_id: Some(attempt_id),
540            ..EventFilter::default()
541        };
542        let events = self.read_events(&filter)?;
543
544        let mut exit_code: Option<i32> = None;
545        let mut terminated: Option<String> = None;
546        for ev in events {
547            match ev.payload {
548                EventPayload::RuntimeExited { exit_code: ec, .. } => {
549                    exit_code = Some(ec);
550                }
551                EventPayload::RuntimeTerminated { reason, .. } => {
552                    terminated = Some(reason);
553                }
554                _ => {}
555            }
556        }
557
558        if exit_code.is_none() && terminated.is_none() {
559            return Ok((None, None));
560        }
561        Ok((exit_code, terminated))
562    }
563
    /// Builds the textual retry context handed to the next attempt of a task,
    /// plus structured data about all prior attempts.
    ///
    /// Returns a tuple of:
    /// 1. the human-readable retry context string,
    /// 2. one-line summaries of every prior attempt,
    /// 3. the ids of every prior attempt,
    /// 4. required-check failures of the latest attempt,
    /// 5. optional-check failures of the latest attempt,
    /// 6. the latest attempt's runtime exit code (if recorded),
    /// 7. the latest attempt's runtime termination reason (if recorded).
    ///
    /// Event-store and diff-artifact read failures are swallowed (treated as
    /// "no data") so retry-context building is best-effort and never blocks a
    /// retry; hence `clippy::unnecessary_wraps`. `_origin` is currently
    /// unused — presumably kept for signature symmetry with sibling helpers.
    #[allow(
        clippy::type_complexity,
        clippy::too_many_lines,
        clippy::unnecessary_wraps
    )]
    fn build_retry_context(
        &self,
        state: &AppState,
        flow: &TaskFlow,
        task_id: Uuid,
        attempt_number: u32,
        max_attempts: u32,
        _origin: &'static str,
    ) -> Result<(
        String,
        Vec<AttemptSummary>,
        Vec<Uuid>,
        Vec<String>,
        Vec<String>,
        Option<i32>,
        Option<String>,
    )> {
        // All attempts for this (flow, task), oldest first.
        let mut attempts: Vec<AttemptState> = state
            .attempts
            .values()
            .filter(|a| a.flow_id == flow.id && a.task_id == task_id)
            .cloned()
            .collect();
        attempts.sort_by_key(|a| a.attempt_number);

        let prior_attempt_ids: Vec<Uuid> = attempts.iter().map(|a| a.id).collect();

        // Build a compact key=value summary plus a failure reason per attempt.
        let mut prior_attempts: Vec<AttemptSummary> = Vec::new();
        for prior in &attempts {
            let (exit_code, terminated_reason) = self
                .attempt_runtime_outcome(prior.id)
                .unwrap_or((None, None));

            let required_failed: Vec<String> = prior
                .check_results
                .iter()
                .filter(|r| r.required && !r.passed)
                .map(|r| r.name.clone())
                .collect();
            let optional_failed: Vec<String> = prior
                .check_results
                .iter()
                .filter(|r| !r.required && !r.passed)
                .map(|r| r.name.clone())
                .collect();

            let mut summary = String::new();
            if let Some(diff_id) = prior.diff_id {
                if let Ok(artifact) = self.read_diff_artifact(diff_id) {
                    let _ = write!(summary, "change_count={} ", artifact.diff.change_count());
                }
            }

            // Runtime outcome is only summarized when no checks failed —
            // check failures are the more specific signal.
            if required_failed.is_empty() && optional_failed.is_empty() {
                if let Some(ec) = exit_code {
                    let _ = write!(summary, "runtime_exit_code={ec} ");
                }
                if let Some(ref reason) = terminated_reason {
                    let _ = write!(summary, "runtime_terminated={reason} ");
                }
            }

            if !required_failed.is_empty() {
                let _ = write!(
                    summary,
                    "required_checks_failed={} ",
                    required_failed.join(", ")
                );
            }
            if !optional_failed.is_empty() {
                let _ = write!(
                    summary,
                    "optional_checks_failed={} ",
                    optional_failed.join(", ")
                );
            }

            if summary.trim().is_empty() {
                summary = "no recorded outcomes".to_string();
            }

            // Failure reason precedence: required checks > optional checks >
            // non-zero exit code > termination > none.
            let failure_reason = if !required_failed.is_empty() {
                Some(format!(
                    "Required checks failed: {}",
                    required_failed.join(", ")
                ))
            } else if !optional_failed.is_empty() {
                Some(format!(
                    "Optional checks failed: {}",
                    optional_failed.join(", ")
                ))
            } else if let Some(ec) = exit_code {
                if ec != 0 {
                    Some(format!("Runtime exited with code {ec}"))
                } else {
                    None
                }
            } else if terminated_reason.is_some() {
                Some("Runtime terminated".to_string())
            } else {
                None
            };

            prior_attempts.push(AttemptSummary {
                attempt_number: prior.attempt_number,
                summary: summary.trim().to_string(),
                failure_reason,
            });
        }

        // Detailed failure data is extracted only from the latest attempt.
        let latest = attempts.last();
        let mut required_failures: Vec<String> = Vec::new();
        let mut optional_failures: Vec<String> = Vec::new();
        let mut exit_code: Option<i32> = None;
        #[allow(clippy::useless_let_if_seq, clippy::option_if_let_else)]
        let terminated_reason = if let Some(last) = latest {
            required_failures = last
                .check_results
                .iter()
                .filter(|r| r.required && !r.passed)
                .map(|r| r.name.clone())
                .collect();
            optional_failures = last
                .check_results
                .iter()
                .filter(|r| !r.required && !r.passed)
                .map(|r| r.name.clone())
                .collect();

            let (ec, term) = self
                .attempt_runtime_outcome(last.id)
                .unwrap_or((None, None));
            exit_code = ec;
            term
        } else {
            None
        };

        // NOTE(review): the guard compares byte length but truncation counts
        // chars, so multi-byte strings shorter than `max_len` chars but longer
        // than `max_len` bytes get truncated — confirm this is intended.
        #[allow(clippy::items_after_statements)]
        fn truncate(s: &str, max_len: usize) -> String {
            if s.len() <= max_len {
                return s.to_string();
            }
            s.chars().take(max_len).collect()
        }

        // Assemble the human-readable context string for the next attempt.
        let mut ctx = String::new();
        let _ = writeln!(
            ctx,
            "Retry attempt {attempt_number}/{max_attempts} for task {task_id}"
        );

        if let Some(last) = latest {
            let _ = writeln!(ctx, "Previous attempt: {}", last.id);

            if !required_failures.is_empty() {
                let _ = writeln!(
                    ctx,
                    "Required check failures (must fix): {}",
                    required_failures.join(", ")
                );
                // Include each failed required check's output (capped at 2000).
                for r in last
                    .check_results
                    .iter()
                    .filter(|r| r.required && !r.passed)
                {
                    let _ = writeln!(ctx, "--- Check: {}", r.name);
                    let _ = writeln!(ctx, "exit_code={}", r.exit_code);
                    let _ = writeln!(ctx, "output:\n{}", truncate(&r.output, 2000));
                }
            }

            if !optional_failures.is_empty() {
                let _ = writeln!(
                    ctx,
                    "Optional check failures: {}",
                    optional_failures.join(", ")
                );
            }

            if let Some(ec) = exit_code {
                let _ = writeln!(ctx, "Runtime exit code: {ec}");
            }
            if let Some(ref reason) = terminated_reason {
                let _ = writeln!(ctx, "Runtime terminated: {reason}");
            }

            // Summarize the latest attempt's filesystem changes, grouped by
            // change type, when a diff artifact is available.
            if let Some(diff_id) = last.diff_id {
                if let Ok(artifact) = self.read_diff_artifact(diff_id) {
                    let created: Vec<String> = artifact
                        .diff
                        .changes
                        .iter()
                        .filter(|c| c.change_type == ChangeType::Created)
                        .map(|c| c.path.to_string_lossy().to_string())
                        .collect();
                    let modified: Vec<String> = artifact
                        .diff
                        .changes
                        .iter()
                        .filter(|c| c.change_type == ChangeType::Modified)
                        .map(|c| c.path.to_string_lossy().to_string())
                        .collect();
                    let deleted: Vec<String> = artifact
                        .diff
                        .changes
                        .iter()
                        .filter(|c| c.change_type == ChangeType::Deleted)
                        .map(|c| c.path.to_string_lossy().to_string())
                        .collect();

                    let _ = writeln!(
                        ctx,
                        "Filesystem changes observed (from diff): change_count={} created={} modified={} deleted={}",
                        artifact.diff.change_count(),
                        created.len(),
                        modified.len(),
                        deleted.len()
                    );
                    if !created.is_empty() {
                        let _ = writeln!(ctx, "Created:\n{}", created.join("\n"));
                    }
                    if !modified.is_empty() {
                        let _ = writeln!(ctx, "Modified:\n{}", modified.join("\n"));
                    }
                    if !deleted.is_empty() {
                        let _ = writeln!(ctx, "Deleted:\n{}", deleted.join("\n"));
                    }
                }
            }
        }

        Ok((
            ctx,
            prior_attempts,
            prior_attempt_ids,
            required_failures,
            optional_failures,
            exit_code,
            terminated_reason,
        ))
    }
811
812    fn record_error_event(&self, err: &HivemindError, correlation: CorrelationIds) {
813        let _ = self.store.append(Event::new(
814            EventPayload::ErrorOccurred { error: err.clone() },
815            correlation,
816        ));
817    }
818
819    fn flow_for_task(state: &AppState, task_id: Uuid, origin: &'static str) -> Result<TaskFlow> {
820        state
821            .flows
822            .values()
823            .filter(|f| f.task_executions.contains_key(&task_id))
824            .max_by_key(|f| (f.updated_at, f.id))
825            .cloned()
826            .ok_or_else(|| {
827                HivemindError::user("task_not_in_flow", "Task is not part of any flow", origin)
828            })
829    }
830
831    fn inspect_task_worktree(
832        flow: &TaskFlow,
833        state: &AppState,
834        task_id: Uuid,
835        origin: &'static str,
836    ) -> Result<WorktreeStatus> {
837        let manager = Self::worktree_manager_for_flow(flow, state)?;
838        let status = manager
839            .inspect(flow.id, task_id)
840            .map_err(|e| Self::worktree_error_to_hivemind(e, origin))?;
841        if !status.is_worktree {
842            return Err(HivemindError::user(
843                "worktree_not_found",
844                "Worktree not found for task",
845                origin,
846            ));
847        }
848        Ok(status)
849    }
850
851    fn resolve_latest_attempt_without_diff(
852        state: &AppState,
853        flow_id: Uuid,
854        task_id: Uuid,
855        origin: &'static str,
856    ) -> Result<AttemptState> {
857        state
858            .attempts
859            .values()
860            .filter(|a| a.flow_id == flow_id && a.task_id == task_id)
861            .filter(|a| a.diff_id.is_none())
862            .max_by_key(|a| a.started_at)
863            .cloned()
864            .ok_or_else(|| {
865                HivemindError::system(
866                    "attempt_not_found",
867                    "Attempt not found for running task",
868                    origin,
869                )
870            })
871    }
872
873    fn normalized_checkpoint_ids(raw: &[String]) -> Vec<String> {
874        let mut ids = Vec::new();
875        let mut seen = HashSet::new();
876
877        for candidate in raw {
878            let trimmed = candidate.trim();
879            if trimmed.is_empty() {
880                continue;
881            }
882            if seen.insert(trimmed.to_string()) {
883                ids.push(trimmed.to_string());
884            }
885        }
886
887        if ids.is_empty() {
888            ids.push("checkpoint-1".to_string());
889        }
890
891        ids
892    }
893
894    fn checkpoint_order(checkpoint_ids: &[String], checkpoint_id: &str) -> Option<(u32, u32)> {
895        let idx = checkpoint_ids.iter().position(|id| id == checkpoint_id)?;
896        let order = u32::try_from(idx.saturating_add(1)).ok()?;
897        let total = u32::try_from(checkpoint_ids.len()).ok()?;
898        Some((order, total))
899    }
900
901    fn fail_running_attempt(
902        &self,
903        flow: &TaskFlow,
904        task_id: Uuid,
905        attempt_id: Uuid,
906        reason: &str,
907        origin: &'static str,
908    ) -> Result<()> {
909        let corr_task =
910            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, task_id);
911        let corr_attempt = CorrelationIds::for_graph_flow_task_attempt(
912            flow.project_id,
913            flow.graph_id,
914            flow.id,
915            task_id,
916            attempt_id,
917        );
918
919        self.append_event(
920            Event::new(
921                EventPayload::TaskExecutionStateChanged {
922                    flow_id: flow.id,
923                    task_id,
924                    from: TaskExecState::Running,
925                    to: TaskExecState::Failed,
926                },
927                corr_task,
928            ),
929            origin,
930        )?;
931
932        self.append_event(
933            Event::new(
934                EventPayload::TaskExecutionFailed {
935                    flow_id: flow.id,
936                    task_id,
937                    attempt_id: Some(attempt_id),
938                    reason: Some(reason.to_string()),
939                },
940                corr_attempt,
941            ),
942            origin,
943        )
944    }
945
946    fn resolve_latest_attempt_with_diff(
947        state: &AppState,
948        flow_id: Uuid,
949        task_id: Uuid,
950        origin: &'static str,
951    ) -> Result<AttemptState> {
952        state
953            .attempts
954            .values()
955            .filter(|a| a.flow_id == flow_id && a.task_id == task_id)
956            .filter(|a| a.diff_id.is_some())
957            .max_by_key(|a| a.started_at)
958            .cloned()
959            .ok_or_else(|| {
960                HivemindError::system(
961                    "attempt_not_found",
962                    "Attempt not found for verifying task",
963                    origin,
964                )
965            })
966    }
967
    /// Drive a task sitting in `Verifying`: enforce its scope against the
    /// recorded diff/baseline, run the graph-declared success checks, and emit
    /// the resulting lifecycle events.
    ///
    /// Returns the refreshed flow on success. On a scope violation or a failed
    /// required check, failure events are emitted and an error is returned
    /// whose hint points at the preserved worktree.
    #[allow(clippy::too_many_lines)]
    fn process_verifying_task(&self, flow_id: &str, task_id: Uuid) -> Result<TaskFlow> {
        let flow = self.get_flow(flow_id)?;
        // Only running flows are ticked; anything else is returned untouched.
        if flow.state != FlowState::Running {
            return Ok(flow);
        }

        let state = self.state()?;
        let graph = state.graphs.get(&flow.graph_id).ok_or_else(|| {
            HivemindError::system("graph_not_found", "Graph not found", "registry:tick_flow")
        })?;

        let origin = "registry:tick_flow";
        let exec = flow.task_executions.get(&task_id).ok_or_else(|| {
            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
        })?;
        // Idempotence guard: a task that already left `Verifying` is a no-op.
        if exec.state != TaskExecState::Verifying {
            return Ok(flow);
        }

        // Gather the verification inputs: the newest attempt that produced a
        // diff, the diff and baseline artifacts, and the task's worktree.
        let attempt = Self::resolve_latest_attempt_with_diff(&state, flow.id, task_id, origin)?;
        let diff_id = attempt.diff_id.ok_or_else(|| {
            HivemindError::system("diff_not_found", "Diff not found for attempt", origin)
        })?;
        let artifact = self.read_diff_artifact(diff_id)?;

        let baseline_id = attempt.baseline_id.ok_or_else(|| {
            HivemindError::system(
                "baseline_not_found",
                "Baseline not found for attempt",
                origin,
            )
        })?;
        let baseline = self.read_baseline_artifact(baseline_id)?;

        let worktree_status = Self::inspect_task_worktree(&flow, &state, task_id, origin)?;

        let task = graph.tasks.get(&task_id).ok_or_else(|| {
            HivemindError::system("task_not_found", "Task not found in graph", origin)
        })?;

        // Scope enforcement: tasks without a declared scope pass trivially.
        let mut verification = if let Some(scope) = &task.scope {
            let (commits_created, branches_created) =
                Self::detect_git_operations(&worktree_status.path, &baseline, attempt.id);

            ScopeEnforcer::new(scope.clone()).verify_all(
                &artifact.diff,
                commits_created,
                branches_created,
                task_id,
                attempt.id,
            )
        } else {
            VerificationResult::pass(task_id, attempt.id)
        };

        // Repository-level violations are folded into the same result.
        if let Some(scope) = &task.scope {
            let repo_violations =
                Self::verify_repository_scope(scope, &flow, &state, task_id, origin);
            if !repo_violations.is_empty() {
                verification.passed = false;
                verification.violations.extend(repo_violations);
            }
        }

        let corr_task =
            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, task_id);

        // Scope failure: record the violation, fail the task, and surface an
        // error that preserves the worktree for inspection.
        if !verification.passed {
            if let Some(scope) = &task.scope {
                self.append_event(
                    Event::new(
                        EventPayload::ScopeViolationDetected {
                            flow_id: flow.id,
                            task_id,
                            attempt_id: attempt.id,
                            verification_id: verification.id,
                            verified_at: verification.verified_at,
                            scope: scope.clone(),
                            violations: verification.violations.clone(),
                        },
                        CorrelationIds::for_graph_flow_task_attempt(
                            flow.project_id,
                            flow.graph_id,
                            flow.id,
                            task_id,
                            attempt.id,
                        ),
                    ),
                    origin,
                )?;
            }

            self.append_event(
                Event::new(
                    EventPayload::TaskExecutionStateChanged {
                        flow_id: flow.id,
                        task_id,
                        from: TaskExecState::Verifying,
                        to: TaskExecState::Failed,
                    },
                    corr_task,
                ),
                origin,
            )?;

            self.append_event(
                Event::new(
                    EventPayload::TaskExecutionFailed {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: Some(attempt.id),
                        reason: Some("scope_violation".to_string()),
                    },
                    CorrelationIds::for_graph_flow_task_attempt(
                        flow.project_id,
                        flow.graph_id,
                        flow.id,
                        task_id,
                        attempt.id,
                    ),
                ),
                origin,
            )?;

            // One line per violation for the returned error message.
            let violations = verification
                .violations
                .iter()
                .map(|v| {
                    let path = v.path.as_deref().unwrap_or("-");
                    format!("{:?}: {path}: {}", v.violation_type, v.description)
                })
                .collect::<Vec<_>>()
                .join("\n");

            return Err(HivemindError::scope(
                "scope_violation",
                format!("Scope violation detected:\n{violations}"),
                origin,
            )
            .with_hint(format!(
                "Worktree preserved at {}",
                worktree_status.path.display()
            )));
        }

        let corr_attempt = CorrelationIds::for_graph_flow_task_attempt(
            flow.project_id,
            flow.graph_id,
            flow.id,
            task_id,
            attempt.id,
        );

        // Scope passed; record that before running the checks.
        if let Some(scope) = &task.scope {
            self.append_event(
                Event::new(
                    EventPayload::ScopeValidated {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: attempt.id,
                        verification_id: verification.id,
                        verified_at: verification.verified_at,
                        scope: scope.clone(),
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        // Per-attempt directory handed to check commands as CARGO_TARGET_DIR;
        // creation failure is deliberately ignored here.
        let target_dir = self
            .config
            .data_dir
            .join("cargo-target")
            .join(flow.id.to_string())
            .join(task_id.to_string())
            .join(attempt.id.to_string())
            .join("checks");
        let _ = fs::create_dir_all(&target_dir);

        // Run every declared check, bracketing each with start/complete events.
        let mut results = Vec::new();
        for check in &task.criteria.checks {
            self.append_event(
                Event::new(
                    EventPayload::CheckStarted {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: attempt.id,
                        check_name: check.name.clone(),
                        required: check.required,
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;

            let started = Instant::now();
            // Spawn/IO errors become a synthetic exit code 127 so the check
            // still completes and its outcome is recorded in the event log.
            let (exit_code, combined) = match Self::run_check_command(
                &worktree_status.path,
                &target_dir,
                &check.command,
                check.timeout_ms,
            ) {
                Ok((exit_code, output, _timed_out)) => (exit_code, output),
                Err(e) => (127, e.to_string()),
            };
            let duration_ms =
                u64::try_from(started.elapsed().as_millis().min(u128::from(u64::MAX)))
                    .unwrap_or(u64::MAX);
            let passed = exit_code == 0;

            self.append_event(
                Event::new(
                    EventPayload::CheckCompleted {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: attempt.id,
                        check_name: check.name.clone(),
                        passed,
                        exit_code,
                        output: combined.clone(),
                        duration_ms,
                        required: check.required,
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;

            results.push((check.name.clone(), check.required, passed));
        }

        // Only required checks gate success; optional failures are recorded
        // in events but do not block the task.
        let required_failed = results
            .iter()
            .any(|(_, required, passed)| *required && !*passed);

        if !required_failed {
            // Success path: transition, emit success + frozen-commit events,
            // optionally clean up worktrees, and complete the flow when this
            // was the last outstanding task.
            self.append_event(
                Event::new(
                    EventPayload::TaskExecutionStateChanged {
                        flow_id: flow.id,
                        task_id,
                        from: TaskExecState::Verifying,
                        to: TaskExecState::Success,
                    },
                    corr_task,
                ),
                origin,
            )?;

            self.append_event(
                Event::new(
                    EventPayload::TaskExecutionSucceeded {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: Some(attempt.id),
                    },
                    corr_attempt,
                ),
                origin,
            )?;

            let frozen_commit_sha = Self::resolve_task_frozen_commit_sha(&flow, &state, task_id);
            self.emit_task_execution_frozen(&flow, task_id, frozen_commit_sha, origin)?;

            // Best-effort worktree cleanup: failures here never fail the tick.
            if let Ok(managers) =
                Self::worktree_managers_for_flow(&flow, &state, "registry:tick_flow")
            {
                for (_repo_name, manager) in managers {
                    if manager.config().cleanup_on_success {
                        if let Ok(status) = manager.inspect(flow.id, task_id) {
                            if status.is_worktree {
                                let _ = manager.remove(&status.path);
                            }
                        }
                    }
                }
            }

            // Re-read the flow so the just-emitted events are reflected.
            let updated = self.get_flow(flow_id)?;
            let all_success = updated
                .task_executions
                .values()
                .all(|e| e.state == TaskExecState::Success);
            if all_success {
                let event = Event::new(
                    EventPayload::TaskFlowCompleted {
                        flow_id: updated.id,
                    },
                    CorrelationIds::for_graph_flow(
                        updated.project_id,
                        updated.graph_id,
                        updated.id,
                    ),
                );
                // NOTE(review): appended directly to the store with the error
                // ignored, unlike the append_event calls above — confirm this
                // best-effort behavior is intentional.
                let _ = self.store.append(event);
            }

            return self.get_flow(flow_id);
        }

        // Failure path: retry vs. terminal failure per the task's retry
        // policy (max_retries retries on top of the initial attempt).
        let max_retries = task.retry_policy.max_retries;
        let max_attempts = max_retries.saturating_add(1);
        let can_retry = exec.attempt_count < max_attempts;
        let to = if can_retry {
            TaskExecState::Retry
        } else {
            TaskExecState::Failed
        };

        self.append_event(
            Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id,
                    from: TaskExecState::Verifying,
                    to,
                },
                corr_task,
            ),
            origin,
        )?;

        // NOTE(review): `to` is always Retry or Failed at this point, so this
        // guard currently always holds; kept as written.
        if matches!(to, TaskExecState::Retry | TaskExecState::Failed) {
            self.append_event(
                Event::new(
                    EventPayload::TaskExecutionFailed {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: Some(attempt.id),
                        reason: Some("required_checks_failed".to_string()),
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        // Comma-separated names of the required checks that failed.
        let failures = results
            .into_iter()
            .filter(|(_, required, passed)| *required && !*passed)
            .map(|(name, _, _)| name)
            .collect::<Vec<_>>()
            .join(", ");

        let err = HivemindError::verification(
            "required_checks_failed",
            format!("Required checks failed: {failures}"),
            origin,
        )
        .with_hint(format!(
            "View check outputs via `hivemind verify results {}`. Worktree preserved at {}",
            attempt.id,
            worktree_status.path.display()
        ));

        self.record_error_event(&err, corr_attempt);

        Err(err)
    }
1329
1330    pub fn verify_run(&self, task_id: &str) -> Result<TaskFlow> {
1331        let origin = "registry:verify_run";
1332        let id = Uuid::parse_str(task_id).map_err(|_| {
1333            HivemindError::user(
1334                "invalid_task_id",
1335                format!("'{task_id}' is not a valid task ID"),
1336                origin,
1337            )
1338        })?;
1339
1340        let state = self.state()?;
1341        let flow = Self::flow_for_task(&state, id, origin)?;
1342        let exec = flow.task_executions.get(&id).ok_or_else(|| {
1343            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
1344        })?;
1345        if exec.state != TaskExecState::Verifying {
1346            return Err(HivemindError::user(
1347                "task_not_verifying",
1348                "Task is not in verifying state",
1349                origin,
1350            )
1351            .with_hint(
1352                "Complete the task execution first, or run `hivemind flow tick <flow-id>`",
1353            ));
1354        }
1355
1356        self.process_verifying_task(&flow.id.to_string(), id)
1357    }
1358
    /// Execute `command` via `sh -lc` in `workdir`, with `CARGO_TARGET_DIR`
    /// pointing at `cargo_target_dir`.
    ///
    /// Returns `(exit_code, combined_output, timed_out)` where the output is
    /// stdout followed by stderr, newline-separated. With `timeout_ms` set,
    /// the child is polled and killed once the deadline passes: the run then
    /// reports exit code 124 and the output is prefixed with a
    /// "timed out after …" line. A process exiting without a code (e.g.
    /// signal-terminated) reports -1.
    fn run_check_command(
        workdir: &Path,
        cargo_target_dir: &Path,
        command: &str,
        timeout_ms: Option<u64>,
    ) -> std::io::Result<(i32, String, bool)> {
        let started = Instant::now();

        let mut cmd = std::process::Command::new("sh");
        cmd.current_dir(workdir)
            .env("CARGO_TARGET_DIR", cargo_target_dir)
            .args(["-lc", command]);

        if let Some(timeout_ms) = timeout_ms {
            let mut child = cmd.stdout(Stdio::piped()).stderr(Stdio::piped()).spawn()?;

            let mut out_buf = Vec::new();
            let mut err_buf = Vec::new();

            let stdout = child.stdout.take();
            let stderr = child.stderr.take();

            // Drain stdout/stderr on dedicated threads while the parent polls
            // for exit, so a chatty child cannot fill a pipe and stall.
            let out_handle = std::thread::spawn(move || {
                if let Some(mut stdout) = stdout {
                    let _ = stdout.read_to_end(&mut out_buf);
                }
                out_buf
            });
            let err_handle = std::thread::spawn(move || {
                if let Some(mut stderr) = stderr {
                    let _ = stderr.read_to_end(&mut err_buf);
                }
                err_buf
            });

            // Poll every 10ms; on deadline, kill the child and reap it so no
            // zombie is left behind.
            let timeout = Duration::from_millis(timeout_ms);
            let mut timed_out = false;
            let status = loop {
                if let Some(status) = child.try_wait()? {
                    break status;
                }
                if started.elapsed() >= timeout {
                    timed_out = true;
                    let _ = child.kill();
                    break child.wait()?;
                }
                std::thread::sleep(Duration::from_millis(10));
            };

            // Reader threads finish once the pipes close (child exited/killed).
            let stdout_buf = out_handle.join().unwrap_or_default();
            let stderr_buf = err_handle.join().unwrap_or_default();

            let mut combined = String::new();
            if timed_out {
                let _ = writeln!(combined, "timed out after {timeout_ms}ms");
            }
            combined.push_str(&String::from_utf8_lossy(&stdout_buf));
            if !combined.ends_with('\n') {
                combined.push('\n');
            }
            combined.push_str(&String::from_utf8_lossy(&stderr_buf));

            // A timeout wins over whatever status the killed child reports.
            let exit_code = if timed_out {
                124
            } else {
                status.code().unwrap_or(-1)
            };

            return Ok((exit_code, combined, timed_out));
        }

        // No timeout: block until the child finishes and collect both streams.
        let out = cmd.output()?;
        let mut combined = String::new();
        combined.push_str(&String::from_utf8_lossy(&out.stdout));
        if !combined.ends_with('\n') {
            combined.push('\n');
        }
        combined.push_str(&String::from_utf8_lossy(&out.stderr));
        Ok((out.status.code().unwrap_or(-1), combined, false))
    }
1439
1440    fn detect_git_operations(
1441        worktree_path: &Path,
1442        baseline: &Baseline,
1443        attempt_id: Uuid,
1444    ) -> (bool, bool) {
1445        let commits_created = Self::detect_commits_created(worktree_path, baseline, attempt_id);
1446        let branches_created = Self::detect_branches_created(worktree_path, baseline);
1447        (commits_created, branches_created)
1448    }
1449
1450    fn detect_commits_created(worktree_path: &Path, baseline: &Baseline, attempt_id: Uuid) -> bool {
1451        let Some(base) = baseline.git_head.as_deref() else {
1452            return false;
1453        };
1454
1455        let output = std::process::Command::new("git")
1456            .current_dir(worktree_path)
1457            .args(["log", "--format=%s", &format!("{base}..HEAD")])
1458            .output();
1459
1460        let Ok(output) = output else {
1461            return false;
1462        };
1463        if !output.status.success() {
1464            return false;
1465        }
1466
1467        let mut subjects: Vec<String> = String::from_utf8_lossy(&output.stdout)
1468            .lines()
1469            .map(|l| l.trim().to_string())
1470            .filter(|l| !l.is_empty())
1471            .collect();
1472        subjects.retain(|s| s != &format!("hivemind checkpoint {attempt_id}"));
1473        subjects.retain(|s| !s.starts_with("hivemind(checkpoint): "));
1474        !subjects.is_empty()
1475    }
1476
1477    fn detect_branches_created(worktree_path: &Path, baseline: &Baseline) -> bool {
1478        let output = std::process::Command::new("git")
1479            .current_dir(worktree_path)
1480            .args(["for-each-ref", "refs/heads", "--format=%(refname:short)"])
1481            .output();
1482
1483        let Ok(output) = output else {
1484            return false;
1485        };
1486        if !output.status.success() {
1487            return false;
1488        }
1489
1490        let current: std::collections::HashSet<String> = String::from_utf8_lossy(&output.stdout)
1491            .lines()
1492            .map(|l| l.trim().to_string())
1493            .filter(|l| !l.is_empty())
1494            .collect();
1495        let base: std::collections::HashSet<String> =
1496            baseline.git_branches.iter().cloned().collect();
1497        current.difference(&base).next().is_some()
1498    }
1499
1500    fn parse_git_status_paths(worktree_path: &Path) -> Vec<String> {
1501        let output = std::process::Command::new("git")
1502            .current_dir(worktree_path)
1503            .args(["status", "--porcelain"])
1504            .output();
1505        let Ok(output) = output else {
1506            return Vec::new();
1507        };
1508        if !output.status.success() {
1509            return Vec::new();
1510        }
1511
1512        String::from_utf8_lossy(&output.stdout)
1513            .lines()
1514            .map(str::trim)
1515            .filter(|line| !line.is_empty())
1516            .map(|line| {
1517                line.strip_prefix("?? ")
1518                    .or_else(|| line.get(3..))
1519                    .unwrap_or("")
1520                    .trim()
1521                    .to_string()
1522            })
1523            .filter(|path| !path.is_empty())
1524            .collect()
1525    }
1526
1527    fn verify_repository_scope(
1528        scope: &Scope,
1529        flow: &TaskFlow,
1530        state: &AppState,
1531        task_id: Uuid,
1532        origin: &'static str,
1533    ) -> Vec<crate::core::enforcement::ScopeViolation> {
1534        if scope.repositories.is_empty() {
1535            return Vec::new();
1536        }
1537
1538        let Ok(worktrees) = Self::inspect_task_worktrees(flow, state, task_id, origin) else {
1539            return vec![crate::core::enforcement::ScopeViolation::filesystem(
1540                "<worktree>",
1541                "Repository scope verification failed: worktree missing",
1542            )];
1543        };
1544
1545        let mut violations = Vec::new();
1546        for (repo_name, status) in worktrees {
1547            let changed_paths = Self::parse_git_status_paths(&status.path);
1548            if changed_paths.is_empty() {
1549                continue;
1550            }
1551
1552            let allowed_mode = scope
1553                .repositories
1554                .iter()
1555                .find(|r| r.repo == repo_name || r.repo == status.path.to_string_lossy())
1556                .map(|r| r.mode);
1557
1558            match allowed_mode {
1559                Some(RepoAccessMode::ReadWrite) => {}
1560                Some(RepoAccessMode::ReadOnly) => {
1561                    violations.push(crate::core::enforcement::ScopeViolation::filesystem(
1562                        format!("{repo_name}/{}", changed_paths[0]),
1563                        format!("Repository '{repo_name}' is read-only in scope"),
1564                    ));
1565                }
1566                None => {
1567                    violations.push(crate::core::enforcement::ScopeViolation::filesystem(
1568                        format!("{repo_name}/{}", changed_paths[0]),
1569                        format!("Repository '{repo_name}' is not declared in task scope"),
1570                    ));
1571                }
1572            }
1573        }
1574        violations
1575    }
1576
    /// Emit the event sequence that closes out a completed task execution:
    /// the Running -> Verifying transition, an optional checkpoint-commit
    /// record, one `FileModified` event per changed file, and a final
    /// `DiffComputed` summary.
    fn emit_task_execution_completion_events(
        &self,
        flow: &TaskFlow,
        task_id: Uuid,
        attempt: &AttemptState,
        completion: CompletionArtifacts<'_>,
        origin: &'static str,
    ) -> Result<()> {
        let corr_task =
            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, task_id);
        let corr_attempt = CorrelationIds::for_graph_flow_task_attempt(
            flow.project_id,
            flow.graph_id,
            flow.id,
            task_id,
            attempt.id,
        );

        // State transition goes on the task correlation stream.
        self.append_event(
            Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id,
                    from: TaskExecState::Running,
                    to: TaskExecState::Verifying,
                },
                corr_task,
            ),
            origin,
        )?;

        // Checkpoint commit is only recorded when one was created.
        if let Some(commit_sha) = completion.checkpoint_commit_sha {
            self.append_event(
                Event::new(
                    EventPayload::CheckpointCommitCreated {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: attempt.id,
                        commit_sha,
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        // One FileModified event per change in the computed diff.
        for change in &completion.artifact.diff.changes {
            self.append_event(
                Event::new(
                    EventPayload::FileModified {
                        flow_id: flow.id,
                        task_id,
                        attempt_id: attempt.id,
                        path: change.path.to_string_lossy().to_string(),
                        change_type: change.change_type,
                        old_hash: change.old_hash.clone(),
                        new_hash: change.new_hash.clone(),
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        // Summary event ties the diff to its baseline and change count.
        self.append_event(
            Event::new(
                EventPayload::DiffComputed {
                    flow_id: flow.id,
                    task_id,
                    attempt_id: attempt.id,
                    diff_id: completion.artifact.diff.id,
                    baseline_id: completion.baseline_id,
                    change_count: completion.artifact.diff.change_count(),
                },
                corr_attempt,
            ),
            origin,
        )?;

        Ok(())
    }
1658
1659    fn capture_and_store_baseline(
1660        &self,
1661        worktree_path: &Path,
1662        origin: &'static str,
1663    ) -> Result<Baseline> {
1664        let baseline = Baseline::capture(worktree_path)
1665            .map_err(|e| HivemindError::system("baseline_capture_failed", e.to_string(), origin))?;
1666        self.write_baseline_artifact(&baseline)?;
1667        Ok(baseline)
1668    }
1669
1670    fn compute_and_store_diff(
1671        &self,
1672        baseline_id: Uuid,
1673        worktree_path: &Path,
1674        task_id: Uuid,
1675        attempt_id: Uuid,
1676        origin: &'static str,
1677    ) -> Result<DiffArtifact> {
1678        let baseline = self.read_baseline_artifact(baseline_id)?;
1679        let diff = Diff::compute(&baseline, worktree_path)
1680            .map_err(|e| HivemindError::system("diff_compute_failed", e.to_string(), origin))?
1681            .for_task(task_id)
1682            .for_attempt(attempt_id);
1683
1684        let mut unified = String::new();
1685        for change in &diff.changes {
1686            if let Ok(chunk) = self.unified_diff_for_change(baseline_id, worktree_path, change) {
1687                unified.push_str(&chunk);
1688                if !chunk.ends_with('\n') {
1689                    unified.push('\n');
1690                }
1691            }
1692        }
1693
1694        let artifact = DiffArtifact { diff, unified };
1695        self.write_diff_artifact(&artifact)?;
1696        Ok(artifact)
1697    }
1698
    /// Root directory for durable artifacts under the configured data dir.
    fn artifacts_dir(&self) -> PathBuf {
        self.config.data_dir.join("artifacts")
    }

    /// Directory holding all baseline artifacts.
    fn baselines_dir(&self) -> PathBuf {
        self.artifacts_dir().join("baselines")
    }

    /// Directory for a single baseline, keyed by its UUID.
    fn baseline_dir(&self, baseline_id: Uuid) -> PathBuf {
        self.baselines_dir().join(baseline_id.to_string())
    }

    /// Path of the serialized baseline metadata (`baseline.json`).
    fn baseline_json_path(&self, baseline_id: Uuid) -> PathBuf {
        self.baseline_dir(baseline_id).join("baseline.json")
    }

    /// Directory holding the snapshotted file contents of a baseline.
    fn baseline_files_dir(&self, baseline_id: Uuid) -> PathBuf {
        self.baseline_dir(baseline_id).join("files")
    }

    /// Directory holding all diff artifacts.
    fn diffs_dir(&self) -> PathBuf {
        self.artifacts_dir().join("diffs")
    }

    /// Path of a serialized diff artifact, keyed by the diff UUID.
    fn diff_json_path(&self, diff_id: Uuid) -> PathBuf {
        self.diffs_dir().join(format!("{diff_id}.json"))
    }
1726
1727    fn write_baseline_artifact(&self, baseline: &Baseline) -> Result<()> {
1728        let files_dir = self.baseline_files_dir(baseline.id);
1729        fs::create_dir_all(&files_dir).map_err(|e| {
1730            HivemindError::system(
1731                "artifact_write_failed",
1732                e.to_string(),
1733                "registry:write_baseline_artifact",
1734            )
1735        })?;
1736
1737        let json = serde_json::to_vec_pretty(baseline).map_err(|e| {
1738            HivemindError::system(
1739                "artifact_serialize_failed",
1740                e.to_string(),
1741                "registry:write_baseline_artifact",
1742            )
1743        })?;
1744        fs::write(self.baseline_json_path(baseline.id), json).map_err(|e| {
1745            HivemindError::system(
1746                "artifact_write_failed",
1747                e.to_string(),
1748                "registry:write_baseline_artifact",
1749            )
1750        })?;
1751
1752        for snapshot in baseline.files.values() {
1753            if snapshot.is_dir {
1754                continue;
1755            }
1756
1757            let src = baseline.root.join(&snapshot.path);
1758            let dst = files_dir.join(&snapshot.path);
1759            if let Some(parent) = dst.parent() {
1760                fs::create_dir_all(parent).map_err(|e| {
1761                    HivemindError::system(
1762                        "artifact_write_failed",
1763                        e.to_string(),
1764                        "registry:write_baseline_artifact",
1765                    )
1766                })?;
1767            }
1768
1769            let Ok(contents) = fs::read(src) else {
1770                continue;
1771            };
1772            let _ = fs::write(dst, contents);
1773        }
1774        Ok(())
1775    }
1776
1777    fn read_baseline_artifact(&self, baseline_id: Uuid) -> Result<Baseline> {
1778        let bytes = fs::read(self.baseline_json_path(baseline_id)).map_err(|e| {
1779            HivemindError::system(
1780                "artifact_read_failed",
1781                e.to_string(),
1782                "registry:read_baseline_artifact",
1783            )
1784        })?;
1785        serde_json::from_slice(&bytes).map_err(|e| {
1786            HivemindError::system(
1787                "artifact_deserialize_failed",
1788                e.to_string(),
1789                "registry:read_baseline_artifact",
1790            )
1791        })
1792    }
1793
1794    fn write_diff_artifact(&self, artifact: &DiffArtifact) -> Result<()> {
1795        fs::create_dir_all(self.diffs_dir()).map_err(|e| {
1796            HivemindError::system(
1797                "artifact_write_failed",
1798                e.to_string(),
1799                "registry:write_diff_artifact",
1800            )
1801        })?;
1802        let json = serde_json::to_vec_pretty(artifact).map_err(|e| {
1803            HivemindError::system(
1804                "artifact_serialize_failed",
1805                e.to_string(),
1806                "registry:write_diff_artifact",
1807            )
1808        })?;
1809        fs::write(self.diff_json_path(artifact.diff.id), json).map_err(|e| {
1810            HivemindError::system(
1811                "artifact_write_failed",
1812                e.to_string(),
1813                "registry:write_diff_artifact",
1814            )
1815        })?;
1816        Ok(())
1817    }
1818
1819    fn read_diff_artifact(&self, diff_id: Uuid) -> Result<DiffArtifact> {
1820        let bytes = fs::read(self.diff_json_path(diff_id)).map_err(|e| {
1821            HivemindError::system(
1822                "artifact_read_failed",
1823                e.to_string(),
1824                "registry:read_diff_artifact",
1825            )
1826        })?;
1827        serde_json::from_slice(&bytes).map_err(|e| {
1828            HivemindError::system(
1829                "artifact_deserialize_failed",
1830                e.to_string(),
1831                "registry:read_diff_artifact",
1832            )
1833        })
1834    }
1835
1836    fn unified_diff_for_change(
1837        &self,
1838        baseline_id: Uuid,
1839        worktree_root: &std::path::Path,
1840        change: &FileChange,
1841    ) -> std::io::Result<String> {
1842        let baseline_files = self.baseline_files_dir(baseline_id);
1843        let old = baseline_files.join(&change.path);
1844        let new = worktree_root.join(&change.path);
1845
1846        match change.change_type {
1847            ChangeType::Created => unified_diff(None, Some(&new)),
1848            ChangeType::Deleted => unified_diff(Some(&old), None),
1849            ChangeType::Modified => unified_diff(Some(&old), Some(&new)),
1850        }
1851    }
1852
1853    fn worktree_error_to_hivemind(err: WorktreeError, origin: &'static str) -> HivemindError {
1854        match err {
1855            WorktreeError::InvalidRepo(path) => HivemindError::git(
1856                "invalid_repo",
1857                format!("Invalid git repository: {}", path.display()),
1858                origin,
1859            ),
1860            WorktreeError::GitError(msg) => HivemindError::git("git_worktree_failed", msg, origin),
1861            WorktreeError::AlreadyExists(task_id) => HivemindError::user(
1862                "worktree_already_exists",
1863                format!("Worktree already exists for task {task_id}"),
1864                origin,
1865            ),
1866            WorktreeError::NotFound(id) => HivemindError::user(
1867                "worktree_not_found",
1868                format!("Worktree not found: {id}"),
1869                origin,
1870            ),
1871            WorktreeError::IoError(e) => {
1872                HivemindError::system("worktree_io_error", e.to_string(), origin)
1873            }
1874        }
1875    }
1876
1877    fn project_for_flow<'a>(flow: &TaskFlow, state: &'a AppState) -> Result<&'a Project> {
1878        state.projects.get(&flow.project_id).ok_or_else(|| {
1879            HivemindError::system(
1880                "project_not_found",
1881                format!("Project '{}' not found", flow.project_id),
1882                "registry:worktree_manager_for_flow",
1883            )
1884        })
1885    }
1886
1887    fn worktree_managers_for_flow(
1888        flow: &TaskFlow,
1889        state: &AppState,
1890        origin: &'static str,
1891    ) -> Result<Vec<(String, WorktreeManager)>> {
1892        let project = Self::project_for_flow(flow, state)?;
1893
1894        if project.repositories.is_empty() {
1895            return Err(HivemindError::user(
1896                "project_has_no_repo",
1897                "Project has no repository attached",
1898                origin,
1899            )
1900            .with_hint("Attach a repo via 'hivemind project attach-repo <project> <path>'"));
1901        }
1902
1903        project
1904            .repositories
1905            .iter()
1906            .map(|repo| {
1907                WorktreeManager::new(PathBuf::from(&repo.path), WorktreeConfig::default())
1908                    .map(|manager| (repo.name.clone(), manager))
1909                    .map_err(|e| Self::worktree_error_to_hivemind(e, origin))
1910            })
1911            .collect()
1912    }
1913
1914    fn worktree_manager_for_flow(flow: &TaskFlow, state: &AppState) -> Result<WorktreeManager> {
1915        let managers =
1916            Self::worktree_managers_for_flow(flow, state, "registry:worktree_manager_for_flow")?;
1917        managers
1918            .into_iter()
1919            .next()
1920            .map(|(_, manager)| manager)
1921            .ok_or_else(|| {
1922                HivemindError::user(
1923                    "project_has_no_repo",
1924                    "Project has no repository attached",
1925                    "registry:worktree_manager_for_flow",
1926                )
1927            })
1928    }
1929
1930    fn git_ref_exists(repo_path: &Path, reference: &str) -> bool {
1931        std::process::Command::new("git")
1932            .current_dir(repo_path)
1933            .args(["show-ref", "--verify", "--quiet", reference])
1934            .status()
1935            .map(|status| status.success())
1936            .unwrap_or(false)
1937    }
1938
1939    fn default_base_ref_for_repo(
1940        flow: &TaskFlow,
1941        manager: &WorktreeManager,
1942        is_primary: bool,
1943    ) -> String {
1944        let flow_ref = format!("refs/heads/flow/{}", flow.id);
1945        if Self::git_ref_exists(manager.repo_path(), &flow_ref) {
1946            return format!("flow/{}", flow.id);
1947        }
1948        if is_primary {
1949            return flow
1950                .base_revision
1951                .clone()
1952                .unwrap_or_else(|| "HEAD".to_string());
1953        }
1954        "HEAD".to_string()
1955    }
1956
1957    fn ensure_task_worktree_status(
1958        manager: &WorktreeManager,
1959        flow: &TaskFlow,
1960        task_id: Uuid,
1961        base_ref: &str,
1962        origin: &'static str,
1963    ) -> Result<WorktreeStatus> {
1964        let status = manager
1965            .inspect(flow.id, task_id)
1966            .map_err(|e| Self::worktree_error_to_hivemind(e, origin))?;
1967        if status.is_worktree {
1968            return Ok(status);
1969        }
1970
1971        manager
1972            .create(flow.id, task_id, Some(base_ref))
1973            .map_err(|e| Self::worktree_error_to_hivemind(e, origin))?;
1974        let status = manager
1975            .inspect(flow.id, task_id)
1976            .map_err(|e| Self::worktree_error_to_hivemind(e, origin))?;
1977        if !status.is_worktree {
1978            return Err(HivemindError::git(
1979                "worktree_create_failed",
1980                format!(
1981                    "Worktree path exists but is not a git worktree: {}",
1982                    status.path.display()
1983                ),
1984                origin,
1985            ));
1986        }
1987        Ok(status)
1988    }
1989
1990    fn ensure_task_worktree(
1991        flow: &TaskFlow,
1992        state: &AppState,
1993        task_id: Uuid,
1994        origin: &'static str,
1995    ) -> Result<WorktreeStatus> {
1996        let managers = Self::worktree_managers_for_flow(flow, state, origin)?;
1997        let mut primary_status: Option<WorktreeStatus> = None;
1998        for (idx, (_repo_name, manager)) in managers.iter().enumerate() {
1999            let base_ref = Self::default_base_ref_for_repo(flow, manager, idx == 0);
2000            let status =
2001                Self::ensure_task_worktree_status(manager, flow, task_id, &base_ref, origin)?;
2002            if idx == 0 {
2003                primary_status = Some(status);
2004            }
2005        }
2006        primary_status.ok_or_else(|| {
2007            HivemindError::user(
2008                "project_has_no_repo",
2009                "Project has no repository attached",
2010                origin,
2011            )
2012        })
2013    }
2014
2015    fn inspect_task_worktrees(
2016        flow: &TaskFlow,
2017        state: &AppState,
2018        task_id: Uuid,
2019        origin: &'static str,
2020    ) -> Result<Vec<(String, WorktreeStatus)>> {
2021        let managers = Self::worktree_managers_for_flow(flow, state, origin)?;
2022        let mut statuses = Vec::new();
2023        for (repo_name, manager) in managers {
2024            let status = manager
2025                .inspect(flow.id, task_id)
2026                .map_err(|e| Self::worktree_error_to_hivemind(e, origin))?;
2027            if !status.is_worktree {
2028                return Err(HivemindError::user(
2029                    "worktree_not_found",
2030                    format!("Worktree not found for task in repo '{repo_name}'"),
2031                    origin,
2032                ));
2033            }
2034            statuses.push((repo_name, status));
2035        }
2036        Ok(statuses)
2037    }
2038
2039    /// Opens or creates a registry at the default location.
2040    ///
2041    /// # Errors
2042    /// Returns an error if the event store cannot be opened.
2043    pub fn open() -> Result<Self> {
2044        Self::open_with_config(RegistryConfig::default_dir())
2045    }
2046
2047    /// Opens or creates a registry with custom config.
2048    ///
2049    /// # Errors
2050    /// Returns an error if the event store cannot be opened.
2051    pub fn open_with_config(config: RegistryConfig) -> Result<Self> {
2052        let store = IndexedEventStore::open(&config.data_dir).map_err(|e| {
2053            HivemindError::system("store_open_failed", e.to_string(), "registry:open")
2054        })?;
2055
2056        Ok(Self {
2057            store: Arc::new(store),
2058            config,
2059        })
2060    }
2061
2062    /// Creates a registry with a custom event store (for testing).
2063    #[must_use]
2064    pub fn with_store(store: Arc<dyn EventStore>, config: RegistryConfig) -> Self {
2065        Self { store, config }
2066    }
2067
2068    /// Gets the current state by replaying all events.
2069    ///
2070    /// # Errors
2071    /// Returns an error if events cannot be read.
2072    pub fn state(&self) -> Result<AppState> {
2073        let events = self.store.read_all().map_err(|e| {
2074            HivemindError::system("state_read_failed", e.to_string(), "registry:state")
2075        })?;
2076        Ok(AppState::replay(&events))
2077    }
2078
2079    /// Lists events in the store.
2080    ///
2081    /// # Errors
2082    /// Returns an error if events cannot be read.
2083    pub fn list_events(&self, project_id: Option<Uuid>, limit: usize) -> Result<Vec<Event>> {
2084        let mut filter = EventFilter::all();
2085        filter.project_id = project_id;
2086        filter.limit = Some(limit);
2087
2088        self.store.read(&filter).map_err(|e| {
2089            HivemindError::system("event_read_failed", e.to_string(), "registry:list_events")
2090        })
2091    }
2092
2093    pub fn read_events(&self, filter: &EventFilter) -> Result<Vec<Event>> {
2094        self.store.read(filter).map_err(|e| {
2095            HivemindError::system("event_read_failed", e.to_string(), "registry:read_events")
2096        })
2097    }
2098
2099    pub fn stream_events(&self, filter: &EventFilter) -> Result<std::sync::mpsc::Receiver<Event>> {
2100        self.store.stream(filter).map_err(|e| {
2101            HivemindError::system(
2102                "event_stream_failed",
2103                e.to_string(),
2104                "registry:stream_events",
2105            )
2106        })
2107    }
2108
2109    /// Gets a specific event by ID.
2110    ///
2111    /// # Errors
2112    /// Returns an error if the event cannot be read or is not found.
2113    pub fn get_event(&self, event_id: &str) -> Result<Event> {
2114        let id = Uuid::parse_str(event_id).map_err(|_| {
2115            HivemindError::user(
2116                "invalid_event_id",
2117                format!("'{event_id}' is not a valid event ID"),
2118                "registry:get_event",
2119            )
2120        })?;
2121
2122        let events = self.store.read_all().map_err(|e| {
2123            HivemindError::system("event_read_failed", e.to_string(), "registry:get_event")
2124        })?;
2125
2126        events
2127            .into_iter()
2128            .find(|e| e.metadata.id.as_uuid() == id)
2129            .ok_or_else(|| {
2130                HivemindError::user(
2131                    "event_not_found",
2132                    format!("Event '{event_id}' not found"),
2133                    "registry:get_event",
2134                )
2135            })
2136    }
2137
2138    /// Creates a new project.
2139    ///
2140    /// # Errors
2141    /// Returns an error if a project with that name already exists.
2142    pub fn create_project(&self, name: &str, description: Option<&str>) -> Result<Project> {
2143        if name.trim().is_empty() {
2144            let err = HivemindError::user(
2145                "invalid_project_name",
2146                "Project name cannot be empty",
2147                "registry:create_project",
2148            )
2149            .with_hint("Provide a non-empty project name");
2150            self.record_error_event(&err, CorrelationIds::none());
2151            return Err(err);
2152        }
2153
2154        let state = self.state()?;
2155
2156        // Check for duplicate name
2157        if state.projects.values().any(|p| p.name == name) {
2158            let err = HivemindError::user(
2159                "project_exists",
2160                format!("Project '{name}' already exists"),
2161                "registry:create_project",
2162            )
2163            .with_hint("Choose a different project name");
2164            self.record_error_event(&err, CorrelationIds::none());
2165            return Err(err);
2166        }
2167
2168        let id = Uuid::new_v4();
2169        let event = Event::new(
2170            EventPayload::ProjectCreated {
2171                id,
2172                name: name.to_string(),
2173                description: description.map(String::from),
2174            },
2175            CorrelationIds::for_project(id),
2176        );
2177
2178        self.store.append(event).map_err(|e| {
2179            HivemindError::system(
2180                "event_append_failed",
2181                e.to_string(),
2182                "registry:create_project",
2183            )
2184        })?;
2185
2186        // Return the created project by replaying
2187        let new_state = self.state()?;
2188        new_state.projects.get(&id).cloned().ok_or_else(|| {
2189            HivemindError::system(
2190                "project_not_found_after_create",
2191                "Project was not found after creation",
2192                "registry:create_project",
2193            )
2194        })
2195    }
2196
2197    /// Lists all projects.
2198    ///
2199    /// # Errors
2200    /// Returns an error if state cannot be read.
2201    pub fn list_projects(&self) -> Result<Vec<Project>> {
2202        let state = self.state()?;
2203        let mut projects: Vec<_> = state.projects.into_values().collect();
2204        projects.sort_by(|a, b| a.name.cmp(&b.name));
2205        Ok(projects)
2206    }
2207
2208    /// Gets a project by ID or name.
2209    ///
2210    /// # Errors
2211    /// Returns an error if the project is not found.
2212    pub fn get_project(&self, id_or_name: &str) -> Result<Project> {
2213        let state = self.state()?;
2214
2215        // Try parsing as UUID first
2216        if let Ok(id) = Uuid::parse_str(id_or_name) {
2217            if let Some(project) = state.projects.get(&id) {
2218                return Ok(project.clone());
2219            }
2220        }
2221
2222        // Search by name
2223        state
2224            .projects
2225            .values()
2226            .find(|p| p.name == id_or_name)
2227            .cloned()
2228            .ok_or_else(|| {
2229                HivemindError::user(
2230                    "project_not_found",
2231                    format!("Project '{id_or_name}' not found"),
2232                    "registry:get_project",
2233                )
2234                .with_hint("Use 'hivemind project list' to see available projects")
2235            })
2236    }
2237
    /// Updates a project's name and/or description.
    ///
    /// Fields that match the current values are treated as no-ops; if
    /// nothing would change, no event is emitted and the current project is
    /// returned unchanged.
    ///
    /// # Errors
    /// Returns an error if the project is not found, the new name is empty,
    /// or the new name collides with another project. Failures are also
    /// recorded as error events.
    pub fn update_project(
        &self,
        id_or_name: &str,
        name: Option<&str>,
        description: Option<&str>,
    ) -> Result<Project> {
        // Failed lookups are recorded as error events before propagating.
        let project = self
            .get_project(id_or_name)
            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;

        // Reject an explicitly-empty new name up front.
        if let Some(new_name) = name {
            if new_name.trim().is_empty() {
                let err = HivemindError::user(
                    "invalid_project_name",
                    "Project name cannot be empty",
                    "registry:update_project",
                )
                .with_hint("Provide a non-empty project name");
                self.record_error_event(&err, CorrelationIds::for_project(project.id));
                return Err(err);
            }
        }

        // Drop fields that would not actually change anything, so a no-op
        // update emits no event at all.
        let name = name.filter(|n| *n != project.name);
        let description = description.filter(|d| project.description.as_deref() != Some(*d));

        if name.is_none() && description.is_none() {
            return Ok(project);
        }

        // Check for name conflict if changing name
        if let Some(new_name) = name {
            let state = self.state()?;
            if state
                .projects
                .values()
                .any(|p| p.name == new_name && p.id != project.id)
            {
                let err = HivemindError::user(
                    "project_name_conflict",
                    format!("Project name '{new_name}' is already taken"),
                    "registry:update_project",
                );
                self.record_error_event(&err, CorrelationIds::for_project(project.id));
                return Err(err);
            }
        }

        // Emit a single event carrying only the fields that changed.
        let event = Event::new(
            EventPayload::ProjectUpdated {
                id: project.id,
                name: name.map(String::from),
                description: description.map(String::from),
            },
            CorrelationIds::for_project(project.id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system(
                "event_append_failed",
                e.to_string(),
                "registry:update_project",
            )
        })?;

        // Re-read so the returned project reflects the applied event.
        self.get_project(&project.id.to_string())
    }
2309
    /// Sets (or replaces) the project-level runtime configuration.
    ///
    /// If the desired configuration is identical to the one already stored,
    /// no event is emitted and the current project is returned unchanged.
    ///
    /// # Errors
    /// Returns an error when the project is missing, `max_parallel_tasks`
    /// is 0, the adapter is unsupported, or an `env` entry is not of the
    /// form KEY=VALUE with a non-empty key. Validation failures are also
    /// recorded as error events.
    #[allow(clippy::too_many_arguments)]
    pub fn project_runtime_set(
        &self,
        id_or_name: &str,
        adapter: &str,
        binary_path: &str,
        model: Option<String>,
        args: &[String],
        env: &[String],
        timeout_ms: u64,
        max_parallel_tasks: u16,
    ) -> Result<Project> {
        // Failed lookups are recorded as error events before propagating.
        let project = self
            .get_project(id_or_name)
            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;

        // A project must be able to run at least one task.
        if max_parallel_tasks == 0 {
            let err = HivemindError::user(
                "invalid_max_parallel_tasks",
                "max_parallel_tasks must be at least 1",
                "registry:project_runtime_set",
            )
            .with_hint("Use --max-parallel-tasks 1 or higher");
            self.record_error_event(&err, CorrelationIds::for_project(project.id));
            return Err(err);
        }

        // Only adapters the binary knows how to drive are accepted.
        if !SUPPORTED_ADAPTERS.contains(&adapter) {
            let err = HivemindError::user(
                "invalid_runtime_adapter",
                format!(
                    "Unsupported runtime adapter '{adapter}'. Supported: {}",
                    SUPPORTED_ADAPTERS.join(", ")
                ),
                "registry:project_runtime_set",
            );
            self.record_error_event(&err, CorrelationIds::for_project(project.id));
            return Err(err);
        }

        // Parse KEY=VALUE pairs, rejecting malformed or empty-key entries.
        let mut env_map = HashMap::new();
        for pair in env {
            let Some((k, v)) = pair.split_once('=') else {
                let err = HivemindError::user(
                    "invalid_env",
                    format!("Invalid env var '{pair}'. Expected KEY=VALUE"),
                    "registry:project_runtime_set",
                );
                self.record_error_event(&err, CorrelationIds::for_project(project.id));
                return Err(err);
            };
            if k.trim().is_empty() {
                let err = HivemindError::user(
                    "invalid_env",
                    format!("Invalid env var '{pair}'. KEY cannot be empty"),
                    "registry:project_runtime_set",
                );
                self.record_error_event(&err, CorrelationIds::for_project(project.id));
                return Err(err);
            }
            env_map.insert(k.to_string(), v.to_string());
        }

        // No-op when the desired config matches what is already stored.
        let desired = crate::core::state::ProjectRuntimeConfig {
            adapter_name: adapter.to_string(),
            binary_path: binary_path.to_string(),
            model: model.clone(),
            args: args.to_vec(),
            env: env_map.clone(),
            timeout_ms,
            max_parallel_tasks,
        };
        if project.runtime.as_ref() == Some(&desired) {
            return Ok(project);
        }

        let event = Event::new(
            EventPayload::ProjectRuntimeConfigured {
                project_id: project.id,
                adapter_name: adapter.to_string(),
                binary_path: binary_path.to_string(),
                model,
                args: args.to_vec(),
                env: env_map,
                timeout_ms,
                max_parallel_tasks,
            },
            CorrelationIds::for_project(project.id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system(
                "event_append_failed",
                e.to_string(),
                "registry:project_runtime_set",
            )
        })?;

        // Re-read so the returned project reflects the applied event.
        self.get_project(&project.id.to_string())
    }
2410
2411    #[must_use]
2412    pub fn runtime_list(&self) -> Vec<RuntimeListEntry> {
2413        runtime_descriptors()
2414            .into_iter()
2415            .map(|d| RuntimeListEntry {
2416                adapter_name: d.adapter_name.to_string(),
2417                default_binary: d.default_binary.to_string(),
2418                available: Self::binary_available(d.default_binary),
2419                opencode_compatible: d.opencode_compatible,
2420            })
2421            .collect()
2422    }
2423
2424    pub fn runtime_health(
2425        &self,
2426        project: Option<&str>,
2427        task_id: Option<&str>,
2428    ) -> Result<RuntimeHealthStatus> {
2429        if let Some(task_id) = task_id {
2430            let task = self.get_task(task_id)?;
2431            let project = self.get_project(&task.project_id.to_string())?;
2432            let project_max_parallel = project
2433                .runtime
2434                .as_ref()
2435                .map_or(1, |cfg| cfg.max_parallel_tasks);
2436            let project_runtime = project.runtime.ok_or_else(|| {
2437                HivemindError::new(
2438                    ErrorCategory::Runtime,
2439                    "runtime_not_configured",
2440                    "Project has no runtime configured",
2441                    "registry:runtime_health",
2442                )
2443            })?;
2444            let runtime = task.runtime_override.map_or_else(
2445                || project_runtime,
2446                |r| ProjectRuntimeConfig {
2447                    adapter_name: r.adapter_name,
2448                    binary_path: r.binary_path,
2449                    model: r.model,
2450                    args: r.args,
2451                    env: r.env,
2452                    timeout_ms: r.timeout_ms,
2453                    max_parallel_tasks: project_max_parallel,
2454                },
2455            );
2456
2457            return Ok(Self::health_for_runtime(
2458                &runtime,
2459                Some(format!("task:{task_id}")),
2460            ));
2461        }
2462
2463        if let Some(project_id_or_name) = project {
2464            let project = self.get_project(project_id_or_name)?;
2465            let runtime = project.runtime.ok_or_else(|| {
2466                HivemindError::new(
2467                    ErrorCategory::Runtime,
2468                    "runtime_not_configured",
2469                    "Project has no runtime configured",
2470                    "registry:runtime_health",
2471                )
2472            })?;
2473            return Ok(Self::health_for_runtime(
2474                &runtime,
2475                Some(format!("project:{project_id_or_name}")),
2476            ));
2477        }
2478
2479        Ok(RuntimeHealthStatus {
2480            adapter_name: "all".to_string(),
2481            binary_path: "builtin-defaults".to_string(),
2482            healthy: self.runtime_list().iter().all(|r| r.available),
2483            target: None,
2484            details: Some(
2485                self.runtime_list()
2486                    .into_iter()
2487                    .map(|r| {
2488                        format!(
2489                            "{}={} ({})",
2490                            r.adapter_name,
2491                            if r.available { "ok" } else { "missing" },
2492                            r.default_binary
2493                        )
2494                    })
2495                    .collect::<Vec<_>>()
2496                    .join(", "),
2497            ),
2498        })
2499    }
2500
2501    #[allow(clippy::too_many_arguments)]
2502    pub fn task_runtime_set(
2503        &self,
2504        task_id: &str,
2505        adapter: &str,
2506        binary_path: &str,
2507        model: Option<String>,
2508        args: &[String],
2509        env: &[String],
2510        timeout_ms: u64,
2511    ) -> Result<Task> {
2512        let task = self.get_task(task_id)?;
2513        if !SUPPORTED_ADAPTERS.contains(&adapter) {
2514            return Err(HivemindError::user(
2515                "invalid_runtime_adapter",
2516                format!(
2517                    "Unsupported runtime adapter '{adapter}'. Supported: {}",
2518                    SUPPORTED_ADAPTERS.join(", ")
2519                ),
2520                "registry:task_runtime_set",
2521            ));
2522        }
2523
2524        let mut env_map = HashMap::new();
2525        for pair in env {
2526            let Some((k, v)) = pair.split_once('=') else {
2527                return Err(HivemindError::user(
2528                    "invalid_env",
2529                    format!("Invalid env var '{pair}'. Expected KEY=VALUE"),
2530                    "registry:task_runtime_set",
2531                ));
2532            };
2533            if k.trim().is_empty() {
2534                return Err(HivemindError::user(
2535                    "invalid_env",
2536                    format!("Invalid env var '{pair}'. KEY cannot be empty"),
2537                    "registry:task_runtime_set",
2538                ));
2539            }
2540            env_map.insert(k.to_string(), v.to_string());
2541        }
2542
2543        let event = Event::new(
2544            EventPayload::TaskRuntimeConfigured {
2545                task_id: task.id,
2546                adapter_name: adapter.to_string(),
2547                binary_path: binary_path.to_string(),
2548                model,
2549                args: args.to_vec(),
2550                env: env_map,
2551                timeout_ms,
2552            },
2553            CorrelationIds::for_task(task.project_id, task.id),
2554        );
2555        self.store.append(event).map_err(|e| {
2556            HivemindError::system(
2557                "event_append_failed",
2558                e.to_string(),
2559                "registry:task_runtime_set",
2560            )
2561        })?;
2562        self.get_task(task_id)
2563    }
2564
2565    pub fn task_runtime_clear(&self, task_id: &str) -> Result<Task> {
2566        let task = self.get_task(task_id)?;
2567        if task.runtime_override.is_none() {
2568            return Ok(task);
2569        }
2570
2571        let event = Event::new(
2572            EventPayload::TaskRuntimeCleared { task_id: task.id },
2573            CorrelationIds::for_task(task.project_id, task.id),
2574        );
2575        self.store.append(event).map_err(|e| {
2576            HivemindError::system(
2577                "event_append_failed",
2578                e.to_string(),
2579                "registry:task_runtime_clear",
2580            )
2581        })?;
2582        self.get_task(task_id)
2583    }
2584
2585    fn binary_available(binary: &str) -> bool {
2586        if binary.contains('/') {
2587            let path = PathBuf::from(binary);
2588            return path.exists();
2589        }
2590
2591        std::env::var_os("PATH").is_some_and(|paths| {
2592            std::env::split_paths(&paths).any(|dir| {
2593                let candidate = dir.join(binary);
2594                candidate.exists() && candidate.is_file()
2595            })
2596        })
2597    }
2598
2599    fn health_for_runtime(
2600        runtime: &ProjectRuntimeConfig,
2601        target: Option<String>,
2602    ) -> RuntimeHealthStatus {
2603        match Self::build_runtime_adapter(runtime.clone()) {
2604            Ok(mut adapter) => match adapter.initialize() {
2605                Ok(()) => RuntimeHealthStatus {
2606                    adapter_name: runtime.adapter_name.clone(),
2607                    binary_path: runtime.binary_path.clone(),
2608                    healthy: true,
2609                    target,
2610                    details: None,
2611                },
2612                Err(e) => RuntimeHealthStatus {
2613                    adapter_name: runtime.adapter_name.clone(),
2614                    binary_path: runtime.binary_path.clone(),
2615                    healthy: false,
2616                    target,
2617                    details: Some(format!("{}: {}", e.code, e.message)),
2618                },
2619            },
2620            Err(e) => RuntimeHealthStatus {
2621                adapter_name: runtime.adapter_name.clone(),
2622                binary_path: runtime.binary_path.clone(),
2623                healthy: false,
2624                target,
2625                details: Some(format!("{}: {}", e.code, e.message)),
2626            },
2627        }
2628    }
2629
2630    fn build_runtime_adapter(runtime: ProjectRuntimeConfig) -> Result<SelectedRuntimeAdapter> {
2631        let timeout = Duration::from_millis(runtime.timeout_ms);
2632        match runtime.adapter_name.as_str() {
2633            "opencode" => {
2634                let mut cfg = OpenCodeConfig::new(PathBuf::from(runtime.binary_path));
2635                cfg.model = runtime.model.clone().or(cfg.model);
2636                cfg.base.args = runtime.args;
2637                cfg.base.env = runtime.env;
2638                cfg.base.timeout = timeout;
2639                Ok(SelectedRuntimeAdapter::OpenCode(
2640                    crate::adapters::opencode::OpenCodeAdapter::new(cfg),
2641                ))
2642            }
2643            "codex" => {
2644                let mut cfg = CodexConfig::new(PathBuf::from(runtime.binary_path));
2645                cfg.model = runtime.model;
2646                cfg.base.args = if runtime.args.is_empty() {
2647                    CodexConfig::default().base.args
2648                } else {
2649                    runtime.args
2650                };
2651                cfg.base.env = runtime.env;
2652                cfg.base.timeout = timeout;
2653                Ok(SelectedRuntimeAdapter::Codex(CodexAdapter::new(cfg)))
2654            }
2655            "claude-code" => {
2656                let mut cfg = ClaudeCodeConfig::new(PathBuf::from(runtime.binary_path));
2657                cfg.model = runtime.model;
2658                cfg.base.args = if runtime.args.is_empty() {
2659                    ClaudeCodeConfig::default().base.args
2660                } else {
2661                    runtime.args
2662                };
2663                cfg.base.env = runtime.env;
2664                cfg.base.timeout = timeout;
2665                Ok(SelectedRuntimeAdapter::ClaudeCode(ClaudeCodeAdapter::new(
2666                    cfg,
2667                )))
2668            }
2669            "kilo" => {
2670                let mut cfg = KiloConfig::new(PathBuf::from(runtime.binary_path));
2671                cfg.model = runtime.model;
2672                cfg.base.args = runtime.args;
2673                cfg.base.env = runtime.env;
2674                cfg.base.timeout = timeout;
2675                Ok(SelectedRuntimeAdapter::Kilo(KiloAdapter::new(cfg)))
2676            }
2677            _ => Err(HivemindError::user(
2678                "unsupported_runtime",
2679                format!("Unsupported runtime adapter '{}'", runtime.adapter_name),
2680                "registry:build_runtime_adapter",
2681            )),
2682        }
2683    }
2684
2685    #[allow(clippy::too_many_lines)]
2686    fn tick_flow_once(
2687        &self,
2688        flow_id: &str,
2689        interactive: bool,
2690        preferred_task: Option<Uuid>,
2691    ) -> Result<TaskFlow> {
2692        let flow = self.get_flow(flow_id)?;
2693        if flow.state != FlowState::Running {
2694            return Err(HivemindError::user(
2695                "flow_not_running",
2696                "Flow is not in running state",
2697                "registry:tick_flow",
2698            ));
2699        }
2700
2701        let state = self.state()?;
2702        let graph = state.graphs.get(&flow.graph_id).ok_or_else(|| {
2703            HivemindError::system("graph_not_found", "Graph not found", "registry:tick_flow")
2704        })?;
2705
2706        let mut verifying = flow.tasks_in_state(TaskExecState::Verifying);
2707        verifying.sort();
2708        if let Some(task_id) = verifying.first().copied() {
2709            return self.process_verifying_task(flow_id, task_id);
2710        }
2711
2712        let mut newly_ready = Vec::new();
2713        let mut newly_blocked: Vec<(Uuid, String)> = Vec::new();
2714        for task_id in graph.tasks.keys() {
2715            let Some(exec) = flow.task_executions.get(task_id) else {
2716                continue;
2717            };
2718            if exec.state != TaskExecState::Pending {
2719                continue;
2720            }
2721
2722            let deps_satisfied = graph.dependencies.get(task_id).is_none_or(|deps| {
2723                deps.iter().all(|dep| {
2724                    flow.task_executions
2725                        .get(dep)
2726                        .is_some_and(|e| e.state == TaskExecState::Success)
2727                })
2728            });
2729
2730            if deps_satisfied {
2731                newly_ready.push(*task_id);
2732            } else {
2733                let mut missing: Vec<Uuid> = graph
2734                    .dependencies
2735                    .get(task_id)
2736                    .map(|deps| {
2737                        deps.iter()
2738                            .filter(|dep| {
2739                                flow.task_executions
2740                                    .get(dep)
2741                                    .is_none_or(|e| e.state != TaskExecState::Success)
2742                            })
2743                            .copied()
2744                            .collect::<Vec<_>>()
2745                    })
2746                    .unwrap_or_default();
2747                missing.sort();
2748
2749                let preview = missing
2750                    .iter()
2751                    .take(5)
2752                    .map(ToString::to_string)
2753                    .collect::<Vec<_>>()
2754                    .join(", ");
2755                let reason = if missing.len() <= 5 {
2756                    format!("Waiting on dependencies: {preview}")
2757                } else {
2758                    format!(
2759                        "Waiting on dependencies: {preview} (+{} more)",
2760                        missing.len().saturating_sub(5)
2761                    )
2762                };
2763
2764                if exec.blocked_reason.as_deref() != Some(reason.as_str()) {
2765                    newly_blocked.push((*task_id, reason));
2766                }
2767            }
2768        }
2769
2770        for (task_id, reason) in newly_blocked {
2771            let event = Event::new(
2772                EventPayload::TaskBlocked {
2773                    flow_id: flow.id,
2774                    task_id,
2775                    reason: Some(reason),
2776                },
2777                CorrelationIds::for_graph_flow_task(
2778                    flow.project_id,
2779                    flow.graph_id,
2780                    flow.id,
2781                    task_id,
2782                ),
2783            );
2784            self.store.append(event).map_err(|e| {
2785                HivemindError::system("event_append_failed", e.to_string(), "registry:tick_flow")
2786            })?;
2787        }
2788
2789        for task_id in newly_ready {
2790            let event = Event::new(
2791                EventPayload::TaskReady {
2792                    flow_id: flow.id,
2793                    task_id,
2794                },
2795                CorrelationIds::for_graph_flow_task(
2796                    flow.project_id,
2797                    flow.graph_id,
2798                    flow.id,
2799                    task_id,
2800                ),
2801            );
2802            self.store.append(event).map_err(|e| {
2803                HivemindError::system("event_append_failed", e.to_string(), "registry:tick_flow")
2804            })?;
2805        }
2806
2807        let flow = self.get_flow(flow_id)?;
2808        let mut retrying = flow.tasks_in_state(TaskExecState::Retry);
2809        retrying.sort();
2810        let mut ready = flow.tasks_in_state(TaskExecState::Ready);
2811        ready.sort();
2812
2813        let preferred =
2814            preferred_task.filter(|task_id| retrying.contains(task_id) || ready.contains(task_id));
2815        let task_to_run =
2816            preferred.or_else(|| retrying.first().copied().or_else(|| ready.first().copied()));
2817
2818        let Some(task_id) = task_to_run else {
2819            let all_success = flow
2820                .task_executions
2821                .values()
2822                .all(|e| e.state == TaskExecState::Success);
2823            if all_success {
2824                let event = Event::new(
2825                    EventPayload::TaskFlowCompleted { flow_id: flow.id },
2826                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
2827                );
2828                self.store.append(event).map_err(|e| {
2829                    HivemindError::system(
2830                        "event_append_failed",
2831                        e.to_string(),
2832                        "registry:tick_flow",
2833                    )
2834                })?;
2835                return self.get_flow(flow_id);
2836            }
2837
2838            return Ok(flow);
2839        };
2840
2841        let project = state.projects.get(&flow.project_id).ok_or_else(|| {
2842            HivemindError::system(
2843                "project_not_found",
2844                format!("Project '{}' not found", flow.project_id),
2845                "registry:tick_flow",
2846            )
2847        })?;
2848
2849        let project_runtime = project.runtime.clone().ok_or_else(|| {
2850            HivemindError::new(
2851                ErrorCategory::Runtime,
2852                "runtime_not_configured",
2853                "Project has no runtime configured",
2854                "registry:tick_flow",
2855            )
2856        })?;
2857        let task_runtime_override = state
2858            .tasks
2859            .get(&task_id)
2860            .and_then(|task| task.runtime_override.clone());
2861        let runtime = task_runtime_override.map_or_else(
2862            || project_runtime.clone(),
2863            |ovr| ProjectRuntimeConfig {
2864                adapter_name: ovr.adapter_name,
2865                binary_path: ovr.binary_path,
2866                model: ovr.model,
2867                args: ovr.args,
2868                env: ovr.env,
2869                timeout_ms: ovr.timeout_ms,
2870                max_parallel_tasks: project_runtime.max_parallel_tasks,
2871            },
2872        );
2873
2874        let worktree_status =
2875            Self::ensure_task_worktree(&flow, &state, task_id, "registry:tick_flow")?;
2876        let repo_worktrees =
2877            Self::inspect_task_worktrees(&flow, &state, task_id, "registry:tick_flow")?;
2878
2879        let exec = flow.task_executions.get(&task_id).ok_or_else(|| {
2880            HivemindError::system(
2881                "task_exec_not_found",
2882                "Task execution not found",
2883                "registry:tick_flow",
2884            )
2885        })?;
2886
2887        if exec.state == TaskExecState::Retry && exec.retry_mode == RetryMode::Clean {
2888            let branch = format!("exec/{}/{task_id}", flow.id);
2889            for (idx, (_repo_name, repo_worktree)) in repo_worktrees.iter().enumerate() {
2890                let managers =
2891                    Self::worktree_managers_for_flow(&flow, &state, "registry:tick_flow")?;
2892                let (_, manager) = &managers[idx];
2893                let base = Self::default_base_ref_for_repo(&flow, manager, idx == 0);
2894                Self::checkout_and_clean_worktree(
2895                    &repo_worktree.path,
2896                    &branch,
2897                    &base,
2898                    "registry:tick_flow",
2899                )?;
2900            }
2901        }
2902
2903        let next_attempt_number = exec.attempt_count.saturating_add(1);
2904
2905        // Ensure this worktree contains the latest changes from dependency tasks.
2906        // Each task runs in its own worktree/branch (`exec/<flow>/<task>`), so dependent
2907        // tasks must merge dependency branch heads to see upstream work.
2908        if let Some(deps) = graph.dependencies.get(&task_id) {
2909            let mut dep_ids: Vec<Uuid> = deps.iter().copied().collect();
2910            dep_ids.sort();
2911
2912            for (_repo_name, repo_worktree) in &repo_worktrees {
2913                for &dep_task_id in &dep_ids {
2914                    let dep_branch = format!("exec/{}/{dep_task_id}", flow.id);
2915                    let dep_ref = format!("refs/heads/{dep_branch}");
2916
2917                    let ref_exists = std::process::Command::new("git")
2918                        .current_dir(&repo_worktree.path)
2919                        .args(["show-ref", "--verify", "--quiet", &dep_ref])
2920                        .status()
2921                        .map(|s| s.success())
2922                        .unwrap_or(false);
2923
2924                    if !ref_exists {
2925                        continue;
2926                    }
2927
2928                    let already_contains = std::process::Command::new("git")
2929                        .current_dir(&repo_worktree.path)
2930                        .args(["merge-base", "--is-ancestor", &dep_branch, "HEAD"])
2931                        .status()
2932                        .map(|s| s.success())
2933                        .unwrap_or(false);
2934
2935                    if already_contains {
2936                        continue;
2937                    }
2938
2939                    let merge = std::process::Command::new("git")
2940                        .current_dir(&repo_worktree.path)
2941                        .args([
2942                            "-c",
2943                            "user.name=Hivemind",
2944                            "-c",
2945                            "user.email=hivemind@example.com",
2946                            "merge",
2947                            "--no-edit",
2948                            &dep_branch,
2949                        ])
2950                        .output()
2951                        .map_err(|e| {
2952                            HivemindError::system(
2953                                "git_merge_failed",
2954                                e.to_string(),
2955                                "registry:tick_flow",
2956                            )
2957                        })?;
2958
2959                    if !merge.status.success() {
2960                        let _ = std::process::Command::new("git")
2961                            .current_dir(&repo_worktree.path)
2962                            .args(["merge", "--abort"])
2963                            .output();
2964                        return Err(HivemindError::git(
2965                            "merge_failed",
2966                            String::from_utf8_lossy(&merge.stderr).to_string(),
2967                            "registry:tick_flow",
2968                        ));
2969                    }
2970                }
2971            }
2972        }
2973
2974        // Use the canonical task lifecycle so we persist baselines, compute diffs, and create
2975        // checkpoint commits. This is required for dependency propagation.
2976        let attempt_id = self.start_task_execution(&task_id.to_string())?;
2977        let attempt_corr = CorrelationIds::for_graph_flow_task_attempt(
2978            flow.project_id,
2979            flow.graph_id,
2980            flow.id,
2981            task_id,
2982            attempt_id,
2983        );
2984
2985        let task = graph.tasks.get(&task_id).ok_or_else(|| {
2986            HivemindError::system(
2987                "task_not_found",
2988                "Task not found in graph",
2989                "registry:tick_flow",
2990            )
2991        })?;
2992
2993        let max_attempts = task.retry_policy.max_retries.saturating_add(1);
2994        let checkpoint_ids = Self::normalized_checkpoint_ids(&task.checkpoints);
2995
2996        let checkpoint_help = if checkpoint_ids.is_empty() {
2997            None
2998        } else {
2999            Some(format!(
3000                "Execution checkpoints (in order): {}\nComplete checkpoints from the runtime using: \"$HIVEMIND_BIN\" checkpoint complete --id <checkpoint-id> [--summary \"...\"]\n(If available, \"$HIVEMIND_AGENT_BIN\" may be used equivalently.)\nAttempt ID for this run: {attempt_id}",
3001                checkpoint_ids.join(", ")
3002            ))
3003        };
3004        let repo_context = format!(
3005            "Multi-repo worktrees for this attempt:\n{}",
3006            repo_worktrees
3007                .iter()
3008                .map(|(name, wt)| format!("- {name}: {}", wt.path.display()))
3009                .collect::<Vec<_>>()
3010                .join("\n")
3011        );
3012
3013        let (retry_context, prior_attempts) = if next_attempt_number > 1 {
3014            let (ctx, priors, ids, req, opt, ec, term) = self.build_retry_context(
3015                &state,
3016                &flow,
3017                task_id,
3018                next_attempt_number,
3019                max_attempts,
3020                "registry:tick_flow",
3021            )?;
3022
3023            self.append_event(
3024                Event::new(
3025                    EventPayload::RetryContextAssembled {
3026                        flow_id: flow.id,
3027                        task_id,
3028                        attempt_id,
3029                        attempt_number: next_attempt_number,
3030                        max_attempts,
3031                        prior_attempt_ids: ids,
3032                        required_check_failures: req,
3033                        optional_check_failures: opt,
3034                        runtime_exit_code: ec,
3035                        runtime_terminated_reason: term,
3036                        context: ctx.clone(),
3037                    },
3038                    attempt_corr.clone(),
3039                ),
3040                "registry:tick_flow",
3041            )?;
3042
3043            let context = checkpoint_help.as_ref().map_or_else(
3044                || format!("{ctx}\n\n{repo_context}"),
3045                |checkpoint_text| format!("{ctx}\n\n{repo_context}\n\n{checkpoint_text}"),
3046            );
3047
3048            (Some(context), priors)
3049        } else {
3050            let context = match checkpoint_help {
3051                Some(text) => format!("{repo_context}\n\n{text}"),
3052                None => repo_context,
3053            };
3054            (Some(context), Vec::new())
3055        };
3056
3057        self.store
3058            .append(Event::new(
3059                EventPayload::RuntimeStarted {
3060                    adapter_name: runtime.adapter_name.clone(),
3061                    task_id,
3062                    attempt_id,
3063                },
3064                attempt_corr.clone(),
3065            ))
3066            .map_err(|e| {
3067                HivemindError::system("event_append_failed", e.to_string(), "registry:tick_flow")
3068            })?;
3069
3070        let mut runtime_for_adapter = runtime;
3071
3072        let target_dir = self
3073            .config
3074            .data_dir
3075            .join("cargo-target")
3076            .join(flow.id.to_string())
3077            .join(task_id.to_string())
3078            .join(attempt_id.to_string());
3079        let _ = fs::create_dir_all(&target_dir);
3080        runtime_for_adapter
3081            .env
3082            .entry("CARGO_TARGET_DIR".to_string())
3083            .or_insert_with(|| target_dir.to_string_lossy().to_string());
3084        runtime_for_adapter
3085            .env
3086            .insert("HIVEMIND_ATTEMPT_ID".to_string(), attempt_id.to_string());
3087        runtime_for_adapter
3088            .env
3089            .insert("HIVEMIND_TASK_ID".to_string(), task_id.to_string());
3090        runtime_for_adapter
3091            .env
3092            .insert("HIVEMIND_FLOW_ID".to_string(), flow.id.to_string());
3093        runtime_for_adapter.env.insert(
3094            "HIVEMIND_PRIMARY_WORKTREE".to_string(),
3095            worktree_status.path.to_string_lossy().to_string(),
3096        );
3097        runtime_for_adapter.env.insert(
3098            "HIVEMIND_ALL_WORKTREES".to_string(),
3099            repo_worktrees
3100                .iter()
3101                .map(|(name, wt)| format!("{name}={}", wt.path.display()))
3102                .collect::<Vec<_>>()
3103                .join(";"),
3104        );
3105        for (repo_name, wt) in &repo_worktrees {
3106            let env_key = format!(
3107                "HIVEMIND_REPO_{}_WORKTREE",
3108                repo_name
3109                    .chars()
3110                    .map(|c| if c.is_ascii_alphanumeric() {
3111                        c.to_ascii_uppercase()
3112                    } else {
3113                        '_'
3114                    })
3115                    .collect::<String>()
3116            );
3117            runtime_for_adapter
3118                .env
3119                .insert(env_key, wt.path.to_string_lossy().to_string());
3120        }
3121        if let Ok(bin) = std::env::current_exe() {
3122            let hivemind_bin = bin.to_string_lossy().to_string();
3123            runtime_for_adapter
3124                .env
3125                .insert("HIVEMIND_BIN".to_string(), hivemind_bin);
3126
3127            let agent_path = bin
3128                .parent()
3129                .map(|p| p.join("hivemind-agent"))
3130                .filter(|p| p.exists())
3131                .unwrap_or(bin);
3132            runtime_for_adapter.env.insert(
3133                "HIVEMIND_AGENT_BIN".to_string(),
3134                agent_path.to_string_lossy().to_string(),
3135            );
3136        }
3137
3138        let mut adapter = Self::build_runtime_adapter(runtime_for_adapter.clone())?;
3139        if let Err(e) = adapter.initialize() {
3140            let reason = format!("{}: {}", e.code, e.message);
3141            self.store
3142                .append(Event::new(
3143                    EventPayload::RuntimeTerminated { attempt_id, reason },
3144                    attempt_corr,
3145                ))
3146                .map_err(|err| {
3147                    HivemindError::system(
3148                        "event_append_failed",
3149                        err.to_string(),
3150                        "registry:tick_flow",
3151                    )
3152                })?;
3153            let _ = self.fail_running_attempt(
3154                &flow,
3155                task_id,
3156                attempt_id,
3157                "runtime_initialize_failed",
3158                "registry:tick_flow",
3159            );
3160            return self.get_flow(flow_id);
3161        }
3162        if let Err(e) = adapter.prepare(task_id, &worktree_status.path) {
3163            let reason = format!("{}: {}", e.code, e.message);
3164            self.store
3165                .append(Event::new(
3166                    EventPayload::RuntimeTerminated { attempt_id, reason },
3167                    attempt_corr,
3168                ))
3169                .map_err(|err| {
3170                    HivemindError::system(
3171                        "event_append_failed",
3172                        err.to_string(),
3173                        "registry:tick_flow",
3174                    )
3175                })?;
3176            let _ = self.fail_running_attempt(
3177                &flow,
3178                task_id,
3179                attempt_id,
3180                "runtime_prepare_failed",
3181                "registry:tick_flow",
3182            );
3183            return self.get_flow(flow_id);
3184        }
3185
3186        let input = ExecutionInput {
3187            task_description: task
3188                .description
3189                .clone()
3190                .unwrap_or_else(|| task.title.clone()),
3191            success_criteria: task.criteria.description.clone(),
3192            context: retry_context,
3193            prior_attempts,
3194            verifier_feedback: None,
3195        };
3196
3197        let mut runtime_projector = RuntimeEventProjector::new();
3198
3199        let (report, terminated_reason) = if interactive {
3200            let mut stdout = std::io::stdout();
3201
3202            let res = adapter.execute_interactive(&input, |evt| {
3203                match evt {
3204                    InteractiveAdapterEvent::Output { content } => {
3205                        let chunk = content;
3206                        let _ = stdout.write_all(chunk.as_bytes());
3207                        let _ = stdout.flush();
3208                        let event = Event::new(
3209                            EventPayload::RuntimeOutputChunk {
3210                                attempt_id,
3211                                stream: RuntimeOutputStream::Stdout,
3212                                content: chunk.clone(),
3213                            },
3214                            attempt_corr.clone(),
3215                        );
3216                        self.store.append(event).map_err(|e| e.to_string())?;
3217                        let _ = self.append_projected_runtime_observations(
3218                            attempt_id,
3219                            &attempt_corr,
3220                            runtime_projector.observe_chunk(RuntimeOutputStream::Stdout, &chunk),
3221                            "registry:tick_flow",
3222                        );
3223                    }
3224                    InteractiveAdapterEvent::Input { content } => {
3225                        let event = Event::new(
3226                            EventPayload::RuntimeInputProvided {
3227                                attempt_id,
3228                                content,
3229                            },
3230                            attempt_corr.clone(),
3231                        );
3232                        self.store.append(event).map_err(|e| e.to_string())?;
3233                    }
3234                    InteractiveAdapterEvent::Interrupted => {
3235                        let event = Event::new(
3236                            EventPayload::RuntimeInterrupted { attempt_id },
3237                            attempt_corr.clone(),
3238                        );
3239                        self.store.append(event).map_err(|e| e.to_string())?;
3240                    }
3241                }
3242                Ok(())
3243            });
3244
3245            match res {
3246                Ok(r) => (r.report, r.terminated_reason),
3247                Err(e) => {
3248                    let reason = format!("{}: {}", e.code, e.message);
3249                    self.store
3250                        .append(Event::new(
3251                            EventPayload::RuntimeTerminated { attempt_id, reason },
3252                            attempt_corr,
3253                        ))
3254                        .map_err(|err| {
3255                            HivemindError::system(
3256                                "event_append_failed",
3257                                err.to_string(),
3258                                "registry:tick_flow",
3259                            )
3260                        })?;
3261                    let _ = self.fail_running_attempt(
3262                        &flow,
3263                        task_id,
3264                        attempt_id,
3265                        "runtime_execution_failed",
3266                        "registry:tick_flow",
3267                    );
3268                    return self.get_flow(flow_id);
3269                }
3270            }
3271        } else {
3272            let report = match adapter.execute(input) {
3273                Ok(r) => r,
3274                Err(e) => {
3275                    let reason = format!("{}: {}", e.code, e.message);
3276                    self.store
3277                        .append(Event::new(
3278                            EventPayload::RuntimeTerminated { attempt_id, reason },
3279                            attempt_corr,
3280                        ))
3281                        .map_err(|err| {
3282                            HivemindError::system(
3283                                "event_append_failed",
3284                                err.to_string(),
3285                                "registry:tick_flow",
3286                            )
3287                        })?;
3288                    let _ = self.fail_running_attempt(
3289                        &flow,
3290                        task_id,
3291                        attempt_id,
3292                        "runtime_execution_failed",
3293                        "registry:tick_flow",
3294                    );
3295                    return self.get_flow(flow_id);
3296                }
3297            };
3298            (report, None)
3299        };
3300
3301        // Best-effort filesystem observed based on the persisted baseline.
3302        if let Ok(state) = self.state() {
3303            if let Some(attempt) = state.attempts.get(&attempt_id) {
3304                if let Some(baseline_id) = attempt.baseline_id {
3305                    if let Ok(baseline) = self.read_baseline_artifact(baseline_id) {
3306                        if let Ok(diff) = Diff::compute(&baseline, &worktree_status.path) {
3307                            let created = diff
3308                                .changes
3309                                .iter()
3310                                .filter(|c| c.change_type == ChangeType::Created)
3311                                .map(|c| c.path.clone())
3312                                .collect();
3313                            let modified = diff
3314                                .changes
3315                                .iter()
3316                                .filter(|c| c.change_type == ChangeType::Modified)
3317                                .map(|c| c.path.clone())
3318                                .collect();
3319                            let deleted = diff
3320                                .changes
3321                                .iter()
3322                                .filter(|c| c.change_type == ChangeType::Deleted)
3323                                .map(|c| c.path.clone())
3324                                .collect();
3325
3326                            let fs_event = Event::new(
3327                                EventPayload::RuntimeFilesystemObserved {
3328                                    attempt_id,
3329                                    files_created: created,
3330                                    files_modified: modified,
3331                                    files_deleted: deleted,
3332                                },
3333                                attempt_corr.clone(),
3334                            );
3335                            let _ = self.store.append(fs_event);
3336                        }
3337                    }
3338                }
3339            }
3340        }
3341
3342        if !interactive {
3343            for chunk in report.stdout.lines() {
3344                let content = chunk.to_string();
3345                let event = Event::new(
3346                    EventPayload::RuntimeOutputChunk {
3347                        attempt_id,
3348                        stream: RuntimeOutputStream::Stdout,
3349                        content: content.clone(),
3350                    },
3351                    attempt_corr.clone(),
3352                );
3353                self.store.append(event).map_err(|e| {
3354                    HivemindError::system(
3355                        "event_append_failed",
3356                        e.to_string(),
3357                        "registry:tick_flow",
3358                    )
3359                })?;
3360
3361                let _ = self.append_projected_runtime_observations(
3362                    attempt_id,
3363                    &attempt_corr,
3364                    runtime_projector
3365                        .observe_chunk(RuntimeOutputStream::Stdout, &format!("{content}\n")),
3366                    "registry:tick_flow",
3367                );
3368            }
3369            for chunk in report.stderr.lines() {
3370                let content = chunk.to_string();
3371                let event = Event::new(
3372                    EventPayload::RuntimeOutputChunk {
3373                        attempt_id,
3374                        stream: RuntimeOutputStream::Stderr,
3375                        content: content.clone(),
3376                    },
3377                    attempt_corr.clone(),
3378                );
3379                self.store.append(event).map_err(|e| {
3380                    HivemindError::system(
3381                        "event_append_failed",
3382                        e.to_string(),
3383                        "registry:tick_flow",
3384                    )
3385                })?;
3386
3387                let _ = self.append_projected_runtime_observations(
3388                    attempt_id,
3389                    &attempt_corr,
3390                    runtime_projector
3391                        .observe_chunk(RuntimeOutputStream::Stderr, &format!("{content}\n")),
3392                    "registry:tick_flow",
3393                );
3394            }
3395        }
3396
3397        let _ = self.append_projected_runtime_observations(
3398            attempt_id,
3399            &attempt_corr,
3400            runtime_projector.flush(),
3401            "registry:tick_flow",
3402        );
3403
3404        if let Some(reason) = terminated_reason {
3405            let _ = self.store.append(Event::new(
3406                EventPayload::RuntimeTerminated { attempt_id, reason },
3407                attempt_corr.clone(),
3408            ));
3409        }
3410
3411        let duration_ms = u64::try_from(report.duration.as_millis().min(u128::from(u64::MAX)))
3412            .unwrap_or(u64::MAX);
3413        let exited_event = Event::new(
3414            EventPayload::RuntimeExited {
3415                attempt_id,
3416                exit_code: report.exit_code,
3417                duration_ms,
3418            },
3419            attempt_corr,
3420        );
3421        self.store.append(exited_event).map_err(|e| {
3422            HivemindError::system("event_append_failed", e.to_string(), "registry:tick_flow")
3423        })?;
3424
3425        if report.exit_code != 0 {
3426            let _ = self.fail_running_attempt(
3427                &flow,
3428                task_id,
3429                attempt_id,
3430                "runtime_nonzero_exit",
3431                "registry:tick_flow",
3432            );
3433            return self.get_flow(flow_id);
3434        }
3435
3436        self.complete_task_execution(&task_id.to_string())?;
3437        self.process_verifying_task(flow_id, task_id)
3438    }
3439
    /// Advances a running flow by scheduling up to `limit` pieces of work.
    ///
    /// `limit` is the minimum of the explicit `max_parallel` argument (when
    /// given), the project's configured `max_parallel_tasks` (defaulting to
    /// 1, clamped to at least 1), and the
    /// `HIVEMIND_MAX_PARALLEL_TASKS_GLOBAL` environment variable (unbounded
    /// when unset). Each loop iteration either finishes one `Verifying`
    /// task, starts one `Retry`/`Ready` task whose scope does not
    /// hard-conflict with active tasks, or stops.
    ///
    /// NOTE(review): tasks that were already `Running` before this call do
    /// not consume loop iterations; they only feed the scope-compatibility
    /// checks. Confirm `limit` is meant to bound work scheduled per tick
    /// rather than total concurrency.
    ///
    /// # Errors
    /// Returns an error if `interactive` is requested (deprecated), the flow
    /// is not running, the project is missing, or the global parallel limit
    /// is malformed; state-read and event-append failures also propagate.
    #[allow(clippy::too_many_lines)]
    pub fn tick_flow(
        &self,
        flow_id: &str,
        interactive: bool,
        max_parallel: Option<u16>,
    ) -> Result<TaskFlow> {
        // Interactive mode has been removed; fail fast with guidance.
        if interactive {
            return Err(HivemindError::user(
                "interactive_mode_deprecated",
                "Interactive mode is deprecated and no longer supported",
                "registry:tick_flow",
            )
            .with_hint("Re-run without --interactive"));
        }

        let flow = self.get_flow(flow_id)?;
        if flow.state != FlowState::Running {
            return Err(HivemindError::user(
                "flow_not_running",
                "Flow is not in running state",
                "registry:tick_flow",
            ));
        }

        let state = self.state()?;
        let project = state.projects.get(&flow.project_id).ok_or_else(|| {
            HivemindError::system(
                "project_not_found",
                format!("Project '{}' not found", flow.project_id),
                "registry:tick_flow",
            )
        })?;

        // Resolve the effective limit: requested (or configured) capped by
        // the process-wide environment override.
        let configured_limit = project
            .runtime
            .as_ref()
            .map_or(1_u16, |cfg| cfg.max_parallel_tasks.max(1));
        let requested_limit = max_parallel.unwrap_or(configured_limit);
        let global_limit = match env::var("HIVEMIND_MAX_PARALLEL_TASKS_GLOBAL") {
            Ok(raw) => Self::parse_global_parallel_limit(Some(raw))?,
            Err(env::VarError::NotPresent) => Self::parse_global_parallel_limit(None)?,
            Err(env::VarError::NotUnicode(_)) => {
                return Err(HivemindError::user(
                    "invalid_global_parallel_limit",
                    "HIVEMIND_MAX_PARALLEL_TASKS_GLOBAL must be valid UTF-8",
                    "registry:tick_flow",
                ))
            }
        };

        let limit = requested_limit.min(global_limit);
        if limit == 0 {
            return Err(HivemindError::user(
                "invalid_max_parallel",
                "max_parallel must be at least 1",
                "registry:tick_flow",
            )
            .with_hint("Use --max-parallel 1 or higher"));
        }

        // Tasks started during this call, with their scopes, so later
        // candidates can be checked for conflicts against them.
        let mut started_in_tick: Vec<(Uuid, Scope)> = Vec::new();
        let mut latest_flow = flow;

        for _ in 0..usize::from(limit) {
            // Re-read the flow each iteration: earlier iterations may have
            // changed task states or finished the flow.
            let snapshot = self.get_flow(flow_id)?;
            latest_flow = snapshot.clone();
            if snapshot.state != FlowState::Running {
                break;
            }

            // Verifying tasks take priority over starting new work; sorting
            // makes the pick deterministic.
            let mut verifying = snapshot.tasks_in_state(TaskExecState::Verifying);
            verifying.sort();
            if let Some(task_id) = verifying.first().copied() {
                latest_flow = self.process_verifying_task(flow_id, task_id)?;
                continue;
            }

            let state = self.state()?;
            let graph = state.graphs.get(&snapshot.graph_id).ok_or_else(|| {
                HivemindError::system("graph_not_found", "Graph not found", "registry:tick_flow")
            })?;

            // Candidate order: retrying tasks first, then ready ones, with
            // duplicates removed.
            let mut retrying = snapshot.tasks_in_state(TaskExecState::Retry);
            retrying.sort();
            let mut ready = snapshot.tasks_in_state(TaskExecState::Ready);
            ready.sort();

            let mut candidates = retrying;
            for task_id in ready {
                if !candidates.contains(&task_id) {
                    candidates.push(task_id);
                }
            }

            if candidates.is_empty() {
                // Nothing startable: give tick_flow_once one chance to
                // advance other state, then stop.
                latest_flow = self.tick_flow_once(flow_id, interactive, None)?;
                break;
            }

            // Active scopes = tasks started in this call plus tasks already
            // Running in the snapshot (missing scopes default to empty).
            let mut active_scopes = started_in_tick.clone();
            let mut running = snapshot.tasks_in_state(TaskExecState::Running);
            running.sort();
            for running_id in running {
                if active_scopes.iter().any(|(id, _)| *id == running_id) {
                    continue;
                }
                if let Some(task) = graph.tasks.get(&running_id) {
                    active_scopes.push((running_id, task.scope.clone().unwrap_or_default()));
                }
            }

            let mut chosen: Option<(Uuid, Scope)> = None;

            for candidate_id in candidates {
                let Some(task) = graph.tasks.get(&candidate_id) else {
                    continue;
                };

                // A hard conflict defers the candidate entirely; only the
                // first soft conflict is remembered (for a warning event).
                let candidate_scope = task.scope.clone().unwrap_or_default();
                let mut hard_conflict: Option<(Uuid, String)> = None;
                let mut soft_conflict: Option<(Uuid, String)> = None;

                for (other_id, other_scope) in &active_scopes {
                    if *other_id == candidate_id {
                        continue;
                    }

                    match check_compatibility(&candidate_scope, other_scope) {
                        ScopeCompatibility::HardConflict => {
                            hard_conflict = Some((
                                *other_id,
                                format!(
                                    "Hard scope conflict with task {other_id}; serialized in this tick"
                                ),
                            ));
                            break;
                        }
                        ScopeCompatibility::SoftConflict => {
                            if soft_conflict.is_none() {
                                soft_conflict = Some((
                                    *other_id,
                                    format!(
                                        "Soft scope conflict with task {other_id}; allowing parallel attempt with warning"
                                    ),
                                ));
                            }
                        }
                        ScopeCompatibility::Compatible => {}
                    }
                }

                if let Some((conflicting_task_id, reason)) = hard_conflict {
                    // Audit the conflict and the deferral, then try the next
                    // candidate in order.
                    self.append_event(
                        Event::new(
                            EventPayload::ScopeConflictDetected {
                                flow_id: snapshot.id,
                                task_id: candidate_id,
                                conflicting_task_id,
                                severity: "hard_conflict".to_string(),
                                action: "serialized".to_string(),
                                reason: reason.clone(),
                            },
                            CorrelationIds::for_graph_flow_task(
                                snapshot.project_id,
                                snapshot.graph_id,
                                snapshot.id,
                                candidate_id,
                            ),
                        ),
                        "registry:tick_flow",
                    )?;

                    self.append_event(
                        Event::new(
                            EventPayload::TaskSchedulingDeferred {
                                flow_id: snapshot.id,
                                task_id: candidate_id,
                                reason,
                            },
                            CorrelationIds::for_graph_flow_task(
                                snapshot.project_id,
                                snapshot.graph_id,
                                snapshot.id,
                                candidate_id,
                            ),
                        ),
                        "registry:tick_flow",
                    )?;
                    continue;
                }

                if let Some((conflicting_task_id, reason)) = soft_conflict {
                    // Soft conflicts are permitted but recorded for audit.
                    self.append_event(
                        Event::new(
                            EventPayload::ScopeConflictDetected {
                                flow_id: snapshot.id,
                                task_id: candidate_id,
                                conflicting_task_id,
                                severity: "soft_conflict".to_string(),
                                action: "warn_parallel".to_string(),
                                reason,
                            },
                            CorrelationIds::for_graph_flow_task(
                                snapshot.project_id,
                                snapshot.graph_id,
                                snapshot.id,
                                candidate_id,
                            ),
                        ),
                        "registry:tick_flow",
                    )?;
                }

                chosen = Some((candidate_id, candidate_scope));
                break;
            }

            // Every candidate hard-conflicted: stop scheduling this tick.
            let Some((task_id, scope)) = chosen else {
                break;
            };

            started_in_tick.push((task_id, scope));
            latest_flow = self.tick_flow_once(flow_id, interactive, Some(task_id))?;
        }

        Ok(latest_flow)
    }
3668
3669    fn parse_global_parallel_limit(raw: Option<String>) -> Result<u16> {
3670        let Some(raw) = raw else {
3671            return Ok(u16::MAX);
3672        };
3673
3674        let parsed = raw.parse::<u16>().map_err(|_| {
3675            HivemindError::user(
3676                "invalid_global_parallel_limit",
3677                format!(
3678                    "HIVEMIND_MAX_PARALLEL_TASKS_GLOBAL must be a positive integer, got '{raw}'"
3679                ),
3680                "registry:tick_flow",
3681            )
3682        })?;
3683
3684        if parsed == 0 {
3685            return Err(HivemindError::user(
3686                "invalid_global_parallel_limit",
3687                "HIVEMIND_MAX_PARALLEL_TASKS_GLOBAL must be at least 1",
3688                "registry:tick_flow",
3689            ));
3690        }
3691
3692        Ok(parsed)
3693    }
3694
    /// Returns the registry configuration.
    ///
    /// Read-only accessor: the configuration is borrowed from `self` and
    /// lives as long as the registry reference.
    #[must_use]
    pub fn config(&self) -> &RegistryConfig {
        &self.config
    }
3700
3701    /// Attaches a repository to a project.
3702    ///
3703    /// # Errors
3704    /// Returns an error if the project is not found or the path is not a valid git repository.
3705    pub fn attach_repo(
3706        &self,
3707        id_or_name: &str,
3708        path: &str,
3709        name: Option<&str>,
3710        access_mode: RepoAccessMode,
3711    ) -> Result<Project> {
3712        let project = self
3713            .get_project(id_or_name)
3714            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
3715        let path = path.trim().trim_matches(|c| c == '"' || c == '\'').trim();
3716        let path_buf = std::path::PathBuf::from(path);
3717
3718        if path.is_empty() {
3719            let err = HivemindError::user(
3720                "invalid_repository_path",
3721                "Repository path cannot be empty",
3722                "registry:attach_repo",
3723            )
3724            .with_hint("Provide a valid filesystem path to a git repository");
3725            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3726            return Err(err);
3727        }
3728
3729        if !path_buf.exists() {
3730            let err = HivemindError::user(
3731                "repo_path_not_found",
3732                format!("Repository path '{path}' not found"),
3733                "registry:attach_repo",
3734            )
3735            .with_hint("Provide an existing path to a git repository");
3736            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3737            return Err(err);
3738        }
3739
3740        // Validate it's a git repository
3741        let git_dir = path_buf.join(".git");
3742        if !git_dir.exists() {
3743            let err = HivemindError::git(
3744                "not_a_git_repo",
3745                format!("'{path}' is not a git repository"),
3746                "registry:attach_repo",
3747            )
3748            .with_hint("Provide a path to a directory containing a .git folder");
3749            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3750            return Err(err);
3751        }
3752
3753        // Check if already attached
3754        let canonical_path = path_buf
3755            .canonicalize()
3756            .map_err(|e| {
3757                let err = HivemindError::system(
3758                    "path_canonicalize_failed",
3759                    e.to_string(),
3760                    "registry:attach_repo",
3761                );
3762                self.record_error_event(&err, CorrelationIds::for_project(project.id));
3763                err
3764            })?
3765            .to_string_lossy()
3766            .to_string();
3767
3768        if project
3769            .repositories
3770            .iter()
3771            .any(|r| r.path == canonical_path)
3772        {
3773            let err = HivemindError::user(
3774                "repo_already_attached",
3775                format!("Repository '{path}' is already attached to this project"),
3776                "registry:attach_repo",
3777            )
3778            .with_hint(
3779                "Use 'hivemind project inspect <project>' to view attached repos or detach the existing one with 'hivemind project detach-repo <project> <repo-name>'",
3780            );
3781            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3782            return Err(err);
3783        }
3784
3785        // Derive repo name from arg or path
3786        let repo_name = name
3787            .map(ToString::to_string)
3788            .or_else(|| {
3789                path_buf
3790                    .file_name()
3791                    .map(|n| n.to_string_lossy().to_string())
3792            })
3793            .unwrap_or_else(|| "repo".to_string());
3794
3795        if project.repositories.iter().any(|r| r.name == repo_name) {
3796            let err = HivemindError::user(
3797                "repo_name_already_attached",
3798                format!(
3799                    "Repository name '{repo_name}' is already attached to project '{}'",
3800                    project.name
3801                ),
3802                "registry:attach_repo",
3803            )
3804            .with_hint("Use --name to provide a different repository name");
3805            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3806            return Err(err);
3807        }
3808
3809        let event = Event::new(
3810            EventPayload::RepositoryAttached {
3811                project_id: project.id,
3812                path: canonical_path,
3813                name: repo_name,
3814                access_mode,
3815            },
3816            CorrelationIds::for_project(project.id),
3817        );
3818
3819        self.store.append(event).map_err(|e| {
3820            HivemindError::system("event_append_failed", e.to_string(), "registry:attach_repo")
3821        })?;
3822
3823        self.get_project(&project.id.to_string())
3824    }
3825
3826    /// Detaches a repository from a project.
3827    ///
3828    /// # Errors
3829    /// Returns an error if the project or repository is not found.
3830    pub fn detach_repo(&self, id_or_name: &str, repo_name: &str) -> Result<Project> {
3831        let project = self
3832            .get_project(id_or_name)
3833            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
3834
3835        let state = self.state()?;
3836        let has_active_flow = state.flows.values().any(|flow| {
3837            flow.project_id == project.id
3838                && !matches!(
3839                    flow.state,
3840                    FlowState::Completed | FlowState::Merged | FlowState::Aborted
3841                )
3842        });
3843        if has_active_flow {
3844            let err = HivemindError::user(
3845                "project_in_active_flow",
3846                "Cannot detach repositories while project has active flows",
3847                "registry:detach_repo",
3848            )
3849            .with_hint("Abort, complete, or merge all active flows before detaching repositories");
3850            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3851            return Err(err);
3852        }
3853
3854        // Check if repo exists
3855        if !project.repositories.iter().any(|r| r.name == repo_name) {
3856            let err = HivemindError::user(
3857                "repo_not_found",
3858                format!(
3859                    "Repository '{repo_name}' is not attached to project '{}'",
3860                    project.name
3861                ),
3862                "registry:detach_repo",
3863            )
3864            .with_hint("Use 'hivemind project inspect' to see attached repositories");
3865            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3866            return Err(err);
3867        }
3868
3869        let event = Event::new(
3870            EventPayload::RepositoryDetached {
3871                project_id: project.id,
3872                name: repo_name.to_string(),
3873            },
3874            CorrelationIds::for_project(project.id),
3875        );
3876
3877        self.store.append(event).map_err(|e| {
3878            HivemindError::system("event_append_failed", e.to_string(), "registry:detach_repo")
3879        })?;
3880
3881        self.get_project(&project.id.to_string())
3882    }
3883
3884    // ========== Task Management ==========
3885
3886    /// Creates a new task in a project.
3887    ///
3888    /// # Errors
3889    /// Returns an error if the project is not found.
3890    pub fn create_task(
3891        &self,
3892        project_id_or_name: &str,
3893        title: &str,
3894        description: Option<&str>,
3895        scope: Option<Scope>,
3896    ) -> Result<Task> {
3897        let project = self
3898            .get_project(project_id_or_name)
3899            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
3900
3901        if title.trim().is_empty() {
3902            let err = HivemindError::user(
3903                "invalid_task_title",
3904                "Task title cannot be empty",
3905                "registry:create_task",
3906            )
3907            .with_hint("Provide a non-empty task title");
3908            self.record_error_event(&err, CorrelationIds::for_project(project.id));
3909            return Err(err);
3910        }
3911
3912        let task_id = Uuid::new_v4();
3913        let event = Event::new(
3914            EventPayload::TaskCreated {
3915                id: task_id,
3916                project_id: project.id,
3917                title: title.to_string(),
3918                description: description.map(String::from),
3919                scope,
3920            },
3921            CorrelationIds::for_task(project.id, task_id),
3922        );
3923
3924        self.store.append(event).map_err(|e| {
3925            HivemindError::system("event_append_failed", e.to_string(), "registry:create_task")
3926        })?;
3927
3928        self.get_task(&task_id.to_string())
3929    }
3930
3931    /// Lists tasks in a project.
3932    ///
3933    /// # Errors
3934    /// Returns an error if the project is not found.
3935    pub fn list_tasks(
3936        &self,
3937        project_id_or_name: &str,
3938        state_filter: Option<TaskState>,
3939    ) -> Result<Vec<Task>> {
3940        let project = self.get_project(project_id_or_name)?;
3941        let state = self.state()?;
3942
3943        let mut tasks: Vec<_> = state
3944            .tasks
3945            .into_values()
3946            .filter(|t| t.project_id == project.id)
3947            .filter(|t| state_filter.is_none_or(|s| t.state == s))
3948            .collect();
3949
3950        tasks.sort_by(|a, b| a.created_at.cmp(&b.created_at));
3951        Ok(tasks)
3952    }
3953
3954    /// Gets a task by ID.
3955    ///
3956    /// # Errors
3957    /// Returns an error if the task is not found.
3958    pub fn get_task(&self, task_id: &str) -> Result<Task> {
3959        let id = Uuid::parse_str(task_id).map_err(|_| {
3960            HivemindError::user(
3961                "invalid_task_id",
3962                format!("'{task_id}' is not a valid task ID"),
3963                "registry:get_task",
3964            )
3965        })?;
3966
3967        let state = self.state()?;
3968        state.tasks.get(&id).cloned().ok_or_else(|| {
3969            HivemindError::user(
3970                "task_not_found",
3971                format!("Task '{task_id}' not found"),
3972                "registry:get_task",
3973            )
3974            .with_hint("Use 'hivemind task list <project>' to see available tasks")
3975        })
3976    }
3977
3978    /// Updates a task.
3979    ///
3980    /// # Errors
3981    /// Returns an error if the task is not found.
3982    pub fn update_task(
3983        &self,
3984        task_id: &str,
3985        title: Option<&str>,
3986        description: Option<&str>,
3987    ) -> Result<Task> {
3988        let task = self
3989            .get_task(task_id)
3990            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
3991
3992        if let Some(new_title) = title {
3993            if new_title.trim().is_empty() {
3994                let err = HivemindError::user(
3995                    "invalid_task_title",
3996                    "Task title cannot be empty",
3997                    "registry:update_task",
3998                )
3999                .with_hint("Provide a non-empty task title");
4000                self.record_error_event(&err, CorrelationIds::for_task(task.project_id, task.id));
4001                return Err(err);
4002            }
4003        }
4004
4005        let title = title.filter(|t| *t != task.title);
4006        let description = description.filter(|d| task.description.as_deref() != Some(*d));
4007
4008        if title.is_none() && description.is_none() {
4009            return Ok(task);
4010        }
4011
4012        let event = Event::new(
4013            EventPayload::TaskUpdated {
4014                id: task.id,
4015                title: title.map(String::from),
4016                description: description.map(String::from),
4017            },
4018            CorrelationIds::for_task(task.project_id, task.id),
4019        );
4020
4021        self.store.append(event).map_err(|e| {
4022            HivemindError::system("event_append_failed", e.to_string(), "registry:update_task")
4023        })?;
4024
4025        self.get_task(task_id)
4026    }
4027
4028    /// Closes a task.
4029    ///
4030    /// # Errors
4031    /// Returns an error if the task is not found or already closed.
4032    pub fn close_task(&self, task_id: &str, reason: Option<&str>) -> Result<Task> {
4033        let task = self
4034            .get_task(task_id)
4035            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
4036
4037        let state = self.state()?;
4038        let in_active_flow = state.flows.values().any(|f| {
4039            f.task_executions.contains_key(&task.id)
4040                && !matches!(
4041                    f.state,
4042                    FlowState::Completed | FlowState::Merged | FlowState::Aborted
4043                )
4044        });
4045        if in_active_flow {
4046            let err = HivemindError::user(
4047                "task_in_active_flow",
4048                "Task is part of an active flow",
4049                "registry:close_task",
4050            );
4051            self.record_error_event(&err, CorrelationIds::for_task(task.project_id, task.id));
4052            return Err(err);
4053        }
4054
4055        if task.state == TaskState::Closed {
4056            // Idempotent: closing an already closed task is a no-op.
4057            return Ok(task);
4058        }
4059
4060        let event = Event::new(
4061            EventPayload::TaskClosed {
4062                id: task.id,
4063                reason: reason.map(String::from),
4064            },
4065            CorrelationIds::for_task(task.project_id, task.id),
4066        );
4067
4068        self.store.append(event).map_err(|e| {
4069            HivemindError::system("event_append_failed", e.to_string(), "registry:close_task")
4070        })?;
4071
4072        self.get_task(task_id)
4073    }
4074
    /// Gets a task graph by ID.
    ///
    /// # Errors
    /// Returns an error if `graph_id` is not a valid UUID or no such graph
    /// exists.
    pub fn get_graph(&self, graph_id: &str) -> Result<TaskGraph> {
        // Parse first so malformed IDs produce a user error rather than a
        // failed lookup.
        let id = Uuid::parse_str(graph_id).map_err(|_| {
            HivemindError::user(
                "invalid_graph_id",
                format!("'{graph_id}' is not a valid graph ID"),
                "registry:get_graph",
            )
        })?;

        let state = self.state()?;
        state.graphs.get(&id).cloned().ok_or_else(|| {
            HivemindError::user(
                "graph_not_found",
                format!("Graph '{graph_id}' not found"),
                "registry:get_graph",
            )
        })
    }
4093
4094    pub fn create_graph(
4095        &self,
4096        project_id_or_name: &str,
4097        name: &str,
4098        from_tasks: &[Uuid],
4099    ) -> Result<TaskGraph> {
4100        let project = self
4101            .get_project(project_id_or_name)
4102            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
4103        let state = self.state()?;
4104
4105        let mut tasks_to_add = Vec::new();
4106        for tid in from_tasks {
4107            let task = state.tasks.get(tid).cloned().ok_or_else(|| {
4108                let err = HivemindError::user(
4109                    "task_not_found",
4110                    format!("Task '{tid}' not found"),
4111                    "registry:create_graph",
4112                );
4113                self.record_error_event(&err, CorrelationIds::for_project(project.id));
4114                err
4115            })?;
4116            if task.state != TaskState::Open {
4117                let err = HivemindError::user(
4118                    "task_not_open",
4119                    format!("Task '{tid}' is not open"),
4120                    "registry:create_graph",
4121                );
4122                self.record_error_event(&err, CorrelationIds::for_project(project.id));
4123                return Err(err);
4124            }
4125            tasks_to_add.push(task);
4126        }
4127
4128        let graph_id = Uuid::new_v4();
4129        let event = Event::new(
4130            EventPayload::TaskGraphCreated {
4131                graph_id,
4132                project_id: project.id,
4133                name: name.to_string(),
4134                description: None,
4135            },
4136            CorrelationIds::for_graph(project.id, graph_id),
4137        );
4138
4139        self.store.append(event).map_err(|e| {
4140            HivemindError::system(
4141                "event_append_failed",
4142                e.to_string(),
4143                "registry:create_graph",
4144            )
4145        })?;
4146
4147        for task in tasks_to_add {
4148            let graph_task = GraphTask {
4149                id: task.id,
4150                title: task.title,
4151                description: task.description,
4152                criteria: SuccessCriteria::new("Done"),
4153                retry_policy: RetryPolicy::default(),
4154                checkpoints: vec!["checkpoint-1".to_string()],
4155                scope: task.scope,
4156            };
4157            let event = Event::new(
4158                EventPayload::TaskAddedToGraph {
4159                    graph_id,
4160                    task: graph_task,
4161                },
4162                CorrelationIds::for_graph(project.id, graph_id),
4163            );
4164            self.store.append(event).map_err(|e| {
4165                HivemindError::system(
4166                    "event_append_failed",
4167                    e.to_string(),
4168                    "registry:create_graph",
4169                )
4170            })?;
4171        }
4172
4173        self.get_graph(&graph_id.to_string())
4174    }
4175
    /// Add a dependency edge to a draft graph so that `to_task` depends on
    /// `from_task` (the dependency map is keyed by the dependent task).
    ///
    /// Rules enforced, in order:
    /// - all three IDs must parse as UUIDs and the graph must exist;
    /// - the graph must still be `Draft` — once locked, the error message
    ///   names the most recently updated flow referencing the graph;
    /// - both tasks must already be members of the graph;
    /// - re-adding an existing edge is a silent no-op (no event appended);
    /// - an edge that would create a cycle is rejected.
    ///
    /// Every failure is also recorded as an error event. On success a
    /// `DependencyAdded` event is appended and the updated graph returned.
    #[allow(clippy::too_many_lines)]
    pub fn add_graph_dependency(
        &self,
        graph_id: &str,
        from_task: &str,
        to_task: &str,
    ) -> Result<TaskGraph> {
        // Parse failures carry no correlation IDs: the project/graph are
        // unknown until the graph is loaded below.
        let gid = Uuid::parse_str(graph_id).map_err(|_| {
            let err = HivemindError::user(
                "invalid_graph_id",
                format!("'{graph_id}' is not a valid graph ID"),
                "registry:add_graph_dependency",
            );
            self.record_error_event(&err, CorrelationIds::none());
            err
        })?;
        let from = Uuid::parse_str(from_task).map_err(|_| {
            let err = HivemindError::user(
                "invalid_task_id",
                format!("'{from_task}' is not a valid task ID"),
                "registry:add_graph_dependency",
            );
            self.record_error_event(&err, CorrelationIds::none());
            err
        })?;
        let to = Uuid::parse_str(to_task).map_err(|_| {
            let err = HivemindError::user(
                "invalid_task_id",
                format!("'{to_task}' is not a valid task ID"),
                "registry:add_graph_dependency",
            );
            self.record_error_event(&err, CorrelationIds::none());
            err
        })?;

        let state = self.state()?;
        let graph = state.graphs.get(&gid).cloned().ok_or_else(|| {
            let err = HivemindError::user(
                "graph_not_found",
                format!("Graph '{graph_id}' not found"),
                "registry:add_graph_dependency",
            );
            self.record_error_event(&err, CorrelationIds::none());
            err
        })?;

        // Locked graphs are immutable. Surface which flow locked it (the
        // most recently updated flow referencing this graph) so the caller
        // can locate the conflict.
        if graph.state != GraphState::Draft {
            let locking_flow_id = state
                .flows
                .values()
                .filter(|flow| flow.graph_id == gid)
                .max_by_key(|flow| flow.updated_at)
                .map(|flow| flow.id);
            let message = locking_flow_id.map_or_else(
                || format!("Graph '{graph_id}' is immutable"),
                |flow_id| format!("Graph '{graph_id}' is immutable (locked by flow '{flow_id}')"),
            );

            let mut err = HivemindError::user(
                "graph_immutable",
                message,
                "registry:add_graph_dependency",
            )
            .with_hint(
                "Create a new graph if you need additional dependencies, or modify task execution in the existing flow",
            );
            if let Some(flow_id) = locking_flow_id {
                err = err.with_context("locking_flow_id", flow_id.to_string());
            }
            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, gid));
            return Err(err);
        }

        if !graph.tasks.contains_key(&from) || !graph.tasks.contains_key(&to) {
            let err = HivemindError::user(
                "task_not_in_graph",
                "One or more tasks are not in the graph",
                "registry:add_graph_dependency",
            )
            .with_hint("Ensure both task IDs were included when the graph was created");
            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, gid));
            return Err(err);
        }

        // Idempotency: the edge already exists, so skip the event append
        // and return the unchanged graph.
        if graph
            .dependencies
            .get(&to)
            .is_some_and(|deps| deps.contains(&from))
        {
            return Ok(graph);
        }

        // Dry-run the insertion on a clone so cycle detection can run
        // without mutating the derived state; only the event mutates state.
        let mut graph_for_check = graph.clone();
        graph_for_check.add_dependency(to, from).map_err(|e| {
            let err = HivemindError::user(
                "cycle_detected",
                e.to_string(),
                "registry:add_graph_dependency",
            )
            .with_hint("Remove one dependency in the cycle and try again");
            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, gid));
            err
        })?;

        let event = Event::new(
            EventPayload::DependencyAdded {
                graph_id: gid,
                from_task: from,
                to_task: to,
            },
            CorrelationIds::for_graph(graph.project_id, gid),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system(
                "event_append_failed",
                e.to_string(),
                "registry:add_graph_dependency",
            )
        })?;

        self.get_graph(graph_id)
    }
4299
4300    pub fn add_graph_task_check(
4301        &self,
4302        graph_id: &str,
4303        task_id: &str,
4304        check: crate::core::verification::CheckConfig,
4305    ) -> Result<TaskGraph> {
4306        let origin = "registry:add_graph_task_check";
4307        let gid = Uuid::parse_str(graph_id).map_err(|_| {
4308            let err = HivemindError::user(
4309                "invalid_graph_id",
4310                format!("'{graph_id}' is not a valid graph ID"),
4311                origin,
4312            );
4313            self.record_error_event(&err, CorrelationIds::none());
4314            err
4315        })?;
4316        let tid = Uuid::parse_str(task_id).map_err(|_| {
4317            let err = HivemindError::user(
4318                "invalid_task_id",
4319                format!("'{task_id}' is not a valid task ID"),
4320                origin,
4321            );
4322            self.record_error_event(&err, CorrelationIds::none());
4323            err
4324        })?;
4325
4326        let state = self.state()?;
4327        let graph = state.graphs.get(&gid).cloned().ok_or_else(|| {
4328            let err = HivemindError::user(
4329                "graph_not_found",
4330                format!("Graph '{graph_id}' not found"),
4331                origin,
4332            );
4333            self.record_error_event(&err, CorrelationIds::none());
4334            err
4335        })?;
4336        if graph.state != GraphState::Draft {
4337            let err = HivemindError::user(
4338                "graph_immutable",
4339                format!("Graph '{graph_id}' is immutable"),
4340                origin,
4341            )
4342            .with_hint("Checks can only be added to draft graphs");
4343            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, graph.id));
4344            return Err(err);
4345        }
4346        if !graph.tasks.contains_key(&tid) {
4347            let err = HivemindError::user(
4348                "task_not_in_graph",
4349                format!("Task '{task_id}' is not part of graph '{graph_id}'"),
4350                origin,
4351            );
4352            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, graph.id));
4353            return Err(err);
4354        }
4355
4356        let event = Event::new(
4357            EventPayload::GraphTaskCheckAdded {
4358                graph_id: gid,
4359                task_id: tid,
4360                check,
4361            },
4362            CorrelationIds::for_graph(graph.project_id, gid),
4363        );
4364        self.store
4365            .append(event)
4366            .map_err(|e| HivemindError::system("event_append_failed", e.to_string(), origin))?;
4367
4368        self.get_graph(graph_id)
4369    }
4370
4371    fn validate_graph_issues(graph: &TaskGraph) -> Vec<String> {
4372        fn has_cycle(graph: &TaskGraph) -> bool {
4373            use std::collections::HashSet;
4374
4375            fn visit(
4376                graph: &TaskGraph,
4377                node: Uuid,
4378                visited: &mut HashSet<Uuid>,
4379                stack: &mut HashSet<Uuid>,
4380            ) -> bool {
4381                if stack.contains(&node) {
4382                    return true;
4383                }
4384                if visited.contains(&node) {
4385                    return false;
4386                }
4387
4388                visited.insert(node);
4389                stack.insert(node);
4390
4391                if let Some(deps) = graph.dependencies.get(&node) {
4392                    for dep in deps {
4393                        if visit(graph, *dep, visited, stack) {
4394                            return true;
4395                        }
4396                    }
4397                }
4398
4399                stack.remove(&node);
4400                false
4401            }
4402
4403            let mut visited = HashSet::new();
4404            let mut stack = HashSet::new();
4405            for node in graph.tasks.keys() {
4406                if visit(graph, *node, &mut visited, &mut stack) {
4407                    return true;
4408                }
4409            }
4410            false
4411        }
4412
4413        if graph.tasks.is_empty() {
4414            return vec!["Graph must contain at least one task".to_string()];
4415        }
4416
4417        for (task_id, deps) in &graph.dependencies {
4418            if !graph.tasks.contains_key(task_id) {
4419                return vec![format!("Task not found: {task_id}")];
4420            }
4421            for dep in deps {
4422                if !graph.tasks.contains_key(dep) {
4423                    return vec![format!("Task not found: {dep}")];
4424                }
4425            }
4426        }
4427
4428        if has_cycle(graph) {
4429            return vec!["Cycle detected in task dependencies".to_string()];
4430        }
4431
4432        Vec::new()
4433    }
4434
4435    pub fn validate_graph(&self, graph_id: &str) -> Result<GraphValidationResult> {
4436        let graph = self.get_graph(graph_id)?;
4437        let issues = Self::validate_graph_issues(&graph);
4438        Ok(GraphValidationResult {
4439            graph_id: graph.id,
4440            valid: issues.is_empty(),
4441            issues,
4442        })
4443    }
4444
4445    pub fn list_flows(&self, project_id_or_name: Option<&str>) -> Result<Vec<TaskFlow>> {
4446        let project_filter = match project_id_or_name {
4447            Some(id_or_name) => Some(self.get_project(id_or_name)?.id),
4448            None => None,
4449        };
4450
4451        let state = self.state()?;
4452        let mut flows: Vec<_> = state
4453            .flows
4454            .into_values()
4455            .filter(|flow| project_filter.is_none_or(|pid| flow.project_id == pid))
4456            .collect();
4457        flows.sort_by(|a, b| a.updated_at.cmp(&b.updated_at));
4458        flows.reverse();
4459        Ok(flows)
4460    }
4461
4462    pub fn get_flow(&self, flow_id: &str) -> Result<TaskFlow> {
4463        let id = Uuid::parse_str(flow_id).map_err(|_| {
4464            HivemindError::user(
4465                "invalid_flow_id",
4466                format!("'{flow_id}' is not a valid flow ID"),
4467                "registry:get_flow",
4468            )
4469        })?;
4470
4471        let state = self.state()?;
4472        state.flows.get(&id).cloned().ok_or_else(|| {
4473            HivemindError::user(
4474                "flow_not_found",
4475                format!("Flow '{flow_id}' not found"),
4476                "registry:get_flow",
4477            )
4478        })
4479    }
4480
    /// Create a flow from a graph, validating and locking the graph first.
    ///
    /// For a draft graph this appends a `TaskGraphValidated` event recording
    /// the validation outcome and, when valid, a `TaskGraphLocked` event —
    /// the graph becomes immutable exactly when its first flow is created.
    /// Creation is rejected when validation found issues or when another
    /// non-terminal flow already uses the graph. On success a
    /// `TaskFlowCreated` event carrying all graph task IDs is appended.
    pub fn create_flow(&self, graph_id: &str, name: Option<&str>) -> Result<TaskFlow> {
        let graph = self
            .get_graph(graph_id)
            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
        let issues = Self::validate_graph_issues(&graph);

        // Record the validation outcome (and lock) only for draft graphs;
        // a non-draft graph has already been through this transition.
        if graph.state == GraphState::Draft {
            let valid = issues.is_empty();
            let event = Event::new(
                EventPayload::TaskGraphValidated {
                    graph_id: graph.id,
                    project_id: graph.project_id,
                    valid,
                    issues: issues.clone(),
                },
                CorrelationIds::for_graph(graph.project_id, graph.id),
            );
            self.store.append(event).map_err(|e| {
                HivemindError::system("event_append_failed", e.to_string(), "registry:create_flow")
            })?;

            if valid {
                let event = Event::new(
                    EventPayload::TaskGraphLocked {
                        graph_id: graph.id,
                        project_id: graph.project_id,
                    },
                    CorrelationIds::for_graph(graph.project_id, graph.id),
                );
                self.store.append(event).map_err(|e| {
                    HivemindError::system(
                        "event_append_failed",
                        e.to_string(),
                        "registry:create_flow",
                    )
                })?;
            }
        }

        // Reject after recording validation, so the failed outcome is still
        // visible in the event log.
        if !issues.is_empty() {
            let err = HivemindError::user(
                "graph_invalid",
                "Graph validation failed",
                "registry:create_flow",
            )
            .with_context("graph_id", graph.id.to_string());
            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, graph.id));
            return Err(err);
        }

        // Only one active (non-terminal) flow may reference a graph at a time.
        let state = self.state()?;
        let has_active = state.flows.values().any(|f| {
            f.graph_id == graph.id
                && !matches!(
                    f.state,
                    FlowState::Completed | FlowState::Merged | FlowState::Aborted
                )
        });
        if has_active {
            let err = HivemindError::user(
                "graph_in_use",
                "Graph already used by an active flow",
                "registry:create_flow",
            )
            .with_context("graph_id", graph.id.to_string());
            self.record_error_event(&err, CorrelationIds::for_graph(graph.project_id, graph.id));
            return Err(err);
        }

        let flow_id = Uuid::new_v4();
        let task_ids: Vec<Uuid> = graph.tasks.keys().copied().collect();
        let event = Event::new(
            EventPayload::TaskFlowCreated {
                flow_id,
                graph_id: graph.id,
                project_id: graph.project_id,
                name: name.map(String::from),
                task_ids,
            },
            CorrelationIds::for_graph_flow(graph.project_id, graph.id, flow_id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system("event_append_failed", e.to_string(), "registry:create_flow")
        })?;

        self.get_flow(&flow_id.to_string())
    }
4569
4570    #[allow(clippy::too_many_lines)]
4571    pub fn start_flow(&self, flow_id: &str) -> Result<TaskFlow> {
4572        let flow = self
4573            .get_flow(flow_id)
4574            .inspect_err(|err| self.record_error_event(err, CorrelationIds::none()))?;
4575        match flow.state {
4576            FlowState::Created => {}
4577            FlowState::Paused => {
4578                let event = Event::new(
4579                    EventPayload::TaskFlowResumed { flow_id: flow.id },
4580                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4581                );
4582                self.store.append(event).map_err(|e| {
4583                    HivemindError::system(
4584                        "event_append_failed",
4585                        e.to_string(),
4586                        "registry:start_flow",
4587                    )
4588                })?;
4589                return self.get_flow(flow_id);
4590            }
4591            FlowState::Running => {
4592                let err = HivemindError::user(
4593                    "flow_already_running",
4594                    "Flow is already running",
4595                    "registry:start_flow",
4596                );
4597                self.record_error_event(
4598                    &err,
4599                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4600                );
4601                return Err(err);
4602            }
4603            FlowState::Completed => {
4604                let err = HivemindError::user(
4605                    "flow_completed",
4606                    "Flow has already completed",
4607                    "registry:start_flow",
4608                );
4609                self.record_error_event(
4610                    &err,
4611                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4612                );
4613                return Err(err);
4614            }
4615            FlowState::FrozenForMerge => {
4616                let err = HivemindError::user(
4617                    "flow_frozen",
4618                    "Flow is frozen for merge",
4619                    "registry:start_flow",
4620                );
4621                self.record_error_event(
4622                    &err,
4623                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4624                );
4625                return Err(err);
4626            }
4627            FlowState::Merged => {
4628                let err = HivemindError::user(
4629                    "flow_merged",
4630                    "Flow has already been merged",
4631                    "registry:start_flow",
4632                );
4633                self.record_error_event(
4634                    &err,
4635                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4636                );
4637                return Err(err);
4638            }
4639            FlowState::Aborted => {
4640                let err =
4641                    HivemindError::user("flow_aborted", "Flow was aborted", "registry:start_flow");
4642                self.record_error_event(
4643                    &err,
4644                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4645                );
4646                return Err(err);
4647            }
4648        }
4649
4650        let state = self.state()?;
4651
4652        let base_revision = state
4653            .projects
4654            .get(&flow.project_id)
4655            .and_then(|p| p.repositories.first())
4656            .and_then(|repo| {
4657                std::process::Command::new("git")
4658                    .current_dir(&repo.path)
4659                    .args(["rev-parse", "HEAD"])
4660                    .output()
4661                    .ok()
4662                    .filter(|o| o.status.success())
4663                    .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
4664                    .filter(|s| !s.is_empty())
4665            });
4666
4667        if let Some(project) = state.projects.get(&flow.project_id) {
4668            for repo in &project.repositories {
4669                let repo_head = std::process::Command::new("git")
4670                    .current_dir(&repo.path)
4671                    .args(["rev-parse", "HEAD"])
4672                    .output()
4673                    .ok()
4674                    .filter(|o| o.status.success())
4675                    .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
4676                    .filter(|s| !s.is_empty());
4677                if let Some(head) = repo_head {
4678                    let _ = std::process::Command::new("git")
4679                        .current_dir(&repo.path)
4680                        .args(["branch", "-f", &format!("flow/{}", flow.id), &head])
4681                        .output();
4682                }
4683            }
4684        }
4685
4686        let event = Event::new(
4687            EventPayload::TaskFlowStarted {
4688                flow_id: flow.id,
4689                base_revision,
4690            },
4691            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4692        );
4693        self.store.append(event).map_err(|e| {
4694            HivemindError::system("event_append_failed", e.to_string(), "registry:start_flow")
4695        })?;
4696
4697        if let Some(graph) = state.graphs.get(&flow.graph_id) {
4698            let ready = graph.root_tasks();
4699            for task_id in ready {
4700                let event = Event::new(
4701                    EventPayload::TaskReady {
4702                        flow_id: flow.id,
4703                        task_id,
4704                    },
4705                    CorrelationIds::for_graph_flow_task(
4706                        flow.project_id,
4707                        flow.graph_id,
4708                        flow.id,
4709                        task_id,
4710                    ),
4711                );
4712                self.store.append(event).map_err(|e| {
4713                    HivemindError::system(
4714                        "event_append_failed",
4715                        e.to_string(),
4716                        "registry:start_flow",
4717                    )
4718                })?;
4719            }
4720        }
4721
4722        self.get_flow(flow_id)
4723    }
4724
4725    pub fn worktree_list(&self, flow_id: &str) -> Result<Vec<WorktreeStatus>> {
4726        let flow = self.get_flow(flow_id)?;
4727        let state = self.state()?;
4728        let managers = Self::worktree_managers_for_flow(&flow, &state, "registry:worktree_list")?;
4729
4730        let mut statuses = Vec::new();
4731        for (_repo_name, manager) in managers {
4732            for task_id in flow.task_executions.keys() {
4733                let status = manager
4734                    .inspect(flow.id, *task_id)
4735                    .map_err(|e| Self::worktree_error_to_hivemind(e, "registry:worktree_list"))?;
4736                statuses.push(status);
4737            }
4738        }
4739        Ok(statuses)
4740    }
4741
4742    pub fn worktree_inspect(&self, task_id: &str) -> Result<WorktreeStatus> {
4743        let tid = Uuid::parse_str(task_id).map_err(|_| {
4744            HivemindError::user(
4745                "invalid_task_id",
4746                format!("'{task_id}' is not a valid task ID"),
4747                "registry:worktree_inspect",
4748            )
4749        })?;
4750
4751        let state = self.state()?;
4752        let mut candidates: Vec<&TaskFlow> = state
4753            .flows
4754            .values()
4755            .filter(|f| f.task_executions.contains_key(&tid))
4756            .collect();
4757
4758        if candidates.is_empty() {
4759            return Err(HivemindError::user(
4760                "task_not_in_flow",
4761                "Task is not part of any flow",
4762                "registry:worktree_inspect",
4763            ));
4764        }
4765
4766        candidates.sort_by_key(|f| std::cmp::Reverse(f.updated_at));
4767        let flow = candidates[0].clone();
4768
4769        let manager = Self::worktree_manager_for_flow(&flow, &state)?;
4770        manager
4771            .inspect(flow.id, tid)
4772            .map_err(|e| Self::worktree_error_to_hivemind(e, "registry:worktree_inspect"))
4773    }
4774
4775    pub fn worktree_cleanup(&self, flow_id: &str) -> Result<()> {
4776        let flow = self.get_flow(flow_id)?;
4777        let state = self.state()?;
4778        let managers =
4779            Self::worktree_managers_for_flow(&flow, &state, "registry:worktree_cleanup")?;
4780        for (_repo_name, manager) in managers {
4781            manager
4782                .cleanup_flow(flow.id)
4783                .map_err(|e| Self::worktree_error_to_hivemind(e, "registry:worktree_cleanup"))?;
4784        }
4785        Ok(())
4786    }
4787
4788    pub fn pause_flow(&self, flow_id: &str) -> Result<TaskFlow> {
4789        let flow = self.get_flow(flow_id)?;
4790
4791        match flow.state {
4792            FlowState::Paused => return Ok(flow),
4793            FlowState::Running => {}
4794            _ => {
4795                return Err(HivemindError::user(
4796                    "flow_not_running",
4797                    "Flow is not in running state",
4798                    "registry:pause_flow",
4799                ));
4800            }
4801        }
4802
4803        let running_tasks: Vec<Uuid> = flow.tasks_in_state(TaskExecState::Running);
4804        let event = Event::new(
4805            EventPayload::TaskFlowPaused {
4806                flow_id: flow.id,
4807                running_tasks,
4808            },
4809            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4810        );
4811
4812        self.store.append(event).map_err(|e| {
4813            HivemindError::system("event_append_failed", e.to_string(), "registry:pause_flow")
4814        })?;
4815
4816        self.get_flow(flow_id)
4817    }
4818
4819    pub fn resume_flow(&self, flow_id: &str) -> Result<TaskFlow> {
4820        let flow = self.get_flow(flow_id)?;
4821        if flow.state != FlowState::Paused {
4822            return Err(HivemindError::user(
4823                "flow_not_paused",
4824                "Flow is not paused",
4825                "registry:resume_flow",
4826            ));
4827        }
4828
4829        let event = Event::new(
4830            EventPayload::TaskFlowResumed { flow_id: flow.id },
4831            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4832        );
4833        self.store.append(event).map_err(|e| {
4834            HivemindError::system("event_append_failed", e.to_string(), "registry:resume_flow")
4835        })?;
4836
4837        self.get_flow(flow_id)
4838    }
4839
4840    pub fn abort_flow(
4841        &self,
4842        flow_id: &str,
4843        reason: Option<&str>,
4844        forced: bool,
4845    ) -> Result<TaskFlow> {
4846        let flow = self.get_flow(flow_id)?;
4847        if flow.state == FlowState::Aborted {
4848            return Ok(flow);
4849        }
4850        if flow.state == FlowState::Completed {
4851            return Err(HivemindError::user(
4852                "flow_already_terminal",
4853                "Flow is completed",
4854                "registry:abort_flow",
4855            ));
4856        }
4857
4858        let event = Event::new(
4859            EventPayload::TaskFlowAborted {
4860                flow_id: flow.id,
4861                reason: reason.map(String::from),
4862                forced,
4863            },
4864            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
4865        );
4866
4867        self.store.append(event).map_err(|e| {
4868            HivemindError::system("event_append_failed", e.to_string(), "registry:abort_flow")
4869        })?;
4870
4871        self.get_flow(flow_id)
4872    }
4873
    /// Request a retry of a failed/retry-state task in its most recently
    /// updated flow.
    ///
    /// `reset_count` bypasses the attempt budget (`max_retries + 1` total
    /// attempts). `RetryMode::Clean` additionally resets the task's
    /// worktree(s) to the flow base before the retry event is appended.
    /// On success a `TaskRetryRequested` event is appended and the owning
    /// flow returned.
    pub fn retry_task(
        &self,
        task_id: &str,
        reset_count: bool,
        retry_mode: RetryMode,
    ) -> Result<TaskFlow> {
        let id = Uuid::parse_str(task_id).map_err(|_| {
            HivemindError::user(
                "invalid_task_id",
                format!("'{task_id}' is not a valid task ID"),
                "registry:retry_task",
            )
        })?;

        let state = self.state()?;
        // A task may appear in several flows; pick the most recently
        // updated one as the retry target.
        let mut candidates: Vec<TaskFlow> = state
            .flows
            .values()
            .filter(|f| f.task_executions.contains_key(&id))
            .cloned()
            .collect();

        if candidates.is_empty() {
            return Err(HivemindError::user(
                "task_not_in_flow",
                "Task is not part of any flow",
                "registry:retry_task",
            ));
        }

        candidates.sort_by_key(|f| std::cmp::Reverse(f.updated_at));
        let flow = candidates[0].clone();

        let exec = flow.task_executions.get(&id).ok_or_else(|| {
            HivemindError::system(
                "task_exec_not_found",
                "Task execution not found",
                "registry:retry_task",
            )
        })?;

        // Attempt budget: max_retries + 1 total attempts; an unknown
        // graph/task falls back to 3 retries.
        let max_retries = state
            .graphs
            .get(&flow.graph_id)
            .and_then(|g| g.tasks.get(&id))
            .map_or(3, |t| t.retry_policy.max_retries);
        let max_attempts = max_retries.saturating_add(1);
        if !reset_count && exec.attempt_count >= max_attempts {
            return Err(HivemindError::user(
                "retry_limit_exceeded",
                "Retry limit exceeded",
                "registry:retry_task",
            ));
        }

        if exec.state != TaskExecState::Failed && exec.state != TaskExecState::Retry {
            return Err(HivemindError::user(
                "task_not_retriable",
                "Task is not in a retriable state",
                "registry:retry_task",
            ));
        }

        // Clean mode: reset the task's worktree in every repository back to
        // the flow's base ref, creating the worktree first when missing.
        // Manager lookup and worktree creation failures are swallowed
        // (`let _` / `.ok()`), but a failed checkout/clean propagates via `?`
        // and aborts the retry.
        if matches!(retry_mode, RetryMode::Clean) {
            if let Ok(managers) =
                Self::worktree_managers_for_flow(&flow, &state, "registry:retry_task")
            {
                for (idx, (_repo_name, manager)) in managers.iter().enumerate() {
                    // `idx == 0` marks the primary repository for base-ref
                    // selection.
                    let base = Self::default_base_ref_for_repo(&flow, manager, idx == 0);
                    let mut status = manager.inspect(flow.id, id).ok();
                    if status.as_ref().is_none_or(|s| !s.is_worktree) {
                        let _ = manager.create(flow.id, id, Some(&base));
                        status = manager.inspect(flow.id, id).ok();
                    }

                    if let Some(status) = status.filter(|s| s.is_worktree) {
                        let branch = format!("exec/{}/{id}", flow.id);
                        Self::checkout_and_clean_worktree(
                            &status.path,
                            &branch,
                            &base,
                            "registry:retry_task",
                        )?;
                    }
                }
            }
        }

        let event = Event::new(
            EventPayload::TaskRetryRequested {
                task_id: id,
                reset_count,
                retry_mode,
            },
            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system("event_append_failed", e.to_string(), "registry:retry_task")
        })?;

        self.get_flow(&flow.id.to_string())
    }
4977
    /// Starts a new execution attempt for a task that is `Ready` or `Retry`.
    ///
    /// Ensures the task worktree exists, captures a file baseline of it, and
    /// then emits the attempt bootstrap events in a fixed order:
    /// `TaskExecutionStateChanged` (→ `Running`), `AttemptStarted`, one
    /// `CheckpointDeclared` per declared checkpoint, `CheckpointActivated`
    /// for the first checkpoint (if any), `TaskExecutionStarted`, and
    /// finally `BaselineCaptured`.
    ///
    /// Returns the freshly generated attempt ID.
    ///
    /// # Errors
    /// User errors for a malformed task ID, a flow that is not running, or a
    /// task that is not startable; system errors when the graph, task, or
    /// execution record backing the flow is missing, or when event
    /// persistence fails.
    #[allow(clippy::too_many_lines)]
    pub fn start_task_execution(&self, task_id: &str) -> Result<Uuid> {
        let origin = "registry:start_task_execution";
        let id = Uuid::parse_str(task_id).map_err(|_| {
            HivemindError::user(
                "invalid_task_id",
                format!("'{task_id}' is not a valid task ID"),
                origin,
            )
        })?;

        let state = self.state()?;
        let flow = Self::flow_for_task(&state, id, origin)?;
        // Attempts may only be started while the owning flow is running.
        if flow.state != FlowState::Running {
            return Err(HivemindError::user(
                "flow_not_running",
                "Flow is not in running state",
                origin,
            ));
        }

        let exec = flow.task_executions.get(&id).ok_or_else(|| {
            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
        })?;
        // Only Ready (first run) or Retry (after a failed attempt) tasks may start.
        if exec.state != TaskExecState::Ready && exec.state != TaskExecState::Retry {
            return Err(HivemindError::user(
                "task_not_ready",
                "Task is not ready to start",
                origin,
            ));
        }

        // Materialize the worktree (if needed) and snapshot it as the attempt
        // baseline, before any events are appended.
        let status = Self::ensure_task_worktree(&flow, &state, id, origin)?;
        let attempt_id = Uuid::new_v4();
        // Attempt numbers are 1-based: count of prior attempts plus one.
        let attempt_number = exec.attempt_count.saturating_add(1);
        let baseline = self.capture_and_store_baseline(&status.path, origin)?;

        let graph = state
            .graphs
            .get(&flow.graph_id)
            .ok_or_else(|| HivemindError::system("graph_not_found", "Graph not found", origin))?;
        let graph_task = graph.tasks.get(&id).ok_or_else(|| {
            HivemindError::system("task_not_found", "Task not found in graph", origin)
        })?;
        let checkpoint_ids = Self::normalized_checkpoint_ids(&graph_task.checkpoints);

        // Task-scoped correlation for the state change; attempt-scoped for
        // everything tied to this specific attempt.
        let corr_task =
            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, id);
        let corr_attempt = CorrelationIds::for_graph_flow_task_attempt(
            flow.project_id,
            flow.graph_id,
            flow.id,
            id,
            attempt_id,
        );

        self.append_event(
            Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id: id,
                    from: exec.state,
                    to: TaskExecState::Running,
                },
                corr_task,
            ),
            origin,
        )?;

        self.append_event(
            Event::new(
                EventPayload::AttemptStarted {
                    flow_id: flow.id,
                    task_id: id,
                    attempt_id,
                    attempt_number,
                },
                corr_attempt.clone(),
            ),
            origin,
        )?;

        // Event payloads carry the checkpoint count as u32; guard the cast.
        let total = u32::try_from(checkpoint_ids.len()).map_err(|_| {
            HivemindError::system(
                "checkpoint_count_overflow",
                "Checkpoint count exceeds supported range",
                origin,
            )
        })?;

        // Declare every checkpoint with its 1-based position in the sequence.
        for (idx, checkpoint_id) in checkpoint_ids.iter().enumerate() {
            let order = u32::try_from(idx.saturating_add(1)).map_err(|_| {
                HivemindError::system(
                    "checkpoint_order_overflow",
                    "Checkpoint order exceeds supported range",
                    origin,
                )
            })?;

            self.append_event(
                Event::new(
                    EventPayload::CheckpointDeclared {
                        flow_id: flow.id,
                        task_id: id,
                        attempt_id,
                        checkpoint_id: checkpoint_id.clone(),
                        order,
                        total,
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        // Immediately activate the first checkpoint so the attempt has an
        // active checkpoint from the start (no-op when none are declared).
        if let Some(first_checkpoint_id) = checkpoint_ids.first() {
            self.append_event(
                Event::new(
                    EventPayload::CheckpointActivated {
                        flow_id: flow.id,
                        task_id: id,
                        attempt_id,
                        checkpoint_id: first_checkpoint_id.clone(),
                        order: 1,
                    },
                    corr_attempt.clone(),
                ),
                origin,
            )?;
        }

        self.append_event(
            Event::new(
                EventPayload::TaskExecutionStarted {
                    flow_id: flow.id,
                    task_id: id,
                    attempt_id,
                    attempt_number,
                },
                corr_attempt.clone(),
            ),
            origin,
        )?;

        self.append_event(
            Event::new(
                EventPayload::BaselineCaptured {
                    flow_id: flow.id,
                    task_id: id,
                    attempt_id,
                    baseline_id: baseline.id,
                    git_head: baseline.git_head.clone(),
                    file_count: baseline.file_count(),
                },
                corr_attempt,
            ),
            origin,
        )?;

        Ok(attempt_id)
    }
5139
    /// Marks a checkpoint as completed for an attempt, creating a checkpoint
    /// commit in the task worktree and emitting the corresponding events.
    ///
    /// Validation order: attempt/flow lookup, flow must be `Running`, task
    /// execution must be `Running`, checkpoint must be declared on both the
    /// graph task and the attempt, must be `Active` (not already completed),
    /// and every earlier checkpoint must already be completed. Each rejection
    /// after the flow is resolved also records an error event with the
    /// attempt's correlation IDs before returning.
    ///
    /// On success emits `CheckpointCompleted` + `CheckpointCommitCreated`,
    /// then either `CheckpointActivated` for the next checkpoint or
    /// `AllCheckpointsCompleted` when this was the last one.
    ///
    /// # Errors
    /// User errors for invalid IDs and any failed validation above; system
    /// errors for missing backing state or event persistence failures.
    #[allow(clippy::too_many_lines)]
    pub fn checkpoint_complete(
        &self,
        attempt_id: &str,
        checkpoint_id: &str,
        summary: Option<&str>,
    ) -> Result<CheckpointCompletionResult> {
        let origin = "registry:checkpoint_complete";
        let attempt_uuid = Uuid::parse_str(attempt_id).map_err(|_| {
            HivemindError::user(
                "invalid_attempt_id",
                format!("'{attempt_id}' is not a valid attempt ID"),
                origin,
            )
        })?;

        // Checkpoint IDs are matched in trimmed form throughout.
        let checkpoint_id = checkpoint_id.trim();
        if checkpoint_id.is_empty() {
            return Err(HivemindError::user(
                "invalid_checkpoint_id",
                "Checkpoint ID cannot be empty",
                origin,
            ));
        }

        let state = self.state()?;
        let attempt = state
            .attempts
            .get(&attempt_uuid)
            .ok_or_else(|| HivemindError::user("attempt_not_found", "Attempt not found", origin))?;

        let flow = state.flows.get(&attempt.flow_id).ok_or_else(|| {
            HivemindError::system("flow_not_found", "Flow not found for attempt", origin)
        })?;

        // Correlation used both for the success events and for error events
        // recorded on validation failures below.
        let corr_attempt = CorrelationIds::for_graph_flow_task_attempt(
            flow.project_id,
            flow.graph_id,
            flow.id,
            attempt.task_id,
            attempt.id,
        );

        if flow.state != FlowState::Running {
            let err = HivemindError::user(
                "flow_not_running",
                "Flow is not running; checkpoint completion rejected",
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        }

        let exec = flow.task_executions.get(&attempt.task_id).ok_or_else(|| {
            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
        })?;
        if exec.state != TaskExecState::Running {
            let err = HivemindError::user(
                "attempt_not_running",
                "Attempt is not in RUNNING state",
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        }

        let graph = state
            .graphs
            .get(&flow.graph_id)
            .ok_or_else(|| HivemindError::system("graph_not_found", "Graph not found", origin))?;
        let graph_task = graph.tasks.get(&attempt.task_id).ok_or_else(|| {
            HivemindError::system("task_not_found", "Task not found in graph", origin)
        })?;

        // The checkpoint must exist in the graph task's declared sequence;
        // this also yields its 1-based order and the sequence length.
        let checkpoint_ids = Self::normalized_checkpoint_ids(&graph_task.checkpoints);
        let Some((order, total)) = Self::checkpoint_order(&checkpoint_ids, checkpoint_id) else {
            let err = HivemindError::user(
                "checkpoint_not_found",
                format!("Checkpoint '{checkpoint_id}' is not declared for this task"),
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        };

        // It must also have been declared on this specific attempt.
        let Some(current) = attempt
            .checkpoints
            .iter()
            .find(|cp| cp.checkpoint_id == checkpoint_id)
        else {
            let err = HivemindError::user(
                "checkpoint_not_declared",
                format!("Checkpoint '{checkpoint_id}' has not been declared for this attempt"),
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        };

        // Double completion is rejected explicitly (distinct error code from
        // the generic not-active case below).
        if current.state == AttemptCheckpointState::Completed {
            let err = HivemindError::user(
                "checkpoint_already_completed",
                format!("Checkpoint '{checkpoint_id}' is already completed"),
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        }

        if current.state != AttemptCheckpointState::Active {
            let err = HivemindError::user(
                "checkpoint_not_active",
                format!("Checkpoint '{checkpoint_id}' is not ACTIVE"),
                origin,
            );
            self.record_error_event(&err, corr_attempt);
            return Err(err);
        }

        // Convert the 1-based order back to a 0-based index into the sequence.
        let idx = usize::try_from(order.saturating_sub(1)).map_err(|_| {
            HivemindError::system(
                "checkpoint_order_invalid",
                "Checkpoint order conversion failed",
                origin,
            )
        })?;

        // Enforce strict in-order completion: every checkpoint earlier in the
        // sequence must already be completed on this attempt.
        let completed_ids: HashSet<&str> = attempt
            .checkpoints
            .iter()
            .filter(|cp| cp.state == AttemptCheckpointState::Completed)
            .map(|cp| cp.checkpoint_id.as_str())
            .collect();
        for prev in checkpoint_ids.iter().take(idx) {
            if !completed_ids.contains(prev.as_str()) {
                let err = HivemindError::user(
                    "checkpoint_order_violation",
                    format!("Cannot complete '{checkpoint_id}' before '{prev}'"),
                    origin,
                );
                self.record_error_event(&err, corr_attempt);
                return Err(err);
            }
        }

        // Create the checkpoint commit in the worktree before any completion
        // events are appended; commit failure is recorded as an error event.
        let worktree = Self::inspect_task_worktree(flow, &state, attempt.task_id, origin)?;
        let commit_hash = match Self::create_checkpoint_commit(
            &worktree.path,
            &CheckpointCommitSpec {
                flow_id: flow.id,
                task_id: attempt.task_id,
                attempt_id: attempt.id,
                checkpoint_id,
                order,
                total,
                summary,
            },
            origin,
        ) {
            Ok(hash) => hash,
            Err(err) => {
                self.record_error_event(&err, corr_attempt);
                return Err(err);
            }
        };

        let completed_at = Utc::now();
        let summary_owned = summary.map(str::to_string);
        self.append_event(
            Event::new(
                EventPayload::CheckpointCompleted {
                    flow_id: flow.id,
                    task_id: attempt.task_id,
                    attempt_id: attempt.id,
                    checkpoint_id: checkpoint_id.to_string(),
                    order,
                    commit_hash: commit_hash.clone(),
                    timestamp: completed_at,
                    summary: summary_owned,
                },
                corr_attempt.clone(),
            ),
            origin,
        )?;

        self.append_event(
            Event::new(
                EventPayload::CheckpointCommitCreated {
                    flow_id: flow.id,
                    task_id: attempt.task_id,
                    attempt_id: attempt.id,
                    commit_sha: commit_hash.clone(),
                },
                corr_attempt.clone(),
            ),
            origin,
        )?;

        // Either activate the next checkpoint in sequence, or signal that all
        // checkpoints for this attempt are done.
        let next_checkpoint_id = checkpoint_ids.get(idx.saturating_add(1)).cloned();
        if let Some(next_id) = next_checkpoint_id.as_ref() {
            let next_order = order.saturating_add(1);
            self.append_event(
                Event::new(
                    EventPayload::CheckpointActivated {
                        flow_id: flow.id,
                        task_id: attempt.task_id,
                        attempt_id: attempt.id,
                        checkpoint_id: next_id.clone(),
                        order: next_order,
                    },
                    corr_attempt,
                ),
                origin,
            )?;
        } else {
            self.append_event(
                Event::new(
                    EventPayload::AllCheckpointsCompleted {
                        flow_id: flow.id,
                        task_id: attempt.task_id,
                        attempt_id: attempt.id,
                    },
                    corr_attempt,
                ),
                origin,
            )?;
        }

        Ok(CheckpointCompletionResult {
            flow_id: flow.id,
            task_id: attempt.task_id,
            attempt_id: attempt.id,
            checkpoint_id: checkpoint_id.to_string(),
            order,
            total,
            next_checkpoint_id,
            all_completed: order == total,
            commit_hash,
        })
    }
5380
5381    pub fn complete_task_execution(&self, task_id: &str) -> Result<TaskFlow> {
5382        let origin = "registry:complete_task_execution";
5383        let id = Uuid::parse_str(task_id).map_err(|_| {
5384            HivemindError::user(
5385                "invalid_task_id",
5386                format!("'{task_id}' is not a valid task ID"),
5387                origin,
5388            )
5389        })?;
5390
5391        let state = self.state()?;
5392        let flow = Self::flow_for_task(&state, id, origin)?;
5393        let exec = flow.task_executions.get(&id).ok_or_else(|| {
5394            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
5395        })?;
5396        if exec.state != TaskExecState::Running {
5397            return Err(HivemindError::user(
5398                "task_not_running",
5399                "Task is not in running state",
5400                origin,
5401            ));
5402        }
5403
5404        let attempt = Self::resolve_latest_attempt_without_diff(&state, flow.id, id, origin)?;
5405
5406        if !attempt.all_checkpoints_completed {
5407            let err = HivemindError::user(
5408                "checkpoints_incomplete",
5409                "All checkpoints must be completed before task completion",
5410                origin,
5411            )
5412            .with_hint(format!(
5413                "Complete the active checkpoint via `hivemind checkpoint complete --attempt-id {} --id <checkpoint-id>` before finishing the task attempt",
5414                attempt.id
5415            ));
5416            self.record_error_event(
5417                &err,
5418                CorrelationIds::for_graph_flow_task_attempt(
5419                    flow.project_id,
5420                    flow.graph_id,
5421                    flow.id,
5422                    id,
5423                    attempt.id,
5424                ),
5425            );
5426            return Err(err);
5427        }
5428
5429        let baseline_id = attempt.baseline_id.ok_or_else(|| {
5430            HivemindError::system(
5431                "baseline_not_found",
5432                "Baseline not found for attempt",
5433                origin,
5434            )
5435        })?;
5436
5437        let status = Self::inspect_task_worktree(&flow, &state, id, origin)?;
5438        let artifact =
5439            self.compute_and_store_diff(baseline_id, &status.path, id, attempt.id, origin)?;
5440
5441        self.emit_task_execution_completion_events(
5442            &flow,
5443            id,
5444            &attempt,
5445            CompletionArtifacts {
5446                baseline_id,
5447                artifact: &artifact,
5448                checkpoint_commit_sha: None,
5449            },
5450            origin,
5451        )?;
5452
5453        self.get_flow(&flow.id.to_string())
5454    }
5455
5456    pub fn get_attempt(&self, attempt_id: &str) -> Result<AttemptState> {
5457        let id = Uuid::parse_str(attempt_id).map_err(|_| {
5458            HivemindError::user(
5459                "invalid_attempt_id",
5460                format!("'{attempt_id}' is not a valid attempt ID"),
5461                "registry:get_attempt",
5462            )
5463        })?;
5464
5465        let state = self.state()?;
5466        state.attempts.get(&id).cloned().ok_or_else(|| {
5467            HivemindError::user(
5468                "attempt_not_found",
5469                format!("Attempt '{attempt_id}' not found"),
5470                "registry:get_attempt",
5471            )
5472        })
5473    }
5474
5475    pub fn get_attempt_diff(&self, attempt_id: &str) -> Result<Option<String>> {
5476        let attempt = self.get_attempt(attempt_id)?;
5477        let Some(diff_id) = attempt.diff_id else {
5478            return Ok(None);
5479        };
5480        let artifact = self.read_diff_artifact(diff_id)?;
5481        Ok(Some(artifact.unified))
5482    }
5483
5484    pub fn abort_task(&self, task_id: &str, reason: Option<&str>) -> Result<TaskFlow> {
5485        let id = Uuid::parse_str(task_id).map_err(|_| {
5486            HivemindError::user(
5487                "invalid_task_id",
5488                format!("'{task_id}' is not a valid task ID"),
5489                "registry:abort_task",
5490            )
5491        })?;
5492
5493        let state = self.state()?;
5494        let mut candidates: Vec<TaskFlow> = state
5495            .flows
5496            .values()
5497            .filter(|f| f.task_executions.contains_key(&id))
5498            .cloned()
5499            .collect();
5500
5501        if candidates.is_empty() {
5502            return Err(HivemindError::user(
5503                "task_not_in_flow",
5504                "Task is not part of any flow",
5505                "registry:abort_task",
5506            ));
5507        }
5508
5509        candidates.sort_by_key(|f| std::cmp::Reverse(f.updated_at));
5510        let flow = candidates[0].clone();
5511
5512        let exec = flow.task_executions.get(&id).ok_or_else(|| {
5513            HivemindError::system(
5514                "task_exec_not_found",
5515                "Task execution not found",
5516                "registry:abort_task",
5517            )
5518        })?;
5519
5520        if exec.state == TaskExecState::Success {
5521            return Err(HivemindError::user(
5522                "task_already_terminal",
5523                "Task is already successful",
5524                "registry:abort_task",
5525            ));
5526        }
5527
5528        if exec.state == TaskExecState::Failed {
5529            return Ok(flow);
5530        }
5531
5532        let event = Event::new(
5533            EventPayload::TaskAborted {
5534                task_id: id,
5535                reason: reason.map(String::from),
5536            },
5537            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, id),
5538        );
5539
5540        self.store.append(event).map_err(|e| {
5541            HivemindError::system("event_append_failed", e.to_string(), "registry:abort_task")
5542        })?;
5543
5544        let event = Event::new(
5545            EventPayload::TaskExecutionFailed {
5546                flow_id: flow.id,
5547                task_id: id,
5548                attempt_id: None,
5549                reason: Some("aborted".to_string()),
5550            },
5551            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, id),
5552        );
5553        self.store.append(event).map_err(|e| {
5554            HivemindError::system("event_append_failed", e.to_string(), "registry:abort_task")
5555        })?;
5556
5557        self.get_flow(&flow.id.to_string())
5558    }
5559
    /// Records a human verification override ("pass" or "fail") for a task.
    ///
    /// Emits a `HumanOverride` event attributed to `HIVEMIND_USER` (or
    /// `USER`) when set. On a "pass" decision it additionally freezes the
    /// task execution, best-effort removes the task's worktrees when the
    /// manager is configured with `cleanup_on_success`, and — when every
    /// execution in the flow is now successful — appends a
    /// `TaskFlowCompleted` event. Returns the flow re-derived after all
    /// events were appended.
    ///
    /// # Errors
    /// User errors for a malformed task ID, an invalid decision string, an
    /// empty reason, a flow that is no longer active, or a task not in an
    /// overridable state; system errors for missing execution records or
    /// event persistence failures.
    #[allow(clippy::too_many_lines)]
    pub fn verify_override(&self, task_id: &str, decision: &str, reason: &str) -> Result<TaskFlow> {
        let origin = "registry:verify_override";

        let id = Uuid::parse_str(task_id).map_err(|_| {
            HivemindError::user(
                "invalid_task_id",
                format!("'{task_id}' is not a valid task ID"),
                origin,
            )
        })?;

        if decision != "pass" && decision != "fail" {
            return Err(HivemindError::user(
                "invalid_decision",
                "Decision must be 'pass' or 'fail'",
                origin,
            ));
        }

        if reason.trim().is_empty() {
            return Err(HivemindError::user(
                "invalid_reason",
                "Reason must be non-empty",
                origin,
            ));
        }

        // Attribute the override to the invoking user when discoverable;
        // blank values are treated as absent.
        let user = env::var("HIVEMIND_USER")
            .or_else(|_| env::var("USER"))
            .ok()
            .filter(|u| !u.trim().is_empty());

        let state = self.state()?;
        let flow = Self::flow_for_task(&state, id, origin)?;

        // Overrides are only meaningful while the flow is still active.
        if matches!(
            flow.state,
            FlowState::Completed
                | FlowState::FrozenForMerge
                | FlowState::Merged
                | FlowState::Aborted
        ) {
            return Err(HivemindError::user(
                "flow_not_active",
                "Cannot override verification for a completed or aborted flow",
                origin,
            ));
        }

        let exec = flow.task_executions.get(&id).ok_or_else(|| {
            HivemindError::system("task_exec_not_found", "Task execution not found", origin)
        })?;

        // Allow overrides both during verification and after automated decisions.
        // This supports overriding failed checks/verifier outcomes (Retry/Failed/Escalated).
        if !matches!(
            exec.state,
            TaskExecState::Verifying
                | TaskExecState::Retry
                | TaskExecState::Failed
                | TaskExecState::Escalated
        ) {
            return Err(HivemindError::user(
                "task_not_overridable",
                "Task is not in an overridable state",
                origin,
            ));
        }

        let event = Event::new(
            EventPayload::HumanOverride {
                task_id: id,
                override_type: "VERIFICATION_OVERRIDE".to_string(),
                decision: decision.to_string(),
                reason: reason.to_string(),
                user,
            },
            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, id),
        );

        self.store
            .append(event)
            .map_err(|e| HivemindError::system("event_append_failed", e.to_string(), origin))?;

        // Re-derive the flow so the override is reflected before follow-ups.
        let updated = self.get_flow(&flow.id.to_string())?;

        if decision == "pass" {
            // Freeze the execution at its resolved commit.
            let frozen_commit_sha = Self::resolve_task_frozen_commit_sha(&updated, &state, id);
            self.emit_task_execution_frozen(&updated, id, frozen_commit_sha, origin)?;

            // Best-effort worktree cleanup; failures here are intentionally
            // ignored so cleanup never blocks the override.
            if let Ok(managers) =
                Self::worktree_managers_for_flow(&updated, &state, "registry:verify_override")
            {
                for (_repo_name, manager) in managers {
                    if manager.config().cleanup_on_success {
                        if let Ok(status) = manager.inspect(updated.id, id) {
                            if status.is_worktree {
                                let _ = manager.remove(&status.path);
                            }
                        }
                    }
                }
            }

            // If this override made the last outstanding task succeed, mark
            // the whole flow completed (best-effort append).
            let all_success = updated
                .task_executions
                .values()
                .all(|e| e.state == TaskExecState::Success);
            if all_success {
                let event = Event::new(
                    EventPayload::TaskFlowCompleted {
                        flow_id: updated.id,
                    },
                    CorrelationIds::for_graph_flow(
                        updated.project_id,
                        updated.graph_id,
                        updated.id,
                    ),
                );
                let _ = self.store.append(event);
            }
        }

        // Return the flow derived after all events from this call.
        self.get_flow(&flow.id.to_string())
    }
5686
5687    #[allow(clippy::too_many_lines)]
5688    pub fn merge_prepare(
5689        &self,
5690        flow_id: &str,
5691        target_branch: Option<&str>,
5692    ) -> Result<crate::core::state::MergeState> {
5693        let origin = "registry:merge_prepare";
5694        let mut flow = self.get_flow(flow_id)?;
5695
5696        if !matches!(flow.state, FlowState::Completed | FlowState::FrozenForMerge) {
5697            return Err(HivemindError::user(
5698                "flow_not_completed",
5699                "Flow has not completed successfully",
5700                origin,
5701            ));
5702        }
5703
5704        let mut state = self.state()?;
5705        if let Some(ms) = state.merge_states.get(&flow.id) {
5706            if ms.status == crate::core::state::MergeStatus::Prepared && ms.conflicts.is_empty() {
5707                return Ok(ms.clone());
5708            }
5709        }
5710
5711        let _integration_lock = self.acquire_flow_integration_lock(flow.id, origin)?;
5712        self.emit_integration_lock_acquired(&flow, "merge_prepare", origin)?;
5713
5714        if flow.state == FlowState::Completed {
5715            self.append_event(
5716                Event::new(
5717                    EventPayload::FlowFrozenForMerge { flow_id: flow.id },
5718                    CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
5719                ),
                origin,
            )?;
            flow = self.get_flow(flow_id)?;
            state = self.state()?;
        }

        // The graph must exist for a known flow; a miss means the derived
        // state is inconsistent, so surface it as a system error.
        let graph = state.graphs.get(&flow.graph_id).ok_or_else(|| {
            HivemindError::system(
                "graph_not_found",
                "Graph not found",
                "registry:merge_prepare",
            )
        })?;

        // Accumulated across both phases below: human-readable conflict
        // details and (task_id, integrated commit SHA) pairs.
        let mut conflicts = Vec::new();
        let mut integrated_tasks: Vec<(Uuid, Option<String>)> = Vec::new();
        let mut managers =
            Self::worktree_managers_for_flow(&flow, &state, "registry:merge_prepare")?;

        let project = state.projects.get(&flow.project_id).ok_or_else(|| {
            HivemindError::system(
                "project_not_found",
                "Project not found",
                "registry:merge_prepare",
            )
        })?;
        if project.repositories.is_empty() {
            return Err(HivemindError::user(
                "project_has_no_repo",
                "Project has no attached repository",
                "registry:merge_prepare",
            ));
        }
        // Take the first manager as the primary repository; the remaining
        // managers are handled in the second phase below. NOTE(review):
        // assumes worktree_managers_for_flow lists the primary repo first —
        // confirm against its implementation.
        let (_primary_repo_name, manager) = managers.drain(..1).next().ok_or_else(|| {
            HivemindError::user(
                "project_has_no_repo",
                "Project has no attached repository",
                "registry:merge_prepare",
            )
        })?;

        // Phase 1: merge every successful task into a scratch integration
        // worktree of the primary repo, then run the tasks' merge checks
        // there. The block evaluates to the resolved target branch name.
        let prepared_target_branch = {
            let repo_path = manager.repo_path();
            // `rev-parse --abbrev-ref HEAD` yields the branch name, or the
            // literal "HEAD" when detached; any git failure also maps to
            // "HEAD" and is rejected below.
            let current_branch = std::process::Command::new("git")
                .current_dir(repo_path)
                .args(["rev-parse", "--abbrev-ref", "HEAD"])
                .output()
                .ok()
                .filter(|o| o.status.success())
                .map_or_else(
                    || "HEAD".to_string(),
                    |o| String::from_utf8_lossy(&o.stdout).trim().to_string(),
                );

            let main_exists = std::process::Command::new("git")
                .current_dir(repo_path)
                .args(["show-ref", "--verify", "--quiet", "refs/heads/main"])
                .status()
                .map(|s| s.success())
                .unwrap_or(false);

            // Target precedence: explicit argument, then "main" if present,
            // then the currently checked-out branch.
            let target = target_branch.map_or_else(
                || {
                    if main_exists {
                        "main".to_string()
                    } else {
                        current_branch
                    }
                },
                ToString::to_string,
            );
            if target == "HEAD" {
                return Err(HivemindError::user(
                    "detached_head",
                    "Cannot prepare merge from detached HEAD",
                    "registry:merge_prepare",
                )
                .with_hint("Re-run with --target <branch> or checkout a branch"));
            }
            let base_ref = target.as_str();

            let merge_branch = format!("integration/{}/prepare", flow.id);
            let merge_path = manager
                .config()
                .base_dir
                .join(flow.id.to_string())
                .join("_integration_prepare");

            // Best-effort removal of a stale worktree left by an earlier
            // prepare; errors are deliberately ignored.
            if merge_path.exists() {
                let _ = std::process::Command::new("git")
                    .current_dir(manager.repo_path())
                    .args([
                        "worktree",
                        "remove",
                        "--force",
                        merge_path.to_str().unwrap_or(""),
                    ])
                    .output();
                let _ = fs::remove_dir_all(&merge_path);
            }

            if let Some(parent) = merge_path.parent() {
                fs::create_dir_all(parent).map_err(|e| {
                    HivemindError::system(
                        "create_dir_failed",
                        e.to_string(),
                        "registry:merge_prepare",
                    )
                })?;
            }

            // `worktree add -B` (re)creates the integration branch at the
            // target ref and checks it out in the scratch path.
            let add = std::process::Command::new("git")
                .current_dir(manager.repo_path())
                .args([
                    "worktree",
                    "add",
                    "-B",
                    &merge_branch,
                    merge_path.to_str().unwrap_or(""),
                    base_ref,
                ])
                .output()
                .map_err(|e| {
                    HivemindError::system(
                        "git_worktree_add_failed",
                        e.to_string(),
                        "registry:merge_prepare",
                    )
                })?;
            if !add.status.success() {
                return Err(HivemindError::git(
                    "git_worktree_add_failed",
                    String::from_utf8_lossy(&add.stderr).to_string(),
                    "registry:merge_prepare",
                ));
            }

            // Integrate tasks in topological order; only tasks whose
            // execution finished in Success are considered.
            for task_id in graph.topological_order() {
                if flow
                    .task_executions
                    .get(&task_id)
                    .is_none_or(|e| e.state != TaskExecState::Success)
                {
                    continue;
                }

                let task_branch = format!("exec/{}/{task_id}", flow.id);
                let task_ref = format!("refs/heads/{task_branch}");

                let ref_exists = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["show-ref", "--verify", "--quiet", &task_ref])
                    .status()
                    .map(|s| s.success())
                    .unwrap_or(false);

                // A successful task without its exec branch is recorded as a
                // conflict and aborts the integration loop.
                if !ref_exists {
                    let details = format!("task {task_id}: missing branch '{task_branch}'");
                    conflicts.push(details.clone());
                    self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                    break;
                }

                let _ = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["checkout", &merge_branch])
                    .output();

                // Drift detection: each dependency's branch tip must already
                // be an ancestor of this task's branch (`merge-base
                // --is-ancestor` exits 0 when it is).
                if let Some(deps) = graph.dependencies.get(&task_id) {
                    for dep in deps {
                        let dep_branch = format!("exec/{}/{dep}", flow.id);
                        let Some(dep_sha) = Self::resolve_git_ref(&merge_path, &dep_branch) else {
                            let details = format!(
                                "task {task_id}: dependency branch missing for {dep_branch}"
                            );
                            conflicts.push(details.clone());
                            self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                            break;
                        };

                        let contains_dependency = std::process::Command::new("git")
                            .current_dir(&merge_path)
                            .args(["merge-base", "--is-ancestor", &dep_sha, &task_branch])
                            .status()
                            .map(|s| s.success())
                            .unwrap_or(false);
                        if !contains_dependency {
                            let details = format!(
                                "task {task_id}: drift detected (missing prerequisite integrated changes)"
                            );
                            conflicts.push(details.clone());
                            self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                            break;
                        }
                    }

                    // The inner `break`s only exit the dependency loop;
                    // re-check here so a dependency conflict stops the
                    // whole integration pass.
                    if !conflicts.is_empty() {
                        break;
                    }
                }

                // Merge the task into a per-task sandbox branch first so a
                // failed merge never leaves the integration branch dirty.
                let sandbox_branch = format!("integration/{}/{task_id}", flow.id);
                let checkout = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["checkout", "-B", &sandbox_branch, &merge_branch])
                    .output()
                    .map_err(|e| {
                        HivemindError::system(
                            "git_checkout_failed",
                            e.to_string(),
                            "registry:merge_prepare",
                        )
                    })?;
                if !checkout.status.success() {
                    return Err(HivemindError::git(
                        "git_checkout_failed",
                        String::from_utf8_lossy(&checkout.stderr).to_string(),
                        "registry:merge_prepare",
                    ));
                }

                // `--no-commit --no-ff` stages the merge without committing;
                // identity is pinned via env vars and -c overrides so the
                // result does not depend on host git configuration.
                let merge = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .env("GIT_AUTHOR_NAME", "Hivemind")
                    .env("GIT_AUTHOR_EMAIL", "hivemind@example.com")
                    .env("GIT_COMMITTER_NAME", "Hivemind")
                    .env("GIT_COMMITTER_EMAIL", "hivemind@example.com")
                    .args([
                        "-c",
                        "user.name=Hivemind",
                        "-c",
                        "user.email=hivemind@example.com",
                        "-c",
                        "commit.gpgsign=false",
                        "merge",
                        "--no-commit",
                        "--no-ff",
                        &task_branch,
                    ])
                    .output()
                    .map_err(|e| {
                        HivemindError::system(
                            "git_merge_failed",
                            e.to_string(),
                            "registry:merge_prepare",
                        )
                    })?;

                if !merge.status.success() {
                    // Prefer listing the unmerged paths over raw stderr when
                    // they can be retrieved.
                    let unmerged = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["diff", "--name-only", "--diff-filter=U"])
                        .output()
                        .ok()
                        .filter(|o| o.status.success())
                        .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
                        .unwrap_or_default();

                    let details = if unmerged.is_empty() {
                        String::from_utf8_lossy(&merge.stderr).to_string()
                    } else {
                        format!("conflicts in: {unmerged}")
                    };
                    conflicts.push(format!("task {task_id}: {details}"));
                    self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;

                    // Best-effort rollback of the half-done merge before
                    // aborting the loop.
                    let _ = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["merge", "--abort"])
                        .output();
                    let _ = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["checkout", &merge_branch])
                        .output();
                    break;
                }

                // No MERGE_HEAD means the merge was a no-op (nothing staged
                // to commit), so there is nothing to record for this task.
                let merge_in_progress = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["rev-parse", "-q", "--verify", "MERGE_HEAD"])
                    .status()
                    .map(|s| s.success())
                    .unwrap_or(false);
                if !merge_in_progress {
                    continue;
                }

                // Trailer-style commit message carrying provenance metadata.
                let commit_msg = format!(
                        "Integrate task {task_id}\n\nFlow: {}\nTask: {task_id}\nTarget: {target}\nVerification-Summary: task_checks_passed\nTimestamp: {}\nHivemind-Version: {}",
                        flow.id,
                        Utc::now().to_rfc3339(),
                        env!("CARGO_PKG_VERSION")
                    );
                let commit = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .env("GIT_AUTHOR_NAME", "Hivemind")
                    .env("GIT_AUTHOR_EMAIL", "hivemind@example.com")
                    .env("GIT_COMMITTER_NAME", "Hivemind")
                    .env("GIT_COMMITTER_EMAIL", "hivemind@example.com")
                    .args([
                        "-c",
                        "user.name=Hivemind",
                        "-c",
                        "user.email=hivemind@example.com",
                        "-c",
                        "commit.gpgsign=false",
                        "commit",
                        "-m",
                        &commit_msg,
                    ])
                    .output()
                    .map_err(|e| {
                        HivemindError::system(
                            "git_commit_failed",
                            e.to_string(),
                            "registry:merge_prepare",
                        )
                    })?;
                if !commit.status.success() {
                    return Err(HivemindError::git(
                        "git_commit_failed",
                        String::from_utf8_lossy(&commit.stderr).to_string(),
                        "registry:merge_prepare",
                    ));
                }

                // Promote the sandbox result onto the integration branch via
                // fast-forward only; a non-ff promotion is treated as a
                // conflict rather than a hard error.
                let _ = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["checkout", &merge_branch])
                    .output();
                let promote = std::process::Command::new("git")
                    .current_dir(&merge_path)
                    .args(["merge", "--ff-only", &sandbox_branch])
                    .output()
                    .map_err(|e| {
                        HivemindError::system(
                            "git_merge_failed",
                            e.to_string(),
                            "registry:merge_prepare",
                        )
                    })?;
                if !promote.status.success() {
                    let details = String::from_utf8_lossy(&promote.stderr).trim().to_string();
                    conflicts.push(format!("task {task_id}: {details}"));
                    self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                    break;
                }

                let integrated_sha = Self::resolve_git_ref(&merge_path, "HEAD");
                integrated_tasks.push((task_id, integrated_sha));
            }

            // Merge checks: run the union of all successful tasks' checks
            // against the integrated tree; skipped entirely when any
            // conflict was recorded above.
            if conflicts.is_empty() {
                let target_dir = self
                    .config
                    .data_dir
                    .join("cargo-target")
                    .join(flow.id.to_string())
                    .join("_integration_prepare")
                    .join("checks");
                let _ = fs::create_dir_all(&target_dir);

                // De-duplicate checks by (name, command); a check required
                // by any task stays required, and the first concrete
                // timeout wins.
                let mut unique_checks: Vec<crate::core::verification::CheckConfig> = Vec::new();
                for task_id in graph.topological_order() {
                    if flow
                        .task_executions
                        .get(&task_id)
                        .is_none_or(|e| e.state != TaskExecState::Success)
                    {
                        continue;
                    }
                    if let Some(task) = graph.tasks.get(&task_id) {
                        for check in &task.criteria.checks {
                            if let Some(existing) = unique_checks
                                .iter_mut()
                                .find(|c| c.name == check.name && c.command == check.command)
                            {
                                existing.required = existing.required || check.required;
                                if existing.timeout_ms.is_none() {
                                    existing.timeout_ms = check.timeout_ms;
                                }
                            } else {
                                unique_checks.push(check.clone());
                            }
                        }
                    }
                }

                for check in &unique_checks {
                    self.append_event(
                        Event::new(
                            EventPayload::MergeCheckStarted {
                                flow_id: flow.id,
                                task_id: None,
                                check_name: check.name.clone(),
                                required: check.required,
                            },
                            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
                        ),
                        "registry:merge_prepare",
                    )?;

                    // Exit code 127 ("command not found") stands in for a
                    // failure to launch the check at all.
                    let started = Instant::now();
                    let (exit_code, combined) = match Self::run_check_command(
                        &merge_path,
                        &target_dir,
                        &check.command,
                        check.timeout_ms,
                    ) {
                        Ok((exit_code, output, _timed_out)) => (exit_code, output),
                        Err(e) => (127, e.to_string()),
                    };
                    // Saturate instead of overflowing when converting the
                    // elapsed millis down to u64.
                    let duration_ms =
                        u64::try_from(started.elapsed().as_millis().min(u128::from(u64::MAX)))
                            .unwrap_or(u64::MAX);
                    let passed = exit_code == 0;

                    // Persist the check output under a filesystem-safe name
                    // (non-alphanumerics replaced with '_').
                    let safe_name = check
                        .name
                        .chars()
                        .map(|c| if c.is_ascii_alphanumeric() { c } else { '_' })
                        .collect::<String>();
                    let out_path = target_dir.join(format!("merge_check_{safe_name}.log"));
                    if let Err(e) = fs::write(&out_path, &combined) {
                        let details = format!(
                            "failed to write check output for {} to {}: {}",
                            check.name,
                            out_path.display(),
                            e
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, None, details, origin)?;
                        break;
                    }

                    self.append_event(
                        Event::new(
                            EventPayload::MergeCheckCompleted {
                                flow_id: flow.id,
                                task_id: None,
                                check_name: check.name.clone(),
                                passed,
                                exit_code,
                                output: combined.clone(),
                                duration_ms,
                                required: check.required,
                            },
                            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
                        ),
                        "registry:merge_prepare",
                    )?;

                    // Only required checks gate the merge; optional failures
                    // are recorded via events but do not abort.
                    if check.required && !passed {
                        let details = format!(
                            "required check failed: {} (exit={exit_code}, duration={}ms)",
                            check.name, duration_ms
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, None, details, origin)?;
                        if !combined.trim().is_empty() {
                            let snippet = combined.lines().take(10).collect::<Vec<_>>().join("\n");
                            conflicts.push(format!("check output (first lines): {snippet}"));
                        }
                        break;
                    }
                }
            }

            // On a clean run, point flow/<id> at the integrated result and
            // record one TaskIntegratedIntoFlow event per integrated task.
            if conflicts.is_empty() {
                let flow_branch = format!("flow/{}", flow.id);
                let update = std::process::Command::new("git")
                    .current_dir(manager.repo_path())
                    .args(["branch", "-f", &flow_branch, &merge_branch])
                    .output()
                    .map_err(|e| {
                        HivemindError::system(
                            "git_branch_update_failed",
                            e.to_string(),
                            "registry:merge_prepare",
                        )
                    })?;
                if !update.status.success() {
                    return Err(HivemindError::git(
                        "git_branch_update_failed",
                        String::from_utf8_lossy(&update.stderr).to_string(),
                        "registry:merge_prepare",
                    ));
                }

                for (task_id, commit_sha) in &integrated_tasks {
                    self.append_event(
                        Event::new(
                            EventPayload::TaskIntegratedIntoFlow {
                                flow_id: flow.id,
                                task_id: *task_id,
                                commit_sha: commit_sha.clone(),
                            },
                            CorrelationIds::for_graph_flow_task(
                                flow.project_id,
                                flow.graph_id,
                                flow.id,
                                *task_id,
                            ),
                        ),
                        origin,
                    )?;
                }
            }

            target
        };

        // Phase 2: replay the same integration for each remaining
        // (secondary) repository. Unlike phase 1, no merge checks run here
        // and a worktree-add failure skips to the next repo instead of
        // aborting.
        if conflicts.is_empty() {
            for (repo_name, manager) in managers {
                let merge_branch = format!("integration/{}/prepare", flow.id);
                let merge_path = manager
                    .config()
                    .base_dir
                    .join(flow.id.to_string())
                    .join("_integration_prepare");

                // Best-effort cleanup of any stale worktree, as in phase 1.
                if merge_path.exists() {
                    let _ = std::process::Command::new("git")
                        .current_dir(manager.repo_path())
                        .args([
                            "worktree",
                            "remove",
                            "--force",
                            merge_path.to_str().unwrap_or(""),
                        ])
                        .output();
                    let _ = fs::remove_dir_all(&merge_path);
                }

                if let Some(parent) = merge_path.parent() {
                    fs::create_dir_all(parent).map_err(|e| {
                        HivemindError::system("create_dir_failed", e.to_string(), origin)
                    })?;
                }

                // NOTE(review): secondary repos are based on the branch name
                // resolved for the primary repo — this assumes the same
                // branch exists in every attached repository; confirm.
                let add = std::process::Command::new("git")
                    .current_dir(manager.repo_path())
                    .args([
                        "worktree",
                        "add",
                        "-B",
                        &merge_branch,
                        merge_path.to_str().unwrap_or(""),
                        &prepared_target_branch,
                    ])
                    .output()
                    .map_err(|e| {
                        HivemindError::system("git_worktree_add_failed", e.to_string(), origin)
                    })?;
                if !add.status.success() {
                    let details = format!(
                        "repo {repo_name}: {}",
                        String::from_utf8_lossy(&add.stderr).trim()
                    );
                    conflicts.push(details.clone());
                    self.emit_merge_conflict(&flow, None, details, origin)?;
                    continue;
                }

                for task_id in graph.topological_order() {
                    if flow
                        .task_executions
                        .get(&task_id)
                        .is_none_or(|e| e.state != TaskExecState::Success)
                    {
                        continue;
                    }
                    let task_branch = format!("exec/{}/{task_id}", flow.id);
                    let task_ref = format!("refs/heads/{task_branch}");
                    if !Self::git_ref_exists(&merge_path, &task_ref) {
                        let details = format!("repo {repo_name}: task {task_id}: missing branch");
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                        break;
                    }

                    // Same sandbox-then-promote scheme as phase 1; failures
                    // here are recorded as conflicts, not hard errors.
                    let sandbox_branch = format!("integration/{}/{task_id}", flow.id);
                    let checkout = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["checkout", "-B", &sandbox_branch, &merge_branch])
                        .output()
                        .map_err(|e| {
                            HivemindError::system("git_checkout_failed", e.to_string(), origin)
                        })?;
                    if !checkout.status.success() {
                        let details = format!(
                            "repo {repo_name}: task {task_id}: {}",
                            String::from_utf8_lossy(&checkout.stderr).trim()
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                        break;
                    }

                    let merge = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args([
                            "-c",
                            "user.name=Hivemind",
                            "-c",
                            "user.email=hivemind@example.com",
                            "-c",
                            "commit.gpgsign=false",
                            "merge",
                            "--no-commit",
                            "--no-ff",
                            &task_branch,
                        ])
                        .output()
                        .map_err(|e| {
                            HivemindError::system("git_merge_failed", e.to_string(), origin)
                        })?;
                    if !merge.status.success() {
                        let details = format!(
                            "repo {repo_name}: task {task_id}: {}",
                            String::from_utf8_lossy(&merge.stderr).trim()
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                        let _ = std::process::Command::new("git")
                            .current_dir(&merge_path)
                            .args(["merge", "--abort"])
                            .output();
                        break;
                    }

                    // A no-op merge (no MERGE_HEAD) just returns to the
                    // integration branch and moves on.
                    let merge_in_progress = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["rev-parse", "-q", "--verify", "MERGE_HEAD"])
                        .status()
                        .map(|s| s.success())
                        .unwrap_or(false);
                    if !merge_in_progress {
                        let _ = std::process::Command::new("git")
                            .current_dir(&merge_path)
                            .args(["checkout", &merge_branch])
                            .output();
                        continue;
                    }

                    let commit_msg = format!(
                        "Integrate task {task_id}\n\nFlow: {}\nTask: {task_id}\nTarget: {}\nRepository: {}\nTimestamp: {}\nHivemind-Version: {}",
                        flow.id,
                        prepared_target_branch,
                        repo_name,
                        Utc::now().to_rfc3339(),
                        env!("CARGO_PKG_VERSION")
                    );
                    let commit = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args([
                            "-c",
                            "user.name=Hivemind",
                            "-c",
                            "user.email=hivemind@example.com",
                            "-c",
                            "commit.gpgsign=false",
                            "commit",
                            "-m",
                            &commit_msg,
                        ])
                        .output()
                        .map_err(|e| {
                            HivemindError::system("git_commit_failed", e.to_string(), origin)
                        })?;
                    if !commit.status.success() {
                        let details = format!(
                            "repo {repo_name}: task {task_id}: {}",
                            String::from_utf8_lossy(&commit.stderr).trim()
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                        break;
                    }

                    let _ = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["checkout", &merge_branch])
                        .output();
                    let promote = std::process::Command::new("git")
                        .current_dir(&merge_path)
                        .args(["merge", "--ff-only", &sandbox_branch])
                        .output()
                        .map_err(|e| {
                            HivemindError::system("git_merge_failed", e.to_string(), origin)
                        })?;
                    if !promote.status.success() {
                        let details = format!(
                            "repo {repo_name}: task {task_id}: {}",
                            String::from_utf8_lossy(&promote.stderr).trim()
                        );
                        conflicts.push(details.clone());
                        self.emit_merge_conflict(&flow, Some(task_id), details, origin)?;
                        break;
                    }
                }

                // Flow-branch update is best-effort here (errors ignored),
                // unlike the hard failure in phase 1.
                if conflicts.is_empty() {
                    let flow_branch = format!("flow/{}", flow.id);
                    let _ = std::process::Command::new("git")
                        .current_dir(manager.repo_path())
                        .args(["branch", "-f", &flow_branch, &merge_branch])
                        .output();
                }
            }
        }

        // Record the outcome — including any accumulated conflicts — as a
        // single MergePrepared event.
        let event = Event::new(
            EventPayload::MergePrepared {
                flow_id: flow.id,
                target_branch: Some(prepared_target_branch),
                conflicts,
            },
            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system(
                "event_append_failed",
                e.to_string(),
                "registry:merge_prepare",
            )
        })?;

        // Re-derive state so the returned merge state reflects the event
        // just appended.
        let state = self.state()?;
        state.merge_states.get(&flow.id).cloned().ok_or_else(|| {
            HivemindError::system(
                "merge_state_not_found",
                "Merge state not found after prepare",
                "registry:merge_prepare",
            )
        })
    }
6458
6459    pub fn merge_approve(&self, flow_id: &str) -> Result<crate::core::state::MergeState> {
6460        let flow = self.get_flow(flow_id)?;
6461
6462        let state = self.state()?;
6463        let ms = state.merge_states.get(&flow.id).ok_or_else(|| {
6464            HivemindError::user(
6465                "merge_not_prepared",
6466                "No merge preparation exists for this flow",
6467                "registry:merge_approve",
6468            )
6469        })?;
6470
6471        if ms.status == crate::core::state::MergeStatus::Approved {
6472            return Ok(ms.clone());
6473        }
6474
6475        if !ms.conflicts.is_empty() {
6476            return Err(HivemindError::user(
6477                "unresolved_conflicts",
6478                "Merge has unresolved conflicts",
6479                "registry:merge_approve",
6480            ));
6481        }
6482
6483        let user = env::var("HIVEMIND_USER")
6484            .or_else(|_| env::var("USER"))
6485            .ok()
6486            .filter(|u| !u.trim().is_empty());
6487
6488        let event = Event::new(
6489            EventPayload::MergeApproved {
6490                flow_id: flow.id,
6491                user,
6492            },
6493            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
6494        );
6495
6496        self.store.append(event).map_err(|e| {
6497            HivemindError::system(
6498                "event_append_failed",
6499                e.to_string(),
6500                "registry:merge_approve",
6501            )
6502        })?;
6503
6504        let state = self.state()?;
6505        state.merge_states.get(&flow.id).cloned().ok_or_else(|| {
6506            HivemindError::system(
6507                "merge_state_not_found",
6508                "Merge state not found after approve",
6509                "registry:merge_approve",
6510            )
6511        })
6512    }
6513
    /// Executes an approved merge: fast-forwards the target branch of every
    /// attached repository onto the prepared integration branch, then emits
    /// `MergeCompleted` with the merged commit hashes.
    ///
    /// Preconditions enforced here: a merge state exists and is `Approved`,
    /// and the flow is in `FrozenForMerge`. Git side effects on earlier
    /// repos are rolled back (best effort) if a later repo fails to merge.
    #[allow(clippy::too_many_lines)]
    pub fn merge_execute(&self, flow_id: &str) -> Result<crate::core::state::MergeState> {
        let origin = "registry:merge_execute";
        let flow = self.get_flow(flow_id)?;

        let state = self.state()?;
        let ms = state.merge_states.get(&flow.id).ok_or_else(|| {
            HivemindError::user(
                "merge_not_prepared",
                "No merge preparation exists for this flow",
                "registry:merge_execute",
            )
        })?;

        // Only an explicitly approved merge may be executed.
        if ms.status != crate::core::state::MergeStatus::Approved {
            return Err(HivemindError::user(
                "merge_not_approved",
                "Merge has not been approved",
                "registry:merge_execute",
            ));
        }

        if flow.state != FlowState::FrozenForMerge {
            return Err(HivemindError::user(
                "flow_not_frozen_for_merge",
                "Flow must be frozen for merge before execution",
                origin,
            ));
        }

        // Take the flow's integration lock; the guard stays alive until this
        // call returns.
        let _integration_lock = self.acquire_flow_integration_lock(flow.id, origin)?;
        self.emit_integration_lock_acquired(&flow, "merge_execute", origin)?;

        let mut commits = Vec::new();
        if state.projects.contains_key(&flow.project_id) {
            let managers = Self::worktree_managers_for_flow(&flow, &state, origin)?;
            let merge_branch = format!("integration/{}/prepare", flow.id);
            let merge_ref = format!("refs/heads/{merge_branch}");

            // Pass 1: validate every repo before touching any of them, so a
            // late validation failure cannot leave earlier repos half-merged.
            let mut repo_merge_meta: Vec<(String, PathBuf, String, String, WorktreeManager)> =
                Vec::new();
            for (repo_name, manager) in managers {
                let repo_path = manager.repo_path().to_path_buf();
                let dirty = std::process::Command::new("git")
                    .current_dir(&repo_path)
                    .args(["status", "--porcelain"])
                    .output()
                    .map_err(|e| {
                        HivemindError::system("git_status_failed", e.to_string(), origin)
                    })?;
                if !dirty.status.success() {
                    return Err(HivemindError::git(
                        "git_status_failed",
                        String::from_utf8_lossy(&dirty.stderr).to_string(),
                        origin,
                    ));
                }
                // Uncommitted changes block the merge; entries under
                // ".hivemind/" are ignored.
                let has_dirty_files = String::from_utf8_lossy(&dirty.stdout)
                    .lines()
                    .map(str::trim)
                    .filter(|l| !l.is_empty())
                    .any(|l| {
                        // Porcelain lines are "XY path"; untracked files use
                        // the "?? path" form.
                        let path = l
                            .strip_prefix("?? ")
                            .or_else(|| l.get(3..))
                            .unwrap_or("")
                            .trim();
                        !path.starts_with(".hivemind/")
                    });
                if has_dirty_files {
                    return Err(HivemindError::user(
                        "repo_dirty",
                        format!("Repository '{repo_name}' has uncommitted changes"),
                        origin,
                    ));
                }

                let current_branch = std::process::Command::new("git")
                    .current_dir(&repo_path)
                    .args(["rev-parse", "--abbrev-ref", "HEAD"])
                    .output()
                    .ok()
                    .filter(|o| o.status.success())
                    .map_or_else(
                        || "HEAD".to_string(),
                        |o| String::from_utf8_lossy(&o.stdout).trim().to_string(),
                    );
                // Default merge target is whatever branch is checked out.
                let target = ms
                    .target_branch
                    .clone()
                    .unwrap_or_else(|| current_branch.clone());
                if target == "HEAD" {
                    return Err(HivemindError::user(
                        "detached_head",
                        format!("Repository '{repo_name}' is in detached HEAD"),
                        origin,
                    ));
                }
                if !Self::git_ref_exists(&repo_path, &merge_ref) {
                    return Err(HivemindError::user(
                        "merge_branch_not_found",
                        format!("Prepared integration branch not found in repo '{repo_name}'"),
                        origin,
                    )
                    .with_hint("Run 'hivemind merge prepare' again"));
                }

                // Require a fast-forward: target must already be an ancestor
                // of the prepared integration branch.
                let ff_possible = std::process::Command::new("git")
                    .current_dir(&repo_path)
                    .args(["merge-base", "--is-ancestor", &target, &merge_branch])
                    .status()
                    .map(|s| s.success())
                    .unwrap_or(false);
                if !ff_possible {
                    return Err(HivemindError::git(
                        "git_merge_failed",
                        format!("Fast-forward is not possible in repo '{repo_name}'"),
                        origin,
                    ));
                }

                repo_merge_meta.push((repo_name, repo_path, current_branch, target, manager));
            }

            // Pass 2: perform the merges, remembering each repo's pre-merge
            // HEAD and branch so a later failure can undo earlier merges.
            let mut merged: Vec<(PathBuf, String, String)> = Vec::new();
            for (repo_name, repo_path, current_branch, target, manager) in &repo_merge_meta {
                if current_branch != target {
                    let checkout = std::process::Command::new("git")
                        .current_dir(repo_path)
                        .args(["checkout", target])
                        .output()
                        .map_err(|e| {
                            HivemindError::system("git_checkout_failed", e.to_string(), origin)
                        })?;
                    if !checkout.status.success() {
                        return Err(HivemindError::git(
                            "git_checkout_failed",
                            String::from_utf8_lossy(&checkout.stderr).to_string(),
                            origin,
                        ));
                    }
                }

                let old_head = std::process::Command::new("git")
                    .current_dir(repo_path)
                    .args(["rev-parse", "HEAD"])
                    .output()
                    .ok()
                    .filter(|o| o.status.success())
                    .map_or_else(
                        || "HEAD".to_string(),
                        |o| String::from_utf8_lossy(&o.stdout).trim().to_string(),
                    );
                let merge_out = std::process::Command::new("git")
                    .current_dir(repo_path)
                    .args(["merge", "--ff-only", &merge_branch])
                    .output()
                    .map_err(|e| {
                        HivemindError::system("git_merge_failed", e.to_string(), origin)
                    })?;
                if !merge_out.status.success() {
                    // Best-effort rollback of repos merged earlier in this
                    // loop, in reverse order; rollback errors are ignored.
                    for (merged_repo, rollback_head, checkout_back) in merged.iter().rev() {
                        let _ = std::process::Command::new("git")
                            .current_dir(merged_repo)
                            .args(["reset", "--hard", rollback_head])
                            .output();
                        let _ = std::process::Command::new("git")
                            .current_dir(merged_repo)
                            .args(["checkout", checkout_back])
                            .output();
                    }
                    return Err(HivemindError::git(
                        "git_merge_failed",
                        format!(
                            "Merge failed in repo '{repo_name}': {}",
                            String::from_utf8_lossy(&merge_out.stderr).trim()
                        ),
                        origin,
                    ));
                }

                let new_head = std::process::Command::new("git")
                    .current_dir(repo_path)
                    .args(["rev-parse", "HEAD"])
                    .output()
                    .ok()
                    .filter(|o| o.status.success())
                    .map_or_else(
                        || "HEAD".to_string(),
                        |o| String::from_utf8_lossy(&o.stdout).trim().to_string(),
                    );
                // Collect the commits this merge introduced, oldest first
                // (`--reverse`).
                let rev_list = std::process::Command::new("git")
                    .current_dir(repo_path)
                    .args(["rev-list", "--reverse", &format!("{old_head}..{new_head}")])
                    .output()
                    .ok()
                    .filter(|o| o.status.success())
                    .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
                    .unwrap_or_default();
                if !rev_list.is_empty() {
                    commits.extend(
                        rev_list
                            .lines()
                            .map(str::trim)
                            .filter(|s| !s.is_empty())
                            .map(String::from),
                    );
                }
                merged.push((repo_path.clone(), old_head, current_branch.clone()));

                // Restore whatever branch was checked out before the merge.
                if current_branch != target {
                    let _ = std::process::Command::new("git")
                        .current_dir(repo_path)
                        .args(["checkout", current_branch])
                        .output();
                }

                // Cleanup: remove the temporary merge/prepare worktrees and
                // the prepare branch; all failures here are ignored.
                let merge_path = manager
                    .config()
                    .base_dir
                    .join(flow.id.to_string())
                    .join("_merge");
                if merge_path.exists() {
                    let _ = std::process::Command::new("git")
                        .current_dir(repo_path)
                        .args([
                            "worktree",
                            "remove",
                            "--force",
                            merge_path.to_str().unwrap_or(""),
                        ])
                        .output();
                    let _ = fs::remove_dir_all(&merge_path);
                }
                let prepare_path = manager
                    .config()
                    .base_dir
                    .join(flow.id.to_string())
                    .join("_integration_prepare");
                if prepare_path.exists() {
                    let _ = std::process::Command::new("git")
                        .current_dir(repo_path)
                        .args([
                            "worktree",
                            "remove",
                            "--force",
                            prepare_path.to_str().unwrap_or(""),
                        ])
                        .output();
                    let _ = fs::remove_dir_all(&prepare_path);
                }
                let prepare_branch = format!("integration/{}/prepare", flow.id);
                let _ = std::process::Command::new("git")
                    .current_dir(repo_path)
                    .args(["branch", "-D", &prepare_branch])
                    .output();
                // Optionally drop per-task exec/integration branches and the
                // flow branch once the merge landed.
                if manager.config().cleanup_on_success {
                    for task_id in flow.task_executions.keys() {
                        let branch = format!("exec/{}/{task_id}", flow.id);
                        let _ = std::process::Command::new("git")
                            .current_dir(repo_path)
                            .args(["branch", "-D", &branch])
                            .output();
                        let integration_branch = format!("integration/{}/{task_id}", flow.id);
                        let _ = std::process::Command::new("git")
                            .current_dir(repo_path)
                            .args(["branch", "-D", &integration_branch])
                            .output();
                    }
                    let flow_branch = format!("flow/{}", flow.id);
                    // Never delete the branch that is currently checked out.
                    if current_branch != &flow_branch {
                        let _ = std::process::Command::new("git")
                            .current_dir(repo_path)
                            .args(["branch", "-D", &flow_branch])
                            .output();
                    }
                }
            }
        }

        let event = Event::new(
            EventPayload::MergeCompleted {
                flow_id: flow.id,
                commits,
            },
            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
        );

        self.store.append(event).map_err(|e| {
            HivemindError::system(
                "event_append_failed",
                e.to_string(),
                "registry:merge_execute",
            )
        })?;

        // Replay to observe the state transition produced by MergeCompleted.
        let state = self.state()?;
        state.merge_states.get(&flow.id).cloned().ok_or_else(|| {
            HivemindError::system(
                "merge_state_not_found",
                "Merge state not found after execute",
                "registry:merge_execute",
            )
        })
    }
6819
6820    pub fn replay_flow(&self, flow_id: &str) -> Result<TaskFlow> {
6821        let fid = Uuid::parse_str(flow_id).map_err(|_| {
6822            HivemindError::user(
6823                "invalid_flow_id",
6824                format!("'{flow_id}' is not a valid flow ID"),
6825                "registry:replay_flow",
6826            )
6827        })?;
6828
6829        let filter = EventFilter {
6830            flow_id: Some(fid),
6831            ..EventFilter::default()
6832        };
6833        let events = self.read_events(&filter)?;
6834        if events.is_empty() {
6835            return Err(HivemindError::user(
6836                "flow_not_found",
6837                format!("No events found for flow '{flow_id}'"),
6838                "registry:replay_flow",
6839            ));
6840        }
6841
6842        let all_events = self.store.read_all().map_err(|e| {
6843            HivemindError::system("event_read_failed", e.to_string(), "registry:replay_flow")
6844        })?;
6845        let flow_related: Vec<Event> = all_events
6846            .into_iter()
6847            .filter(|e| {
6848                e.metadata.correlation.flow_id == Some(fid)
6849                    || match &e.payload {
6850                        EventPayload::TaskFlowCreated { flow_id: f, .. } => *f == fid,
6851                        _ => false,
6852                    }
6853            })
6854            .collect();
6855
6856        let replayed = crate::core::state::AppState::replay(&flow_related);
6857        replayed.flows.get(&fid).cloned().ok_or_else(|| {
6858            HivemindError::user(
6859                "flow_not_found",
6860                format!("Flow '{flow_id}' not found in replayed state"),
6861                "registry:replay_flow",
6862            )
6863        })
6864    }
6865}
6866
6867#[cfg(test)]
6868mod tests {
6869    use super::*;
6870    use crate::core::scope::{FilePermission, FilesystemScope, PathRule};
6871    use crate::storage::event_store::InMemoryEventStore;
6872    use std::process::Command;
6873
6874    fn test_registry() -> Registry {
6875        let store = Arc::new(InMemoryEventStore::new());
6876        let config = RegistryConfig::with_dir(PathBuf::from("/tmp/test"));
6877        Registry::with_store(store, config)
6878    }
6879
6880    fn init_git_repo(repo_dir: &std::path::Path) {
6881        std::fs::create_dir_all(repo_dir).expect("create repo dir");
6882
6883        let out = Command::new("git")
6884            .args(["init"])
6885            .current_dir(repo_dir)
6886            .output()
6887            .expect("git init");
6888        assert!(
6889            out.status.success(),
6890            "git init: {}",
6891            String::from_utf8_lossy(&out.stderr)
6892        );
6893
6894        std::fs::write(repo_dir.join("README.md"), "test\n").expect("write file");
6895
6896        let out = Command::new("git")
6897            .args(["add", "."])
6898            .current_dir(repo_dir)
6899            .output()
6900            .expect("git add");
6901        assert!(
6902            out.status.success(),
6903            "git add: {}",
6904            String::from_utf8_lossy(&out.stderr)
6905        );
6906
6907        let out = Command::new("git")
6908            .args([
6909                "-c",
6910                "user.name=Hivemind",
6911                "-c",
6912                "user.email=hivemind@example.com",
6913                "commit",
6914                "-m",
6915                "init",
6916            ])
6917            .current_dir(repo_dir)
6918            .output()
6919            .expect("git commit");
6920        assert!(
6921            out.status.success(),
6922            "git commit: {}",
6923            String::from_utf8_lossy(&out.stderr)
6924        );
6925
6926        let _ = Command::new("git")
6927            .args(["branch", "-M", "main"])
6928            .current_dir(repo_dir)
6929            .output();
6930    }
6931
6932    fn configure_failing_runtime(registry: &Registry) {
6933        registry
6934            .project_runtime_set(
6935                "proj",
6936                "opencode",
6937                "/usr/bin/env",
6938                None,
6939                &[
6940                    "sh".to_string(),
6941                    "-c".to_string(),
6942                    "echo runtime_started; exit 1".to_string(),
6943                ],
6944                &[],
6945                1000,
6946                4,
6947            )
6948            .unwrap();
6949    }
6950
6951    #[test]
6952    fn create_and_list_projects() {
6953        let registry = test_registry();
6954
6955        registry.create_project("project-a", None).unwrap();
6956        registry
6957            .create_project("project-b", Some("Description"))
6958            .unwrap();
6959
6960        let projects = registry.list_projects().unwrap();
6961        assert_eq!(projects.len(), 2);
6962        assert_eq!(projects[0].name, "project-a");
6963        assert_eq!(projects[1].name, "project-b");
6964    }
6965
6966    #[test]
6967    fn duplicate_project_name_rejected() {
6968        let registry = test_registry();
6969
6970        registry.create_project("test", None).unwrap();
6971        let result = registry.create_project("test", None);
6972
6973        assert!(result.is_err());
6974    }
6975
6976    #[test]
6977    fn get_project_by_name() {
6978        let registry = test_registry();
6979
6980        let created = registry.create_project("my-project", None).unwrap();
6981        let found = registry.get_project("my-project").unwrap();
6982
6983        assert_eq!(created.id, found.id);
6984    }
6985
6986    #[test]
6987    fn get_project_by_id() {
6988        let registry = test_registry();
6989
6990        let created = registry.create_project("my-project", None).unwrap();
6991        let found = registry.get_project(&created.id.to_string()).unwrap();
6992
6993        assert_eq!(created.id, found.id);
6994    }
6995
6996    #[test]
6997    fn update_project() {
6998        let registry = test_registry();
6999
7000        registry.create_project("old-name", None).unwrap();
7001        let updated = registry
7002            .update_project("old-name", Some("new-name"), Some("New desc"))
7003            .unwrap();
7004
7005        assert_eq!(updated.name, "new-name");
7006        assert_eq!(updated.description, Some("New desc".to_string()));
7007    }
7008
7009    #[test]
7010    fn list_graphs_and_flows_support_project_filters() {
7011        let registry = test_registry();
7012        registry.create_project("proj-a", None).unwrap();
7013        registry.create_project("proj-b", None).unwrap();
7014
7015        let a_task = registry
7016            .create_task("proj-a", "Task A", None, None)
7017            .unwrap();
7018        let b_task = registry
7019            .create_task("proj-b", "Task B", None, None)
7020            .unwrap();
7021
7022        let a_graph = registry
7023            .create_graph("proj-a", "graph-a", &[a_task.id])
7024            .unwrap();
7025        let b_graph = registry
7026            .create_graph("proj-b", "graph-b", &[b_task.id])
7027            .unwrap();
7028
7029        let a_flow = registry
7030            .create_flow(&a_graph.id.to_string(), Some("flow-a"))
7031            .unwrap();
7032        let b_flow = registry
7033            .create_flow(&b_graph.id.to_string(), Some("flow-b"))
7034            .unwrap();
7035
7036        let graphs_a = registry.list_graphs(Some("proj-a")).unwrap();
7037        assert_eq!(graphs_a.len(), 1);
7038        assert_eq!(graphs_a[0].id, a_graph.id);
7039
7040        let graphs_b = registry.list_graphs(Some("proj-b")).unwrap();
7041        assert_eq!(graphs_b.len(), 1);
7042        assert_eq!(graphs_b[0].id, b_graph.id);
7043
7044        let flows_a = registry.list_flows(Some("proj-a")).unwrap();
7045        assert_eq!(flows_a.len(), 1);
7046        assert_eq!(flows_a[0].id, a_flow.id);
7047
7048        let flows_b = registry.list_flows(Some("proj-b")).unwrap();
7049        assert_eq!(flows_b.len(), 1);
7050        assert_eq!(flows_b[0].id, b_flow.id);
7051
7052        let all_graphs = registry.list_graphs(None).unwrap();
7053        assert!(all_graphs.len() >= 2);
7054        let all_flows = registry.list_flows(None).unwrap();
7055        assert!(all_flows.len() >= 2);
7056    }
7057
7058    #[test]
7059    fn project_runtime_set_rejects_invalid_env_pairs() {
7060        let registry = test_registry();
7061        registry.create_project("proj", None).unwrap();
7062
7063        let res = registry.project_runtime_set(
7064            "proj",
7065            "opencode",
7066            "opencode",
7067            None,
7068            &[],
7069            &["NO_EQUALS".to_string()],
7070            1000,
7071            1,
7072        );
7073        assert!(res.is_err());
7074        assert_eq!(res.unwrap_err().code, "invalid_env");
7075    }
7076
7077    #[test]
7078    fn attach_repo_duplicate_path_includes_recovery_hint() {
7079        let tmp = tempfile::tempdir().expect("tempdir");
7080        let repo_dir = tmp.path().join("repo");
7081        init_git_repo(&repo_dir);
7082
7083        let registry = test_registry();
7084        registry.create_project("proj", None).unwrap();
7085
7086        let repo_path = repo_dir.to_string_lossy().to_string();
7087        registry
7088            .attach_repo("proj", &repo_path, Some("main"), RepoAccessMode::ReadWrite)
7089            .unwrap();
7090
7091        let err = registry
7092            .attach_repo("proj", &repo_path, Some("main"), RepoAccessMode::ReadWrite)
7093            .unwrap_err();
7094
7095        assert_eq!(err.code, "repo_already_attached");
7096        assert!(err
7097            .recovery_hint
7098            .as_deref()
7099            .is_some_and(|hint| hint.contains("detach-repo")));
7100    }
7101
7102    #[test]
7103    fn tick_flow_rejects_non_running_flow() {
7104        let registry = test_registry();
7105        registry.create_project("proj", None).unwrap();
7106        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7107        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7108        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7109
7110        let res = registry.tick_flow(&flow.id.to_string(), false, None);
7111        assert!(res.is_err());
7112        assert_eq!(res.unwrap_err().code, "flow_not_running");
7113    }
7114
7115    #[test]
7116    fn tick_flow_requires_runtime_configuration() {
7117        let registry = test_registry();
7118        registry.create_project("proj", None).unwrap();
7119        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7120        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7121        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7122        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7123
7124        let res = registry.tick_flow(&flow.id.to_string(), false, None);
7125        assert!(res.is_err());
7126        assert_eq!(res.unwrap_err().code, "runtime_not_configured");
7127    }
7128
7129    #[test]
7130    fn project_runtime_set_rejects_unsupported_runtime_adapter() {
7131        let registry = test_registry();
7132        registry.create_project("proj", None).unwrap();
7133
7134        let res = registry.project_runtime_set(
7135            "proj",
7136            "not-a-real-adapter",
7137            "opencode",
7138            None,
7139            &[],
7140            &[],
7141            1000,
7142            1,
7143        );
7144        assert!(res.is_err());
7145        assert_eq!(res.unwrap_err().code, "invalid_runtime_adapter");
7146    }
7147
7148    #[test]
7149    fn tick_flow_errors_when_project_has_no_repo() {
7150        let registry = test_registry();
7151        registry.create_project("proj", None).unwrap();
7152        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7153        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7154        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7155        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7156
7157        registry
7158            .project_runtime_set("proj", "opencode", "opencode", None, &[], &[], 1000, 1)
7159            .unwrap();
7160
7161        let res = registry.tick_flow(&flow.id.to_string(), false, None);
7162        assert!(res.is_err());
7163        assert_eq!(res.unwrap_err().code, "project_has_no_repo");
7164    }
7165
7166    #[test]
7167    fn tick_flow_executes_ready_task_and_emits_runtime_events() {
7168        let tmp = tempfile::tempdir().expect("tempdir");
7169        let repo_dir = tmp.path().join("repo");
7170        init_git_repo(&repo_dir);
7171
7172        let registry = test_registry();
7173        registry.create_project("proj", None).unwrap();
7174
7175        let repo_path = repo_dir.to_string_lossy().to_string();
7176        registry
7177            .attach_repo("proj", &repo_path, None, RepoAccessMode::ReadWrite)
7178            .unwrap();
7179
7180        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7181        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7182        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7183        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7184
7185        registry
7186            .project_runtime_set(
7187                "proj",
7188                "opencode",
7189                "/usr/bin/env",
7190                None,
7191                &[
7192                    "sh".to_string(),
7193                    "-c".to_string(),
7194                    "echo '$ cargo test'; echo 'Tool: grep'; echo '- [ ] collect logs'; echo '- [x] collect logs'; echo 'I will verify outputs'; echo unit_stderr 1>&2; printf data > hm_unit.txt"
7195                        .to_string(),
7196                ],
7197                &[],
7198                1000,
7199                1,
7200            )
7201            .unwrap();
7202
7203        let err = registry
7204            .tick_flow(&flow.id.to_string(), false, None)
7205            .unwrap_err();
7206        assert_eq!(err.code, "checkpoints_incomplete");
7207
7208        let events = registry.read_events(&EventFilter::all()).unwrap();
7209        assert!(events
7210            .iter()
7211            .any(|e| matches!(e.payload, EventPayload::RuntimeStarted { .. })));
7212        assert!(events
7213            .iter()
7214            .any(|e| matches!(e.payload, EventPayload::RuntimeOutputChunk { .. })));
7215        assert!(events
7216            .iter()
7217            .any(|e| matches!(e.payload, EventPayload::RuntimeFilesystemObserved { .. })));
7218        assert!(events
7219            .iter()
7220            .any(|e| matches!(e.payload, EventPayload::RuntimeCommandObserved { .. })));
7221        assert!(events
7222            .iter()
7223            .any(|e| matches!(e.payload, EventPayload::RuntimeToolCallObserved { .. })));
7224        assert!(events
7225            .iter()
7226            .any(|e| matches!(e.payload, EventPayload::RuntimeTodoSnapshotUpdated { .. })));
7227        assert!(events.iter().any(|e| matches!(
7228            e.payload,
7229            EventPayload::RuntimeNarrativeOutputObserved { .. }
7230        )));
7231        assert!(events
7232            .iter()
7233            .any(|e| matches!(e.payload, EventPayload::RuntimeExited { .. })));
7234    }
7235
7236    #[test]
7237    fn runtime_list_includes_phase_28_adapters() {
7238        let registry = test_registry();
7239        let list = registry.runtime_list();
7240        let names = list
7241            .iter()
7242            .map(|r| r.adapter_name.as_str())
7243            .collect::<std::collections::HashSet<_>>();
7244        assert!(names.contains("opencode"));
7245        assert!(names.contains("codex"));
7246        assert!(names.contains("claude-code"));
7247        assert!(names.contains("kilo"));
7248    }
7249
7250    #[test]
7251    fn tick_flow_executes_with_codex_adapter() {
7252        let tmp = tempfile::tempdir().expect("tempdir");
7253        let repo_dir = tmp.path().join("repo");
7254        init_git_repo(&repo_dir);
7255
7256        let registry = test_registry();
7257        registry.create_project("proj", None).unwrap();
7258
7259        let repo_path = repo_dir.to_string_lossy().to_string();
7260        registry
7261            .attach_repo("proj", &repo_path, None, RepoAccessMode::ReadWrite)
7262            .unwrap();
7263
7264        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7265        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7266        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7267        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7268
7269        registry
7270            .project_runtime_set(
7271                "proj",
7272                "codex",
7273                "/usr/bin/env",
7274                None,
7275                &[
7276                    "sh".to_string(),
7277                    "-c".to_string(),
7278                    "echo '$ cargo fmt --check'; echo 'Tool: rg'; echo codex_stderr 1>&2; printf codex > codex.txt"
7279                        .to_string(),
7280                ],
7281                &[],
7282                1000,
7283                1,
7284            )
7285            .unwrap();
7286
7287        let _ = registry.tick_flow(&flow.id.to_string(), false, None);
7288        let events = registry.read_events(&EventFilter::all()).unwrap();
7289        assert!(events.iter().any(|e| {
7290            matches!(
7291                &e.payload,
7292                EventPayload::RuntimeStarted { adapter_name, .. } if adapter_name == "codex"
7293            )
7294        }));
7295        assert!(events
7296            .iter()
7297            .any(|e| matches!(e.payload, EventPayload::RuntimeCommandObserved { .. })));
7298        assert!(events
7299            .iter()
7300            .any(|e| matches!(e.payload, EventPayload::RuntimeToolCallObserved { .. })));
7301    }
7302
7303    #[test]
7304    fn tick_flow_rejects_interactive_mode() {
7305        let tmp = tempfile::tempdir().expect("tempdir");
7306        let repo_dir = tmp.path().join("repo");
7307        init_git_repo(&repo_dir);
7308
7309        let registry = test_registry();
7310        registry.create_project("proj", None).unwrap();
7311        registry
7312            .attach_repo(
7313                "proj",
7314                &repo_dir.to_string_lossy(),
7315                None,
7316                RepoAccessMode::ReadWrite,
7317            )
7318            .unwrap();
7319
7320        let task = registry.create_task("proj", "Task 1", None, None).unwrap();
7321        let graph = registry.create_graph("proj", "g1", &[task.id]).unwrap();
7322        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7323        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7324
7325        let err = registry
7326            .tick_flow(&flow.id.to_string(), true, None)
7327            .unwrap_err();
7328        assert_eq!(err.code, "interactive_mode_deprecated");
7329    }
7330
7331    #[test]
7332    fn tick_flow_captures_runtime_output_with_quoted_args() {
7333        let tmp = tempfile::tempdir().expect("tempdir");
7334        let repo_dir = tmp.path().join("repo");
7335        init_git_repo(&repo_dir);
7336
7337        let registry = test_registry();
7338        registry.create_project("proj", None).unwrap();
7339        registry
7340            .attach_repo(
7341                "proj",
7342                &repo_dir.to_string_lossy(),
7343                None,
7344                RepoAccessMode::ReadWrite,
7345            )
7346            .unwrap();
7347
7348        let task = registry.create_task("proj", "Task 1", None, None).unwrap();
7349        let graph = registry.create_graph("proj", "g1", &[task.id]).unwrap();
7350        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7351        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7352
7353        registry
7354            .project_runtime_set(
7355                "proj",
7356                "opencode",
7357                "/usr/bin/env",
7358                None,
7359                &[
7360                    "sh".to_string(),
7361                    "-c".to_string(),
7362                    "echo \"Runtime output test successful\"".to_string(),
7363                ],
7364                &[],
7365                1000,
7366                1,
7367            )
7368            .unwrap();
7369
7370        let err = registry
7371            .tick_flow(&flow.id.to_string(), false, None)
7372            .unwrap_err();
7373        assert_eq!(err.code, "checkpoints_incomplete");
7374
7375        let events = registry.read_events(&EventFilter::all()).unwrap();
7376        assert!(events.iter().any(|e| {
7377            matches!(
7378                &e.payload,
7379                EventPayload::RuntimeOutputChunk { content, .. }
7380                    if content.contains("Runtime output test successful")
7381            )
7382        }));
7383        assert!(events
7384            .iter()
7385            .any(|e| matches!(e.payload, EventPayload::RuntimeExited { .. })));
7386    }
7387
7388    #[test]
7389    fn task_runtime_override_takes_precedence_over_project_runtime() {
7390        let tmp = tempfile::tempdir().expect("tempdir");
7391        let repo_dir = tmp.path().join("repo");
7392        init_git_repo(&repo_dir);
7393
7394        let registry = test_registry();
7395        registry.create_project("proj", None).unwrap();
7396        registry
7397            .attach_repo(
7398                "proj",
7399                &repo_dir.to_string_lossy(),
7400                None,
7401                RepoAccessMode::ReadWrite,
7402            )
7403            .unwrap();
7404
7405        let task = registry.create_task("proj", "Task 1", None, None).unwrap();
7406        let graph = registry.create_graph("proj", "g1", &[task.id]).unwrap();
7407        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7408        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7409
7410        registry
7411            .project_runtime_set(
7412                "proj",
7413                "opencode",
7414                "/usr/bin/env",
7415                None,
7416                &[
7417                    "sh".to_string(),
7418                    "-c".to_string(),
7419                    "echo project_runtime; exit 1".to_string(),
7420                ],
7421                &[],
7422                1000,
7423                1,
7424            )
7425            .unwrap();
7426
7427        registry
7428            .task_runtime_set(
7429                &task.id.to_string(),
7430                "kilo",
7431                "/usr/bin/env",
7432                None,
7433                &[
7434                    "sh".to_string(),
7435                    "-c".to_string(),
7436                    "echo task_override_runtime; printf override > override.txt".to_string(),
7437                ],
7438                &[],
7439                1000,
7440            )
7441            .unwrap();
7442
7443        let _ = registry.tick_flow(&flow.id.to_string(), false, None);
7444        let events = registry.read_events(&EventFilter::all()).unwrap();
7445        assert!(events.iter().any(|e| {
7446            matches!(
7447                &e.payload,
7448                EventPayload::RuntimeStarted { adapter_name, .. } if adapter_name == "kilo"
7449            )
7450        }));
7451    }
7452
7453    #[test]
7454    fn tick_flow_runs_multiple_compatible_tasks_when_max_parallel_allows() {
7455        let tmp = tempfile::tempdir().expect("tempdir");
7456        let repo_dir = tmp.path().join("repo");
7457        init_git_repo(&repo_dir);
7458
7459        let registry = test_registry();
7460        registry.create_project("proj", None).unwrap();
7461        registry
7462            .attach_repo(
7463                "proj",
7464                &repo_dir.to_string_lossy(),
7465                None,
7466                RepoAccessMode::ReadWrite,
7467            )
7468            .unwrap();
7469        configure_failing_runtime(&registry);
7470
7471        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7472        let t2 = registry.create_task("proj", "Task 2", None, None).unwrap();
7473        let graph = registry
7474            .create_graph("proj", "g1", &[t1.id, t2.id])
7475            .unwrap();
7476        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7477        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7478
7479        let updated = registry
7480            .tick_flow(&flow.id.to_string(), false, Some(2))
7481            .unwrap();
7482
7483        let events = registry.read_events(&EventFilter::all()).unwrap();
7484        let runtime_started = events
7485            .iter()
7486            .filter(|event| {
7487                matches!(event.payload, EventPayload::RuntimeStarted { .. })
7488                    && event.metadata.correlation.flow_id == Some(flow.id)
7489            })
7490            .count();
7491        assert_eq!(runtime_started, 2);
7492
7493        let failed = updated
7494            .task_executions
7495            .values()
7496            .filter(|exec| exec.state == TaskExecState::Failed)
7497            .count();
7498        assert_eq!(failed, 2);
7499    }
7500
7501    #[test]
7502    fn tick_flow_serializes_hard_scope_conflicts_with_observability_events() {
7503        let tmp = tempfile::tempdir().expect("tempdir");
7504        let repo_dir = tmp.path().join("repo");
7505        init_git_repo(&repo_dir);
7506
7507        let registry = test_registry();
7508        registry.create_project("proj", None).unwrap();
7509        registry
7510            .attach_repo(
7511                "proj",
7512                &repo_dir.to_string_lossy(),
7513                None,
7514                RepoAccessMode::ReadWrite,
7515            )
7516            .unwrap();
7517        configure_failing_runtime(&registry);
7518
7519        let hard_scope = Scope::new().with_filesystem(
7520            FilesystemScope::new().with_rule(PathRule::new("src", FilePermission::Write)),
7521        );
7522
7523        let t1 = registry
7524            .create_task("proj", "Task 1", None, Some(hard_scope.clone()))
7525            .unwrap();
7526        let t2 = registry
7527            .create_task("proj", "Task 2", None, Some(hard_scope))
7528            .unwrap();
7529        let graph = registry
7530            .create_graph("proj", "g-hard", &[t1.id, t2.id])
7531            .unwrap();
7532        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7533        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7534
7535        let updated = registry
7536            .tick_flow(&flow.id.to_string(), false, Some(2))
7537            .unwrap();
7538
7539        let events = registry.read_events(&EventFilter::all()).unwrap();
7540        let runtime_started = events
7541            .iter()
7542            .filter(|event| {
7543                matches!(event.payload, EventPayload::RuntimeStarted { .. })
7544                    && event.metadata.correlation.flow_id == Some(flow.id)
7545            })
7546            .count();
7547        assert_eq!(runtime_started, 1);
7548
7549        let failed = updated
7550            .task_executions
7551            .values()
7552            .filter(|exec| exec.state == TaskExecState::Failed)
7553            .count();
7554        assert_eq!(failed, 1);
7555
7556        assert!(events.iter().any(|event| {
7557            matches!(
7558                &event.payload,
7559                EventPayload::ScopeConflictDetected {
7560                    flow_id,
7561                    severity,
7562                    action,
7563                    ..
7564                } if *flow_id == flow.id && severity == "hard_conflict" && action == "serialized"
7565            )
7566        }));
7567        assert!(events.iter().any(|event| {
7568            matches!(
7569                &event.payload,
7570                EventPayload::TaskSchedulingDeferred { flow_id, .. } if *flow_id == flow.id
7571            )
7572        }));
7573    }
7574
7575    #[test]
7576    fn parse_global_parallel_limit_defaults_to_unbounded_when_missing() {
7577        let parsed = Registry::parse_global_parallel_limit(None).unwrap();
7578        assert_eq!(parsed, u16::MAX);
7579    }
7580
7581    #[test]
7582    fn parse_global_parallel_limit_accepts_positive_value() {
7583        let parsed = Registry::parse_global_parallel_limit(Some("3".to_string())).unwrap();
7584        assert_eq!(parsed, 3);
7585    }
7586
7587    #[test]
7588    fn parse_global_parallel_limit_rejects_zero() {
7589        let err = Registry::parse_global_parallel_limit(Some("0".to_string())).unwrap_err();
7590        assert_eq!(err.code, "invalid_global_parallel_limit");
7591    }
7592
7593    #[test]
7594    fn parse_global_parallel_limit_rejects_non_numeric() {
7595        let err = Registry::parse_global_parallel_limit(Some("abc".to_string())).unwrap_err();
7596        assert_eq!(err.code, "invalid_global_parallel_limit");
7597    }
7598
7599    #[test]
7600    fn tick_flow_warns_on_soft_scope_conflicts_and_allows_parallel_dispatch() {
7601        let tmp = tempfile::tempdir().expect("tempdir");
7602        let repo_dir = tmp.path().join("repo");
7603        init_git_repo(&repo_dir);
7604
7605        let registry = test_registry();
7606        registry.create_project("proj", None).unwrap();
7607        registry
7608            .attach_repo(
7609                "proj",
7610                &repo_dir.to_string_lossy(),
7611                None,
7612                RepoAccessMode::ReadWrite,
7613            )
7614            .unwrap();
7615        configure_failing_runtime(&registry);
7616
7617        let write_scope = Scope::new().with_filesystem(
7618            FilesystemScope::new().with_rule(PathRule::new("src", FilePermission::Write)),
7619        );
7620        let read_scope = Scope::new().with_filesystem(
7621            FilesystemScope::new().with_rule(PathRule::new("src", FilePermission::Read)),
7622        );
7623
7624        let t1 = registry
7625            .create_task("proj", "Task 1", None, Some(write_scope))
7626            .unwrap();
7627        let t2 = registry
7628            .create_task("proj", "Task 2", None, Some(read_scope))
7629            .unwrap();
7630        let graph = registry
7631            .create_graph("proj", "g-soft", &[t1.id, t2.id])
7632            .unwrap();
7633        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7634        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7635
7636        let updated = registry
7637            .tick_flow(&flow.id.to_string(), false, Some(2))
7638            .unwrap();
7639
7640        let events = registry.read_events(&EventFilter::all()).unwrap();
7641        let runtime_started = events
7642            .iter()
7643            .filter(|event| {
7644                matches!(event.payload, EventPayload::RuntimeStarted { .. })
7645                    && event.metadata.correlation.flow_id == Some(flow.id)
7646            })
7647            .count();
7648        assert_eq!(runtime_started, 2);
7649
7650        let failed = updated
7651            .task_executions
7652            .values()
7653            .filter(|exec| exec.state == TaskExecState::Failed)
7654            .count();
7655        assert_eq!(failed, 2);
7656
7657        assert!(events.iter().any(|event| {
7658            matches!(
7659                &event.payload,
7660                EventPayload::ScopeConflictDetected {
7661                    flow_id,
7662                    severity,
7663                    action,
7664                    ..
7665                } if *flow_id == flow.id && severity == "soft_conflict" && action == "warn_parallel"
7666            )
7667        }));
7668    }
7669
7670    #[test]
7671    fn create_and_list_tasks() {
7672        let registry = test_registry();
7673        registry.create_project("proj", None).unwrap();
7674
7675        registry.create_task("proj", "Task 1", None, None).unwrap();
7676        registry
7677            .create_task("proj", "Task 2", Some("Description"), None)
7678            .unwrap();
7679
7680        let tasks = registry.list_tasks("proj", None).unwrap();
7681        assert_eq!(tasks.len(), 2);
7682    }
7683
7684    #[test]
7685    fn task_lifecycle() {
7686        let registry = test_registry();
7687        registry.create_project("proj", None).unwrap();
7688
7689        let task = registry.create_task("proj", "My Task", None, None).unwrap();
7690        assert_eq!(task.state, TaskState::Open);
7691
7692        let closed = registry.close_task(&task.id.to_string(), None).unwrap();
7693        assert_eq!(closed.state, TaskState::Closed);
7694    }
7695
7696    #[test]
7697    fn filter_tasks_by_state() {
7698        let registry = test_registry();
7699        registry.create_project("proj", None).unwrap();
7700
7701        let t1 = registry
7702            .create_task("proj", "Open Task", None, None)
7703            .unwrap();
7704        let t2 = registry
7705            .create_task("proj", "Closed Task", None, None)
7706            .unwrap();
7707        registry.close_task(&t2.id.to_string(), None).unwrap();
7708
7709        let open_tasks = registry.list_tasks("proj", Some(TaskState::Open)).unwrap();
7710        assert_eq!(open_tasks.len(), 1);
7711        assert_eq!(open_tasks[0].id, t1.id);
7712
7713        let closed_tasks = registry
7714            .list_tasks("proj", Some(TaskState::Closed))
7715            .unwrap();
7716        assert_eq!(closed_tasks.len(), 1);
7717    }
7718
7719    #[test]
7720    fn update_task() {
7721        let registry = test_registry();
7722        registry.create_project("proj", None).unwrap();
7723
7724        let task = registry
7725            .create_task("proj", "Original", None, None)
7726            .unwrap();
7727        let updated = registry
7728            .update_task(&task.id.to_string(), Some("Updated"), Some("Desc"))
7729            .unwrap();
7730
7731        assert_eq!(updated.title, "Updated");
7732        assert_eq!(updated.description, Some("Desc".to_string()));
7733    }
7734
7735    #[test]
7736    fn graph_create_from_tasks_and_dependency() {
7737        let registry = test_registry();
7738        let proj = registry.create_project("proj", None).unwrap();
7739
7740        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7741        let t2 = registry.create_task("proj", "Task 2", None, None).unwrap();
7742
7743        let graph = registry
7744            .create_graph("proj", "g1", &[t1.id, t2.id])
7745            .unwrap();
7746        assert_eq!(graph.project_id, proj.id);
7747        assert_eq!(graph.tasks.len(), 2);
7748        assert!(graph.tasks.contains_key(&t1.id));
7749        assert!(graph.tasks.contains_key(&t2.id));
7750
7751        let updated = registry
7752            .add_graph_dependency(
7753                &graph.id.to_string(),
7754                &t1.id.to_string(),
7755                &t2.id.to_string(),
7756            )
7757            .unwrap();
7758        assert!(updated
7759            .dependencies
7760            .get(&t2.id)
7761            .is_some_and(|deps| deps.contains(&t1.id)));
7762
7763        let again = registry
7764            .add_graph_dependency(
7765                &graph.id.to_string(),
7766                &t1.id.to_string(),
7767                &t2.id.to_string(),
7768            )
7769            .unwrap();
7770        assert_eq!(
7771            again.dependencies.get(&t2.id),
7772            updated.dependencies.get(&t2.id)
7773        );
7774    }
7775
7776    #[test]
7777    fn add_graph_dependency_missing_task_has_hint() {
7778        let registry = test_registry();
7779        registry.create_project("proj", None).unwrap();
7780
7781        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7782        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7783
7784        let err = registry
7785            .add_graph_dependency(
7786                &graph.id.to_string(),
7787                &t1.id.to_string(),
7788                &Uuid::new_v4().to_string(),
7789            )
7790            .unwrap_err();
7791
7792        assert_eq!(err.code, "task_not_in_graph");
7793        assert!(err
7794            .recovery_hint
7795            .as_deref()
7796            .is_some_and(|hint| hint.contains("included when the graph was created")));
7797    }
7798
7799    #[test]
7800    fn add_graph_dependency_locked_graph_includes_locking_flow_context() {
7801        let registry = test_registry();
7802        registry.create_project("proj", None).unwrap();
7803
7804        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7805        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7806        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7807
7808        let err = registry
7809            .add_graph_dependency(
7810                &graph.id.to_string(),
7811                &t1.id.to_string(),
7812                &t1.id.to_string(),
7813            )
7814            .unwrap_err();
7815
7816        assert_eq!(err.code, "graph_immutable");
7817        assert!(err.message.contains(&flow.id.to_string()));
7818        assert_eq!(
7819            err.context.get("locking_flow_id").map(String::as_str),
7820            Some(flow.id.to_string().as_str())
7821        );
7822        assert!(err
7823            .recovery_hint
7824            .as_deref()
7825            .is_some_and(|hint| hint.contains("Create a new graph")));
7826    }
7827
7828    #[test]
7829    fn flow_create_locks_graph_and_start_sets_ready() {
7830        let registry = test_registry();
7831        registry.create_project("proj", None).unwrap();
7832
7833        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7834        let t2 = registry.create_task("proj", "Task 2", None, None).unwrap();
7835
7836        let graph = registry
7837            .create_graph("proj", "g1", &[t1.id, t2.id])
7838            .unwrap();
7839        registry
7840            .add_graph_dependency(
7841                &graph.id.to_string(),
7842                &t1.id.to_string(),
7843                &t2.id.to_string(),
7844            )
7845            .unwrap();
7846
7847        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7848        let locked = registry.get_graph(&graph.id.to_string()).unwrap();
7849        assert_eq!(locked.state, GraphState::Locked);
7850
7851        let started = registry.start_flow(&flow.id.to_string()).unwrap();
7852        assert_eq!(started.state, FlowState::Running);
7853
7854        let started = registry.get_flow(&flow.id.to_string()).unwrap();
7855        assert_eq!(
7856            started.task_executions.get(&t1.id).map(|e| e.state),
7857            Some(TaskExecState::Ready)
7858        );
7859        assert_eq!(
7860            started.task_executions.get(&t2.id).map(|e| e.state),
7861            Some(TaskExecState::Pending)
7862        );
7863    }
7864
7865    #[test]
7866    fn flow_pause_resume_abort_semantics() {
7867        let registry = test_registry();
7868        registry.create_project("proj", None).unwrap();
7869
7870        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7871        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7872        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7873
7874        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7875        let flow = registry.pause_flow(&flow.id.to_string()).unwrap();
7876        assert_eq!(flow.state, FlowState::Paused);
7877
7878        let flow2 = registry.pause_flow(&flow.id.to_string()).unwrap();
7879        assert_eq!(flow2.state, FlowState::Paused);
7880
7881        let flow = registry.resume_flow(&flow.id.to_string()).unwrap();
7882        assert_eq!(flow.state, FlowState::Running);
7883
7884        let flow = registry
7885            .abort_flow(&flow.id.to_string(), Some("stop"), true)
7886            .unwrap();
7887        assert_eq!(flow.state, FlowState::Aborted);
7888        let flow2 = registry
7889            .abort_flow(&flow.id.to_string(), None, false)
7890            .unwrap();
7891        assert_eq!(flow2.state, FlowState::Aborted);
7892    }
7893
7894    #[test]
7895    fn task_abort_and_retry_affect_flow_task_state() {
7896        let registry = test_registry();
7897        registry.create_project("proj", None).unwrap();
7898
7899        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7900
7901        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7902        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7903        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
7904
7905        let flow = registry.abort_task(&t1.id.to_string(), Some("no"));
7906        assert!(flow.is_ok());
7907        let flow = registry.get_flow(&flow.unwrap().id.to_string()).unwrap();
7908        assert_eq!(
7909            flow.task_executions.get(&t1.id).map(|e| e.state),
7910            Some(TaskExecState::Failed)
7911        );
7912
7913        let flow = registry
7914            .retry_task(&t1.id.to_string(), true, RetryMode::Clean)
7915            .unwrap();
7916        assert_eq!(
7917            flow.task_executions.get(&t1.id).map(|e| e.state),
7918            Some(TaskExecState::Pending)
7919        );
7920        assert_eq!(
7921            flow.task_executions.get(&t1.id).map(|e| e.attempt_count),
7922            Some(0)
7923        );
7924    }
7925
7926    #[test]
7927    fn close_task_disallowed_in_active_flow() {
7928        let registry = test_registry();
7929        registry.create_project("proj", None).unwrap();
7930
7931        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7932        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7933        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7934        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
7935
7936        let res = registry.close_task(&t1.id.to_string(), None);
7937        assert!(res.is_err());
7938    }
7939
7940    #[test]
7941    fn detach_repo_disallowed_with_active_flow() {
7942        let tmp = tempfile::tempdir().expect("tempdir");
7943        let repo_dir = tmp.path().join("repo");
7944        init_git_repo(&repo_dir);
7945
7946        let registry = test_registry();
7947        registry.create_project("proj", None).unwrap();
7948        registry
7949            .attach_repo(
7950                "proj",
7951                repo_dir.to_string_lossy().as_ref(),
7952                Some("main"),
7953                RepoAccessMode::ReadWrite,
7954            )
7955            .unwrap();
7956
7957        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7958        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7959        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7960        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
7961
7962        let res = registry.detach_repo("proj", "main");
7963        assert!(res.is_err());
7964        assert_eq!(res.unwrap_err().code, "project_in_active_flow");
7965    }
7966
7967    #[test]
7968    fn retry_limit_exceeded_requires_reset_count() {
7969        let registry = test_registry();
7970        registry.create_project("proj", None).unwrap();
7971
7972        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
7973        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
7974        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
7975        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
7976
7977        for _ in 0..4 {
7978            let event = Event::new(
7979                EventPayload::TaskExecutionStateChanged {
7980                    flow_id: flow.id,
7981                    task_id: t1.id,
7982                    from: TaskExecState::Ready,
7983                    to: TaskExecState::Running,
7984                },
7985                CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
7986            );
7987            registry.store.append(event).unwrap();
7988        }
7989
7990        let event = Event::new(
7991            EventPayload::TaskExecutionStateChanged {
7992                flow_id: flow.id,
7993                task_id: t1.id,
7994                from: TaskExecState::Running,
7995                to: TaskExecState::Failed,
7996            },
7997            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
7998        );
7999        registry.store.append(event).unwrap();
8000
8001        assert!(registry
8002            .retry_task(&t1.id.to_string(), false, RetryMode::Clean)
8003            .is_err());
8004        assert!(registry
8005            .retry_task(&t1.id.to_string(), true, RetryMode::Clean)
8006            .is_ok());
8007    }
8008
8009    #[test]
8010    fn error_occurred_emitted_on_close_task_in_active_flow() {
8011        let registry = test_registry();
8012        registry.create_project("proj", None).unwrap();
8013
8014        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8015        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8016        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8017        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
8018
8019        let res = registry.close_task(&t1.id.to_string(), None);
8020        assert!(res.is_err());
8021
8022        let events = registry.store.read_all().unwrap();
8023        assert!(events.iter().any(|e| {
8024            matches!(&e.payload, EventPayload::ErrorOccurred { error } if error.code == "task_in_active_flow")
8025        }));
8026    }
8027
8028    #[test]
8029    fn error_occurred_emitted_on_detach_repo_with_active_flow() {
8030        let tmp = tempfile::tempdir().expect("tempdir");
8031        let repo_dir = tmp.path().join("repo");
8032        init_git_repo(&repo_dir);
8033
8034        let registry = test_registry();
8035        let project = registry.create_project("proj", None).unwrap();
8036        registry
8037            .attach_repo(
8038                "proj",
8039                repo_dir.to_string_lossy().as_ref(),
8040                Some("main"),
8041                RepoAccessMode::ReadWrite,
8042            )
8043            .unwrap();
8044
8045        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8046        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8047        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8048        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
8049
8050        let res = registry.detach_repo("proj", "main");
8051        assert!(res.is_err());
8052
8053        let events = registry.store.read_all().unwrap();
8054        assert!(events.iter().any(|e| {
8055            matches!(&e.payload, EventPayload::ErrorOccurred { error } if error.code == "project_in_active_flow")
8056                && e.metadata.correlation.project_id == Some(project.id)
8057        }));
8058    }
8059
8060    #[test]
8061    fn error_occurred_emitted_on_runtime_set_invalid_env() {
8062        let registry = test_registry();
8063        registry.create_project("proj", None).unwrap();
8064
8065        let res = registry.project_runtime_set(
8066            "proj",
8067            "opencode",
8068            "opencode",
8069            None,
8070            &[],
8071            &["=VALUE".to_string()],
8072            1000,
8073            1,
8074        );
8075        assert!(res.is_err());
8076
8077        let events = registry.store.read_all().unwrap();
8078        assert!(events.iter().any(|e| {
8079            matches!(&e.payload, EventPayload::ErrorOccurred { error } if error.code == "invalid_env")
8080        }));
8081    }
8082
8083    #[test]
8084    fn error_occurred_emitted_on_attach_repo_missing_path() {
8085        let registry = test_registry();
8086        let project = registry.create_project("proj", None).unwrap();
8087
8088        let res = registry.attach_repo(
8089            "proj",
8090            "/path/does/not/exist",
8091            None,
8092            RepoAccessMode::ReadWrite,
8093        );
8094        assert!(res.is_err());
8095
8096        let events = registry.store.read_all().unwrap();
8097        assert!(events.iter().any(|e| {
8098            matches!(&e.payload, EventPayload::ErrorOccurred { error } if error.code == "repo_path_not_found")
8099                && e.metadata.correlation.project_id == Some(project.id)
8100        }));
8101    }
8102
8103    #[test]
8104    fn error_occurred_not_emitted_for_read_only_get_task_failure() {
8105        let registry = test_registry();
8106        registry.create_project("proj", None).unwrap();
8107
8108        let before = registry.store.read_all().unwrap().len();
8109        let _ = registry
8110            .get_task("00000000-0000-0000-0000-000000000000")
8111            .err();
8112        let after = registry.store.read_all().unwrap().len();
8113        assert_eq!(before, after);
8114    }
8115
8116    fn setup_flow_with_verifying_task(registry: &Registry) -> (TaskFlow, Uuid) {
8117        registry.create_project("proj", None).unwrap();
8118        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8119        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8120        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8121        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
8122
8123        let event = Event::new(
8124            EventPayload::TaskExecutionStateChanged {
8125                flow_id: flow.id,
8126                task_id: t1.id,
8127                from: TaskExecState::Ready,
8128                to: TaskExecState::Running,
8129            },
8130            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
8131        );
8132        registry.store.append(event).unwrap();
8133
8134        let event = Event::new(
8135            EventPayload::TaskExecutionStateChanged {
8136                flow_id: flow.id,
8137                task_id: t1.id,
8138                from: TaskExecState::Running,
8139                to: TaskExecState::Verifying,
8140            },
8141            CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
8142        );
8143        registry.store.append(event).unwrap();
8144
8145        let flow = registry.get_flow(&flow.id.to_string()).unwrap();
8146        (flow, t1.id)
8147    }
8148
8149    #[test]
8150    fn verify_override_pass_transitions_to_success() {
8151        let registry = test_registry();
8152        let (_flow, t1_id) = setup_flow_with_verifying_task(&registry);
8153
8154        let updated = registry
8155            .verify_override(&t1_id.to_string(), "pass", "looks good")
8156            .unwrap();
8157        assert_eq!(
8158            updated.task_executions.get(&t1_id).map(|e| e.state),
8159            Some(TaskExecState::Success)
8160        );
8161
8162        let events = registry.store.read_all().unwrap();
8163        assert!(events.iter().any(|e| {
8164            matches!(
8165                &e.payload,
8166                EventPayload::TaskExecutionFrozen { task_id, .. } if *task_id == t1_id
8167            )
8168        }));
8169    }
8170
8171    #[test]
8172    fn verify_override_fail_transitions_to_failed() {
8173        let registry = test_registry();
8174        let (_, t1_id) = setup_flow_with_verifying_task(&registry);
8175
8176        let updated = registry
8177            .verify_override(&t1_id.to_string(), "fail", "bad output")
8178            .unwrap();
8179        assert_eq!(
8180            updated.task_executions.get(&t1_id).map(|e| e.state),
8181            Some(TaskExecState::Failed)
8182        );
8183    }
8184
8185    #[test]
8186    fn verify_override_rejects_non_verifying_task() {
8187        let registry = test_registry();
8188        registry.create_project("proj", None).unwrap();
8189        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8190        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8191        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8192        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
8193
8194        let res = registry.verify_override(&t1.id.to_string(), "pass", "reason");
8195        assert!(res.is_err());
8196        assert_eq!(res.unwrap_err().code, "task_not_overridable");
8197    }
8198
8199    #[test]
8200    fn verify_override_rejects_empty_reason() {
8201        let registry = test_registry();
8202        let (_, t1_id) = setup_flow_with_verifying_task(&registry);
8203
8204        let res = registry.verify_override(&t1_id.to_string(), "pass", "   ");
8205        assert!(res.is_err());
8206        assert_eq!(res.unwrap_err().code, "invalid_reason");
8207    }
8208
8209    #[test]
8210    fn verify_override_rejects_invalid_decision() {
8211        let registry = test_registry();
8212        let (_, t1_id) = setup_flow_with_verifying_task(&registry);
8213
8214        let res = registry.verify_override(&t1_id.to_string(), "maybe", "reason");
8215        assert!(res.is_err());
8216        assert_eq!(res.unwrap_err().code, "invalid_decision");
8217    }
8218
    /// Builds a completed single-task flow backed by one real git repo.
    ///
    /// Creates a project with a read-write repo attached, starts a one-task
    /// flow, manually creates the task's `exec/<flow_id>/<task_id>` branch,
    /// appends the Ready -> Running -> Verifying -> Success transitions and a
    /// `TaskFlowCompleted` event directly to the store, and returns the
    /// tempdir (must be kept alive by the caller so the repo survives)
    /// together with the replayed flow.
    fn setup_completed_flow_with_repo(registry: &Registry) -> (tempfile::TempDir, TaskFlow) {
        let tmp = tempfile::tempdir().expect("tempdir");
        let repo_dir = tmp.path().join("repo");
        init_git_repo(&repo_dir);

        registry.create_project("proj", None).unwrap();
        registry
            .attach_repo(
                "proj",
                repo_dir.to_string_lossy().as_ref(),
                None,
                RepoAccessMode::ReadWrite,
            )
            .unwrap();
        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
        let flow = registry.start_flow(&flow.id.to_string()).unwrap();

        // The merge machinery expects an exec branch per task; create it
        // by hand (pointing at "main") since no runtime actually ran.
        let exec_branch = format!("exec/{}/{t1_id}", flow.id, t1_id = t1.id);
        let out = Command::new("git")
            .args(["branch", "-f", &exec_branch, "main"])
            .current_dir(&repo_dir)
            .output()
            .expect("create exec branch");
        assert!(
            out.status.success(),
            "git branch: {}",
            String::from_utf8_lossy(&out.stderr)
        );

        // Drive the task to Success via raw state-change events.
        for (from, to) in [
            (TaskExecState::Ready, TaskExecState::Running),
            (TaskExecState::Running, TaskExecState::Verifying),
            (TaskExecState::Verifying, TaskExecState::Success),
        ] {
            let event = Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id: t1.id,
                    from,
                    to,
                },
                CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
            );
            registry.store.append(event).unwrap();
        }

        // Mark the flow itself as completed.
        let event = Event::new(
            EventPayload::TaskFlowCompleted { flow_id: flow.id },
            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
        );
        registry.store.append(event).unwrap();

        (tmp, registry.get_flow(&flow.id.to_string()).unwrap())
    }
8275
    /// Builds a completed single-task flow for a project with TWO attached
    /// read-write git repos ("repo-a" and "repo-b").
    ///
    /// Mirrors `setup_completed_flow_with_repo` but creates the task's
    /// `exec/<flow_id>/<task_id>` branch in both repos. Returns the tempdir
    /// (must be kept alive by the caller), both repo paths, the replayed
    /// flow, and the task id.
    fn setup_completed_flow_with_two_repos(
        registry: &Registry,
    ) -> (tempfile::TempDir, PathBuf, PathBuf, TaskFlow, Uuid) {
        let tmp = tempfile::tempdir().expect("tempdir");
        let repo_a = tmp.path().join("repo-a");
        let repo_b = tmp.path().join("repo-b");
        init_git_repo(&repo_a);
        init_git_repo(&repo_b);

        registry.create_project("proj", None).unwrap();
        registry
            .attach_repo(
                "proj",
                repo_a.to_string_lossy().as_ref(),
                Some("repo-a"),
                RepoAccessMode::ReadWrite,
            )
            .unwrap();
        registry
            .attach_repo(
                "proj",
                repo_b.to_string_lossy().as_ref(),
                Some("repo-b"),
                RepoAccessMode::ReadWrite,
            )
            .unwrap();

        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
        let flow = registry.start_flow(&flow.id.to_string()).unwrap();

        // Hand-create the exec branch (pointing at "main") in each repo,
        // since no runtime actually produced commits.
        let exec_branch = format!("exec/{}/{task_id}", flow.id, task_id = t1.id);
        for repo in [&repo_a, &repo_b] {
            let out = Command::new("git")
                .args(["branch", "-f", &exec_branch, "main"])
                .current_dir(repo)
                .output()
                .expect("create exec branch");
            assert!(
                out.status.success(),
                "git branch: {}",
                String::from_utf8_lossy(&out.stderr)
            );
        }

        // Drive the task to Success via raw state-change events.
        for (from, to) in [
            (TaskExecState::Ready, TaskExecState::Running),
            (TaskExecState::Running, TaskExecState::Verifying),
            (TaskExecState::Verifying, TaskExecState::Success),
        ] {
            let event = Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id: t1.id,
                    from,
                    to,
                },
                CorrelationIds::for_graph_flow_task(flow.project_id, flow.graph_id, flow.id, t1.id),
            );
            registry.store.append(event).unwrap();
        }

        // Mark the flow itself as completed.
        let event = Event::new(
            EventPayload::TaskFlowCompleted { flow_id: flow.id },
            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
        );
        registry.store.append(event).unwrap();

        (
            tmp,
            repo_a,
            repo_b,
            registry.get_flow(&flow.id.to_string()).unwrap(),
            t1.id,
        )
    }
8353
    #[test]
    fn merge_lifecycle_prepare_approve_execute() {
        // Walks the full merge lifecycle of a completed flow:
        // prepare -> approve -> execute, checking the merge status, the
        // flow state, and the lock/freeze events emitted along the way.
        let registry = test_registry();
        let (_tmp, flow) = setup_completed_flow_with_repo(&registry);

        // Prepare: status becomes Prepared with the requested target branch.
        let ms = registry
            .merge_prepare(&flow.id.to_string(), Some("main"))
            .unwrap();
        assert_eq!(ms.status, crate::core::state::MergeStatus::Prepared);
        assert_eq!(ms.target_branch, Some("main".to_string()));

        // Preparing freezes the flow against further task activity.
        let frozen = registry.get_flow(&flow.id.to_string()).unwrap();
        assert_eq!(frozen.state, FlowState::FrozenForMerge);

        let events = registry.store.read_all().unwrap();
        assert!(events.iter().any(|e| {
            matches!(
                &e.payload,
                EventPayload::FlowFrozenForMerge { flow_id } if *flow_id == flow.id
            )
        }));
        // The integration lock is recorded under the "merge_prepare" operation.
        assert!(events.iter().any(|e| {
            matches!(
                &e.payload,
                EventPayload::FlowIntegrationLockAcquired { flow_id, operation }
                    if *flow_id == flow.id && operation == "merge_prepare"
            )
        }));

        // Approve, then execute: status advances Approved -> Completed.
        let ms = registry.merge_approve(&flow.id.to_string()).unwrap();
        assert_eq!(ms.status, crate::core::state::MergeStatus::Approved);

        let ms = registry.merge_execute(&flow.id.to_string()).unwrap();
        assert_eq!(ms.status, crate::core::state::MergeStatus::Completed);

        // After execution the flow itself is Merged.
        let merged = registry.get_flow(&flow.id.to_string()).unwrap();
        assert_eq!(merged.state, FlowState::Merged);

        // Execute takes the integration lock under its own operation name.
        let events = registry.store.read_all().unwrap();
        assert!(events.iter().any(|e| {
            matches!(
                &e.payload,
                EventPayload::FlowIntegrationLockAcquired { flow_id, operation }
                    if *flow_id == flow.id && operation == "merge_execute"
            )
        }));
    }
8401
8402    #[test]
8403    fn merge_prepare_idempotent() {
8404        let registry = test_registry();
8405        let (_tmp, flow) = setup_completed_flow_with_repo(&registry);
8406
8407        let ms1 = registry
8408            .merge_prepare(&flow.id.to_string(), Some("main"))
8409            .unwrap();
8410        let ms2 = registry
8411            .merge_prepare(&flow.id.to_string(), Some("main"))
8412            .unwrap();
8413        assert_eq!(ms1.status, ms2.status);
8414    }
8415
8416    #[test]
8417    fn merge_approve_idempotent() {
8418        let registry = test_registry();
8419        let (_tmp, flow) = setup_completed_flow_with_repo(&registry);
8420
8421        registry.merge_prepare(&flow.id.to_string(), None).unwrap();
8422        let ms1 = registry.merge_approve(&flow.id.to_string()).unwrap();
8423        let ms2 = registry.merge_approve(&flow.id.to_string()).unwrap();
8424        assert_eq!(ms1.status, ms2.status);
8425    }
8426
8427    #[test]
8428    fn merge_prepare_rejects_non_completed_flow() {
8429        let registry = test_registry();
8430        registry.create_project("proj", None).unwrap();
8431        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8432        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8433        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8434        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
8435
8436        let res = registry.merge_prepare(&flow.id.to_string(), None);
8437        assert!(res.is_err());
8438        assert_eq!(res.unwrap_err().code, "flow_not_completed");
8439    }
8440
8441    #[test]
8442    fn merge_prepare_supports_multi_repo_projects() {
8443        let registry = test_registry();
8444        let (_tmp, repo_a, repo_b, flow, _task_id) = setup_completed_flow_with_two_repos(&registry);
8445
8446        let ms = registry
8447            .merge_prepare(&flow.id.to_string(), Some("main"))
8448            .unwrap();
8449        assert_eq!(ms.status, crate::core::state::MergeStatus::Prepared);
8450        assert!(ms.conflicts.is_empty(), "conflicts: {:?}", ms.conflicts);
8451
8452        let merge_ref = format!("refs/heads/integration/{}/prepare", flow.id);
8453        for repo in [&repo_a, &repo_b] {
8454            let status = Command::new("git")
8455                .current_dir(repo)
8456                .args(["show-ref", "--verify", "--quiet", &merge_ref])
8457                .status()
8458                .expect("show-ref");
8459            assert!(
8460                status.success(),
8461                "merge branch missing in {}",
8462                repo.display()
8463            );
8464        }
8465    }
8466
    #[test]
    fn merge_execute_is_all_or_nothing_across_repos() {
        // Sabotages one of two repos after prepare/approve, then verifies
        // that merge_execute fails without advancing the other repo's HEAD.
        let registry = test_registry();
        let (_tmp, repo_a, repo_b, flow, _task_id) = setup_completed_flow_with_two_repos(&registry);

        registry
            .merge_prepare(&flow.id.to_string(), Some("main"))
            .unwrap();
        registry.merge_approve(&flow.id.to_string()).unwrap();

        // Record repo-a's HEAD before the (expected-to-fail) execute.
        let head_a_before = String::from_utf8_lossy(
            &Command::new("git")
                .current_dir(&repo_a)
                .args(["rev-parse", "HEAD"])
                .output()
                .expect("rev-parse a")
                .stdout,
        )
        .trim()
        .to_string();

        // Delete repo-b's prepare branch — removing the worktree that pins
        // it first, otherwise git refuses to delete a checked-out branch —
        // so the execute step cannot find the branch there.
        let prepare_branch = format!("integration/{}/prepare", flow.id);
        let prepare_worktree = repo_b
            .join(".hivemind")
            .join("worktrees")
            .join(flow.id.to_string())
            .join("_integration_prepare");
        let _ = Command::new("git")
            .current_dir(&repo_b)
            .args([
                "worktree",
                "remove",
                "--force",
                prepare_worktree.to_str().unwrap_or(""),
            ])
            .output()
            .expect("remove prepare worktree");
        let _ = Command::new("git")
            .current_dir(&repo_b)
            .args(["branch", "-D", &prepare_branch])
            .output()
            .expect("delete prepare branch in repo-b");

        // Execute must fail with the missing-branch error.
        let err = registry.merge_execute(&flow.id.to_string()).unwrap_err();
        assert_eq!(err.code, "merge_branch_not_found");

        // repo-a's HEAD must be unchanged: no partial merge happened.
        let head_a_after = String::from_utf8_lossy(
            &Command::new("git")
                .current_dir(&repo_a)
                .args(["rev-parse", "HEAD"])
                .output()
                .expect("rev-parse a")
                .stdout,
        )
        .trim()
        .to_string();
        assert_eq!(
            head_a_before, head_a_after,
            "repo-a must not merge on partial failure"
        );
    }
8528
8529    #[test]
8530    fn merge_execute_rejects_unapproved() {
8531        let registry = test_registry();
8532        let (_tmp, flow) = setup_completed_flow_with_repo(&registry);
8533
8534        registry.merge_prepare(&flow.id.to_string(), None).unwrap();
8535        let res = registry.merge_execute(&flow.id.to_string());
8536        assert!(res.is_err());
8537        assert_eq!(res.unwrap_err().code, "merge_not_approved");
8538    }
8539
8540    #[test]
8541    fn merge_execute_rejects_unprepared() {
8542        let registry = test_registry();
8543        let (_tmp, flow) = setup_completed_flow_with_repo(&registry);
8544
8545        let res = registry.merge_execute(&flow.id.to_string());
8546        assert!(res.is_err());
8547        assert_eq!(res.unwrap_err().code, "merge_not_prepared");
8548    }
8549
    #[test]
    #[allow(clippy::too_many_lines)]
    fn merge_prepare_execute_merges_exec_branches_into_target() {
        // End-to-end merge test with real commits: two dependent tasks each
        // commit one file in their own worktree; prepare integrates both
        // exec branches, execute lands them on the target and cleans up the
        // flow's merge worktree.
        let tmp = tempfile::tempdir().expect("tempdir");
        let repo_dir = tmp.path().join("repo");
        init_git_repo(&repo_dir);

        let registry = test_registry();
        registry.create_project("proj", None).unwrap();
        registry
            .attach_repo(
                "proj",
                repo_dir.to_string_lossy().as_ref(),
                None,
                RepoAccessMode::ReadWrite,
            )
            .unwrap();

        // Two tasks with a t1 -> t2 dependency.
        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
        let t2 = registry.create_task("proj", "Task 2", None, None).unwrap();
        let graph = registry
            .create_graph("proj", "g1", &[t1.id, t2.id])
            .unwrap();
        registry
            .add_graph_dependency(
                &graph.id.to_string(),
                t1.id.to_string().as_str(),
                t2.id.to_string().as_str(),
            )
            .unwrap();

        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
        let flow = registry.start_flow(&flow.id.to_string()).unwrap();

        // Ensure per-task worktrees exist (start_flow may already have made them).
        let manager = WorktreeManager::new(repo_dir.clone(), WorktreeConfig::default()).unwrap();
        let wt1_path = manager.path_for(flow.id, t1.id);
        if !wt1_path.exists() {
            let _ = manager.create(flow.id, t1.id, Some("HEAD")).unwrap();
        }

        let wt2_path = manager.path_for(flow.id, t2.id);
        if !wt2_path.exists() {
            let _ = manager.create(flow.id, t2.id, Some("HEAD")).unwrap();
        }

        // Task 1 commits t1.txt in its worktree.
        std::fs::write(wt1_path.join("t1.txt"), "t1\n").unwrap();
        let out = Command::new("git")
            .current_dir(&wt1_path)
            .args(["add", "-A"])
            .output()
            .unwrap();
        assert!(out.status.success());
        let out = Command::new("git")
            .current_dir(&wt1_path)
            .args([
                "-c",
                "user.name=Hivemind",
                "-c",
                "user.email=hivemind@example.com",
                "commit",
                "-m",
                "t1",
            ])
            .output()
            .unwrap();
        assert!(out.status.success());

        // Task 2 fast-forwards onto task 1's exec branch (its dependency)
        // before adding its own commit.
        let out = Command::new("git")
            .current_dir(&wt2_path)
            .args(["merge", "--ff-only", &format!("exec/{}/{}", flow.id, t1.id)])
            .output()
            .unwrap();
        assert!(
            out.status.success(),
            "git merge --ff-only: {}",
            String::from_utf8_lossy(&out.stderr)
        );

        // Task 2 commits t2.txt on top.
        std::fs::write(wt2_path.join("t2.txt"), "t2\n").unwrap();
        let out = Command::new("git")
            .current_dir(&wt2_path)
            .args(["add", "-A"])
            .output()
            .unwrap();
        assert!(out.status.success());
        let out = Command::new("git")
            .current_dir(&wt2_path)
            .args([
                "-c",
                "user.name=Hivemind",
                "-c",
                "user.email=hivemind@example.com",
                "commit",
                "-m",
                "t2",
            ])
            .output()
            .unwrap();
        assert!(out.status.success());

        // Drive both tasks to Success and complete the flow via raw events.
        for task_id in [t1.id, t2.id] {
            let event = Event::new(
                EventPayload::TaskExecutionStateChanged {
                    flow_id: flow.id,
                    task_id,
                    from: TaskExecState::Verifying,
                    to: TaskExecState::Success,
                },
                CorrelationIds::for_graph_flow_task(
                    flow.project_id,
                    flow.graph_id,
                    flow.id,
                    task_id,
                ),
            );
            registry.store.append(event).unwrap();
        }
        let event = Event::new(
            EventPayload::TaskFlowCompleted { flow_id: flow.id },
            CorrelationIds::for_graph_flow(flow.project_id, flow.graph_id, flow.id),
        );
        registry.store.append(event).unwrap();

        // Prepare must integrate both tasks without conflicts.
        let ms = registry.merge_prepare(&flow.id.to_string(), None).unwrap();
        assert!(ms.conflicts.is_empty(), "conflicts: {:?}", ms.conflicts);

        // One TaskIntegratedIntoFlow event per task.
        let events = registry.store.read_all().unwrap();
        let integrated = events
            .iter()
            .filter(|e| {
                matches!(
                    &e.payload,
                    EventPayload::TaskIntegratedIntoFlow { flow_id, .. } if *flow_id == flow.id
                )
            })
            .count();
        assert_eq!(integrated, 2);

        // Approve and execute; completion must report the merged commits.
        registry.merge_approve(&flow.id.to_string()).unwrap();
        let ms = registry.merge_execute(&flow.id.to_string()).unwrap();
        assert_eq!(ms.status, crate::core::state::MergeStatus::Completed);
        assert!(!ms.commits.is_empty());

        // The flow's "_merge" scratch worktree must be cleaned up afterwards.
        let merge_path = repo_dir
            .join(".hivemind/worktrees")
            .join(flow.id.to_string())
            .join("_merge");
        assert!(!merge_path.exists());
    }
8699
8700    #[test]
8701    fn replay_flow_reconstructs_state() {
8702        let registry = test_registry();
8703        registry.create_project("proj", None).unwrap();
8704        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8705        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8706        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8707        let flow = registry.start_flow(&flow.id.to_string()).unwrap();
8708
8709        let replayed = registry.replay_flow(&flow.id.to_string()).unwrap();
8710        assert_eq!(replayed.id, flow.id);
8711        assert_eq!(replayed.state, FlowState::Running);
8712        assert!(replayed.task_executions.contains_key(&t1.id));
8713    }
8714
8715    #[test]
8716    fn replay_flow_not_found() {
8717        let registry = test_registry();
8718        let res = registry.replay_flow(&Uuid::new_v4().to_string());
8719        assert!(res.is_err());
8720        assert_eq!(res.unwrap_err().code, "flow_not_found");
8721    }
8722
8723    #[test]
8724    fn read_events_with_flow_filter() {
8725        let registry = test_registry();
8726        registry.create_project("proj", None).unwrap();
8727        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8728        let graph = registry.create_graph("proj", "g1", &[t1.id]).unwrap();
8729        let flow = registry.create_flow(&graph.id.to_string(), None).unwrap();
8730        let _ = registry.start_flow(&flow.id.to_string()).unwrap();
8731
8732        let filter = EventFilter {
8733            flow_id: Some(flow.id),
8734            ..EventFilter::default()
8735        };
8736        let events = registry.read_events(&filter).unwrap();
8737        assert!(!events.is_empty());
8738        for ev in &events {
8739            assert_eq!(ev.metadata.correlation.flow_id, Some(flow.id));
8740        }
8741    }
8742
8743    #[test]
8744    fn read_events_with_task_filter() {
8745        let registry = test_registry();
8746        registry.create_project("proj", None).unwrap();
8747        let t1 = registry.create_task("proj", "Task 1", None, None).unwrap();
8748
8749        let filter = EventFilter {
8750            task_id: Some(t1.id),
8751            ..EventFilter::default()
8752        };
8753        let events = registry.read_events(&filter).unwrap();
8754        assert_eq!(events.len(), 1);
8755    }
8756}