// SPDX-FileCopyrightText: 2026 Andrei G <bug-ops>
// SPDX-License-Identifier: MIT OR Apache-2.0
3
use std::fmt;
use std::path::PathBuf;
use std::str::FromStr;

use serde::{Deserialize, Serialize};
use uuid::Uuid;
use zeph_memory::store::graph_store::{GraphSummary, RawGraphStore};

use super::error::OrchestrationError;
use super::verify_predicate::{PredicateOutcome, VerifyPredicate};
14
/// Index of a task within a [`TaskGraph::tasks`] `Vec`.
///
/// `TaskId` is a dense, zero-based `u32` index. The invariant
/// `tasks[i].id == TaskId(i as u32)` holds throughout the lifetime of a graph.
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::TaskId;
///
/// let id = TaskId(3);
/// assert_eq!(id.index(), 3);
/// assert_eq!(id.as_u32(), 3);
/// assert_eq!(id.to_string(), "3");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct TaskId(pub u32);
33impl TaskId {
34    /// Returns the index for Vec access.
35    #[must_use]
36    pub fn index(self) -> usize {
37        self.0 as usize
38    }
39
40    /// Returns the raw `u32` value.
41    #[must_use]
42    pub fn as_u32(self) -> u32 {
43        self.0
44    }
45}
46
47impl fmt::Display for TaskId {
48    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
49        write!(f, "{}", self.0)
50    }
51}
52
/// Unique identifier for a [`TaskGraph`].
///
/// Backed by a UUID v4. Implements `FromStr` / `Display` for serialization and
/// CLI lookup.
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::GraphId;
///
/// let id = GraphId::new();
/// let s = id.to_string();
/// assert_eq!(s.len(), 36); // canonical hyphenated UUID representation
///
/// let parsed: GraphId = s.parse().expect("valid UUID");
/// assert_eq!(id, parsed);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GraphId(Uuid);
73impl GraphId {
74    /// Generate a new random v4 `GraphId`.
75    #[must_use]
76    pub fn new() -> Self {
77        Self(Uuid::new_v4())
78    }
79}
80
81impl Default for GraphId {
82    fn default() -> Self {
83        Self::new()
84    }
85}
86
87impl fmt::Display for GraphId {
88    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
89        write!(f, "{}", self.0)
90    }
91}
92
93impl FromStr for GraphId {
94    type Err = OrchestrationError;
95
96    fn from_str(s: &str) -> Result<Self, Self::Err> {
97        Uuid::parse_str(s)
98            .map(GraphId)
99            .map_err(|e| OrchestrationError::InvalidGraph(format!("invalid graph id '{s}': {e}")))
100    }
101}
102
/// Lifecycle status of a single task node.
///
/// State machine:
///
/// ```text
/// Pending → Ready → Running → Completed  (success)
///                           → Failed     (error; then failure strategy applies)
///                           → Skipped    (upstream failed with Skip strategy)
///                           → Canceled   (graph aborted while task was running)
/// ```
///
/// Only `Completed`, `Failed`, `Skipped`, and `Canceled` are terminal — see
/// [`TaskStatus::is_terminal`].
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::TaskStatus;
///
/// assert!(TaskStatus::Completed.is_terminal());
/// assert!(!TaskStatus::Running.is_terminal());
/// assert_eq!(TaskStatus::Pending.to_string(), "pending");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TaskStatus {
    /// Waiting for dependencies to complete.
    Pending,
    /// All dependencies completed; ready to be scheduled.
    Ready,
    /// A sub-agent is actively executing this task.
    Running,
    /// Sub-agent completed successfully.
    Completed,
    /// Sub-agent returned an error.
    Failed,
    /// Task was skipped because an upstream task failed with [`FailureStrategy::Skip`].
    Skipped,
    /// Task was running when the graph was aborted ([`FailureStrategy::Abort`]).
    Canceled,
}
145impl TaskStatus {
146    /// Returns `true` if the status is a terminal state.
147    #[must_use]
148    pub fn is_terminal(self) -> bool {
149        matches!(
150            self,
151            TaskStatus::Completed | TaskStatus::Failed | TaskStatus::Skipped | TaskStatus::Canceled
152        )
153    }
154}
155
156impl fmt::Display for TaskStatus {
157    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
158        match self {
159            TaskStatus::Pending => write!(f, "pending"),
160            TaskStatus::Ready => write!(f, "ready"),
161            TaskStatus::Running => write!(f, "running"),
162            TaskStatus::Completed => write!(f, "completed"),
163            TaskStatus::Failed => write!(f, "failed"),
164            TaskStatus::Skipped => write!(f, "skipped"),
165            TaskStatus::Canceled => write!(f, "canceled"),
166        }
167    }
168}
169
/// Lifecycle status of a [`TaskGraph`].
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::GraphStatus;
///
/// assert_eq!(GraphStatus::Running.to_string(), "running");
/// assert_eq!(GraphStatus::Failed.to_string(), "failed");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum GraphStatus {
    /// Graph has been created but the scheduler has not started yet.
    Created,
    /// Scheduler is actively dispatching tasks.
    Running,
    /// All tasks reached a terminal state successfully.
    Completed,
    /// At least one task failed and the `Abort` strategy halted the graph.
    Failed,
    /// The graph was canceled by an external caller.
    Canceled,
    /// Graph is paused; waiting for user input (triggered by [`FailureStrategy::Ask`]).
    Paused,
}
197impl fmt::Display for GraphStatus {
198    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
199        match self {
200            GraphStatus::Created => write!(f, "created"),
201            GraphStatus::Running => write!(f, "running"),
202            GraphStatus::Completed => write!(f, "completed"),
203            GraphStatus::Failed => write!(f, "failed"),
204            GraphStatus::Canceled => write!(f, "canceled"),
205            GraphStatus::Paused => write!(f, "paused"),
206        }
207    }
208}
209
/// What to do when a task fails.
///
/// Set at the graph level via [`TaskGraph::default_failure_strategy`] and
/// optionally overridden per task via [`TaskNode::failure_strategy`].
///
/// # Examples
///
/// ```rust
/// use std::str::FromStr;
/// use zeph_orchestration::FailureStrategy;
///
/// assert_eq!(FailureStrategy::default(), FailureStrategy::Abort);
/// assert_eq!("skip".parse::<FailureStrategy>().unwrap(), FailureStrategy::Skip);
/// assert_eq!(FailureStrategy::Retry.to_string(), "retry");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FailureStrategy {
    /// Abort the entire graph and cancel all running tasks.
    #[default]
    Abort,
    /// Retry the task up to [`TaskNode::max_retries`] times, then abort.
    Retry,
    /// Skip the failed task and transitively skip all its dependents.
    Skip,
    /// Pause the graph ([`GraphStatus::Paused`]) and wait for user intervention.
    Ask,
}
239impl fmt::Display for FailureStrategy {
240    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
241        match self {
242            FailureStrategy::Abort => write!(f, "abort"),
243            FailureStrategy::Retry => write!(f, "retry"),
244            FailureStrategy::Skip => write!(f, "skip"),
245            FailureStrategy::Ask => write!(f, "ask"),
246        }
247    }
248}
249
250impl FromStr for FailureStrategy {
251    type Err = OrchestrationError;
252
253    fn from_str(s: &str) -> Result<Self, Self::Err> {
254        match s {
255            "abort" => Ok(FailureStrategy::Abort),
256            "retry" => Ok(FailureStrategy::Retry),
257            "skip" => Ok(FailureStrategy::Skip),
258            "ask" => Ok(FailureStrategy::Ask),
259            other => Err(OrchestrationError::InvalidGraph(format!(
260                "unknown failure strategy '{other}': expected one of abort, retry, skip, ask"
261            ))),
262        }
263    }
264}
265
/// Output produced by a completed task.
///
/// Stored in [`TaskNode::result`] after the sub-agent finishes. Used by
/// [`Aggregator`] to build the final synthesised response.
///
/// [`Aggregator`]: crate::aggregator::Aggregator
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskResult {
    /// Raw text output returned by the sub-agent.
    pub output: String,
    /// File-system paths to any artifacts produced (e.g. build outputs, reports).
    pub artifacts: Vec<PathBuf>,
    /// Wall-clock execution time in milliseconds.
    pub duration_ms: u64,
    /// Handle ID of the sub-agent instance that produced this result.
    pub agent_id: Option<String>,
    /// Name of the agent definition used to spawn the sub-agent.
    pub agent_def: Option<String>,
}
/// Execution mode annotation emitted by the LLM planner for each task.
///
/// Controls how the [`DagScheduler`] dispatches a task relative to its siblings.
/// The annotation is set by the planner and stored in [`TaskNode::execution_mode`].
/// Absent or `null` in stored JSON deserialises to the default `Parallel`.
///
/// [`DagScheduler`]: crate::scheduler::DagScheduler
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::ExecutionMode;
///
/// assert_eq!(ExecutionMode::default(), ExecutionMode::Parallel);
/// let mode: ExecutionMode = serde_json::from_str("\"sequential\"").unwrap();
/// assert_eq!(mode, ExecutionMode::Sequential);
/// ```
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, schemars::JsonSchema,
)]
#[serde(rename_all = "snake_case")]
pub enum ExecutionMode {
    /// Task can run in parallel with others at the same DAG level.
    #[default]
    Parallel,
    /// Task is globally serialized: at most one `Sequential` task runs at a time across
    /// the entire graph (e.g. deploy, exclusive-resource access, shared-state mutation).
    Sequential,
}
/// A single node in the task DAG.
///
/// Constructed by [`Planner`] and stored inside a [`TaskGraph`].  The
/// scheduler drives each node through its [`TaskStatus`] lifecycle.
///
/// [`Planner`]: crate::planner::Planner
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::{TaskNode, TaskStatus, ExecutionMode};
///
/// let node = TaskNode::new(0, "fetch data", "Download the dataset from source.");
/// assert_eq!(node.status, TaskStatus::Pending);
/// assert!(node.depends_on.is_empty());
/// assert_eq!(node.execution_mode, ExecutionMode::Parallel);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskNode {
    /// Dense zero-based index. Invariant: `tasks[i].id == TaskId(i)`.
    pub id: TaskId,
    /// Short, human-readable task title.
    pub title: String,
    /// Full task description passed verbatim to the assigned sub-agent as its prompt.
    pub description: String,
    /// Preferred agent name suggested by the planner; `None` lets the router decide.
    pub agent_hint: Option<String>,
    /// Current lifecycle status.
    pub status: TaskStatus,
    /// Indices of tasks this node depends on.
    pub depends_on: Vec<TaskId>,
    /// Result populated by the scheduler after the sub-agent finishes.
    pub result: Option<TaskResult>,
    /// Agent name actually assigned by the router at dispatch time.
    pub assigned_agent: Option<String>,
    /// Number of times this task has been retried so far (execution retries only).
    pub retry_count: u32,
    /// Number of predicate-driven re-runs for this task (independent of `retry_count`).
    #[serde(default)]
    pub predicate_rerun_count: u32,
    /// Per-task failure strategy override; `None` means use [`TaskGraph::default_failure_strategy`].
    pub failure_strategy: Option<FailureStrategy>,
    /// Maximum retry attempts for this task; `None` means use [`TaskGraph::default_max_retries`].
    pub max_retries: Option<u32>,
    /// LLM planner annotation. Old SQLite-stored JSON without this field
    /// deserialises to the default (`Parallel`).
    #[serde(default)]
    pub execution_mode: ExecutionMode,
    /// Per-subtask verification predicate (predicate gate).
    ///
    /// When `Some`, the task's output must satisfy this criterion before downstream
    /// tasks may consume it. The scheduler emits `SchedulerAction::VerifyPredicate`
    /// after task completion and blocks downstream dispatch until
    /// `predicate_outcome.is_some()`.
    #[serde(default)]
    pub verify_predicate: Option<VerifyPredicate>,
    /// Outcome of the most recent predicate evaluation.
    ///
    /// `None` means the gate has not been evaluated yet (in-memory only; restart
    /// re-evaluates any pending predicates). The scheduler re-emits `VerifyPredicate`
    /// on every tick while this is `None` and `verify_predicate.is_some()`.
    #[serde(default)]
    pub predicate_outcome: Option<PredicateOutcome>,
}
381impl TaskNode {
382    /// Create a new pending task with the given index.
383    #[must_use]
384    pub fn new(id: u32, title: impl Into<String>, description: impl Into<String>) -> Self {
385        Self {
386            id: TaskId(id),
387            title: title.into(),
388            description: description.into(),
389            agent_hint: None,
390            status: TaskStatus::Pending,
391            depends_on: Vec::new(),
392            result: None,
393            assigned_agent: None,
394            retry_count: 0,
395            predicate_rerun_count: 0,
396            failure_strategy: None,
397            max_retries: None,
398            execution_mode: ExecutionMode::default(),
399            verify_predicate: None,
400            predicate_outcome: None,
401        }
402    }
403}
404
/// A directed acyclic graph of tasks to be executed by the orchestrator.
///
/// Created by the [`Planner`] and driven to completion by the [`DagScheduler`].
/// The `tasks` vec is the authoritative store; all indices (`TaskId`) reference
/// positions within it.
///
/// [`Planner`]: crate::planner::Planner
/// [`DagScheduler`]: crate::scheduler::DagScheduler
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::{TaskGraph, TaskNode, GraphStatus, FailureStrategy};
///
/// let mut graph = TaskGraph::new("build and deploy service");
/// assert_eq!(graph.status, GraphStatus::Created);
/// assert_eq!(graph.default_failure_strategy, FailureStrategy::Abort);
/// assert_eq!(graph.default_max_retries, 3);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskGraph {
    /// Unique graph identifier (UUID v4).
    pub id: GraphId,
    /// High-level user goal that was decomposed into this graph.
    pub goal: String,
    /// All task nodes. Index `i` must satisfy `tasks[i].id == TaskId(i)`.
    pub tasks: Vec<TaskNode>,
    /// Current lifecycle status of the graph as a whole.
    pub status: GraphStatus,
    /// Graph-wide failure strategy applied when a task has no per-task override.
    pub default_failure_strategy: FailureStrategy,
    /// Graph-wide maximum retry count applied when a task has no per-task override.
    pub default_max_retries: u32,
    /// ISO-8601 UTC timestamp of graph creation.
    pub created_at: String,
    /// ISO-8601 UTC timestamp set when the graph reaches a terminal status.
    pub finished_at: Option<String>,
}
444impl TaskGraph {
445    /// Create a new graph with `Created` status.
446    #[must_use]
447    pub fn new(goal: impl Into<String>) -> Self {
448        Self {
449            id: GraphId::new(),
450            goal: goal.into(),
451            tasks: Vec::new(),
452            status: GraphStatus::Created,
453            default_failure_strategy: FailureStrategy::default(),
454            default_max_retries: 3,
455            created_at: chrono_now(),
456            finished_at: None,
457        }
458    }
459}
460
461pub(crate) fn chrono_now() -> String {
462    // ISO-8601 UTC timestamp, consistent with the rest of the codebase.
463    // Format: "2026-03-05T22:04:41Z"
464    let secs = std::time::SystemTime::now()
465        .duration_since(std::time::UNIX_EPOCH)
466        .map_or(0, |d| d.as_secs());
467    // Manual formatting: seconds since epoch → ISO-8601 UTC
468    // Days since epoch, then decompose into year/month/day
469    let (y, mo, d, h, mi, s) = epoch_secs_to_datetime(secs);
470    format!("{y:04}-{mo:02}-{d:02}T{h:02}:{mi:02}:{s:02}Z")
471}
472
/// Convert Unix epoch seconds to `(year, month, day, hour, min, sec)` UTC.
///
/// Uses Howard Hinnant's `civil_from_days` algorithm, which is exact over the
/// whole proleptic Gregorian calendar.
///
/// The previous hand-rolled decomposition started its 4-year cycles at 1970
/// with the leap year assumed last in each cycle, but 1972 is the *third*
/// year of the 1970–1973 quad. That mapped Dec 31 of every leap year (e.g.
/// day 1095 = 1972-12-31) to Jan 1 of the following year, and produced
/// month 0 for the final day of each 4-year cycle.
fn epoch_secs_to_datetime(secs: u64) -> (u64, u8, u8, u8, u8, u8) {
    let s = (secs % 60) as u8;
    let mins = secs / 60;
    let mi = (mins % 60) as u8;
    let hours = mins / 60;
    let h = (hours % 24) as u8;
    // Days since 1970-01-01. Fits i64: u64::MAX / 86_400 is well below i64::MAX.
    let days = (hours / 24) as i64;

    // Shift the epoch from 1970-01-01 to 0000-03-01 so every 400-year era
    // starts on March 1 and each leap day falls at the end of its cycle.
    let z = days + 719_468;
    let era = z.div_euclid(146_097);
    let doe = z.rem_euclid(146_097); // day-of-era                 [0, 146096]
    let yoe = (doe - doe / 1_460 + doe / 36_524 - doe / 146_096) / 365; // year-of-era [0, 399]
    let y = yoe + era * 400;
    let doy = doe - (365 * yoe + yoe / 4 - yoe / 100); // day-of-year, March-based [0, 365]
    let mp = (5 * doy + 2) / 153; // month index, March = 0        [0, 11]
    let day = doy - (153 * mp + 2) / 5 + 1; // day-of-month        [1, 31]
    let month = if mp < 10 { mp + 3 } else { mp - 9 }; // civil month [1, 12]
    // January and February belong to the *next* civil year in the shifted calendar.
    let year = if month <= 2 { y + 1 } else { y };

    // `year` is non-negative for any post-1970 input; month/day always fit u8.
    (
        year as u64,
        u8::try_from(month).unwrap_or(1),
        u8::try_from(day).unwrap_or(1),
        h,
        mi,
        s,
    )
}
/// Maximum allowed length for a `TaskGraph` goal string, in bytes
/// (compared against `String::len()` in `GraphPersistence::save`).
const MAX_GOAL_LEN: usize = 1024;
/// Type-safe wrapper around `RawGraphStore` that handles `TaskGraph` serialization.
///
/// Consumers in `zeph-core` use this instead of `RawGraphStore` directly, so they
/// never need to deal with JSON strings.
///
/// # Storage layout
///
/// The `task_graphs` table stores both metadata columns (`goal`, `status`,
/// `created_at`, `finished_at`) and the full `graph_json` blob. The metadata
/// columns are summary/index data used for listing and filtering; `graph_json`
/// is the authoritative source for full graph reconstruction. On `load`, only
/// `graph_json` is deserialized — the columns are not consulted.
pub struct GraphPersistence<S: RawGraphStore> {
    // Underlying raw store; all methods delegate to it after (de)serialization.
    store: S,
}
impl<S: RawGraphStore> GraphPersistence<S> {
    /// Create a new `GraphPersistence` wrapping the given store.
    pub fn new(store: S) -> Self {
        Self { store }
    }

    /// Persist a `TaskGraph` (upsert).
    ///
    /// Returns `OrchestrationError::InvalidGraph` if `graph.goal` exceeds
    /// `MAX_GOAL_LEN` (1024) bytes.
    ///
    /// # Errors
    ///
    /// Returns `OrchestrationError::Persistence` on serialization or database failure.
    pub async fn save(&self, graph: &TaskGraph) -> Result<(), OrchestrationError> {
        // NOTE(review): `String::len()` counts UTF-8 bytes, but the error
        // message below says "chars" — confirm which unit the limit is meant
        // to enforce for multi-byte goals.
        if graph.goal.len() > MAX_GOAL_LEN {
            return Err(OrchestrationError::InvalidGraph(format!(
                "goal exceeds {MAX_GOAL_LEN} character limit ({} chars)",
                graph.goal.len()
            )));
        }
        // The full graph is serialized to JSON; the extra columns passed to
        // save_graph are denormalized summary data used only for listing.
        let json = serde_json::to_string(graph)
            .map_err(|e| OrchestrationError::Persistence(e.to_string()))?;
        self.store
            .save_graph(
                &graph.id.to_string(),
                &graph.goal,
                &graph.status.to_string(),
                &json,
                &graph.created_at,
                graph.finished_at.as_deref(),
            )
            .await
            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
    }

    /// Load a `TaskGraph` by its `GraphId`.
    ///
    /// Returns `None` if not found.
    ///
    /// # Errors
    ///
    /// Returns `OrchestrationError::Persistence` on database or deserialization failure.
    pub async fn load(&self, id: &GraphId) -> Result<Option<TaskGraph>, OrchestrationError> {
        // Only the `graph_json` blob is consulted; summary columns are ignored.
        match self
            .store
            .load_graph(&id.to_string())
            .await
            .map_err(|e| OrchestrationError::Persistence(e.to_string()))?
        {
            Some(json) => {
                let graph = serde_json::from_str(&json)
                    .map_err(|e| OrchestrationError::Persistence(e.to_string()))?;
                Ok(Some(graph))
            }
            None => Ok(None),
        }
    }

    /// List stored graphs (newest first).
    ///
    /// # Errors
    ///
    /// Returns `OrchestrationError::Persistence` on database failure.
    pub async fn list(&self, limit: u32) -> Result<Vec<GraphSummary>, OrchestrationError> {
        self.store
            .list_graphs(limit)
            .await
            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
    }

    /// Delete a graph by its `GraphId`.
    ///
    /// Returns `true` if a row was deleted.
    ///
    /// # Errors
    ///
    /// Returns `OrchestrationError::Persistence` on database failure.
    pub async fn delete(&self, id: &GraphId) -> Result<bool, OrchestrationError> {
        self.store
            .delete_graph(&id.to_string())
            .await
            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_taskid_display() {
        assert_eq!(TaskId(3).to_string(), "3");
    }

    #[test]
    fn test_graphid_display_and_new() {
        let id = GraphId::new();
        let s = id.to_string();
        assert_eq!(s.len(), 36, "UUID string should be 36 chars");
        let parsed: GraphId = s.parse().expect("should parse back");
        assert_eq!(id, parsed);
    }

    #[test]
    fn test_graphid_from_str_invalid() {
        let err = "not-a-uuid".parse::<GraphId>();
        assert!(err.is_err());
    }

    #[test]
    fn test_task_status_is_terminal() {
        assert!(TaskStatus::Completed.is_terminal());
        assert!(TaskStatus::Failed.is_terminal());
        assert!(TaskStatus::Skipped.is_terminal());
        assert!(TaskStatus::Canceled.is_terminal());

        assert!(!TaskStatus::Pending.is_terminal());
        assert!(!TaskStatus::Ready.is_terminal());
        assert!(!TaskStatus::Running.is_terminal());
    }

    #[test]
    fn test_task_status_display() {
        assert_eq!(TaskStatus::Pending.to_string(), "pending");
        assert_eq!(TaskStatus::Ready.to_string(), "ready");
        assert_eq!(TaskStatus::Running.to_string(), "running");
        assert_eq!(TaskStatus::Completed.to_string(), "completed");
        assert_eq!(TaskStatus::Failed.to_string(), "failed");
        assert_eq!(TaskStatus::Skipped.to_string(), "skipped");
        assert_eq!(TaskStatus::Canceled.to_string(), "canceled");
    }

    #[test]
    fn test_failure_strategy_default() {
        assert_eq!(FailureStrategy::default(), FailureStrategy::Abort);
    }

    #[test]
    fn test_failure_strategy_display() {
        assert_eq!(FailureStrategy::Abort.to_string(), "abort");
        assert_eq!(FailureStrategy::Retry.to_string(), "retry");
        assert_eq!(FailureStrategy::Skip.to_string(), "skip");
        assert_eq!(FailureStrategy::Ask.to_string(), "ask");
    }

    #[test]
    fn test_graph_status_display() {
        assert_eq!(GraphStatus::Created.to_string(), "created");
        assert_eq!(GraphStatus::Running.to_string(), "running");
        assert_eq!(GraphStatus::Completed.to_string(), "completed");
        assert_eq!(GraphStatus::Failed.to_string(), "failed");
        assert_eq!(GraphStatus::Canceled.to_string(), "canceled");
        assert_eq!(GraphStatus::Paused.to_string(), "paused");
    }

    #[test]
    fn test_task_graph_serde_roundtrip() {
        let mut graph = TaskGraph::new("test goal");
        graph.tasks.push(TaskNode::new(0, "task 0", "do something"));
        let json = serde_json::to_string(&graph).expect("serialize");
        let restored: TaskGraph = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(graph.id, restored.id);
        assert_eq!(graph.goal, restored.goal);
        assert_eq!(graph.tasks.len(), restored.tasks.len());
    }

    #[test]
    fn test_task_node_serde_roundtrip() {
        let mut node = TaskNode::new(1, "compile", "run cargo build");
        node.agent_hint = Some("rust-dev".to_string());
        node.depends_on = vec![TaskId(0)];
        let json = serde_json::to_string(&node).expect("serialize");
        let restored: TaskNode = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(node.id, restored.id);
        assert_eq!(node.title, restored.title);
        assert_eq!(node.depends_on, restored.depends_on);
    }

    #[test]
    fn test_task_result_serde_roundtrip() {
        let result = TaskResult {
            output: "ok".to_string(),
            artifacts: vec![PathBuf::from("/tmp/out.bin")],
            duration_ms: 500,
            agent_id: Some("agent-1".to_string()),
            agent_def: None,
        };
        let json = serde_json::to_string(&result).expect("serialize");
        let restored: TaskResult = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(result.output, restored.output);
        assert_eq!(result.duration_ms, restored.duration_ms);
        assert_eq!(result.artifacts, restored.artifacts);
    }

    #[test]
    fn test_failure_strategy_from_str() {
        assert_eq!(
            "abort".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Abort
        );
        assert_eq!(
            "retry".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Retry
        );
        assert_eq!(
            "skip".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Skip
        );
        assert_eq!(
            "ask".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Ask
        );
        assert!("abort_all".parse::<FailureStrategy>().is_err());
        assert!("".parse::<FailureStrategy>().is_err());
    }

    #[test]
    fn test_chrono_now_iso8601_format() {
        let ts = chrono_now();
        // Format: "YYYY-MM-DDTHH:MM:SSZ" — 20 chars
        assert_eq!(ts.len(), 20, "timestamp should be 20 chars: {ts}");
        assert!(ts.ends_with('Z'), "should end with Z: {ts}");
        assert!(ts.contains('T'), "should contain T: {ts}");
        // Year should be >= 2024
        let year: u32 = ts[..4].parse().expect("year should be numeric");
        assert!(year >= 2024, "year should be >= 2024: {year}");
    }

    #[test]
    fn test_failure_strategy_serde_snake_case() {
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Abort).unwrap(),
            "\"abort\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Retry).unwrap(),
            "\"retry\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Skip).unwrap(),
            "\"skip\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Ask).unwrap(),
            "\"ask\""
        );
    }

    #[test]
    fn test_graph_persistence_save_rejects_long_goal() {
        // GraphPersistence::save() is async and requires a real store; here we
        // verify the guard's precondition and the constant. (Previously the
        // over-long goal was built twice and reassigned redundantly.)
        let graph = TaskGraph::new("x".repeat(MAX_GOAL_LEN + 1));
        assert!(
            graph.goal.len() > MAX_GOAL_LEN,
            "test setup: goal must exceed limit"
        );
        // The check itself lives in GraphPersistence::save(), exercised by
        // the async persistence tests in zeph-memory; here we verify the constant.
        assert_eq!(MAX_GOAL_LEN, 1024);
    }

    #[test]
    fn test_task_node_predicate_fields_default_to_none() {
        // Old SQLite blobs without verify_predicate / predicate_outcome must deserialize
        // to None without error (#[serde(default)]).
        let json = r#"{
            "id": 0,
            "title": "t",
            "description": "d",
            "agent_hint": null,
            "status": "pending",
            "depends_on": [],
            "result": null,
            "assigned_agent": null,
            "retry_count": 0,
            "failure_strategy": null,
            "max_retries": null
        }"#;
        let node: TaskNode = serde_json::from_str(json).expect("should deserialize old JSON");
        assert!(node.verify_predicate.is_none());
        assert!(node.predicate_outcome.is_none());
    }

    #[test]
    fn test_task_node_missing_execution_mode_deserializes_as_parallel() {
        // Old SQLite-stored JSON blobs lack the execution_mode field.
        // #[serde(default)] must make them deserialize to Parallel without error.
        let json = r#"{
            "id": 0,
            "title": "t",
            "description": "d",
            "agent_hint": null,
            "status": "pending",
            "depends_on": [],
            "result": null,
            "assigned_agent": null,
            "retry_count": 0,
            "failure_strategy": null,
            "max_retries": null
        }"#;
        let node: TaskNode = serde_json::from_str(json).expect("should deserialize old JSON");
        assert_eq!(node.execution_mode, ExecutionMode::Parallel);
    }

    #[test]
    fn test_execution_mode_serde_snake_case() {
        assert_eq!(
            serde_json::to_string(&ExecutionMode::Parallel).unwrap(),
            "\"parallel\""
        );
        assert_eq!(
            serde_json::to_string(&ExecutionMode::Sequential).unwrap(),
            "\"sequential\""
        );
        let p: ExecutionMode = serde_json::from_str("\"parallel\"").unwrap();
        assert_eq!(p, ExecutionMode::Parallel);
        let s: ExecutionMode = serde_json::from_str("\"sequential\"").unwrap();
        assert_eq!(s, ExecutionMode::Sequential);
    }
}