// zeph_orchestration/graph.rs

1// SPDX-FileCopyrightText: 2026 Andrei G <bug-ops>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4use std::fmt;
5use std::path::PathBuf;
6use std::str::FromStr;
7
8use serde::{Deserialize, Serialize};
9use uuid::Uuid;
10use zeph_memory::store::graph_store::{GraphSummary, RawGraphStore};
11
12use super::error::OrchestrationError;
13
14/// Index of a task within a [`TaskGraph::tasks`] `Vec`.
15///
16/// `TaskId` is a dense, zero-based `u32` index. The invariant
17/// `tasks[i].id == TaskId(i as u32)` holds throughout the lifetime of a graph.
18///
19/// # Examples
20///
21/// ```rust
22/// use zeph_orchestration::TaskId;
23///
24/// let id = TaskId(3);
25/// assert_eq!(id.index(), 3);
26/// assert_eq!(id.as_u32(), 3);
27/// assert_eq!(id.to_string(), "3");
28/// ```
29#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
30pub struct TaskId(pub u32);
31
32impl TaskId {
33    /// Returns the index for Vec access.
34    #[must_use]
35    pub fn index(self) -> usize {
36        self.0 as usize
37    }
38
39    /// Returns the raw `u32` value.
40    #[must_use]
41    pub fn as_u32(self) -> u32 {
42        self.0
43    }
44}
45
46impl fmt::Display for TaskId {
47    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
48        write!(f, "{}", self.0)
49    }
50}
51
52/// Unique identifier for a [`TaskGraph`].
53///
54/// Backed by a UUID v4. Implements `FromStr` / `Display` for serialization and
55/// CLI lookup.
56///
57/// # Examples
58///
59/// ```rust
60/// use zeph_orchestration::GraphId;
61///
62/// let id = GraphId::new();
63/// let s = id.to_string();
64/// assert_eq!(s.len(), 36); // UUID string representation
65///
66/// let parsed: GraphId = s.parse().expect("valid UUID");
67/// assert_eq!(id, parsed);
68/// ```
69#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
70pub struct GraphId(Uuid);
71
72impl GraphId {
73    /// Generate a new random v4 `GraphId`.
74    #[must_use]
75    pub fn new() -> Self {
76        Self(Uuid::new_v4())
77    }
78}
79
80impl Default for GraphId {
81    fn default() -> Self {
82        Self::new()
83    }
84}
85
86impl fmt::Display for GraphId {
87    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
88        write!(f, "{}", self.0)
89    }
90}
91
92impl FromStr for GraphId {
93    type Err = OrchestrationError;
94
95    fn from_str(s: &str) -> Result<Self, Self::Err> {
96        Uuid::parse_str(s)
97            .map(GraphId)
98            .map_err(|e| OrchestrationError::InvalidGraph(format!("invalid graph id '{s}': {e}")))
99    }
100}
101
102/// Lifecycle status of a single task node.
103///
104/// State machine:
105///
106/// ```text
107/// Pending → Ready → Running → Completed  (success)
108///                           → Failed     (error; then failure strategy applies)
109///                           → Skipped    (upstream failed with Skip strategy)
110///                           → Canceled   (graph aborted while task was running)
111/// ```
112///
113/// Only `Completed`, `Failed`, `Skipped`, and `Canceled` are terminal — see
114/// [`TaskStatus::is_terminal`].
115///
116/// # Examples
117///
118/// ```rust
119/// use zeph_orchestration::TaskStatus;
120///
121/// assert!(TaskStatus::Completed.is_terminal());
122/// assert!(!TaskStatus::Running.is_terminal());
123/// assert_eq!(TaskStatus::Pending.to_string(), "pending");
124/// ```
125#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
126#[serde(rename_all = "snake_case")]
127pub enum TaskStatus {
128    /// Waiting for dependencies to complete.
129    Pending,
130    /// All dependencies completed; ready to be scheduled.
131    Ready,
132    /// A sub-agent is actively executing this task.
133    Running,
134    /// Sub-agent completed successfully.
135    Completed,
136    /// Sub-agent returned an error.
137    Failed,
138    /// Task was skipped because an upstream task failed with [`FailureStrategy::Skip`].
139    Skipped,
140    /// Task was running when the graph was aborted ([`FailureStrategy::Abort`]).
141    Canceled,
142}
143
144impl TaskStatus {
145    /// Returns `true` if the status is a terminal state.
146    #[must_use]
147    pub fn is_terminal(self) -> bool {
148        matches!(
149            self,
150            TaskStatus::Completed | TaskStatus::Failed | TaskStatus::Skipped | TaskStatus::Canceled
151        )
152    }
153}
154
155impl fmt::Display for TaskStatus {
156    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
157        match self {
158            TaskStatus::Pending => write!(f, "pending"),
159            TaskStatus::Ready => write!(f, "ready"),
160            TaskStatus::Running => write!(f, "running"),
161            TaskStatus::Completed => write!(f, "completed"),
162            TaskStatus::Failed => write!(f, "failed"),
163            TaskStatus::Skipped => write!(f, "skipped"),
164            TaskStatus::Canceled => write!(f, "canceled"),
165        }
166    }
167}
168
169/// Lifecycle status of a [`TaskGraph`].
170///
171/// # Examples
172///
173/// ```rust
174/// use zeph_orchestration::GraphStatus;
175///
176/// assert_eq!(GraphStatus::Running.to_string(), "running");
177/// assert_eq!(GraphStatus::Failed.to_string(), "failed");
178/// ```
179#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
180#[serde(rename_all = "snake_case")]
181pub enum GraphStatus {
182    /// Graph has been created but the scheduler has not started yet.
183    Created,
184    /// Scheduler is actively dispatching tasks.
185    Running,
186    /// All tasks reached a terminal state successfully.
187    Completed,
188    /// At least one task failed and the `Abort` strategy halted the graph.
189    Failed,
190    /// The graph was canceled by an external caller.
191    Canceled,
192    /// Graph is paused; waiting for user input (triggered by [`FailureStrategy::Ask`]).
193    Paused,
194}
195
196impl fmt::Display for GraphStatus {
197    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
198        match self {
199            GraphStatus::Created => write!(f, "created"),
200            GraphStatus::Running => write!(f, "running"),
201            GraphStatus::Completed => write!(f, "completed"),
202            GraphStatus::Failed => write!(f, "failed"),
203            GraphStatus::Canceled => write!(f, "canceled"),
204            GraphStatus::Paused => write!(f, "paused"),
205        }
206    }
207}
208
209/// What to do when a task fails.
210///
211/// Set at the graph level via [`TaskGraph::default_failure_strategy`] and
212/// optionally overridden per task via [`TaskNode::failure_strategy`].
213///
214/// # Examples
215///
216/// ```rust
217/// use std::str::FromStr;
218/// use zeph_orchestration::FailureStrategy;
219///
220/// assert_eq!(FailureStrategy::default(), FailureStrategy::Abort);
221/// assert_eq!("skip".parse::<FailureStrategy>().unwrap(), FailureStrategy::Skip);
222/// assert_eq!(FailureStrategy::Retry.to_string(), "retry");
223/// ```
224#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
225#[serde(rename_all = "snake_case")]
226pub enum FailureStrategy {
227    /// Abort the entire graph and cancel all running tasks.
228    #[default]
229    Abort,
230    /// Retry the task up to [`TaskNode::max_retries`] times, then abort.
231    Retry,
232    /// Skip the failed task and transitively skip all its dependents.
233    Skip,
234    /// Pause the graph ([`GraphStatus::Paused`]) and wait for user intervention.
235    Ask,
236}
237
238impl fmt::Display for FailureStrategy {
239    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
240        match self {
241            FailureStrategy::Abort => write!(f, "abort"),
242            FailureStrategy::Retry => write!(f, "retry"),
243            FailureStrategy::Skip => write!(f, "skip"),
244            FailureStrategy::Ask => write!(f, "ask"),
245        }
246    }
247}
248
249impl FromStr for FailureStrategy {
250    type Err = OrchestrationError;
251
252    fn from_str(s: &str) -> Result<Self, Self::Err> {
253        match s {
254            "abort" => Ok(FailureStrategy::Abort),
255            "retry" => Ok(FailureStrategy::Retry),
256            "skip" => Ok(FailureStrategy::Skip),
257            "ask" => Ok(FailureStrategy::Ask),
258            other => Err(OrchestrationError::InvalidGraph(format!(
259                "unknown failure strategy '{other}': expected one of abort, retry, skip, ask"
260            ))),
261        }
262    }
263}
264
/// Output produced by a completed task.
///
/// Stored in [`TaskNode::result`] after the sub-agent finishes. Used by
/// [`Aggregator`] to build the final synthesised response. Serialized as part
/// of the owning graph's JSON blob (see `GraphPersistence`).
///
/// [`Aggregator`]: crate::aggregator::Aggregator
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskResult {
    /// Raw text output returned by the sub-agent.
    pub output: String,
    /// File-system paths to any artifacts produced (e.g. build outputs, reports).
    /// NOTE(review): paths are stored verbatim; nothing here validates they exist.
    pub artifacts: Vec<PathBuf>,
    /// Wall-clock execution time in milliseconds.
    pub duration_ms: u64,
    /// Handle ID of the sub-agent instance that produced this result.
    pub agent_id: Option<String>,
    /// Name of the agent definition used to spawn the sub-agent.
    pub agent_def: Option<String>,
}
284
/// Execution mode annotation emitted by the LLM planner for each task.
///
/// Controls how the [`DagScheduler`] dispatches a task relative to its siblings.
/// The annotation is set by the planner and stored in [`TaskNode::execution_mode`].
/// Absent or `null` in stored JSON deserialises to the default `Parallel`.
///
/// [`DagScheduler`]: crate::scheduler::DagScheduler
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::ExecutionMode;
///
/// assert_eq!(ExecutionMode::default(), ExecutionMode::Parallel);
/// let mode: ExecutionMode = serde_json::from_str("\"sequential\"").unwrap();
/// assert_eq!(mode, ExecutionMode::Sequential);
/// ```
// JsonSchema derive: presumably used to hand the planner a machine-readable
// schema for structured output — confirm against the planner prompt code.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, schemars::JsonSchema,
)]
#[serde(rename_all = "snake_case")]
pub enum ExecutionMode {
    /// Task can run in parallel with others at the same DAG level.
    #[default]
    Parallel,
    /// Task is globally serialized: at most one `Sequential` task runs at a time across
    /// the entire graph (e.g. deploy, exclusive-resource access, shared-state mutation).
    Sequential,
}
314
/// A single node in the task DAG.
///
/// Constructed by [`Planner`] and stored inside a [`TaskGraph`].  The
/// scheduler drives each node through its [`TaskStatus`] lifecycle.
///
/// [`Planner`]: crate::planner::Planner
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::{TaskNode, TaskStatus, ExecutionMode};
///
/// let node = TaskNode::new(0, "fetch data", "Download the dataset from source.");
/// assert_eq!(node.status, TaskStatus::Pending);
/// assert!(node.depends_on.is_empty());
/// assert_eq!(node.execution_mode, ExecutionMode::Parallel);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskNode {
    /// Dense zero-based index. Invariant: `tasks[i].id == TaskId(i)`.
    pub id: TaskId,
    /// Short, human-readable task title.
    pub title: String,
    /// Full task description passed verbatim to the assigned sub-agent as its prompt.
    pub description: String,
    /// Preferred agent name suggested by the planner; `None` lets the router decide.
    pub agent_hint: Option<String>,
    /// Current lifecycle status.
    pub status: TaskStatus,
    /// Indices of tasks this node depends on (edges point from dependency to dependent).
    pub depends_on: Vec<TaskId>,
    /// Result populated by the scheduler after the sub-agent finishes.
    pub result: Option<TaskResult>,
    /// Agent name actually assigned by the router at dispatch time.
    pub assigned_agent: Option<String>,
    /// Number of times this task has been retried so far.
    pub retry_count: u32,
    /// Per-task failure strategy override; `None` means use [`TaskGraph::default_failure_strategy`].
    pub failure_strategy: Option<FailureStrategy>,
    /// Maximum retry attempts for this task; `None` means use [`TaskGraph::default_max_retries`].
    pub max_retries: Option<u32>,
    /// LLM planner annotation. Old SQLite-stored JSON without this field
    /// deserialises to the default (`Parallel`) via `#[serde(default)]`.
    #[serde(default)]
    pub execution_mode: ExecutionMode,
}
361
362impl TaskNode {
363    /// Create a new pending task with the given index.
364    #[must_use]
365    pub fn new(id: u32, title: impl Into<String>, description: impl Into<String>) -> Self {
366        Self {
367            id: TaskId(id),
368            title: title.into(),
369            description: description.into(),
370            agent_hint: None,
371            status: TaskStatus::Pending,
372            depends_on: Vec::new(),
373            result: None,
374            assigned_agent: None,
375            retry_count: 0,
376            failure_strategy: None,
377            max_retries: None,
378            execution_mode: ExecutionMode::default(),
379        }
380    }
381}
382
/// A directed acyclic graph of tasks to be executed by the orchestrator.
///
/// Created by the [`Planner`] and driven to completion by the [`DagScheduler`].
/// The `tasks` vec is the authoritative store; all indices (`TaskId`) reference
/// positions within it.
///
/// [`Planner`]: crate::planner::Planner
/// [`DagScheduler`]: crate::scheduler::DagScheduler
///
/// # Examples
///
/// ```rust
/// use zeph_orchestration::{TaskGraph, TaskNode, GraphStatus, FailureStrategy};
///
/// let mut graph = TaskGraph::new("build and deploy service");
/// assert_eq!(graph.status, GraphStatus::Created);
/// assert_eq!(graph.default_failure_strategy, FailureStrategy::Abort);
/// assert_eq!(graph.default_max_retries, 3);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskGraph {
    /// Unique graph identifier (UUID v4).
    pub id: GraphId,
    /// High-level user goal that was decomposed into this graph.
    pub goal: String,
    /// All task nodes. Index `i` must satisfy `tasks[i].id == TaskId(i)`.
    pub tasks: Vec<TaskNode>,
    /// Current lifecycle status of the graph as a whole.
    pub status: GraphStatus,
    /// Graph-wide failure strategy applied when a task has no per-task override.
    pub default_failure_strategy: FailureStrategy,
    /// Graph-wide maximum retry count applied when a task has no per-task override.
    pub default_max_retries: u32,
    /// ISO-8601 UTC timestamp of graph creation (set by `chrono_now`).
    pub created_at: String,
    /// ISO-8601 UTC timestamp set when the graph reaches a terminal status.
    pub finished_at: Option<String>,
}
421
422impl TaskGraph {
423    /// Create a new graph with `Created` status.
424    #[must_use]
425    pub fn new(goal: impl Into<String>) -> Self {
426        Self {
427            id: GraphId::new(),
428            goal: goal.into(),
429            tasks: Vec::new(),
430            status: GraphStatus::Created,
431            default_failure_strategy: FailureStrategy::default(),
432            default_max_retries: 3,
433            created_at: chrono_now(),
434            finished_at: None,
435        }
436    }
437}
438
pub(crate) fn chrono_now() -> String {
    // ISO-8601 UTC timestamp, consistent with the rest of the codebase.
    // Format: "2026-03-05T22:04:41Z"
    let secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_secs());
    let (y, mo, d, h, mi, s) = epoch_secs_to_datetime(secs);
    format!("{y:04}-{mo:02}-{d:02}T{h:02}:{mi:02}:{s:02}Z")
}

/// Convert Unix epoch seconds to (year, month, day, hour, min, sec) UTC.
///
/// Uses Howard Hinnant's `civil_from_days` algorithm: the epoch is shifted to
/// 0000-03-01 so that leap days fall at the *end* of the shifted year and the
/// 4/100/400-year leap rules align exactly with the cycle boundaries.
///
/// This replaces a hand-rolled decomposition anchored at 1970 (not a cycle
/// boundary), which mis-dated Dec 31 of every leap year as Jan 1 of the next
/// year (the quad's leap year was its 3rd year, but `(rem / 365).min(3)`
/// assumed it was the 4th) and drifted further past 2069/2100.
fn epoch_secs_to_datetime(secs: u64) -> (u64, u8, u8, u8, u8, u8) {
    let s = (secs % 60) as u8;
    let mins = secs / 60;
    let mi = (mins % 60) as u8;
    let hours = mins / 60;
    let h = (hours % 24) as u8;
    let days = hours / 24; // days since 1970-01-01

    // 719_468 = days from 0000-03-01 to 1970-01-01 (shifted epoch).
    let z = days + 719_468;
    // 400-year era = 146_097 days.
    let era = z / 146_097;
    let doe = z % 146_097; // day-of-era, [0, 146096]
    // year-of-era, [0, 399]; the correction terms account for the
    // 4-year, 100-year, and 400-year leap rules within the era.
    let yoe = (doe - doe / 1_460 + doe / 36_524 - doe / 146_096) / 365;
    let year_shifted = yoe + era * 400;
    // day-of-year in the March-based year, [0, 365].
    let doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
    // March-based month index, [0, 11] where 0 = March.
    let mp = (5 * doy + 2) / 153;
    let day = doy - (153 * mp + 2) / 5 + 1; // [1, 31]
    let month = if mp < 10 { mp + 3 } else { mp - 9 }; // [1, 12]
    // Jan/Feb belong to the next civil year in the March-based calendar.
    let year = if month <= 2 {
        year_shifted + 1
    } else {
        year_shifted
    };

    // month ∈ [1,12] and day ∈ [1,31], so these casts cannot truncate.
    (year, month as u8, day as u8, h, mi, s)
}
501
502/// Maximum allowed length for a `TaskGraph` goal string.
503const MAX_GOAL_LEN: usize = 1024;
504
505/// Type-safe wrapper around `RawGraphStore` that handles `TaskGraph` serialization.
506///
507/// Consumers in `zeph-core` use this instead of `RawGraphStore` directly, so they
508/// never need to deal with JSON strings.
509///
510/// # Storage layout
511///
512/// The `task_graphs` table stores both metadata columns (`goal`, `status`,
513/// `created_at`, `finished_at`) and the full `graph_json` blob. The metadata
514/// columns are summary/index data used for listing and filtering; `graph_json`
515/// is the authoritative source for full graph reconstruction. On `load`, only
516/// `graph_json` is deserialized — the columns are not consulted.
517pub struct GraphPersistence<S: RawGraphStore> {
518    store: S,
519}
520
521impl<S: RawGraphStore> GraphPersistence<S> {
522    /// Create a new `GraphPersistence` wrapping the given store.
523    pub fn new(store: S) -> Self {
524        Self { store }
525    }
526
527    /// Persist a `TaskGraph` (upsert).
528    ///
529    /// Returns `OrchestrationError::InvalidGraph` if `graph.goal` exceeds
530    /// `MAX_GOAL_LEN` (1024) characters.
531    ///
532    /// # Errors
533    ///
534    /// Returns `OrchestrationError::Persistence` on serialization or database failure.
535    pub async fn save(&self, graph: &TaskGraph) -> Result<(), OrchestrationError> {
536        if graph.goal.len() > MAX_GOAL_LEN {
537            return Err(OrchestrationError::InvalidGraph(format!(
538                "goal exceeds {MAX_GOAL_LEN} character limit ({} chars)",
539                graph.goal.len()
540            )));
541        }
542        let json = serde_json::to_string(graph)
543            .map_err(|e| OrchestrationError::Persistence(e.to_string()))?;
544        self.store
545            .save_graph(
546                &graph.id.to_string(),
547                &graph.goal,
548                &graph.status.to_string(),
549                &json,
550                &graph.created_at,
551                graph.finished_at.as_deref(),
552            )
553            .await
554            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
555    }
556
557    /// Load a `TaskGraph` by its `GraphId`.
558    ///
559    /// Returns `None` if not found.
560    ///
561    /// # Errors
562    ///
563    /// Returns `OrchestrationError::Persistence` on database or deserialization failure.
564    pub async fn load(&self, id: &GraphId) -> Result<Option<TaskGraph>, OrchestrationError> {
565        match self
566            .store
567            .load_graph(&id.to_string())
568            .await
569            .map_err(|e| OrchestrationError::Persistence(e.to_string()))?
570        {
571            Some(json) => {
572                let graph = serde_json::from_str(&json)
573                    .map_err(|e| OrchestrationError::Persistence(e.to_string()))?;
574                Ok(Some(graph))
575            }
576            None => Ok(None),
577        }
578    }
579
580    /// List stored graphs (newest first).
581    ///
582    /// # Errors
583    ///
584    /// Returns `OrchestrationError::Persistence` on database failure.
585    pub async fn list(&self, limit: u32) -> Result<Vec<GraphSummary>, OrchestrationError> {
586        self.store
587            .list_graphs(limit)
588            .await
589            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
590    }
591
592    /// Delete a graph by its `GraphId`.
593    ///
594    /// Returns `true` if a row was deleted.
595    ///
596    /// # Errors
597    ///
598    /// Returns `OrchestrationError::Persistence` on database failure.
599    pub async fn delete(&self, id: &GraphId) -> Result<bool, OrchestrationError> {
600        self.store
601            .delete_graph(&id.to_string())
602            .await
603            .map_err(|e| OrchestrationError::Persistence(e.to_string()))
604    }
605}
606
// Unit tests: type-level invariants (display/serde/parse round-trips) only.
// Async persistence paths are covered by integration tests in zeph-memory.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_taskid_display() {
        assert_eq!(TaskId(3).to_string(), "3");
    }

    #[test]
    fn test_graphid_display_and_new() {
        let id = GraphId::new();
        let s = id.to_string();
        assert_eq!(s.len(), 36, "UUID string should be 36 chars");
        let parsed: GraphId = s.parse().expect("should parse back");
        assert_eq!(id, parsed);
    }

    #[test]
    fn test_graphid_from_str_invalid() {
        let err = "not-a-uuid".parse::<GraphId>();
        assert!(err.is_err());
    }

    #[test]
    fn test_task_status_is_terminal() {
        assert!(TaskStatus::Completed.is_terminal());
        assert!(TaskStatus::Failed.is_terminal());
        assert!(TaskStatus::Skipped.is_terminal());
        assert!(TaskStatus::Canceled.is_terminal());

        assert!(!TaskStatus::Pending.is_terminal());
        assert!(!TaskStatus::Ready.is_terminal());
        assert!(!TaskStatus::Running.is_terminal());
    }

    #[test]
    fn test_task_status_display() {
        assert_eq!(TaskStatus::Pending.to_string(), "pending");
        assert_eq!(TaskStatus::Ready.to_string(), "ready");
        assert_eq!(TaskStatus::Running.to_string(), "running");
        assert_eq!(TaskStatus::Completed.to_string(), "completed");
        assert_eq!(TaskStatus::Failed.to_string(), "failed");
        assert_eq!(TaskStatus::Skipped.to_string(), "skipped");
        assert_eq!(TaskStatus::Canceled.to_string(), "canceled");
    }

    #[test]
    fn test_failure_strategy_default() {
        assert_eq!(FailureStrategy::default(), FailureStrategy::Abort);
    }

    #[test]
    fn test_failure_strategy_display() {
        assert_eq!(FailureStrategy::Abort.to_string(), "abort");
        assert_eq!(FailureStrategy::Retry.to_string(), "retry");
        assert_eq!(FailureStrategy::Skip.to_string(), "skip");
        assert_eq!(FailureStrategy::Ask.to_string(), "ask");
    }

    #[test]
    fn test_graph_status_display() {
        assert_eq!(GraphStatus::Created.to_string(), "created");
        assert_eq!(GraphStatus::Running.to_string(), "running");
        assert_eq!(GraphStatus::Completed.to_string(), "completed");
        assert_eq!(GraphStatus::Failed.to_string(), "failed");
        assert_eq!(GraphStatus::Canceled.to_string(), "canceled");
        assert_eq!(GraphStatus::Paused.to_string(), "paused");
    }

    #[test]
    fn test_task_graph_serde_roundtrip() {
        let mut graph = TaskGraph::new("test goal");
        graph.tasks.push(TaskNode::new(0, "task 0", "do something"));
        let json = serde_json::to_string(&graph).expect("serialize");
        let restored: TaskGraph = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(graph.id, restored.id);
        assert_eq!(graph.goal, restored.goal);
        assert_eq!(graph.tasks.len(), restored.tasks.len());
    }

    #[test]
    fn test_task_node_serde_roundtrip() {
        let mut node = TaskNode::new(1, "compile", "run cargo build");
        node.agent_hint = Some("rust-dev".to_string());
        node.depends_on = vec![TaskId(0)];
        let json = serde_json::to_string(&node).expect("serialize");
        let restored: TaskNode = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(node.id, restored.id);
        assert_eq!(node.title, restored.title);
        assert_eq!(node.depends_on, restored.depends_on);
    }

    #[test]
    fn test_task_result_serde_roundtrip() {
        let result = TaskResult {
            output: "ok".to_string(),
            artifacts: vec![PathBuf::from("/tmp/out.bin")],
            duration_ms: 500,
            agent_id: Some("agent-1".to_string()),
            agent_def: None,
        };
        let json = serde_json::to_string(&result).expect("serialize");
        let restored: TaskResult = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(result.output, restored.output);
        assert_eq!(result.duration_ms, restored.duration_ms);
        assert_eq!(result.artifacts, restored.artifacts);
    }

    #[test]
    fn test_failure_strategy_from_str() {
        assert_eq!(
            "abort".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Abort
        );
        assert_eq!(
            "retry".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Retry
        );
        assert_eq!(
            "skip".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Skip
        );
        assert_eq!(
            "ask".parse::<FailureStrategy>().unwrap(),
            FailureStrategy::Ask
        );
        assert!("abort_all".parse::<FailureStrategy>().is_err());
        assert!("".parse::<FailureStrategy>().is_err());
    }

    #[test]
    fn test_chrono_now_iso8601_format() {
        let ts = chrono_now();
        // Format: "YYYY-MM-DDTHH:MM:SSZ" — 20 chars
        assert_eq!(ts.len(), 20, "timestamp should be 20 chars: {ts}");
        assert!(ts.ends_with('Z'), "should end with Z: {ts}");
        assert!(ts.contains('T'), "should contain T: {ts}");
        // Year should be >= 2024
        let year: u32 = ts[..4].parse().expect("year should be numeric");
        assert!(year >= 2024, "year should be >= 2024: {year}");
    }

    #[test]
    fn test_failure_strategy_serde_snake_case() {
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Abort).unwrap(),
            "\"abort\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Retry).unwrap(),
            "\"retry\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Skip).unwrap(),
            "\"skip\""
        );
        assert_eq!(
            serde_json::to_string(&FailureStrategy::Ask).unwrap(),
            "\"ask\""
        );
    }

    #[test]
    fn test_graph_persistence_save_rejects_long_goal() {
        // GraphPersistence::save() is async and requires a real store;
        // we verify the goal-length guard directly via the const.
        // (Fixed: the goal was previously re-assigned with an identical
        // string right after construction, requiring a spurious `mut`.)
        let long_goal = "x".repeat(MAX_GOAL_LEN + 1);
        let graph = TaskGraph::new(long_goal);
        assert!(
            graph.goal.len() > MAX_GOAL_LEN,
            "test setup: goal must exceed limit"
        );
        // The check itself lives in GraphPersistence::save(), exercised by
        // the async persistence tests in zeph-memory; here we verify the constant.
        assert_eq!(MAX_GOAL_LEN, 1024);
    }

    #[test]
    fn test_task_node_missing_execution_mode_deserializes_as_parallel() {
        // Old SQLite-stored JSON blobs lack the execution_mode field.
        // #[serde(default)] must make them deserialize to Parallel without error.
        let json = r#"{
            "id": 0,
            "title": "t",
            "description": "d",
            "agent_hint": null,
            "status": "pending",
            "depends_on": [],
            "result": null,
            "assigned_agent": null,
            "retry_count": 0,
            "failure_strategy": null,
            "max_retries": null
        }"#;
        let node: TaskNode = serde_json::from_str(json).expect("should deserialize old JSON");
        assert_eq!(node.execution_mode, ExecutionMode::Parallel);
    }

    #[test]
    fn test_execution_mode_serde_snake_case() {
        assert_eq!(
            serde_json::to_string(&ExecutionMode::Parallel).unwrap(),
            "\"parallel\""
        );
        assert_eq!(
            serde_json::to_string(&ExecutionMode::Sequential).unwrap(),
            "\"sequential\""
        );
        let p: ExecutionMode = serde_json::from_str("\"parallel\"").unwrap();
        assert_eq!(p, ExecutionMode::Parallel);
        let s: ExecutionMode = serde_json::from_str("\"sequential\"").unwrap();
        assert_eq!(s, ExecutionMode::Sequential);
    }
}