// harn_vm/orchestration/workflow_patch.rs

1//! Workflow patch proposals: a bounded, auditable contract that lets an
2//! agent author propose changes to a portable [`WorkflowBundle`] without
3//! touching the live runtime.
4//!
5//! The shape is intentionally small. An agent emits a JSON document
6//! describing a sequence of [`WorkflowPatchOperation`]s; Harn applies
7//! them to a copy of the bundle, runs the existing bundle validator,
8//! computes a capability-ceiling delta against the parent execution
9//! policy (when one is supplied), and returns a single
10//! [`WorkflowPatchValidationReport`] the host can render.
11//!
12//! The patch surface deliberately mirrors what the issue calls out:
13//! insert agent / verifier / approval node, update prompt capsule,
14//! update model & tool policy, add edge. Anything not in this list
15//! goes through a normal bundle edit instead — the patch contract is
16//! the meta-programming layer, not a general bundle DSL.
17
18use std::collections::{BTreeMap, BTreeSet};
19
20use serde::{Deserialize, Serialize};
21
22use super::workflow::{WorkflowEdge, WorkflowNode};
23use super::workflow_bundle::{
24    preview_workflow_bundle, validate_workflow_bundle, WorkflowBundle, WorkflowBundleGraphExport,
25    WorkflowBundlePolicy, WorkflowBundleValidationReport,
26};
27use super::CapabilityPolicy;
28
/// Contract version for [`WorkflowPatch`] documents; patches carrying any
/// other `schema_version` are rejected by [`apply_workflow_patch`].
pub const WORKFLOW_PATCH_SCHEMA_VERSION: u32 = 1;
30
/// A bounded, auditable proposal to mutate a workflow bundle.
///
/// Patches are flat lists of [`WorkflowPatchOperation`]s applied in
/// order. An empty operations list is rejected by [`apply_workflow_patch`]
/// — silent no-ops would let agents claim work they did not do.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatch {
    /// Must equal [`WORKFLOW_PATCH_SCHEMA_VERSION`]. Because of
    /// `#[serde(default)]`, a JSON document that omits this field
    /// deserializes to 0 and is then rejected by the version check in
    /// [`apply_workflow_patch`] — authors must state the version.
    pub schema_version: u32,
    /// Non-blank identifier for the proposal; echoed back as `patch_id`
    /// on [`WorkflowPatchValidationReport`].
    pub id: String,
    /// Optional human-readable description of the patch's intent.
    pub summary: Option<String>,
    /// Operations applied in order to a copy of the bundle.
    pub operations: Vec<WorkflowPatchOperation>,
}
44
/// Operations are tagged by an external `op` discriminator so handlers
/// can dispatch on the literal string from JSON.
///
/// Only the operations called out in #1423 are exposed: insert node,
/// add edge, upsert prompt capsule, update node policy, update bundle
/// policy. New operations require a deliberate contract bump.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(tag = "op", rename_all = "snake_case")]
pub enum WorkflowPatchOperation {
    /// Insert a new workflow node. Fails if `node_id` already exists or
    /// is empty. The `node` body uses the same shape as
    /// `workflow.nodes[k]` in a bundle JSON, with sensible defaults
    /// applied for unspecified fields.
    InsertNode {
        /// Key under `workflow.nodes`; must be non-blank and unused.
        node_id: String,
        /// Partial node description; omitted fields fall back to the
        /// node defaults (`kind` defaults to "stage").
        #[serde(default)]
        node: WorkflowPatchNodeBody,
    },
    /// Append an edge to the workflow graph. Fails if either endpoint
    /// references an unknown node, or if the edge already exists.
    AddEdge {
        /// Source node id; must exist by the time this op is applied.
        from: String,
        /// Target node id; must exist by the time this op is applied.
        to: String,
        /// Optional branch discriminator; part of edge identity for the
        /// duplicate-edge check.
        #[serde(default)]
        branch: Option<String>,
        /// Optional display label; also part of edge identity.
        #[serde(default)]
        label: Option<String>,
    },
    /// Insert or replace a prompt capsule. The capsule's `node_id` must
    /// reference a node that exists after all prior patch operations
    /// have been applied (so an `InsertNode` followed by
    /// `UpsertPromptCapsule` works).
    UpsertPromptCapsule {
        /// Map key (and the capsule's `id`); must be non-blank.
        capsule_id: String,
        /// Capsule contents; its `node_id` selects the target node.
        capsule: WorkflowPatchPromptCapsuleBody,
    },
    /// Merge per-node policy fields onto an existing node. Only the
    /// fields named on [`WorkflowPatchNodePolicyBody`] can be set —
    /// arbitrary node fields are intentionally not patchable.
    UpdateNodePolicy {
        /// Target node; must already exist.
        node_id: String,
        /// Fields to merge; `None` fields are left untouched.
        policy: WorkflowPatchNodePolicyBody,
    },
    /// Merge bundle-level policy fields. Mirrors the safe knobs in
    /// [`WorkflowBundlePolicy`].
    UpdateBundlePolicy {
        /// Fields to merge; `None` fields are left untouched.
        policy: WorkflowPatchBundlePolicyBody,
    },
}
94
/// Shape used by [`WorkflowPatchOperation::InsertNode`]. Mirrors the
/// editable fields on [`WorkflowNode`] without exposing every tunable.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchNodeBody {
    /// Node kind; missing or blank falls back to "stage".
    pub kind: Option<String>,
    /// Optional human-readable task label, copied verbatim.
    pub task_label: Option<String>,
    /// Optional prompt text, copied verbatim.
    pub prompt: Option<String>,
    /// Optional system prompt, copied verbatim.
    pub system: Option<String>,
    /// Raw tools value; copied onto the node when present.
    pub tools: Option<serde_json::Value>,
    /// Parsed into the node's typed model policy; values that fail to
    /// deserialize are silently dropped during insert (see
    /// `node_body_into_workflow_node`).
    pub model_policy: Option<serde_json::Value>,
    /// Full capability policy for the node, set verbatim when present.
    pub capability_policy: Option<CapabilityPolicy>,
    /// Parsed into the node's typed approval policy; parse failures are
    /// silently dropped during insert.
    pub approval_policy: Option<serde_json::Value>,
    /// Free-form metadata copied verbatim onto the node.
    pub metadata: BTreeMap<String, serde_json::Value>,
}
110
/// Shape used by [`WorkflowPatchOperation::UpdateNodePolicy`]. Each
/// field maps to the same-named field on [`WorkflowNode`] and is
/// merged in place (set when `Some`, left alone when `None`).
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchNodePolicyBody {
    /// New task label (set when `Some`; `None` leaves the node's value
    /// alone — same merge rule for every field below).
    pub task_label: Option<String>,
    pub prompt: Option<String>,
    pub system: Option<String>,
    /// Raw tools value; replaces the node's tools wholesale.
    pub tools: Option<serde_json::Value>,
    /// Must deserialize into the typed model policy or the whole
    /// operation fails with `invalid model_policy: …`.
    pub model_policy: Option<serde_json::Value>,
    /// Replaces the node's capability policy wholesale when present.
    pub capability_policy: Option<CapabilityPolicy>,
    /// Must deserialize into the typed approval policy or the whole
    /// operation fails with `invalid approval_policy: …`.
    pub approval_policy: Option<serde_json::Value>,
}
125
/// Shape used by [`WorkflowPatchOperation::UpsertPromptCapsule`]. The
/// patch always sets `id` to match `capsule_id` so the bundle invariant
/// (`capsule.id == map_key`) holds without callers needing to repeat it.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchPromptCapsuleBody {
    /// Node this capsule targets; must exist when the op is applied, and
    /// at most one capsule may target a given node.
    pub node_id: String,
    /// Optional trigger association, copied verbatim onto the capsule.
    pub trigger_id: Option<String>,
    /// Prompt text. The capsule's `id` comes from `capsule_id` on the
    /// operation, never from this body.
    pub prompt: String,
    /// Optional system prompt, copied verbatim.
    pub system: Option<String>,
    /// Free-form context values, copied verbatim.
    pub context: BTreeMap<String, serde_json::Value>,
}
138
/// Shape used by [`WorkflowPatchOperation::UpdateBundlePolicy`]. Each
/// field replaces the corresponding [`WorkflowBundlePolicy`] field when
/// `Some`. The `tool_policy` and `approval_required` lists replace
/// rather than merge to keep the contract obvious — agents that want a
/// merge should compute and submit the full list.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchBundlePolicyBody {
    /// Replaces `policy.autonomy_tier` when `Some`.
    pub autonomy_tier: Option<String>,
    /// Replaces the whole tool-policy map (no merge).
    pub tool_policy: Option<BTreeMap<String, serde_json::Value>>,
    /// Replaces the whole approval-required list (no merge).
    pub approval_required: Option<Vec<String>>,
    /// Parsed into the typed retry policy; invalid JSON fails the op
    /// with `invalid retry: …`.
    pub retry: Option<serde_json::Value>,
    /// Parsed into the typed catchup policy; invalid JSON fails the op
    /// with `invalid catchup: …`.
    pub catchup: Option<serde_json::Value>,
}
153
/// What a host renders when it shows the result of validating a patch.
///
/// When the patch fails to apply, the remaining fields (validation,
/// diff, delta, export) are computed against the *unpatched* bundle so
/// the report still carries a coherent picture alongside the errors.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct WorkflowPatchValidationReport {
    /// Always [`WORKFLOW_PATCH_SCHEMA_VERSION`].
    pub schema_version: u32,
    /// Echo of the validated patch's `id`.
    pub patch_id: String,
    /// Echo of the target bundle's `id`.
    pub bundle_id: String,
    /// True only when the patch applied cleanly, the patched bundle
    /// passed bundle validation, and no capability widening was found.
    pub valid: bool,
    /// Structural failures from applying operations (empty on success).
    pub apply_errors: Vec<WorkflowPatchDiagnostic>,
    /// Full report from the bundle validator run on the patched copy.
    pub bundle_validation: WorkflowBundleValidationReport,
    /// Node/edge/capsule/policy diff between original and patched bundle.
    pub graph_diff: WorkflowPatchGraphDiff,
    /// Capability ceiling comparison, including any `widening` violations.
    pub capability_delta: WorkflowPatchCapabilityDelta,
    /// Renderable graph export of the patched bundle.
    pub graph_export: WorkflowBundleGraphExport,
}
167
/// One structured failure from applying a patch. The `op_index` lets
/// the host highlight the offending operation in its review surface.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchDiagnostic {
    /// Severity label for host rendering, supplied by the diagnostic
    /// constructors (`diagnostic_global` / `diagnostic_op`).
    pub severity: String,
    /// Index of the offending operation in `patch.operations`, when the
    /// error is tied to a specific operation.
    pub op_index: Option<usize>,
    /// The `op` discriminator string (e.g. "insert_node"), when known.
    pub op: Option<String>,
    /// Dotted location in the bundle document, e.g. `workflow.nodes.<id>`.
    pub path: String,
    /// Human-readable description of the failure.
    pub message: String,
    /// Node implicated by the failure, when one can be named.
    pub node_id: Option<String>,
}
179
/// Structural diff between the original and patched workflow graph.
/// Used by host UIs and the patch-authoring skill so a model that
/// proposes a patch can tell at a glance whether its edit produced the
/// shape it intended.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchGraphDiff {
    /// Node ids present after the patch but not before (sorted).
    pub added_nodes: Vec<String>,
    /// Edges present after but not before, sorted by (from, to).
    pub added_edges: Vec<WorkflowPatchEdgeRef>,
    /// Node ids named by `update_node_policy` ops (sorted, deduped).
    pub updated_nodes: Vec<String>,
    /// Capsule ids named by `upsert_prompt_capsule` ops (sorted, deduped).
    pub updated_capsules: Vec<String>,
    /// Bundle-policy field names touched by `update_bundle_policy` ops
    /// (sorted, deduped).
    pub policy_fields_changed: Vec<String>,
}
192
/// Serializable reference to one workflow edge; mirrors the identifying
/// fields of [`WorkflowEdge`].
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchEdgeRef {
    pub from: String,
    pub to: String,
    /// Branch discriminator; part of edge identity in the diff/dup checks.
    pub branch: Option<String>,
    /// Display label; also part of edge identity.
    pub label: Option<String>,
}
200
/// Capability ceiling delta between the bundle before and after the
/// patch, optionally compared against a parent execution policy.
///
/// `widening` collects every dimension where the patched bundle asks
/// for *more* than the parent ceiling. When no parent is supplied the
/// list is left empty — there is no ceiling to violate, so the
/// `added_*` / `raised_*` fields below are the only signal (see
/// `compute_capability_delta`). The patch is rejected when `widening`
/// is non-empty — agents must not be able to expand permissions.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct WorkflowPatchCapabilityDelta {
    /// Ceiling projected from the bundle before the patch.
    pub before: CapabilityPolicy,
    /// Ceiling projected from the patched bundle.
    pub after: CapabilityPolicy,
    /// Parent execution-policy ceiling, when one was supplied.
    pub parent: Option<CapabilityPolicy>,
    /// Tools present in `after` but not `before`.
    pub added_tools: Vec<String>,
    /// Capability name -> operations that are new in `after`.
    pub added_capabilities: BTreeMap<String, Vec<String>>,
    /// Set when the side-effect level's rank increased (`from` is
    /// "none" when the original declared no level at all).
    pub raised_side_effect_level: Option<RaisedSideEffectLevel>,
    /// Workspace roots present in `after` but not `before`.
    pub added_workspace_roots: Vec<String>,
    /// Connector id -> scopes that are new in the patched bundle.
    pub added_connector_scopes: BTreeMap<String, Vec<String>>,
    /// Command gates present in the patched environment but not before.
    pub added_command_gates: Vec<String>,
    /// Set when the bundle's autonomy tier rank increased.
    pub raised_autonomy_tier: Option<RaisedAutonomyTier>,
    /// Ceiling violations against `parent`; non-empty widening makes
    /// the validation report invalid.
    pub widening: Vec<CapabilityCeilingViolation>,
}
222
/// Records a side-effect-level increase: `from` the old level ("none"
/// when the original bundle declared no level) `to` the new one.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RaisedSideEffectLevel {
    pub from: String,
    pub to: String,
}
228
/// Records an autonomy-tier increase: `from` the original bundle's tier
/// `to` the patched bundle's tier.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RaisedAutonomyTier {
    pub from: String,
    pub to: String,
}
234
/// One concrete way the patched bundle exceeds the parent ceiling.
/// `kind` is a stable enum string so hosts can group/explain them.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct CapabilityCeilingViolation {
    /// Stable machine-readable category for grouping in host UIs.
    pub kind: String,
    /// Human-readable explanation of the specific violation.
    pub detail: String,
}
242
243/// Apply a patch to a copy of the bundle and return the new bundle.
244/// Fails fast on the first structural error; callers that want richer
245/// diagnostics should prefer [`validate_workflow_patch`].
246pub fn apply_workflow_patch(
247    bundle: &WorkflowBundle,
248    patch: &WorkflowPatch,
249) -> Result<WorkflowBundle, Vec<WorkflowPatchDiagnostic>> {
250    let mut errors = Vec::new();
251    if patch.schema_version != WORKFLOW_PATCH_SCHEMA_VERSION {
252        errors.push(diagnostic_global(format!(
253            "unsupported patch schema_version {}; expected {}",
254            patch.schema_version, WORKFLOW_PATCH_SCHEMA_VERSION
255        )));
256    }
257    if patch.id.trim().is_empty() {
258        errors.push(diagnostic_global("patch id is required".to_string()));
259    }
260    if patch.operations.is_empty() {
261        errors.push(diagnostic_global(
262            "patch contains no operations; refusing to no-op".to_string(),
263        ));
264    }
265    if !errors.is_empty() {
266        return Err(errors);
267    }
268
269    let mut working = bundle.clone();
270    for (index, operation) in patch.operations.iter().enumerate() {
271        if let Err(diag) = apply_operation(&mut working, operation, index) {
272            return Err(vec![diag]);
273        }
274    }
275    Ok(working)
276}
277
278/// Apply + validate + diff + ceiling check, in one pass. The bundle
279/// validator is the source of truth for "is this still a valid bundle?";
280/// this function adds the patch-specific apply errors, the structural
281/// diff, and the capability delta on top.
282pub fn validate_workflow_patch(
283    bundle: &WorkflowBundle,
284    patch: &WorkflowPatch,
285    parent_ceiling: Option<&CapabilityPolicy>,
286) -> WorkflowPatchValidationReport {
287    let before_ceiling = bundle_capability_ceiling(bundle);
288
289    let (patched, apply_errors) = match apply_workflow_patch(bundle, patch) {
290        Ok(patched) => (patched, Vec::new()),
291        Err(errors) => (bundle.clone(), errors),
292    };
293
294    let bundle_validation = validate_workflow_bundle(&patched);
295    let graph_diff = diff_bundle_graph(bundle, &patched, patch);
296    let after_ceiling = bundle_capability_ceiling(&patched);
297    let capability_delta = compute_capability_delta(
298        bundle,
299        &patched,
300        before_ceiling,
301        after_ceiling,
302        parent_ceiling,
303    );
304    let graph_export = preview_workflow_bundle(&patched).graph;
305    let valid =
306        apply_errors.is_empty() && bundle_validation.valid && capability_delta.widening.is_empty();
307
308    WorkflowPatchValidationReport {
309        schema_version: WORKFLOW_PATCH_SCHEMA_VERSION,
310        patch_id: patch.id.clone(),
311        bundle_id: bundle.id.clone(),
312        valid,
313        apply_errors,
314        bundle_validation,
315        graph_diff,
316        capability_delta,
317        graph_export,
318    }
319}
320
321/// Project a bundle's effective capability ceiling. The bundle does
322/// not carry a single capability policy; we compose one from the
323/// per-node `capability_policy` declarations, the bundle-level
324/// `tool_policy` keys, the connector scopes, the autonomy tier, the
325/// `worktree_policy`, and the declared `command_gates`. The result is
326/// what a parent runtime needs to compare against to decide whether
327/// running this bundle would widen its own ceiling.
328pub fn bundle_capability_ceiling(bundle: &WorkflowBundle) -> CapabilityPolicy {
329    let mut tools: BTreeSet<String> = bundle.policy.tool_policy.keys().cloned().collect();
330    let mut capabilities: BTreeMap<String, BTreeSet<String>> = BTreeMap::new();
331    let mut workspace_roots: BTreeSet<String> = BTreeSet::new();
332    let mut max_side_effect: Option<&'static str> = None;
333
334    for node in bundle.workflow.nodes.values() {
335        for tool in &node.capability_policy.tools {
336            tools.insert(tool.clone());
337        }
338        for (capability, ops) in &node.capability_policy.capabilities {
339            let entry = capabilities.entry(capability.clone()).or_default();
340            for op in ops {
341                entry.insert(op.clone());
342            }
343        }
344        for root in &node.capability_policy.workspace_roots {
345            workspace_roots.insert(root.clone());
346        }
347        if let Some(level) = node.capability_policy.side_effect_level.as_deref() {
348            max_side_effect = match max_side_effect {
349                Some(current) if side_effect_rank(current) >= side_effect_rank(level) => {
350                    Some(current)
351                }
352                _ => Some(static_side_effect(level)),
353            };
354        }
355    }
356
357    let autonomy_floor = autonomy_side_effect_floor(&bundle.policy.autonomy_tier);
358    if let Some(floor) = autonomy_floor {
359        max_side_effect = match max_side_effect {
360            Some(current) if side_effect_rank(current) >= side_effect_rank(floor) => Some(current),
361            _ => Some(floor),
362        };
363    }
364
365    if !bundle.connectors.is_empty() {
366        capabilities
367            .entry("connector".to_string())
368            .or_default()
369            .insert("call".to_string());
370    }
371    if !bundle.environment.command_gates.is_empty()
372        || bundle.environment.worktree_policy != "host_managed"
373    {
374        capabilities
375            .entry("process".to_string())
376            .or_default()
377            .insert("exec".to_string());
378        max_side_effect = match max_side_effect {
379            Some(current) if side_effect_rank(current) >= side_effect_rank("process_exec") => {
380                Some(current)
381            }
382            _ => Some("process_exec"),
383        };
384    }
385
386    CapabilityPolicy {
387        tools: tools.into_iter().collect(),
388        capabilities: capabilities
389            .into_iter()
390            .map(|(k, v)| (k, v.into_iter().collect()))
391            .collect(),
392        workspace_roots: workspace_roots.into_iter().collect(),
393        side_effect_level: max_side_effect.map(|level| level.to_string()),
394        recursion_limit: None,
395        tool_arg_constraints: Vec::new(),
396        tool_annotations: BTreeMap::new(),
397    }
398}
399
/// Apply a single patch operation to the working bundle copy.
///
/// `index` is the operation's position in `patch.operations`; it is
/// threaded into every diagnostic so hosts can highlight the offending
/// entry. Returns on the first rule violation; operations applied
/// earlier in the same patch are already visible in `bundle`.
fn apply_operation(
    bundle: &mut WorkflowBundle,
    operation: &WorkflowPatchOperation,
    index: usize,
) -> Result<(), WorkflowPatchDiagnostic> {
    match operation {
        WorkflowPatchOperation::InsertNode { node_id, node } => {
            // Blank ids are rejected up front rather than inserted as "".
            if node_id.trim().is_empty() {
                return Err(diagnostic_op(
                    index,
                    "insert_node",
                    "operations".to_string(),
                    "insert_node node_id is required".to_string(),
                    None,
                ));
            }
            // Insert only — replacing an existing node is not a patch
            // operation.
            if bundle.workflow.nodes.contains_key(node_id) {
                return Err(diagnostic_op(
                    index,
                    "insert_node",
                    format!("workflow.nodes.{node_id}"),
                    format!("workflow already contains node {node_id}"),
                    Some(node_id.clone()),
                ));
            }
            let workflow_node = node_body_into_workflow_node(node_id, node);
            bundle.workflow.nodes.insert(node_id.clone(), workflow_node);
            // A workflow with no entry point adopts the first inserted
            // node so the patched bundle stays runnable.
            if bundle.workflow.entry.is_empty() {
                bundle.workflow.entry = node_id.clone();
            }
            Ok(())
        }
        WorkflowPatchOperation::AddEdge {
            from,
            to,
            branch,
            label,
        } => {
            // Both endpoints must already exist (possibly via an earlier
            // InsertNode in this same patch).
            if !bundle.workflow.nodes.contains_key(from) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges.from".to_string(),
                    format!("edge.from references unknown node: {from}"),
                    Some(from.clone()),
                ));
            }
            if !bundle.workflow.nodes.contains_key(to) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges.to".to_string(),
                    format!("edge.to references unknown node: {to}"),
                    Some(to.clone()),
                ));
            }
            let candidate = WorkflowEdge {
                from: from.clone(),
                to: to.clone(),
                branch: branch.clone(),
                label: label.clone(),
            };
            // Edge identity includes branch and label, so the same
            // from/to pair may appear with different branches.
            if bundle.workflow.edges.iter().any(|edge| {
                edge.from == candidate.from
                    && edge.to == candidate.to
                    && edge.branch == candidate.branch
                    && edge.label == candidate.label
            }) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges".to_string(),
                    format!("edge {from} -> {to} already exists"),
                    Some(from.clone()),
                ));
            }
            bundle.workflow.edges.push(candidate);
            Ok(())
        }
        WorkflowPatchOperation::UpsertPromptCapsule {
            capsule_id,
            capsule,
        } => {
            if capsule_id.trim().is_empty() {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    "prompt_capsules".to_string(),
                    "capsule_id is required".to_string(),
                    None,
                ));
            }
            // The target node must exist after all prior operations.
            if !bundle.workflow.nodes.contains_key(&capsule.node_id) {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    format!("prompt_capsules.{capsule_id}.node_id"),
                    format!(
                        "prompt capsule references unknown node: {}",
                        capsule.node_id
                    ),
                    Some(capsule.node_id.clone()),
                ));
            }
            // Enforce at most one capsule per node: a *different* capsule
            // already targeting this node blocks the upsert (replacing the
            // same capsule_id is allowed).
            let existing = bundle
                .prompt_capsules
                .values()
                .find(|other| other.node_id == capsule.node_id && other.id != *capsule_id);
            if let Some(other) = existing {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    format!("prompt_capsules.{capsule_id}.node_id"),
                    format!(
                        "prompt capsule {capsule_id} would target node {} but capsule {} already targets it",
                        capsule.node_id, other.id
                    ),
                    Some(capsule.node_id.clone()),
                ));
            }
            // `id` is forced to the map key so the bundle invariant
            // (capsule.id == key) holds without the author repeating it.
            let capsule_value = super::workflow_bundle::PromptCapsule {
                id: capsule_id.clone(),
                node_id: capsule.node_id.clone(),
                trigger_id: capsule.trigger_id.clone(),
                prompt: capsule.prompt.clone(),
                system: capsule.system.clone(),
                context: capsule.context.clone(),
            };
            bundle
                .prompt_capsules
                .insert(capsule_id.clone(), capsule_value);
            Ok(())
        }
        WorkflowPatchOperation::UpdateNodePolicy { node_id, policy } => {
            let Some(node) = bundle.workflow.nodes.get_mut(node_id) else {
                return Err(diagnostic_op(
                    index,
                    "update_node_policy",
                    format!("workflow.nodes.{node_id}"),
                    format!("workflow does not contain node {node_id}"),
                    Some(node_id.clone()),
                ));
            };
            // Merge failures (bad typed-policy JSON) are wrapped with the
            // node's location for the host.
            apply_node_policy_body(node, policy).map_err(|message| {
                diagnostic_op(
                    index,
                    "update_node_policy",
                    format!("workflow.nodes.{node_id}"),
                    message,
                    Some(node_id.clone()),
                )
            })?;
            Ok(())
        }
        WorkflowPatchOperation::UpdateBundlePolicy { policy } => {
            apply_bundle_policy_body(&mut bundle.policy, policy).map_err(|message| {
                diagnostic_op(
                    index,
                    "update_bundle_policy",
                    "policy".to_string(),
                    message,
                    None,
                )
            })?;
            Ok(())
        }
    }
}
568
/// Build a complete [`WorkflowNode`] from the partial patch body.
///
/// `kind` falls back to "stage" when missing or blank; every field the
/// body does not supply keeps its [`WorkflowNode::default`] value.
fn node_body_into_workflow_node(node_id: &str, body: &WorkflowPatchNodeBody) -> WorkflowNode {
    let mut node = WorkflowNode {
        id: Some(node_id.to_string()),
        kind: body
            .kind
            .clone()
            .filter(|kind| !kind.trim().is_empty())
            .unwrap_or_else(|| "stage".to_string()),
        ..WorkflowNode::default()
    };
    node.task_label = body.task_label.clone();
    node.prompt = body.prompt.clone();
    node.system = body.system.clone();
    if let Some(tools) = &body.tools {
        node.tools = tools.clone();
    }
    // NOTE(review): a `model_policy` that fails to deserialize is
    // silently dropped here (the node keeps its default), while the same
    // failure in `apply_node_policy_body` is surfaced as an error —
    // confirm this asymmetry between insert and update is intentional.
    if let Some(model_policy) = &body.model_policy {
        if let Ok(parsed) = serde_json::from_value(model_policy.clone()) {
            node.model_policy = parsed;
        }
    }
    if let Some(capability_policy) = &body.capability_policy {
        node.capability_policy = capability_policy.clone();
    }
    // NOTE(review): same silent-drop behavior as `model_policy` above.
    if let Some(approval_policy) = &body.approval_policy {
        if let Ok(parsed) = serde_json::from_value(approval_policy.clone()) {
            node.approval_policy = parsed;
        }
    }
    node.metadata = body.metadata.clone();
    node
}
601
602fn apply_node_policy_body(
603    node: &mut WorkflowNode,
604    body: &WorkflowPatchNodePolicyBody,
605) -> Result<(), String> {
606    if let Some(label) = &body.task_label {
607        node.task_label = Some(label.clone());
608    }
609    if let Some(prompt) = &body.prompt {
610        node.prompt = Some(prompt.clone());
611    }
612    if let Some(system) = &body.system {
613        node.system = Some(system.clone());
614    }
615    if let Some(tools) = &body.tools {
616        node.tools = tools.clone();
617    }
618    if let Some(model_policy) = &body.model_policy {
619        node.model_policy = serde_json::from_value(model_policy.clone())
620            .map_err(|error| format!("invalid model_policy: {error}"))?;
621    }
622    if let Some(capability_policy) = &body.capability_policy {
623        node.capability_policy = capability_policy.clone();
624    }
625    if let Some(approval_policy) = &body.approval_policy {
626        node.approval_policy = serde_json::from_value(approval_policy.clone())
627            .map_err(|error| format!("invalid approval_policy: {error}"))?;
628    }
629    Ok(())
630}
631
632fn apply_bundle_policy_body(
633    policy: &mut WorkflowBundlePolicy,
634    body: &WorkflowPatchBundlePolicyBody,
635) -> Result<(), String> {
636    if let Some(autonomy) = &body.autonomy_tier {
637        policy.autonomy_tier = autonomy.clone();
638    }
639    if let Some(tool_policy) = &body.tool_policy {
640        policy.tool_policy = tool_policy.clone();
641    }
642    if let Some(approval_required) = &body.approval_required {
643        policy.approval_required = approval_required.clone();
644    }
645    if let Some(retry) = &body.retry {
646        policy.retry = serde_json::from_value(retry.clone())
647            .map_err(|error| format!("invalid retry: {error}"))?;
648    }
649    if let Some(catchup) = &body.catchup {
650        policy.catchup = serde_json::from_value(catchup.clone())
651            .map_err(|error| format!("invalid catchup: {error}"))?;
652    }
653    Ok(())
654}
655
656fn diff_bundle_graph(
657    before: &WorkflowBundle,
658    after: &WorkflowBundle,
659    patch: &WorkflowPatch,
660) -> WorkflowPatchGraphDiff {
661    let mut diff = WorkflowPatchGraphDiff::default();
662    let before_node_ids: BTreeSet<&String> = before.workflow.nodes.keys().collect();
663    for node_id in after.workflow.nodes.keys() {
664        if !before_node_ids.contains(node_id) {
665            diff.added_nodes.push(node_id.clone());
666        }
667    }
668    let before_edges: BTreeSet<(String, String, Option<String>, Option<String>)> = before
669        .workflow
670        .edges
671        .iter()
672        .map(|edge| {
673            (
674                edge.from.clone(),
675                edge.to.clone(),
676                edge.branch.clone(),
677                edge.label.clone(),
678            )
679        })
680        .collect();
681    for edge in &after.workflow.edges {
682        let key = (
683            edge.from.clone(),
684            edge.to.clone(),
685            edge.branch.clone(),
686            edge.label.clone(),
687        );
688        if !before_edges.contains(&key) {
689            diff.added_edges.push(WorkflowPatchEdgeRef {
690                from: edge.from.clone(),
691                to: edge.to.clone(),
692                branch: edge.branch.clone(),
693                label: edge.label.clone(),
694            });
695        }
696    }
697    for operation in &patch.operations {
698        match operation {
699            WorkflowPatchOperation::UpdateNodePolicy { node_id, .. } => {
700                diff.updated_nodes.push(node_id.clone());
701            }
702            WorkflowPatchOperation::UpsertPromptCapsule { capsule_id, .. } => {
703                diff.updated_capsules.push(capsule_id.clone());
704            }
705            WorkflowPatchOperation::UpdateBundlePolicy { policy } => {
706                if policy.autonomy_tier.is_some() {
707                    diff.policy_fields_changed.push("autonomy_tier".to_string());
708                }
709                if policy.tool_policy.is_some() {
710                    diff.policy_fields_changed.push("tool_policy".to_string());
711                }
712                if policy.approval_required.is_some() {
713                    diff.policy_fields_changed
714                        .push("approval_required".to_string());
715                }
716                if policy.retry.is_some() {
717                    diff.policy_fields_changed.push("retry".to_string());
718                }
719                if policy.catchup.is_some() {
720                    diff.policy_fields_changed.push("catchup".to_string());
721                }
722            }
723            _ => {}
724        }
725    }
726    diff.added_nodes.sort();
727    diff.updated_nodes.sort();
728    diff.updated_nodes.dedup();
729    diff.updated_capsules.sort();
730    diff.updated_capsules.dedup();
731    diff.policy_fields_changed.sort();
732    diff.policy_fields_changed.dedup();
733    diff.added_edges
734        .sort_by(|left, right| (&left.from, &left.to).cmp(&(&right.from, &right.to)));
735    diff
736}
737
738fn compute_capability_delta(
739    before_bundle: &WorkflowBundle,
740    after_bundle: &WorkflowBundle,
741    before: CapabilityPolicy,
742    after: CapabilityPolicy,
743    parent: Option<&CapabilityPolicy>,
744) -> WorkflowPatchCapabilityDelta {
745    let added_tools: Vec<String> = after
746        .tools
747        .iter()
748        .filter(|tool| !before.tools.contains(tool))
749        .cloned()
750        .collect();
751
752    let mut added_capabilities: BTreeMap<String, Vec<String>> = BTreeMap::new();
753    for (capability, ops) in &after.capabilities {
754        let before_ops = before
755            .capabilities
756            .get(capability)
757            .cloned()
758            .unwrap_or_default();
759        let added: Vec<String> = ops
760            .iter()
761            .filter(|op| !before_ops.contains(op))
762            .cloned()
763            .collect();
764        if !added.is_empty() {
765            added_capabilities.insert(capability.clone(), added);
766        }
767    }
768
769    let raised_side_effect_level = match (
770        before.side_effect_level.as_deref(),
771        after.side_effect_level.as_deref(),
772    ) {
773        (Some(before_level), Some(after_level))
774            if side_effect_rank(after_level) > side_effect_rank(before_level) =>
775        {
776            Some(RaisedSideEffectLevel {
777                from: before_level.to_string(),
778                to: after_level.to_string(),
779            })
780        }
781        (None, Some(after_level)) => Some(RaisedSideEffectLevel {
782            from: "none".to_string(),
783            to: after_level.to_string(),
784        }),
785        _ => None,
786    };
787
788    let added_workspace_roots: Vec<String> = after
789        .workspace_roots
790        .iter()
791        .filter(|root| !before.workspace_roots.contains(root))
792        .cloned()
793        .collect();
794
795    let mut added_connector_scopes: BTreeMap<String, Vec<String>> = BTreeMap::new();
796    let before_scopes_by_id: BTreeMap<&str, BTreeSet<&str>> = before_bundle
797        .connectors
798        .iter()
799        .map(|connector| {
800            (
801                connector.id.as_str(),
802                connector.scopes.iter().map(String::as_str).collect(),
803            )
804        })
805        .collect();
806    for connector in &after_bundle.connectors {
807        let before_scopes = before_scopes_by_id
808            .get(connector.id.as_str())
809            .cloned()
810            .unwrap_or_default();
811        let added: Vec<String> = connector
812            .scopes
813            .iter()
814            .filter(|scope| !before_scopes.contains(scope.as_str()))
815            .cloned()
816            .collect();
817        if !added.is_empty() {
818            added_connector_scopes.insert(connector.id.clone(), added);
819        }
820    }
821
822    let added_command_gates: Vec<String> = after_bundle
823        .environment
824        .command_gates
825        .iter()
826        .filter(|gate| !before_bundle.environment.command_gates.contains(gate))
827        .cloned()
828        .collect();
829
830    let raised_autonomy_tier = match (
831        before_bundle.policy.autonomy_tier.as_str(),
832        after_bundle.policy.autonomy_tier.as_str(),
833    ) {
834        (before_tier, after_tier) if autonomy_rank(after_tier) > autonomy_rank(before_tier) => {
835            Some(RaisedAutonomyTier {
836                from: before_tier.to_string(),
837                to: after_tier.to_string(),
838            })
839        }
840        _ => None,
841    };
842
843    let widening = match parent {
844        Some(parent) => collect_ceiling_violations(
845            parent,
846            &after,
847            &added_connector_scopes,
848            &added_command_gates,
849            raised_autonomy_tier.as_ref(),
850        ),
851        None => Vec::new(),
852    };
853
854    WorkflowPatchCapabilityDelta {
855        before,
856        after,
857        parent: parent.cloned(),
858        added_tools,
859        added_capabilities,
860        raised_side_effect_level,
861        added_workspace_roots,
862        added_connector_scopes,
863        added_command_gates,
864        raised_autonomy_tier,
865        widening,
866    }
867}
868
869fn collect_ceiling_violations(
870    parent: &CapabilityPolicy,
871    requested: &CapabilityPolicy,
872    added_connector_scopes: &BTreeMap<String, Vec<String>>,
873    added_command_gates: &[String],
874    raised_autonomy_tier: Option<&RaisedAutonomyTier>,
875) -> Vec<CapabilityCeilingViolation> {
876    let mut violations = Vec::new();
877    if !parent.tools.is_empty() {
878        for tool in &requested.tools {
879            if !parent.tools.contains(tool) {
880                violations.push(CapabilityCeilingViolation {
881                    kind: "tool".to_string(),
882                    detail: format!("tool '{tool}' is not in parent tool ceiling"),
883                });
884            }
885        }
886    }
887    for (capability, ops) in &requested.capabilities {
888        match parent.capabilities.get(capability) {
889            Some(parent_ops) => {
890                for op in ops {
891                    if !parent_ops.contains(op) {
892                        violations.push(CapabilityCeilingViolation {
893                            kind: "capability".to_string(),
894                            detail: format!(
895                                "capability '{capability}.{op}' exceeds parent ceiling"
896                            ),
897                        });
898                    }
899                }
900            }
901            None if !parent.capabilities.is_empty() => {
902                violations.push(CapabilityCeilingViolation {
903                    kind: "capability".to_string(),
904                    detail: format!("capability '{capability}' is not in parent ceiling"),
905                });
906            }
907            _ => {}
908        }
909    }
910    if let (Some(parent_level), Some(requested_level)) = (
911        parent.side_effect_level.as_deref(),
912        requested.side_effect_level.as_deref(),
913    ) {
914        if side_effect_rank(requested_level) > side_effect_rank(parent_level) {
915            violations.push(CapabilityCeilingViolation {
916                kind: "side_effect_level".to_string(),
917                detail: format!(
918                    "side_effect_level '{requested_level}' exceeds parent ceiling '{parent_level}'"
919                ),
920            });
921        }
922    }
923    if !parent.workspace_roots.is_empty() {
924        for root in &requested.workspace_roots {
925            if !parent.workspace_roots.contains(root) {
926                violations.push(CapabilityCeilingViolation {
927                    kind: "workspace_root".to_string(),
928                    detail: format!("workspace_root '{root}' exceeds parent allowlist"),
929                });
930            }
931        }
932    }
933    if !added_connector_scopes.is_empty() {
934        let parent_allows_connector_calls = parent
935            .capabilities
936            .get("connector")
937            .is_some_and(|ops| ops.iter().any(|op| op == "call"));
938        if !parent_allows_connector_calls && !parent.capabilities.is_empty() {
939            for (connector_id, scopes) in added_connector_scopes {
940                violations.push(CapabilityCeilingViolation {
941                    kind: "connector_scope".to_string(),
942                    detail: format!(
943                        "connector '{connector_id}' adds scopes {scopes:?} but parent ceiling does not include connector.call"
944                    ),
945                });
946            }
947        }
948    }
949    if !added_command_gates.is_empty() {
950        let parent_allows_exec = parent
951            .capabilities
952            .get("process")
953            .is_some_and(|ops| ops.iter().any(|op| op == "exec"));
954        if !parent_allows_exec && !parent.capabilities.is_empty() {
955            violations.push(CapabilityCeilingViolation {
956                kind: "command_gate".to_string(),
957                detail: format!(
958                    "patch adds command gates {added_command_gates:?} but parent ceiling does not include process.exec"
959                ),
960            });
961        }
962    }
963    if let Some(raised) = raised_autonomy_tier {
964        violations.push(CapabilityCeilingViolation {
965            kind: "autonomy_tier".to_string(),
966            detail: format!(
967                "autonomy_tier raised from '{}' to '{}' — patches must not widen autonomy",
968                raised.from, raised.to
969            ),
970        });
971    }
972    violations
973}
974
/// Total order over side-effect levels, lowest-impact first.
///
/// Unknown strings rank above every known level, so any transition
/// toward an unrecognized level is treated as a widening.
fn side_effect_rank(level: &str) -> usize {
    const ORDER: [&str; 5] = [
        "none",
        "read_only",
        "workspace_write",
        "process_exec",
        "network",
    ];
    ORDER
        .iter()
        .position(|known| *known == level)
        .unwrap_or(ORDER.len())
}
985
/// Map a side-effect level string onto its `'static` canonical form.
///
/// Unrecognized inputs fall back to `"none"`.
fn static_side_effect(level: &str) -> &'static str {
    [
        "none",
        "read_only",
        "workspace_write",
        "process_exec",
        "network",
    ]
    .into_iter()
    .find(|known| *known == level)
    .unwrap_or("none")
}
996
/// Total order over autonomy tiers, least autonomous first.
///
/// Unknown tiers rank alongside `"shadow"` (0), so they can never be
/// reported as a raise on their own.
fn autonomy_rank(tier: &str) -> usize {
    ["shadow", "suggest", "act_with_approval", "act_auto"]
        .iter()
        .position(|known| *known == tier)
        .unwrap_or(0)
}
1006
/// Minimum side-effect level implied by an autonomy tier, if any.
///
/// `"shadow"` and unrecognized tiers imply no floor and yield `None`.
fn autonomy_side_effect_floor(tier: &str) -> Option<&'static str> {
    let floor = match tier {
        "suggest" => "read_only",
        "act_with_approval" => "process_exec",
        "act_auto" => "network",
        _ => return None,
    };
    Some(floor)
}
1015
1016fn diagnostic_op(
1017    index: usize,
1018    op: &str,
1019    path: String,
1020    message: String,
1021    node_id: Option<String>,
1022) -> WorkflowPatchDiagnostic {
1023    WorkflowPatchDiagnostic {
1024        severity: "error".to_string(),
1025        op_index: Some(index),
1026        op: Some(op.to_string()),
1027        path,
1028        message,
1029        node_id,
1030    }
1031}
1032
1033fn diagnostic_global(message: String) -> WorkflowPatchDiagnostic {
1034    WorkflowPatchDiagnostic {
1035        severity: "error".to_string(),
1036        op_index: None,
1037        op: None,
1038        path: "patch".to_string(),
1039        message,
1040        node_id: None,
1041    }
1042}
1043
1044#[cfg(test)]
1045#[path = "workflow_patch_tests.rs"]
1046mod workflow_patch_tests;