// harn_vm/orchestration/workflow_patch.rs
//! Workflow patch proposals: a bounded, auditable contract that lets an
//! agent author propose changes to a portable [`WorkflowBundle`] without
//! touching the live runtime.
//!
//! The shape is intentionally small. An agent emits a JSON document
//! describing a sequence of [`WorkflowPatchOperation`]s; Harn applies
//! them to a copy of the bundle, runs the existing bundle validator,
//! computes a capability-ceiling delta against the parent execution
//! policy (when one is supplied), and returns a single
//! [`WorkflowPatchValidationReport`] the host can render.
//!
//! The patch surface deliberately mirrors what the issue calls out:
//! insert agent / verifier / approval node, update prompt capsule,
//! update model & tool policy, add edge. Anything not in this list
//! goes through a normal bundle edit instead — the patch contract is
//! the meta-programming layer, not a general bundle DSL.

use std::collections::{BTreeMap, BTreeSet};

use serde::{Deserialize, Serialize};

use super::workflow::{WorkflowEdge, WorkflowNode};
use super::workflow_bundle::{
    preview_workflow_bundle, validate_workflow_bundle, WorkflowBundle, WorkflowBundleGraphExport,
    WorkflowBundlePolicy, WorkflowBundleValidationReport,
};
use super::CapabilityPolicy;

/// Contract version for [`WorkflowPatch`] documents and the reports they
/// produce; [`apply_workflow_patch`] rejects any other value.
pub const WORKFLOW_PATCH_SCHEMA_VERSION: u32 = 1;

/// A bounded, auditable proposal to mutate a workflow bundle.
///
/// Patches are flat lists of [`WorkflowPatchOperation`]s applied in
/// order. An empty operations list is rejected by [`apply_workflow_patch`]
/// — silent no-ops would let agents claim work they did not do.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatch {
    /// Must equal [`WORKFLOW_PATCH_SCHEMA_VERSION`]; checked on apply.
    pub schema_version: u32,
    /// Required, non-blank identifier; echoed back as `patch_id` on the
    /// validation report.
    pub id: String,
    /// Optional human-readable description; not validated.
    pub summary: Option<String>,
    /// Applied in order; the first failing operation aborts the apply.
    pub operations: Vec<WorkflowPatchOperation>,
}

/// Operations are tagged by an external `op` discriminator so handlers
/// can dispatch on the literal string from JSON.
///
/// Only the operations called out in #1423 are exposed: insert node,
/// add edge, upsert prompt capsule, update node policy, update bundle
/// policy. New operations require a deliberate contract bump.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(tag = "op", rename_all = "snake_case")]
pub enum WorkflowPatchOperation {
    /// Insert a new workflow node. Fails if `node_id` already exists or
    /// is empty. The `node` body uses the same shape as
    /// `workflow.nodes[k]` in a bundle JSON, with sensible defaults
    /// applied for unspecified fields.
    ///
    /// If the workflow has no entry node yet, the inserted node becomes
    /// the entry.
    InsertNode {
        node_id: String,
        #[serde(default)]
        node: WorkflowPatchNodeBody,
    },
    /// Append an edge to the workflow graph. Fails if either endpoint
    /// references an unknown node, or if the edge already exists.
    ///
    /// Duplicate detection compares all four of `from`, `to`, `branch`
    /// and `label`.
    AddEdge {
        from: String,
        to: String,
        #[serde(default)]
        branch: Option<String>,
        #[serde(default)]
        label: Option<String>,
    },
    /// Insert or replace a prompt capsule. The capsule's `node_id` must
    /// reference a node that exists after all prior patch operations
    /// have been applied (so an `InsertNode` followed by
    /// `UpsertPromptCapsule` works).
    ///
    /// Also fails when a *different* capsule already targets the same
    /// node.
    UpsertPromptCapsule {
        capsule_id: String,
        capsule: WorkflowPatchPromptCapsuleBody,
    },
    /// Merge per-node policy fields onto an existing node. Only the
    /// fields named on [`WorkflowPatchNodePolicyBody`] can be set —
    /// arbitrary node fields are intentionally not patchable.
    UpdateNodePolicy {
        node_id: String,
        policy: WorkflowPatchNodePolicyBody,
    },
    /// Merge bundle-level policy fields. Mirrors the safe knobs in
    /// [`WorkflowBundlePolicy`].
    UpdateBundlePolicy {
        policy: WorkflowPatchBundlePolicyBody,
    },
}

/// Shape used by [`WorkflowPatchOperation::InsertNode`]. Mirrors the
/// editable fields on [`WorkflowNode`] without exposing every tunable.
///
/// All fields default; missing values fall back to the [`WorkflowNode`]
/// defaults when the node is materialized.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchNodeBody {
    /// Node kind; blank or missing falls back to "stage".
    pub kind: Option<String>,
    pub task_label: Option<String>,
    pub prompt: Option<String>,
    pub system: Option<String>,
    /// Raw JSON copied verbatim onto the node's `tools` field.
    pub tools: Option<serde_json::Value>,
    /// Parsed into the node's model policy; JSON that fails to parse is
    /// silently ignored on insert (see `node_body_into_workflow_node`).
    pub model_policy: Option<serde_json::Value>,
    pub capability_policy: Option<CapabilityPolicy>,
    /// Parsed into the node's approval policy; JSON that fails to parse
    /// is silently ignored on insert.
    pub approval_policy: Option<serde_json::Value>,
    pub metadata: BTreeMap<String, serde_json::Value>,
}

/// Shape used by [`WorkflowPatchOperation::UpdateNodePolicy`]. Each
/// field maps to the same-named field on [`WorkflowNode`] and is
/// merged in place (set when `Some`, left alone when `None`).
///
/// Unlike [`WorkflowPatchNodeBody`], invalid `model_policy` /
/// `approval_policy` JSON here is a hard error rather than being
/// silently ignored.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchNodePolicyBody {
    pub task_label: Option<String>,
    pub prompt: Option<String>,
    pub system: Option<String>,
    /// Raw JSON copied verbatim onto the node's `tools` field.
    pub tools: Option<serde_json::Value>,
    /// Parsed into the node's model policy; parse failure fails the
    /// operation.
    pub model_policy: Option<serde_json::Value>,
    pub capability_policy: Option<CapabilityPolicy>,
    /// Parsed into the node's approval policy; parse failure fails the
    /// operation.
    pub approval_policy: Option<serde_json::Value>,
}

/// Shape used by [`WorkflowPatchOperation::UpsertPromptCapsule`]. The
/// patch always sets `id` to match `capsule_id` so the bundle invariant
/// (`capsule.id == map_key`) holds without callers needing to repeat it.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchPromptCapsuleBody {
    /// Target node; must exist at the time the operation is applied.
    pub node_id: String,
    pub trigger_id: Option<String>,
    /// Capsule prompt text, stored verbatim.
    pub prompt: String,
    pub system: Option<String>,
    /// Free-form context values copied onto the capsule.
    pub context: BTreeMap<String, serde_json::Value>,
}

/// Shape used by [`WorkflowPatchOperation::UpdateBundlePolicy`]. Each
/// field replaces the corresponding [`WorkflowBundlePolicy`] field when
/// `Some`. The `tool_policy` and `approval_required` lists replace
/// rather than merge to keep the contract obvious — agents that want a
/// merge should compute and submit the full list.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct WorkflowPatchBundlePolicyBody {
    pub autonomy_tier: Option<String>,
    /// Full replacement for the bundle's tool policy map.
    pub tool_policy: Option<BTreeMap<String, serde_json::Value>>,
    /// Full replacement for the approval-required list.
    pub approval_required: Option<Vec<String>>,
    /// Parsed into the bundle's retry config; parse failure fails the
    /// operation.
    pub retry: Option<serde_json::Value>,
    /// Parsed into the bundle's catchup config; parse failure fails the
    /// operation.
    pub catchup: Option<serde_json::Value>,
}

/// What a host renders when it shows the result of validating a patch.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct WorkflowPatchValidationReport {
    /// Always [`WORKFLOW_PATCH_SCHEMA_VERSION`].
    pub schema_version: u32,
    /// Echo of the patch's `id`.
    pub patch_id: String,
    /// Echo of the bundle's `id`.
    pub bundle_id: String,
    /// True only when there were no apply errors, the patched bundle
    /// validated cleanly, and the capability delta shows no widening.
    pub valid: bool,
    /// Structural failures from applying the patch; when non-empty, the
    /// remaining fields describe the *original* (unpatched) bundle.
    pub apply_errors: Vec<WorkflowPatchDiagnostic>,
    /// Output of the standard bundle validator.
    pub bundle_validation: WorkflowBundleValidationReport,
    /// Structural before/after diff of the workflow graph.
    pub graph_diff: WorkflowPatchGraphDiff,
    /// Capability ceiling comparison, including any widening violations.
    pub capability_delta: WorkflowPatchCapabilityDelta,
    /// Renderable graph export of the patched bundle.
    pub graph_export: WorkflowBundleGraphExport,
}

/// One structured failure from applying a patch. The `op_index` lets
/// the host highlight the offending operation in its review surface.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchDiagnostic {
    /// Severity label for host display. NOTE(review): the constructors
    /// (`diagnostic_global` / `diagnostic_op`) live later in this module
    /// — presumably always "error"; confirm there.
    pub severity: String,
    /// Zero-based index into `patch.operations`; `None` for failures in
    /// the patch envelope itself (bad schema version, blank id, …).
    pub op_index: Option<usize>,
    /// Snake-case operation name (e.g. "insert_node"), when known.
    pub op: Option<String>,
    /// Dotted path into the bundle (e.g. "workflow.nodes.x").
    pub path: String,
    /// Human-readable explanation.
    pub message: String,
    /// Node the failure concerns, when one is identifiable.
    pub node_id: Option<String>,
}

/// Structural diff between the original and patched workflow graph.
/// Used by host UIs and the patch-authoring skill so a model that
/// proposes a patch can tell at a glance whether its edit produced the
/// shape it intended.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchGraphDiff {
    /// Node ids present after but not before the patch (sorted).
    pub added_nodes: Vec<String>,
    /// Edges present after but not before (sorted by `from`, then `to`).
    pub added_edges: Vec<WorkflowPatchEdgeRef>,
    /// Targets of `update_node_policy` operations (sorted, deduped).
    pub updated_nodes: Vec<String>,
    /// Targets of `upsert_prompt_capsule` operations (sorted, deduped).
    pub updated_capsules: Vec<String>,
    /// Bundle-policy field names touched by `update_bundle_policy`
    /// operations (sorted, deduped).
    pub policy_fields_changed: Vec<String>,
}

/// A `from -> to` edge reference (plus optional branch/label), used in
/// [`WorkflowPatchGraphDiff::added_edges`].
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct WorkflowPatchEdgeRef {
    pub from: String,
    pub to: String,
    pub branch: Option<String>,
    pub label: Option<String>,
}

/// Capability ceiling delta between the bundle before and after the
/// patch, optionally compared against a parent execution policy.
///
/// `widening` collects every dimension where the patched bundle asks
/// for *more* than the parent ceiling. The patch is rejected when this
/// list is non-empty — agents must not be able to expand permissions.
/// When no parent policy is supplied the widening check is skipped and
/// the list stays empty; the remaining fields still describe the
/// before/after delta for display.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct WorkflowPatchCapabilityDelta {
    /// Ceiling projected from the original bundle.
    pub before: CapabilityPolicy,
    /// Ceiling projected from the patched bundle.
    pub after: CapabilityPolicy,
    /// Parent execution policy the delta was compared against, if any.
    pub parent: Option<CapabilityPolicy>,
    /// Tools in `after` but not `before`.
    pub added_tools: Vec<String>,
    /// Capability → operations present in `after` but not `before`.
    pub added_capabilities: BTreeMap<String, Vec<String>>,
    /// Set when the patched ceiling's side-effect level outranks the
    /// original's.
    pub raised_side_effect_level: Option<RaisedSideEffectLevel>,
    /// Workspace roots in `after` but not `before`.
    pub added_workspace_roots: Vec<String>,
    /// Connector id → scopes added by the patch.
    pub added_connector_scopes: BTreeMap<String, Vec<String>>,
    /// Command gates added to the bundle environment.
    pub added_command_gates: Vec<String>,
    /// Set when the patched bundle's autonomy tier outranks the
    /// original's.
    pub raised_autonomy_tier: Option<RaisedAutonomyTier>,
    /// Ceiling violations against the parent policy; non-empty fails
    /// validation.
    pub widening: Vec<CapabilityCeilingViolation>,
}

/// Records a side-effect level increase (`from` → `to`) between the
/// original and patched ceilings; `from` is "none" when the original
/// declared no level at all.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RaisedSideEffectLevel {
    pub from: String,
    pub to: String,
}

/// Records an autonomy tier increase (`from` → `to`) between the
/// original and patched bundle policies.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RaisedAutonomyTier {
    pub from: String,
    pub to: String,
}

/// One concrete way the patched bundle exceeds the parent ceiling.
/// `kind` is a stable enum string so hosts can group/explain them;
/// `detail` is free-form human-readable text.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct CapabilityCeilingViolation {
    pub kind: String,
    pub detail: String,
}

243/// Apply a patch to a copy of the bundle and return the new bundle.
244/// Fails fast on the first structural error; callers that want richer
245/// diagnostics should prefer [`validate_workflow_patch`].
246pub fn apply_workflow_patch(
247    bundle: &WorkflowBundle,
248    patch: &WorkflowPatch,
249) -> Result<WorkflowBundle, Vec<WorkflowPatchDiagnostic>> {
250    let mut errors = Vec::new();
251    if patch.schema_version != WORKFLOW_PATCH_SCHEMA_VERSION {
252        errors.push(diagnostic_global(format!(
253            "unsupported patch schema_version {}; expected {}",
254            patch.schema_version, WORKFLOW_PATCH_SCHEMA_VERSION
255        )));
256    }
257    if patch.id.trim().is_empty() {
258        errors.push(diagnostic_global("patch id is required".to_string()));
259    }
260    if patch.operations.is_empty() {
261        errors.push(diagnostic_global(
262            "patch contains no operations; refusing to no-op".to_string(),
263        ));
264    }
265    if !errors.is_empty() {
266        return Err(errors);
267    }
268
269    let mut working = bundle.clone();
270    for (index, operation) in patch.operations.iter().enumerate() {
271        if let Err(diag) = apply_operation(&mut working, operation, index) {
272            return Err(vec![diag]);
273        }
274    }
275    Ok(working)
276}
277
278/// Apply + validate + diff + ceiling check, in one pass. The bundle
279/// validator is the source of truth for "is this still a valid bundle?";
280/// this function adds the patch-specific apply errors, the structural
281/// diff, and the capability delta on top.
282pub fn validate_workflow_patch(
283    bundle: &WorkflowBundle,
284    patch: &WorkflowPatch,
285    parent_ceiling: Option<&CapabilityPolicy>,
286) -> WorkflowPatchValidationReport {
287    let before_ceiling = bundle_capability_ceiling(bundle);
288
289    let (patched, apply_errors) = match apply_workflow_patch(bundle, patch) {
290        Ok(patched) => (patched, Vec::new()),
291        Err(errors) => (bundle.clone(), errors),
292    };
293
294    let bundle_validation = validate_workflow_bundle(&patched);
295    let graph_diff = diff_bundle_graph(bundle, &patched, patch);
296    let after_ceiling = bundle_capability_ceiling(&patched);
297    let capability_delta = compute_capability_delta(
298        bundle,
299        &patched,
300        before_ceiling,
301        after_ceiling,
302        parent_ceiling,
303    );
304    let graph_export = preview_workflow_bundle(&patched).graph;
305    let valid =
306        apply_errors.is_empty() && bundle_validation.valid && capability_delta.widening.is_empty();
307
308    WorkflowPatchValidationReport {
309        schema_version: WORKFLOW_PATCH_SCHEMA_VERSION,
310        patch_id: patch.id.clone(),
311        bundle_id: bundle.id.clone(),
312        valid,
313        apply_errors,
314        bundle_validation,
315        graph_diff,
316        capability_delta,
317        graph_export,
318    }
319}
320
/// Project a bundle's effective capability ceiling. The bundle does
/// not carry a single capability policy; we compose one from the
/// per-node `capability_policy` declarations, the bundle-level
/// `tool_policy` keys, the connector scopes, the autonomy tier, the
/// `worktree_policy`, and the declared `command_gates`. The result is
/// what a parent runtime needs to compare against to decide whether
/// running this bundle would widen its own ceiling.
pub fn bundle_capability_ceiling(bundle: &WorkflowBundle) -> CapabilityPolicy {
    // Seed the tool set from the bundle-level tool policy keys; nodes
    // union their own declared tools in below.
    let mut tools: BTreeSet<String> = bundle.policy.tool_policy.keys().cloned().collect();
    let mut capabilities: BTreeMap<String, BTreeSet<String>> = BTreeMap::new();
    let mut workspace_roots: BTreeSet<String> = BTreeSet::new();
    // Highest-ranked side-effect level seen so far, tracked as a
    // 'static str so node-borrowed strings never escape this function.
    let mut max_side_effect: Option<&'static str> = None;

    // Union every node's declared tools, capability ops, and roots.
    for node in bundle.workflow.nodes.values() {
        for tool in &node.capability_policy.tools {
            tools.insert(tool.clone());
        }
        for (capability, ops) in &node.capability_policy.capabilities {
            let entry = capabilities.entry(capability.clone()).or_default();
            for op in ops {
                entry.insert(op.clone());
            }
        }
        for root in &node.capability_policy.workspace_roots {
            workspace_roots.insert(root.clone());
        }
        if let Some(level) = node.capability_policy.side_effect_level.as_deref() {
            // Keep whichever of (current, level) ranks higher.
            // `side_effect_rank` / `static_side_effect` are helpers
            // defined elsewhere in this module; `static_side_effect`
            // presumably interns the level as a 'static str — confirm.
            max_side_effect = match max_side_effect {
                Some(current) if side_effect_rank(current) >= side_effect_rank(level) => {
                    Some(current)
                }
                _ => Some(static_side_effect(level)),
            };
        }
    }

    // The autonomy tier can impose a floor on the side-effect level
    // (helper defined elsewhere in this module).
    let autonomy_floor = autonomy_side_effect_floor(&bundle.policy.autonomy_tier);
    if let Some(floor) = autonomy_floor {
        max_side_effect = match max_side_effect {
            Some(current) if side_effect_rank(current) >= side_effect_rank(floor) => Some(current),
            _ => Some(floor),
        };
    }

    // Any declared connector implies the generic connector.call
    // capability.
    if !bundle.connectors.is_empty() {
        capabilities
            .entry("connector".to_string())
            .or_default()
            .insert("call".to_string());
    }
    // Command gates or a non-host-managed worktree imply process
    // execution, which also raises the side-effect level to at least
    // "process_exec".
    if !bundle.environment.command_gates.is_empty()
        || bundle.environment.worktree_policy != "host_managed"
    {
        capabilities
            .entry("process".to_string())
            .or_default()
            .insert("exec".to_string());
        max_side_effect = match max_side_effect {
            Some(current) if side_effect_rank(current) >= side_effect_rank("process_exec") => {
                Some(current)
            }
            _ => Some("process_exec"),
        };
    }

    CapabilityPolicy {
        tools: tools.into_iter().collect(),
        capabilities: capabilities
            .into_iter()
            .map(|(k, v)| (k, v.into_iter().collect()))
            .collect(),
        workspace_roots: workspace_roots.into_iter().collect(),
        side_effect_level: max_side_effect.map(|level| level.to_string()),
        // The composed ceiling carries no recursion, arg-constraint,
        // annotation, or sandbox detail; those stay at their defaults.
        recursion_limit: None,
        tool_arg_constraints: Vec::new(),
        tool_annotations: BTreeMap::new(),
        sandbox_profile: crate::orchestration::SandboxProfile::default(),
    }
}

/// Apply a single patch operation to `bundle` in place. `index` is the
/// operation's position in the patch, threaded into every diagnostic so
/// the host can point at the offending entry. The `diagnostic_op`
/// constructor is defined later in this module.
fn apply_operation(
    bundle: &mut WorkflowBundle,
    operation: &WorkflowPatchOperation,
    index: usize,
) -> Result<(), WorkflowPatchDiagnostic> {
    match operation {
        WorkflowPatchOperation::InsertNode { node_id, node } => {
            // Blank ids and id collisions are rejected before any
            // mutation happens.
            if node_id.trim().is_empty() {
                return Err(diagnostic_op(
                    index,
                    "insert_node",
                    "operations".to_string(),
                    "insert_node node_id is required".to_string(),
                    None,
                ));
            }
            if bundle.workflow.nodes.contains_key(node_id) {
                return Err(diagnostic_op(
                    index,
                    "insert_node",
                    format!("workflow.nodes.{node_id}"),
                    format!("workflow already contains node {node_id}"),
                    Some(node_id.clone()),
                ));
            }
            let workflow_node = node_body_into_workflow_node(node_id, node);
            bundle.workflow.nodes.insert(node_id.clone(), workflow_node);
            // A node inserted into a workflow with no entry becomes the
            // entry point.
            if bundle.workflow.entry.is_empty() {
                bundle.workflow.entry = node_id.clone();
            }
            Ok(())
        }
        WorkflowPatchOperation::AddEdge {
            from,
            to,
            branch,
            label,
        } => {
            // Both endpoints must exist at this point in the patch
            // (earlier InsertNode operations have already landed).
            if !bundle.workflow.nodes.contains_key(from) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges.from".to_string(),
                    format!("edge.from references unknown node: {from}"),
                    Some(from.clone()),
                ));
            }
            if !bundle.workflow.nodes.contains_key(to) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges.to".to_string(),
                    format!("edge.to references unknown node: {to}"),
                    Some(to.clone()),
                ));
            }
            let candidate = WorkflowEdge {
                from: from.clone(),
                to: to.clone(),
                branch: branch.clone(),
                label: label.clone(),
            };
            // Duplicate detection compares all four fields, so edges
            // differing only in branch/label are allowed.
            if bundle.workflow.edges.iter().any(|edge| {
                edge.from == candidate.from
                    && edge.to == candidate.to
                    && edge.branch == candidate.branch
                    && edge.label == candidate.label
            }) {
                return Err(diagnostic_op(
                    index,
                    "add_edge",
                    "edges".to_string(),
                    format!("edge {from} -> {to} already exists"),
                    Some(from.clone()),
                ));
            }
            bundle.workflow.edges.push(candidate);
            Ok(())
        }
        WorkflowPatchOperation::UpsertPromptCapsule {
            capsule_id,
            capsule,
        } => {
            if capsule_id.trim().is_empty() {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    "prompt_capsules".to_string(),
                    "capsule_id is required".to_string(),
                    None,
                ));
            }
            if !bundle.workflow.nodes.contains_key(&capsule.node_id) {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    format!("prompt_capsules.{capsule_id}.node_id"),
                    format!(
                        "prompt capsule references unknown node: {}",
                        capsule.node_id
                    ),
                    Some(capsule.node_id.clone()),
                ));
            }
            // One capsule per node: reject when a *different* capsule
            // already targets the same node (re-upserting the same
            // capsule_id is fine — that's the replace case).
            let existing = bundle
                .prompt_capsules
                .values()
                .find(|other| other.node_id == capsule.node_id && other.id != *capsule_id);
            if let Some(other) = existing {
                return Err(diagnostic_op(
                    index,
                    "upsert_prompt_capsule",
                    format!("prompt_capsules.{capsule_id}.node_id"),
                    format!(
                        "prompt capsule {capsule_id} would target node {} but capsule {} already targets it",
                        capsule.node_id, other.id
                    ),
                    Some(capsule.node_id.clone()),
                ));
            }
            // `id` is forced to the map key so the bundle invariant
            // (capsule.id == key) holds by construction.
            let capsule_value = super::workflow_bundle::PromptCapsule {
                id: capsule_id.clone(),
                node_id: capsule.node_id.clone(),
                trigger_id: capsule.trigger_id.clone(),
                prompt: capsule.prompt.clone(),
                system: capsule.system.clone(),
                context: capsule.context.clone(),
            };
            bundle
                .prompt_capsules
                .insert(capsule_id.clone(), capsule_value);
            Ok(())
        }
        WorkflowPatchOperation::UpdateNodePolicy { node_id, policy } => {
            let Some(node) = bundle.workflow.nodes.get_mut(node_id) else {
                return Err(diagnostic_op(
                    index,
                    "update_node_policy",
                    format!("workflow.nodes.{node_id}"),
                    format!("workflow does not contain node {node_id}"),
                    Some(node_id.clone()),
                ));
            };
            apply_node_policy_body(node, policy).map_err(|message| {
                diagnostic_op(
                    index,
                    "update_node_policy",
                    format!("workflow.nodes.{node_id}"),
                    message,
                    Some(node_id.clone()),
                )
            })?;
            Ok(())
        }
        WorkflowPatchOperation::UpdateBundlePolicy { policy } => {
            apply_bundle_policy_body(&mut bundle.policy, policy).map_err(|message| {
                diagnostic_op(
                    index,
                    "update_bundle_policy",
                    "policy".to_string(),
                    message,
                    None,
                )
            })?;
            Ok(())
        }
    }
}

570fn node_body_into_workflow_node(node_id: &str, body: &WorkflowPatchNodeBody) -> WorkflowNode {
571    let mut node = WorkflowNode {
572        id: Some(node_id.to_string()),
573        kind: body
574            .kind
575            .clone()
576            .filter(|kind| !kind.trim().is_empty())
577            .unwrap_or_else(|| "stage".to_string()),
578        ..WorkflowNode::default()
579    };
580    node.task_label = body.task_label.clone();
581    node.prompt = body.prompt.clone();
582    node.system = body.system.clone();
583    if let Some(tools) = &body.tools {
584        node.tools = tools.clone();
585    }
586    if let Some(model_policy) = &body.model_policy {
587        if let Ok(parsed) = serde_json::from_value(model_policy.clone()) {
588            node.model_policy = parsed;
589        }
590    }
591    if let Some(capability_policy) = &body.capability_policy {
592        node.capability_policy = capability_policy.clone();
593    }
594    if let Some(approval_policy) = &body.approval_policy {
595        if let Ok(parsed) = serde_json::from_value(approval_policy.clone()) {
596            node.approval_policy = parsed;
597        }
598    }
599    node.metadata = body.metadata.clone();
600    node
601}
602
603fn apply_node_policy_body(
604    node: &mut WorkflowNode,
605    body: &WorkflowPatchNodePolicyBody,
606) -> Result<(), String> {
607    if let Some(label) = &body.task_label {
608        node.task_label = Some(label.clone());
609    }
610    if let Some(prompt) = &body.prompt {
611        node.prompt = Some(prompt.clone());
612    }
613    if let Some(system) = &body.system {
614        node.system = Some(system.clone());
615    }
616    if let Some(tools) = &body.tools {
617        node.tools = tools.clone();
618    }
619    if let Some(model_policy) = &body.model_policy {
620        node.model_policy = serde_json::from_value(model_policy.clone())
621            .map_err(|error| format!("invalid model_policy: {error}"))?;
622    }
623    if let Some(capability_policy) = &body.capability_policy {
624        node.capability_policy = capability_policy.clone();
625    }
626    if let Some(approval_policy) = &body.approval_policy {
627        node.approval_policy = serde_json::from_value(approval_policy.clone())
628            .map_err(|error| format!("invalid approval_policy: {error}"))?;
629    }
630    Ok(())
631}
632
633fn apply_bundle_policy_body(
634    policy: &mut WorkflowBundlePolicy,
635    body: &WorkflowPatchBundlePolicyBody,
636) -> Result<(), String> {
637    if let Some(autonomy) = &body.autonomy_tier {
638        policy.autonomy_tier = autonomy.clone();
639    }
640    if let Some(tool_policy) = &body.tool_policy {
641        policy.tool_policy = tool_policy.clone();
642    }
643    if let Some(approval_required) = &body.approval_required {
644        policy.approval_required = approval_required.clone();
645    }
646    if let Some(retry) = &body.retry {
647        policy.retry = serde_json::from_value(retry.clone())
648            .map_err(|error| format!("invalid retry: {error}"))?;
649    }
650    if let Some(catchup) = &body.catchup {
651        policy.catchup = serde_json::from_value(catchup.clone())
652            .map_err(|error| format!("invalid catchup: {error}"))?;
653    }
654    Ok(())
655}
656
/// Compute the structural diff between `before` and `after`, using the
/// patch's own operation list to attribute policy and capsule updates.
///
/// Node/edge additions are derived by comparing the two bundles; the
/// `updated_*` / `policy_fields_changed` lists come straight from the
/// operations, so they name what the patch *asked* to change.
fn diff_bundle_graph(
    before: &WorkflowBundle,
    after: &WorkflowBundle,
    patch: &WorkflowPatch,
) -> WorkflowPatchGraphDiff {
    let mut diff = WorkflowPatchGraphDiff::default();
    // Added nodes: keys present after but not before. No dedup needed —
    // map keys are unique.
    let before_node_ids: BTreeSet<&String> = before.workflow.nodes.keys().collect();
    for node_id in after.workflow.nodes.keys() {
        if !before_node_ids.contains(node_id) {
            diff.added_nodes.push(node_id.clone());
        }
    }
    // Added edges: full (from, to, branch, label) tuples compared, so an
    // edge differing only in branch/label counts as new.
    let before_edges: BTreeSet<(String, String, Option<String>, Option<String>)> = before
        .workflow
        .edges
        .iter()
        .map(|edge| {
            (
                edge.from.clone(),
                edge.to.clone(),
                edge.branch.clone(),
                edge.label.clone(),
            )
        })
        .collect();
    for edge in &after.workflow.edges {
        let key = (
            edge.from.clone(),
            edge.to.clone(),
            edge.branch.clone(),
            edge.label.clone(),
        );
        if !before_edges.contains(&key) {
            diff.added_edges.push(WorkflowPatchEdgeRef {
                from: edge.from.clone(),
                to: edge.to.clone(),
                branch: edge.branch.clone(),
                label: edge.label.clone(),
            });
        }
    }
    // Attribute update-style operations by what they targeted; insert
    // and add-edge operations are already captured structurally above.
    for operation in &patch.operations {
        match operation {
            WorkflowPatchOperation::UpdateNodePolicy { node_id, .. } => {
                diff.updated_nodes.push(node_id.clone());
            }
            WorkflowPatchOperation::UpsertPromptCapsule { capsule_id, .. } => {
                diff.updated_capsules.push(capsule_id.clone());
            }
            WorkflowPatchOperation::UpdateBundlePolicy { policy } => {
                if policy.autonomy_tier.is_some() {
                    diff.policy_fields_changed.push("autonomy_tier".to_string());
                }
                if policy.tool_policy.is_some() {
                    diff.policy_fields_changed.push("tool_policy".to_string());
                }
                if policy.approval_required.is_some() {
                    diff.policy_fields_changed
                        .push("approval_required".to_string());
                }
                if policy.retry.is_some() {
                    diff.policy_fields_changed.push("retry".to_string());
                }
                if policy.catchup.is_some() {
                    diff.policy_fields_changed.push("catchup".to_string());
                }
            }
            _ => {}
        }
    }
    // Deterministic ordering for report rendering. The edge sort keys
    // only on (from, to); ties keep insertion order because `sort_by`
    // is a stable sort.
    diff.added_nodes.sort();
    diff.updated_nodes.sort();
    diff.updated_nodes.dedup();
    diff.updated_capsules.sort();
    diff.updated_capsules.dedup();
    diff.policy_fields_changed.sort();
    diff.policy_fields_changed.dedup();
    diff.added_edges
        .sort_by(|left, right| (&left.from, &left.to).cmp(&(&right.from, &right.to)));
    diff
}

739fn compute_capability_delta(
740    before_bundle: &WorkflowBundle,
741    after_bundle: &WorkflowBundle,
742    before: CapabilityPolicy,
743    after: CapabilityPolicy,
744    parent: Option<&CapabilityPolicy>,
745) -> WorkflowPatchCapabilityDelta {
746    let added_tools: Vec<String> = after
747        .tools
748        .iter()
749        .filter(|tool| !before.tools.contains(tool))
750        .cloned()
751        .collect();
752
753    let mut added_capabilities: BTreeMap<String, Vec<String>> = BTreeMap::new();
754    for (capability, ops) in &after.capabilities {
755        let before_ops = before
756            .capabilities
757            .get(capability)
758            .cloned()
759            .unwrap_or_default();
760        let added: Vec<String> = ops
761            .iter()
762            .filter(|op| !before_ops.contains(op))
763            .cloned()
764            .collect();
765        if !added.is_empty() {
766            added_capabilities.insert(capability.clone(), added);
767        }
768    }
769
770    let raised_side_effect_level = match (
771        before.side_effect_level.as_deref(),
772        after.side_effect_level.as_deref(),
773    ) {
774        (Some(before_level), Some(after_level))
775            if side_effect_rank(after_level) > side_effect_rank(before_level) =>
776        {
777            Some(RaisedSideEffectLevel {
778                from: before_level.to_string(),
779                to: after_level.to_string(),
780            })
781        }
782        (None, Some(after_level)) => Some(RaisedSideEffectLevel {
783            from: "none".to_string(),
784            to: after_level.to_string(),
785        }),
786        _ => None,
787    };
788
789    let added_workspace_roots: Vec<String> = after
790        .workspace_roots
791        .iter()
792        .filter(|root| !before.workspace_roots.contains(root))
793        .cloned()
794        .collect();
795
796    let mut added_connector_scopes: BTreeMap<String, Vec<String>> = BTreeMap::new();
797    let before_scopes_by_id: BTreeMap<&str, BTreeSet<&str>> = before_bundle
798        .connectors
799        .iter()
800        .map(|connector| {
801            (
802                connector.id.as_str(),
803                connector.scopes.iter().map(String::as_str).collect(),
804            )
805        })
806        .collect();
807    for connector in &after_bundle.connectors {
808        let before_scopes = before_scopes_by_id
809            .get(connector.id.as_str())
810            .cloned()
811            .unwrap_or_default();
812        let added: Vec<String> = connector
813            .scopes
814            .iter()
815            .filter(|scope| !before_scopes.contains(scope.as_str()))
816            .cloned()
817            .collect();
818        if !added.is_empty() {
819            added_connector_scopes.insert(connector.id.clone(), added);
820        }
821    }
822
823    let added_command_gates: Vec<String> = after_bundle
824        .environment
825        .command_gates
826        .iter()
827        .filter(|gate| !before_bundle.environment.command_gates.contains(gate))
828        .cloned()
829        .collect();
830
831    let raised_autonomy_tier = match (
832        before_bundle.policy.autonomy_tier.as_str(),
833        after_bundle.policy.autonomy_tier.as_str(),
834    ) {
835        (before_tier, after_tier) if autonomy_rank(after_tier) > autonomy_rank(before_tier) => {
836            Some(RaisedAutonomyTier {
837                from: before_tier.to_string(),
838                to: after_tier.to_string(),
839            })
840        }
841        _ => None,
842    };
843
844    let widening = match parent {
845        Some(parent) => collect_ceiling_violations(
846            parent,
847            &after,
848            &added_connector_scopes,
849            &added_command_gates,
850            raised_autonomy_tier.as_ref(),
851        ),
852        None => Vec::new(),
853    };
854
855    WorkflowPatchCapabilityDelta {
856        before,
857        after,
858        parent: parent.cloned(),
859        added_tools,
860        added_capabilities,
861        raised_side_effect_level,
862        added_workspace_roots,
863        added_connector_scopes,
864        added_command_gates,
865        raised_autonomy_tier,
866        widening,
867    }
868}
869
870fn collect_ceiling_violations(
871    parent: &CapabilityPolicy,
872    requested: &CapabilityPolicy,
873    added_connector_scopes: &BTreeMap<String, Vec<String>>,
874    added_command_gates: &[String],
875    raised_autonomy_tier: Option<&RaisedAutonomyTier>,
876) -> Vec<CapabilityCeilingViolation> {
877    let mut violations = Vec::new();
878    if !parent.tools.is_empty() {
879        for tool in &requested.tools {
880            if !parent.tools.contains(tool) {
881                violations.push(CapabilityCeilingViolation {
882                    kind: "tool".to_string(),
883                    detail: format!("tool '{tool}' is not in parent tool ceiling"),
884                });
885            }
886        }
887    }
888    for (capability, ops) in &requested.capabilities {
889        match parent.capabilities.get(capability) {
890            Some(parent_ops) => {
891                for op in ops {
892                    if !parent_ops.contains(op) {
893                        violations.push(CapabilityCeilingViolation {
894                            kind: "capability".to_string(),
895                            detail: format!(
896                                "capability '{capability}.{op}' exceeds parent ceiling"
897                            ),
898                        });
899                    }
900                }
901            }
902            None if !parent.capabilities.is_empty() => {
903                violations.push(CapabilityCeilingViolation {
904                    kind: "capability".to_string(),
905                    detail: format!("capability '{capability}' is not in parent ceiling"),
906                });
907            }
908            _ => {}
909        }
910    }
911    if let (Some(parent_level), Some(requested_level)) = (
912        parent.side_effect_level.as_deref(),
913        requested.side_effect_level.as_deref(),
914    ) {
915        if side_effect_rank(requested_level) > side_effect_rank(parent_level) {
916            violations.push(CapabilityCeilingViolation {
917                kind: "side_effect_level".to_string(),
918                detail: format!(
919                    "side_effect_level '{requested_level}' exceeds parent ceiling '{parent_level}'"
920                ),
921            });
922        }
923    }
924    if !parent.workspace_roots.is_empty() {
925        for root in &requested.workspace_roots {
926            if !parent.workspace_roots.contains(root) {
927                violations.push(CapabilityCeilingViolation {
928                    kind: "workspace_root".to_string(),
929                    detail: format!("workspace_root '{root}' exceeds parent allowlist"),
930                });
931            }
932        }
933    }
934    if !added_connector_scopes.is_empty() {
935        let parent_allows_connector_calls = parent
936            .capabilities
937            .get("connector")
938            .is_some_and(|ops| ops.iter().any(|op| op == "call"));
939        if !parent_allows_connector_calls && !parent.capabilities.is_empty() {
940            for (connector_id, scopes) in added_connector_scopes {
941                violations.push(CapabilityCeilingViolation {
942                    kind: "connector_scope".to_string(),
943                    detail: format!(
944                        "connector '{connector_id}' adds scopes {scopes:?} but parent ceiling does not include connector.call"
945                    ),
946                });
947            }
948        }
949    }
950    if !added_command_gates.is_empty() {
951        let parent_allows_exec = parent
952            .capabilities
953            .get("process")
954            .is_some_and(|ops| ops.iter().any(|op| op == "exec"));
955        if !parent_allows_exec && !parent.capabilities.is_empty() {
956            violations.push(CapabilityCeilingViolation {
957                kind: "command_gate".to_string(),
958                detail: format!(
959                    "patch adds command gates {added_command_gates:?} but parent ceiling does not include process.exec"
960                ),
961            });
962        }
963    }
964    if let Some(raised) = raised_autonomy_tier {
965        violations.push(CapabilityCeilingViolation {
966            kind: "autonomy_tier".to_string(),
967            detail: format!(
968                "autonomy_tier raised from '{}' to '{}' — patches must not widen autonomy",
969                raised.from, raised.to
970            ),
971        });
972    }
973    violations
974}
975
/// Maps a side-effect level to an ordinal for comparisons. Levels are
/// ordered by increasing severity; an unrecognized level ranks above all
/// known ones so it is treated conservatively as the most severe.
fn side_effect_rank(level: &str) -> usize {
    const ORDER: [&str; 5] = [
        "none",
        "read_only",
        "workspace_write",
        "process_exec",
        "network",
    ];
    ORDER
        .iter()
        .position(|known| *known == level)
        .unwrap_or(ORDER.len())
}
986
/// Re-interns a dynamic side-effect level string as a `'static` one.
/// Unrecognized levels collapse to `"none"`.
///
/// NOTE(review): unknown input maps to the *lowest* level here but to the
/// *highest* rank in [`side_effect_rank`] — presumably intentional for the
/// respective call sites, worth confirming.
fn static_side_effect(level: &str) -> &'static str {
    [
        "none",
        "read_only",
        "workspace_write",
        "process_exec",
        "network",
    ]
    .into_iter()
    .find(|known| *known == level)
    .unwrap_or("none")
}
997
/// Maps an autonomy tier to its position on the autonomy ladder, lowest
/// first. Unrecognized tiers rank as the lowest (`shadow`-equivalent), so
/// unknown values can never register as an autonomy raise.
fn autonomy_rank(tier: &str) -> usize {
    const LADDER: [&str; 4] = ["shadow", "suggest", "act_with_approval", "act_auto"];
    LADDER.iter().position(|known| *known == tier).unwrap_or(0)
}
1007
/// Returns the minimum side-effect level implied by an autonomy tier, or
/// `None` when the tier (e.g. `shadow`, or anything unrecognized) implies
/// no floor.
fn autonomy_side_effect_floor(tier: &str) -> Option<&'static str> {
    let floor = match tier {
        "suggest" => "read_only",
        "act_with_approval" => "process_exec",
        "act_auto" => "network",
        _ => return None,
    };
    Some(floor)
}
1016
1017fn diagnostic_op(
1018    index: usize,
1019    op: &str,
1020    path: String,
1021    message: String,
1022    node_id: Option<String>,
1023) -> WorkflowPatchDiagnostic {
1024    WorkflowPatchDiagnostic {
1025        severity: "error".to_string(),
1026        op_index: Some(index),
1027        op: Some(op.to_string()),
1028        path,
1029        message,
1030        node_id,
1031    }
1032}
1033
1034fn diagnostic_global(message: String) -> WorkflowPatchDiagnostic {
1035    WorkflowPatchDiagnostic {
1036        severity: "error".to_string(),
1037        op_index: None,
1038        op: None,
1039        path: "patch".to_string(),
1040        message,
1041        node_id: None,
1042    }
1043}
1044
1045#[cfg(test)]
1046#[path = "workflow_patch_tests.rs"]
1047mod workflow_patch_tests;