// apimock_config/workspace.rs
//! The editable workspace: loaded TOML + stable node IDs + edit API.
//!
//! # Role in the 5.1 design
//!
//! A GUI never touches `Config` or `RuleSet` directly. It holds a
//! `Workspace` value, calls `snapshot()` to get a read-only view for
//! rendering, and `apply(EditCommand)` to mutate. Later, `save()`
//! writes changes back to disk.
//!
//! # Stage breakdown
//!
//! 5.1.0 implements Steps 1–3 of the spec's §12 plan:
//!
//! - **Step 1** (this file) — `load` + `snapshot`.
//! - **Step 2** — `apply` with the full eight-command set.
//! - **Step 3** — `validate` producing a `ValidationReport`.
//!
//! Steps 4 (`save` + diff) and 5 (richer routing snapshot) are planned
//! for 5.2+.
//!
//! # IDs
//!
//! Every editable node gets a v4 UUID at load time. IDs are stable
//! across `apply()` calls within one `Workspace` instance, so GUI
//! selection survives edits that reorder or rename surrounding nodes.
//! IDs are *not* stable across fresh `load()` calls — a reload
//! regenerates the table, which matches the spec §10 "Workspace は
//! メモリ上に独立インスタンスを持つ" stance.
30use std::{
31    collections::HashMap,
32    path::{Path, PathBuf},
33};
34
35use apimock_routing::{RoutingError, RuleSet};
36
37use crate::{
38    Config,
39    error::{ApplyError, ConfigError, SaveError, WorkspaceError},
40    view::{
41        ApplyResult, ConfigFileKind, ConfigFileView, ConfigNodeView, Diagnostic, EditCommand,
42        EditValue, NodeId, NodeKind, NodeValidation, SaveResult, Severity, ValidationIssue,
43        ValidationReport, WorkspaceSnapshot,
44    },
45};
46
/// Editable view of an apimock workspace.
///
/// # Internal layout
///
/// The `Workspace` holds the loaded TOML model (as a `Config`) plus
/// two index maps:
///
/// - `id_to_address`: NodeId → where the node lives in `config`.
/// - `address_to_id`: reverse — used when rebuilding snapshots.
///
/// On every `apply()` that could move nodes around (Add / Remove /
/// Move), these tables are partially rebuilt. Reloading the config
/// discards them and re-seeds with fresh IDs.
pub struct Workspace {
    /// Path this workspace was loaded from, as resolved by `load`
    /// (may differ from the caller-supplied `root`).
    root_path: PathBuf,
    /// Loaded TOML model. Authoritative source of truth for
    /// persistence; edits happen through the editable helpers on
    /// `Workspace` which keep `config` + id tables in sync.
    config: Config,
    /// ID index — see struct doc.
    ids: IdIndex,
    /// Workspace-scope diagnostics (e.g. load-time warnings). Per-node
    /// diagnostics live inside each node's `NodeValidation`.
    diagnostics: Vec<Diagnostic>,
}
73
/// Internal index mapping NodeId to an editable node's logical
/// address.
///
/// # Why a separate enum and not a path string
///
/// The `apply` layer needs to mutate the underlying config, which is
/// only safe if the address is a closed, exhaustively-matchable set.
/// A free-form `"rule_sets[0].rules[2]"` string would force the apply
/// code to parse at every edit and silently accept nonsense paths.
///
/// Note: variants carry *positional* indices, so an address is only
/// valid for the layout it was created against; structural edits must
/// migrate entries (see `shift_rule_sets_down` and friends).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum NodeAddress {
    /// The root config (there is exactly one).
    Root,
    /// A whole rule set, identified by its index in `service.rule_sets`.
    RuleSet { rule_set: usize },
    /// A single rule inside a rule set.
    Rule { rule_set: usize, rule: usize },
    /// The `respond` block of a rule.
    Respond { rule_set: usize, rule: usize },
    /// A middleware file reference, by its index in
    /// `service.middlewares_file_paths`.
    Middleware { middleware: usize },
    /// The fallback respond dir. Singleton — there is one per workspace.
    FallbackRespondDir,
}
99
/// Bidirectional NodeId ⇄ NodeAddress index.
///
/// Kept consistent by construction: `IdIndex::insert` is the only way
/// entries are created, and it updates both maps together.
#[derive(Default)]
struct IdIndex {
    // Forward map: stable GUI-facing id → current logical address.
    id_to_address: HashMap<NodeId, NodeAddress>,
    // Reverse map: logical address → id; used to re-find an id after
    // structural edits change positional indices.
    address_to_id: HashMap<NodeAddress, NodeId>,
}
105
106impl IdIndex {
107    fn insert(&mut self, address: NodeAddress) -> NodeId {
108        if let Some(&id) = self.address_to_id.get(&address) {
109            return id;
110        }
111        let id = NodeId::new();
112        self.id_to_address.insert(id, address);
113        self.address_to_id.insert(address, id);
114        id
115    }
116
117    /// Lookup a NodeAddress by id. Used by the apply layer in Step 2.
118    #[allow(dead_code)]
119    fn lookup(&self, id: NodeId) -> Option<NodeAddress> {
120        self.id_to_address.get(&id).copied()
121    }
122
123    fn id_for(&self, address: NodeAddress) -> Option<NodeId> {
124        self.address_to_id.get(&address).copied()
125    }
126}
127
128impl Workspace {
129    /// Load a workspace rooted at the given `apimock.toml`-like path.
130    ///
131    /// Accepts either a direct path to the config file or the
132    /// directory containing one; a missing file-path is searched for
133    /// as `apimock.toml` inside `root`. Mirrors the CLI's existing
134    /// resolution rules.
135    pub fn load(root: PathBuf) -> Result<Self, WorkspaceError> {
136        let resolved = resolve_root(&root)?;
137
138        // Re-use `Config::new` so rule-set loading + validation go
139        // through the same path as the running server. This is
140        // important — the spec's "GUI doesn't break running server
141        // behaviour" invariant (§13) is easiest to guarantee if both
142        // paths share the same loader.
143        let config_path_string = resolved.to_string_lossy().into_owned();
144        let config = Config::new(Some(&config_path_string), None).map_err(WorkspaceError::from)?;
145
146        let mut workspace = Self {
147            root_path: resolved,
148            config,
149            ids: IdIndex::default(),
150            diagnostics: Vec::new(),
151        };
152        workspace.seed_ids();
153        Ok(workspace)
154    }
155
156    /// Assign a fresh NodeId to every editable address in `config`.
157    /// Called from `load` and from any `apply()` path that might
158    /// change the address of existing nodes.
159    ///
160    /// # Why we rebuild rather than patch
161    ///
162    /// `NodeAddress` carries positional indices (`rule_set: usize`).
163    /// When a rule is deleted from the middle of a list, every rule
164    /// after it gets a new index, so its `NodeAddress` changes. The
165    /// GUI's NodeId must *not* change — that's the whole point of
166    /// UUIDs — so this function preserves the existing
167    /// address_to_id mapping where addresses still exist and only
168    /// mints new IDs for genuinely new addresses.
169    ///
170    /// For Step 1 there's nothing to preserve: load is a from-scratch
171    /// operation. Step 2 will call a more careful `reseed_after_edit`.
172    fn seed_ids(&mut self) {
173        // Root is always present.
174        self.ids.insert(NodeAddress::Root);
175
176        // Fallback respond dir is always present — even if the user
177        // hasn't set it, it has a default value.
178        self.ids.insert(NodeAddress::FallbackRespondDir);
179
180        // Rule sets + their rules + respond blocks.
181        for (rs_idx, rule_set) in self.config.service.rule_sets.iter().enumerate() {
182            self.ids.insert(NodeAddress::RuleSet { rule_set: rs_idx });
183            for (rule_idx, _rule) in rule_set.rules.iter().enumerate() {
184                self.ids.insert(NodeAddress::Rule {
185                    rule_set: rs_idx,
186                    rule: rule_idx,
187                });
188                self.ids.insert(NodeAddress::Respond {
189                    rule_set: rs_idx,
190                    rule: rule_idx,
191                });
192            }
193        }
194
195        // Middleware references.
196        if let Some(paths) = self.config.service.middlewares_file_paths.as_ref() {
197            for mw_idx in 0..paths.len() {
198                self.ids
199                    .insert(NodeAddress::Middleware { middleware: mw_idx });
200            }
201        }
202    }
203
204    /// Build a snapshot for GUI rendering.
205    ///
206    /// # Allocation cost
207    ///
208    /// A snapshot fully owns its data (no borrows into the workspace)
209    /// so the GUI can serialise / send / store it without lifetime
210    /// gymnastics. This is O(total editable nodes) allocation per
211    /// call; the GUI should call it once per edit, not once per
212    /// render frame.
213    pub fn snapshot(&self) -> WorkspaceSnapshot {
214        let mut files: Vec<ConfigFileView> = Vec::new();
215
216        // Root file.
217        if let Some(root_nodes) = self.root_file_nodes() {
218            files.push(root_nodes);
219        }
220
221        // Rule-set files.
222        for (rs_idx, rule_set) in self.config.service.rule_sets.iter().enumerate() {
223            files.push(self.rule_set_file_view(rs_idx, rule_set));
224        }
225
226        // Middleware files. We don't introspect them beyond their path
227        // existence; the Rhai AST is a server-side concern.
228        if let Some(paths) = self.config.service.middlewares_file_paths.as_ref() {
229            for (mw_idx, mw_path) in paths.iter().enumerate() {
230                let abs = self.resolve_relative(mw_path);
231                let id = self
232                    .ids
233                    .id_for(NodeAddress::Middleware { middleware: mw_idx })
234                    .expect("middleware id seeded at load");
235                let node = ConfigNodeView {
236                    id,
237                    source_file: abs.clone(),
238                    toml_path: format!("service.middlewares[{}]", mw_idx),
239                    display_name: mw_path.clone(),
240                    kind: NodeKind::Script,
241                    validation: NodeValidation::ok(),
242                };
243                files.push(ConfigFileView {
244                    path: abs.clone(),
245                    display_name: file_basename(&abs),
246                    kind: ConfigFileKind::Middleware,
247                    nodes: vec![node],
248                });
249            }
250        }
251
252        // Route catalog — placeholder for Step 5. Currently an empty
253        // snapshot; stage-2 of routing will populate.
254        let routes = apimock_routing::view::RouteCatalogSnapshot::empty();
255
256        WorkspaceSnapshot {
257            files,
258            routes,
259            diagnostics: self.diagnostics.clone(),
260        }
261    }
262
263    /// Apply one edit command, mutating the in-memory workspace.
264    ///
265    /// # Shape of the implementation
266    ///
267    /// Each `EditCommand` variant maps to a small helper method. The
268    /// helpers return `Result<Vec<NodeId>, ApplyError>`; `apply` wraps
269    /// the ok-path in an `ApplyResult` with the right `requires_reload`
270    /// flag and reruns validation so the result carries up-to-date
271    /// diagnostics.
272    ///
273    /// # ID stability on structural changes
274    ///
275    /// Commands that change positional layout (Remove / Delete / Move
276    /// / Add) touch `self.ids` carefully so NodeIds that refer to the
277    /// *same logical node* survive the operation. For example, after
278    /// `RemoveRuleSet { id }` at index `i`, rule sets at positions
279    /// `i+1..` shift down by one: the code below explicitly migrates
280    /// their IDs so a GUI that selected rule-set #3 before the edit
281    /// still has the same ID pointing at what is now rule-set #2.
282    pub fn apply(&mut self, cmd: EditCommand) -> Result<ApplyResult, ApplyError> {
283        let (changed_nodes, requires_reload) = match cmd {
284            EditCommand::AddRuleSet { path } => {
285                let ids = self.cmd_add_rule_set(path)?;
286                (ids, true)
287            }
288            EditCommand::RemoveRuleSet { id } => {
289                let ids = self.cmd_remove_rule_set(id)?;
290                (ids, true)
291            }
292            EditCommand::AddRule { parent, rule } => {
293                let ids = self.cmd_add_rule(parent, rule)?;
294                (ids, true)
295            }
296            EditCommand::UpdateRule { id, rule } => {
297                let ids = self.cmd_update_rule(id, rule)?;
298                (ids, true)
299            }
300            EditCommand::DeleteRule { id } => {
301                let ids = self.cmd_delete_rule(id)?;
302                (ids, true)
303            }
304            EditCommand::MoveRule { id, new_index } => {
305                let ids = self.cmd_move_rule(id, new_index)?;
306                (ids, true)
307            }
308            EditCommand::UpdateRespond { id, respond } => {
309                let ids = self.cmd_update_respond(id, respond)?;
310                (ids, true)
311            }
312            EditCommand::UpdateRootSetting { key, value } => {
313                let ids = self.cmd_update_root_setting(key, value)?;
314                // Root settings include listener port / ip, which change
315                // how the listener binds. Those need a full restart, not
316                // just a reload — the caller reads `reload_hint` from
317                // save() for the fine-grained hint; at apply time we
318                // conservatively flag `requires_reload = true`.
319                (ids, true)
320            }
321        };
322
323        // After any mutation, refresh per-node validation so the
324        // `ApplyResult.diagnostics` reflects the new state. This is the
325        // Step-3 piece: validation is now per-node and GUI-ready, not a
326        // bare boolean.
327        let diagnostics = self.collect_diagnostics();
328
329        Ok(ApplyResult {
330            changed_nodes,
331            diagnostics,
332            requires_reload,
333        })
334    }
335
336    // --- Individual command implementations --------------------------
337
338    fn cmd_add_rule_set(&mut self, path: String) -> Result<Vec<NodeId>, ApplyError> {
339        // Resolve the path against the root's parent dir (same
340        // convention as `Config::new`), then load the rule set.
341        let relative_dir = self.config_relative_dir().map_err(internal_path_err)?;
342        let joined = Path::new(&relative_dir).join(&path);
343        let path_str = joined.to_str().ok_or_else(|| ApplyError::InvalidPayload {
344            reason: format!(
345                "path contains non-UTF-8 bytes: {}",
346                joined.to_string_lossy()
347            ),
348        })?;
349
350        let next_idx = self.config.service.rule_sets.len();
351        let new_rule_set = RuleSet::new(path_str, relative_dir.as_str(), next_idx)
352            .map_err(|e| ApplyError::InvalidPayload {
353                reason: format!("failed to load rule set `{}`: {}", path, e),
354            })?;
355
356        // Record the path in service.rule_sets_file_paths too so
357        // `save()` persists the change later.
358        let file_paths = self
359            .config
360            .service
361            .rule_sets_file_paths
362            .get_or_insert_with(Vec::new);
363        file_paths.push(path.clone());
364
365        let new_len = self.config.service.rule_sets.len() + 1;
366        self.config.service.rule_sets.push(new_rule_set);
367
368        // Mint IDs for the new rule set + its rules + responds.
369        let rs_addr = NodeAddress::RuleSet {
370            rule_set: next_idx,
371        };
372        let rs_id = self.ids.insert(rs_addr);
373        let mut changed = vec![rs_id];
374        let new_rs = &self.config.service.rule_sets[next_idx];
375        for rule_idx in 0..new_rs.rules.len() {
376            let r_id = self.ids.insert(NodeAddress::Rule {
377                rule_set: next_idx,
378                rule: rule_idx,
379            });
380            let resp_id = self.ids.insert(NodeAddress::Respond {
381                rule_set: next_idx,
382                rule: rule_idx,
383            });
384            changed.push(r_id);
385            changed.push(resp_id);
386        }
387        // Sanity: new_len is purely informational here, but makes
388        // the invariant explicit to anyone reading the code.
389        debug_assert_eq!(new_len, self.config.service.rule_sets.len());
390
391        Ok(changed)
392    }
393
    /// `EditCommand::RemoveRuleSet`: delete the rule set behind `id`
    /// from both the runtime model and the persisted file-path list,
    /// then migrate positional `NodeAddress` entries so surviving IDs
    /// stay valid.
    ///
    /// Returns every ID whose node was removed or whose address
    /// shifted, so the GUI can refresh them.
    ///
    /// # Errors
    ///
    /// - `ApplyError::UnknownNode` — `id` is not in the index.
    /// - `ApplyError::WrongNodeKind` — `id` is not a rule-set node.
    /// - `ApplyError::InvalidPayload` — index stale / out of range.
    fn cmd_remove_rule_set(&mut self, id: NodeId) -> Result<Vec<NodeId>, ApplyError> {
        let addr = self.ids.lookup(id).ok_or(ApplyError::UnknownNode { id })?;
        let NodeAddress::RuleSet { rule_set: idx } = addr else {
            return Err(ApplyError::WrongNodeKind {
                id,
                reason: "expected a rule set id".to_owned(),
            });
        };

        let len = self.config.service.rule_sets.len();
        if idx >= len {
            return Err(ApplyError::InvalidPayload {
                reason: format!("rule set index {} out of range (len={})", idx, len),
            });
        }

        // Collect IDs that will change: the removed one plus every rule
        // set (+ rules + responds) whose index shifts down by one.
        // NOTE: collected *before* the removal, while old indices are
        // still valid addresses.
        let mut changed: Vec<NodeId> = Vec::new();
        // the rule-set itself and its internal nodes (removed)
        changed.push(id);
        if let Some(removed_rs) = self.config.service.rule_sets.get(idx) {
            for rule_idx in 0..removed_rs.rules.len() {
                if let Some(r_id) = self.ids.id_for(NodeAddress::Rule {
                    rule_set: idx,
                    rule: rule_idx,
                }) {
                    changed.push(r_id);
                }
                if let Some(resp_id) = self.ids.id_for(NodeAddress::Respond {
                    rule_set: idx,
                    rule: rule_idx,
                }) {
                    changed.push(resp_id);
                }
            }
        }

        // Actually remove — keep `rule_sets_file_paths` in lockstep so
        // `save()` won't resurrect the file reference.
        self.config.service.rule_sets.remove(idx);
        if let Some(paths) = self.config.service.rule_sets_file_paths.as_mut() {
            if idx < paths.len() {
                paths.remove(idx);
            }
        }

        // Migrate IDs: everything at `idx` onwards in the *old* layout
        // needs its address remapped. The clean approach: gather the
        // old (address → id) pairs we care about, clear the entries
        // affected by the shift, re-insert with new addresses.
        self.shift_rule_sets_down(idx);

        // Every shifted rule set's ID remains valid but its address
        // has changed; surface those IDs too so the GUI refreshes
        // their position indicators.
        for shifted_idx in idx..self.config.service.rule_sets.len() {
            if let Some(shifted_id) = self
                .ids
                .id_for(NodeAddress::RuleSet {
                    rule_set: shifted_idx,
                })
            {
                if !changed.contains(&shifted_id) {
                    changed.push(shifted_id);
                }
            }
        }

        Ok(changed)
    }
464
465    fn cmd_add_rule(
466        &mut self,
467        parent: NodeId,
468        rule_payload: crate::view::RulePayload,
469    ) -> Result<Vec<NodeId>, ApplyError> {
470        let addr = self
471            .ids
472            .lookup(parent)
473            .ok_or(ApplyError::UnknownNode { id: parent })?;
474        let NodeAddress::RuleSet { rule_set: rs_idx } = addr else {
475            return Err(ApplyError::WrongNodeKind {
476                id: parent,
477                reason: "expected a rule set id (parent for AddRule must be a rule set)".to_owned(),
478            });
479        };
480
481        let rule_set = self
482            .config
483            .service
484            .rule_sets
485            .get_mut(rs_idx)
486            .ok_or_else(|| ApplyError::InvalidPayload {
487                reason: format!("rule set index {} out of range", rs_idx),
488            })?;
489
490        let new_rule = build_rule_from_payload(rule_payload, rule_set, rs_idx)?;
491        let new_rule_idx = rule_set.rules.len();
492        rule_set.rules.push(new_rule);
493
494        let r_id = self.ids.insert(NodeAddress::Rule {
495            rule_set: rs_idx,
496            rule: new_rule_idx,
497        });
498        let resp_id = self.ids.insert(NodeAddress::Respond {
499            rule_set: rs_idx,
500            rule: new_rule_idx,
501        });
502        Ok(vec![parent, r_id, resp_id])
503    }
504
505    fn cmd_update_rule(
506        &mut self,
507        id: NodeId,
508        rule_payload: crate::view::RulePayload,
509    ) -> Result<Vec<NodeId>, ApplyError> {
510        let addr = self.ids.lookup(id).ok_or(ApplyError::UnknownNode { id })?;
511        let NodeAddress::Rule {
512            rule_set: rs_idx,
513            rule: rule_idx,
514        } = addr
515        else {
516            return Err(ApplyError::WrongNodeKind {
517                id,
518                reason: "expected a rule id".to_owned(),
519            });
520        };
521
522        let rule_set = self
523            .config
524            .service
525            .rule_sets
526            .get_mut(rs_idx)
527            .ok_or_else(|| ApplyError::InvalidPayload {
528                reason: format!("rule set index {} out of range", rs_idx),
529            })?;
530
531        let new_rule = build_rule_from_payload(rule_payload, rule_set, rs_idx)?;
532        *rule_set
533            .rules
534            .get_mut(rule_idx)
535            .ok_or_else(|| ApplyError::InvalidPayload {
536                reason: format!("rule index {} out of range", rule_idx),
537            })? = new_rule;
538
539        let resp_id = self
540            .ids
541            .id_for(NodeAddress::Respond {
542                rule_set: rs_idx,
543                rule: rule_idx,
544            })
545            .unwrap_or_else(NodeId::new);
546        Ok(vec![id, resp_id])
547    }
548
    /// `EditCommand::DeleteRule`: remove the rule at `id` (and its
    /// respond node), then shift later rules' id-index addresses down
    /// by one via `shift_rules_down` so their NodeIds stay valid.
    ///
    /// Returns the deleted rule's id + respond id plus every id whose
    /// address shifted.
    ///
    /// # Errors
    ///
    /// - `ApplyError::UnknownNode` / `WrongNodeKind` for a bad `id`.
    /// - `ApplyError::InvalidPayload` when either index is out of range.
    fn cmd_delete_rule(&mut self, id: NodeId) -> Result<Vec<NodeId>, ApplyError> {
        let addr = self.ids.lookup(id).ok_or(ApplyError::UnknownNode { id })?;
        let NodeAddress::Rule {
            rule_set: rs_idx,
            rule: rule_idx,
        } = addr
        else {
            return Err(ApplyError::WrongNodeKind {
                id,
                reason: "expected a rule id".to_owned(),
            });
        };

        let rule_set = self
            .config
            .service
            .rule_sets
            .get_mut(rs_idx)
            .ok_or_else(|| ApplyError::InvalidPayload {
                reason: format!("rule set index {} out of range", rs_idx),
            })?;

        if rule_idx >= rule_set.rules.len() {
            return Err(ApplyError::InvalidPayload {
                reason: format!("rule index {} out of range", rule_idx),
            });
        }

        // Gather IDs that will change — before the removal, while the
        // old addresses are still in the index.
        let mut changed: Vec<NodeId> = Vec::new();
        changed.push(id);
        if let Some(resp_id) = self.ids.id_for(NodeAddress::Respond {
            rule_set: rs_idx,
            rule: rule_idx,
        }) {
            changed.push(resp_id);
        }

        rule_set.rules.remove(rule_idx);
        self.shift_rules_down(rs_idx, rule_idx);

        // Shifted rules' ids change their address but not their identity.
        let new_rule_count = self.config.service.rule_sets[rs_idx].rules.len();
        for shifted_idx in rule_idx..new_rule_count {
            if let Some(r_id) = self.ids.id_for(NodeAddress::Rule {
                rule_set: rs_idx,
                rule: shifted_idx,
            }) {
                if !changed.contains(&r_id) {
                    changed.push(r_id);
                }
            }
            if let Some(resp_id) = self.ids.id_for(NodeAddress::Respond {
                rule_set: rs_idx,
                rule: shifted_idx,
            }) {
                if !changed.contains(&resp_id) {
                    changed.push(resp_id);
                }
            }
        }

        Ok(changed)
    }
613
    /// `EditCommand::MoveRule`: reposition a rule within its rule set.
    ///
    /// No-ops (returning just `id`) when `new_index` equals the rule's
    /// current index. Otherwise the rule is removed and re-inserted,
    /// and `reorder_rule_ids` rewrites the id index so every rule's
    /// NodeId keeps pointing at the same logical rule.
    ///
    /// Returns the ids of every rule/respond pair in the affected
    /// `[min(old,new) .. max(old,new)]` span, whose addresses changed.
    ///
    /// # Errors
    ///
    /// - `ApplyError::UnknownNode` / `WrongNodeKind` for a bad `id`.
    /// - `ApplyError::InvalidPayload` when either index is out of bounds.
    fn cmd_move_rule(&mut self, id: NodeId, new_index: usize) -> Result<Vec<NodeId>, ApplyError> {
        let addr = self.ids.lookup(id).ok_or(ApplyError::UnknownNode { id })?;
        let NodeAddress::Rule {
            rule_set: rs_idx,
            rule: old_idx,
        } = addr
        else {
            return Err(ApplyError::WrongNodeKind {
                id,
                reason: "expected a rule id".to_owned(),
            });
        };

        let rule_set = self
            .config
            .service
            .rule_sets
            .get_mut(rs_idx)
            .ok_or_else(|| ApplyError::InvalidPayload {
                reason: format!("rule set index {} out of range", rs_idx),
            })?;

        if old_idx >= rule_set.rules.len() || new_index >= rule_set.rules.len() {
            return Err(ApplyError::InvalidPayload {
                reason: format!(
                    "move out of bounds: old_idx={}, new_index={}, len={}",
                    old_idx,
                    new_index,
                    rule_set.rules.len()
                ),
            });
        }
        if old_idx == new_index {
            return Ok(vec![id]);
        }

        // Do the move in `config`.
        let rule = rule_set.rules.remove(old_idx);
        rule_set.rules.insert(new_index, rule);

        // Reshuffle IDs for all rules in this rule set: the simplest
        // correct approach is to pull out all rule+respond IDs for
        // this rule-set, reorder them to match the new slice order,
        // and re-insert.
        self.reorder_rule_ids(rs_idx, old_idx, new_index);

        // Every rule in [min(old, new) .. max(old, new)] changed address;
        // report their IDs so the GUI repaints.
        let lo = old_idx.min(new_index);
        let hi = old_idx.max(new_index);
        let mut changed: Vec<NodeId> = Vec::new();
        for idx in lo..=hi {
            if let Some(r_id) = self.ids.id_for(NodeAddress::Rule {
                rule_set: rs_idx,
                rule: idx,
            }) {
                changed.push(r_id);
            }
            if let Some(resp_id) = self.ids.id_for(NodeAddress::Respond {
                rule_set: rs_idx,
                rule: idx,
            }) {
                changed.push(resp_id);
            }
        }
        Ok(changed)
    }
681
    /// `EditCommand::UpdateRespond`: replace one rule's `respond`
    /// block with a value built from the payload, then recompute the
    /// rule's derived fields so e.g. the parsed status code matches
    /// the new raw `status` value.
    ///
    /// # Errors
    ///
    /// - `ApplyError::UnknownNode` / `WrongNodeKind` for a bad `id`.
    /// - `ApplyError::InvalidPayload` when the addressed rule no
    ///   longer exists.
    fn cmd_update_respond(
        &mut self,
        id: NodeId,
        respond: crate::view::RespondPayload,
    ) -> Result<Vec<NodeId>, ApplyError> {
        let addr = self.ids.lookup(id).ok_or(ApplyError::UnknownNode { id })?;
        let NodeAddress::Respond {
            rule_set: rs_idx,
            rule: rule_idx,
        } = addr
        else {
            return Err(ApplyError::WrongNodeKind {
                id,
                reason: "expected a respond id".to_owned(),
            });
        };

        let rule = self
            .config
            .service
            .rule_sets
            .get_mut(rs_idx)
            .and_then(|rs| rs.rules.get_mut(rule_idx))
            .ok_or_else(|| ApplyError::InvalidPayload {
                reason: format!(
                    "rule at rule_set={}, rule={} not found",
                    rs_idx, rule_idx
                ),
            })?;

        rule.respond = build_respond_from_payload(respond);

        // Re-run status-code derivation so the updated `status` field
        // has its matching `StatusCode` stored. The direct indexing
        // below cannot panic: the `get_mut` chain above already proved
        // both indices are in range.
        let rule_set = &self.config.service.rule_sets[rs_idx];
        let derived = rule_set.rules[rule_idx].compute_derived_fields(rule_set, rule_idx, rs_idx);
        self.config.service.rule_sets[rs_idx].rules[rule_idx] = derived;

        Ok(vec![id])
    }
722
723    fn cmd_update_root_setting(
724        &mut self,
725        key: crate::view::RootSettingKey,
726        value: EditValue,
727    ) -> Result<Vec<NodeId>, ApplyError> {
728        use crate::view::RootSettingKey::*;
729
730        match key {
731            ListenerIpAddress => {
732                let s = value_as_string(&value)?;
733                let listener = self.config.listener.get_or_insert_with(Default::default);
734                listener.ip_address = s;
735            }
736            ListenerPort => {
737                let n = value_as_integer(&value)?;
738                if !(0..=u16::MAX as i64).contains(&n) {
739                    return Err(ApplyError::InvalidPayload {
740                        reason: format!("port {} not in 0..=65535", n),
741                    });
742                }
743                let listener = self.config.listener.get_or_insert_with(Default::default);
744                listener.port = n as u16;
745            }
746            ServiceFallbackRespondDir => {
747                let s = value_as_string(&value)?;
748                self.config.service.fallback_respond_dir = s;
749            }
750            ServiceStrategy => {
751                let s = value_as_string(&value)?;
752                // The only recognised strategy value today is
753                // "first_match". Anything else is rejected — if future
754                // strategies are added, extend this match.
755                match s.as_str() {
756                    "first_match" => {
757                        self.config.service.strategy =
758                            Some(apimock_routing::Strategy::FirstMatch);
759                    }
760                    other => {
761                        return Err(ApplyError::InvalidPayload {
762                            reason: format!("unknown strategy: {}", other),
763                        });
764                    }
765                }
766            }
767        }
768
769        // Root is a singleton; its NodeId is always the same.
770        let id = self
771            .ids
772            .id_for(NodeAddress::Root)
773            .expect("root id seeded at load");
774        Ok(vec![id])
775    }
776
777    // --- Shared helpers ----------------------------------------------
778
779    /// After a rule set is removed at `removed_idx`, migrate every ID
780    /// whose address referenced a later rule set to its new index.
781    fn shift_rule_sets_down(&mut self, removed_idx: usize) {
782        // Walk current layout (after removal). For each surviving
783        // rule_set at new index `new_idx`, the *old* index was
784        // `new_idx` if `new_idx < removed_idx` (no shift needed) or
785        // `new_idx + 1` if `new_idx >= removed_idx` (it shifted down).
786        // We rebuild mappings only for the shifted half.
787        let new_rs_count = self.config.service.rule_sets.len();
788
789        // First drop any stale ID entries for the removed index and
790        // for everything whose old address will be replaced.
791        // Collect stale (old) addresses first, then update `self.ids`.
792        let mut stale: Vec<NodeAddress> = Vec::new();
793        stale.push(NodeAddress::RuleSet {
794            rule_set: removed_idx,
795        });
796        // The old index range is [removed_idx, new_rs_count+1).
797        for old_idx in removed_idx..new_rs_count + 1 {
798            stale.push(NodeAddress::RuleSet { rule_set: old_idx });
799            // We don't know the old rule counts any more, so we walk
800            // the id index for matches.
801        }
802
803        // Safer approach: pull all entries whose address's rule_set
804        // field is >= removed_idx (both Rule and Respond and RuleSet),
805        // and rebuild them.
806        let mut to_migrate: Vec<(NodeId, NodeAddress)> = Vec::new();
807        for (&addr, &id) in self.ids.address_to_id.iter() {
808            match addr {
809                NodeAddress::RuleSet { rule_set } if rule_set >= removed_idx => {
810                    to_migrate.push((id, addr));
811                }
812                NodeAddress::Rule { rule_set, .. } if rule_set >= removed_idx => {
813                    to_migrate.push((id, addr));
814                }
815                NodeAddress::Respond { rule_set, .. } if rule_set >= removed_idx => {
816                    to_migrate.push((id, addr));
817                }
818                _ => {}
819            }
820        }
821
822        for (id, addr) in &to_migrate {
823            self.ids.address_to_id.remove(addr);
824            self.ids.id_to_address.remove(id);
825        }
826
827        // Re-insert with shifted addresses, skipping anything that
828        // belonged to the removed rule set.
829        for (id, addr) in to_migrate {
830            let new_addr = match addr {
831                NodeAddress::RuleSet { rule_set } => {
832                    if rule_set == removed_idx {
833                        continue; // removed outright
834                    }
835                    NodeAddress::RuleSet {
836                        rule_set: rule_set - 1,
837                    }
838                }
839                NodeAddress::Rule { rule_set, rule } => {
840                    if rule_set == removed_idx {
841                        continue;
842                    }
843                    NodeAddress::Rule {
844                        rule_set: rule_set - 1,
845                        rule,
846                    }
847                }
848                NodeAddress::Respond { rule_set, rule } => {
849                    if rule_set == removed_idx {
850                        continue;
851                    }
852                    NodeAddress::Respond {
853                        rule_set: rule_set - 1,
854                        rule,
855                    }
856                }
857                other => other,
858            };
859            self.ids.id_to_address.insert(id, new_addr);
860            self.ids.address_to_id.insert(new_addr, id);
861        }
862    }
863
864    /// After a rule is deleted from `rule_set_idx` at position
865    /// `removed_rule_idx`, shift IDs for later rules in the same set.
866    fn shift_rules_down(&mut self, rule_set_idx: usize, removed_rule_idx: usize) {
867        let mut to_migrate: Vec<(NodeId, NodeAddress)> = Vec::new();
868        for (&addr, &id) in self.ids.address_to_id.iter() {
869            match addr {
870                NodeAddress::Rule { rule_set, rule }
871                    if rule_set == rule_set_idx && rule >= removed_rule_idx =>
872                {
873                    to_migrate.push((id, addr));
874                }
875                NodeAddress::Respond { rule_set, rule }
876                    if rule_set == rule_set_idx && rule >= removed_rule_idx =>
877                {
878                    to_migrate.push((id, addr));
879                }
880                _ => {}
881            }
882        }
883
884        for (id, addr) in &to_migrate {
885            self.ids.address_to_id.remove(addr);
886            self.ids.id_to_address.remove(id);
887        }
888
889        for (id, addr) in to_migrate {
890            let new_addr = match addr {
891                NodeAddress::Rule { rule_set, rule } => {
892                    if rule == removed_rule_idx {
893                        continue;
894                    }
895                    NodeAddress::Rule {
896                        rule_set,
897                        rule: rule - 1,
898                    }
899                }
900                NodeAddress::Respond { rule_set, rule } => {
901                    if rule == removed_rule_idx {
902                        continue;
903                    }
904                    NodeAddress::Respond {
905                        rule_set,
906                        rule: rule - 1,
907                    }
908                }
909                other => other,
910            };
911            self.ids.id_to_address.insert(id, new_addr);
912            self.ids.address_to_id.insert(new_addr, id);
913        }
914    }
915
916    /// After a rule in `rule_set_idx` moves from `old_idx` to
917    /// `new_idx`, shuffle the IDs of every rule between those indices.
918    fn reorder_rule_ids(&mut self, rule_set_idx: usize, old_idx: usize, new_idx: usize) {
919        // Grab current mapping for all rules in this rule set.
920        let rule_count = self.config.service.rule_sets[rule_set_idx].rules.len();
921        let mut rule_ids: Vec<Option<NodeId>> = (0..rule_count)
922            .map(|r| {
923                self.ids.id_for(NodeAddress::Rule {
924                    rule_set: rule_set_idx,
925                    rule: r,
926                })
927            })
928            .collect();
929        let mut resp_ids: Vec<Option<NodeId>> = (0..rule_count)
930            .map(|r| {
931                self.ids.id_for(NodeAddress::Respond {
932                    rule_set: rule_set_idx,
933                    rule: r,
934                })
935            })
936            .collect();
937
938        // Before the config move, `rule_ids[old_idx]` held the moving
939        // rule's old ID. But the config mutation already happened —
940        // so the id_for lookups above are pre-migration (the ids
941        // didn't change), they simply don't match the new layout yet.
942        // We mimic the same move on `rule_ids`:
943        let moving_r = rule_ids.remove(old_idx);
944        rule_ids.insert(new_idx, moving_r);
945        let moving_resp = resp_ids.remove(old_idx);
946        resp_ids.insert(new_idx, moving_resp);
947
948        // Clear old mappings for these addresses and repopulate.
949        for r in 0..rule_count {
950            let rule_addr = NodeAddress::Rule {
951                rule_set: rule_set_idx,
952                rule: r,
953            };
954            let resp_addr = NodeAddress::Respond {
955                rule_set: rule_set_idx,
956                rule: r,
957            };
958            if let Some(prev_id) = self.ids.address_to_id.remove(&rule_addr) {
959                self.ids.id_to_address.remove(&prev_id);
960            }
961            if let Some(prev_id) = self.ids.address_to_id.remove(&resp_addr) {
962                self.ids.id_to_address.remove(&prev_id);
963            }
964        }
965        for (r, id_opt) in rule_ids.into_iter().enumerate() {
966            let addr = NodeAddress::Rule {
967                rule_set: rule_set_idx,
968                rule: r,
969            };
970            let id = id_opt.unwrap_or_else(NodeId::new);
971            self.ids.id_to_address.insert(id, addr);
972            self.ids.address_to_id.insert(addr, id);
973        }
974        for (r, id_opt) in resp_ids.into_iter().enumerate() {
975            let addr = NodeAddress::Respond {
976                rule_set: rule_set_idx,
977                rule: r,
978            };
979            let id = id_opt.unwrap_or_else(NodeId::new);
980            self.ids.id_to_address.insert(id, addr);
981            self.ids.address_to_id.insert(addr, id);
982        }
983    }
984
    /// Relative path from the current directory to the config's parent
    /// directory — a thin wrapper over the `Config` computation so
    /// call sites inside the apply layer share one spelling of it.
    fn config_relative_dir(&self) -> Result<String, ConfigError> {
        self.config.current_dir_to_parent_dir_relative_path()
    }
988
989    /// Walk every node, asking it for its validation state, and return
990    /// the flat list of issues. Used at apply-time and on demand from
991    /// `validate()`.
992    fn collect_diagnostics(&self) -> Vec<Diagnostic> {
993        let mut out: Vec<Diagnostic> = Vec::new();
994        for (rs_idx, rule_set) in self.config.service.rule_sets.iter().enumerate() {
995            for (rule_idx, rule) in rule_set.rules.iter().enumerate() {
996                let nv = respond_node_validation(&rule.respond, rule_set, rule_idx, rs_idx);
997                if nv.ok {
998                    continue;
999                }
1000                let resp_id = self.ids.id_for(NodeAddress::Respond {
1001                    rule_set: rs_idx,
1002                    rule: rule_idx,
1003                });
1004                for issue in nv.issues {
1005                    out.push(Diagnostic {
1006                        node_id: resp_id,
1007                        file: Some(PathBuf::from(rule_set.file_path.as_str())),
1008                        severity: issue.severity,
1009                        message: issue.message,
1010                    });
1011                }
1012            }
1013        }
1014
1015        // Root-level check: fallback_respond_dir must exist.
1016        if !Path::new(self.config.service.fallback_respond_dir.as_str()).exists() {
1017            out.push(Diagnostic {
1018                node_id: self.ids.id_for(NodeAddress::FallbackRespondDir),
1019                file: Some(self.root_path.clone()),
1020                severity: Severity::Error,
1021                message: format!(
1022                    "fallback_respond_dir does not exist: {}",
1023                    self.config.service.fallback_respond_dir
1024                ),
1025            });
1026        }
1027
1028        out
1029    }
1030
1031    // --- Public API ----
1032
1033    /// Validate the workspace and return a GUI-ready report.
1034    ///
1035    /// Uses the same per-node checks `snapshot()` does so the numbers
1036    /// line up: a node rendered with a red underline in the snapshot
1037    /// will appear in `report.diagnostics` with the same message.
1038    pub fn validate(&self) -> ValidationReport {
1039        let diagnostics = self.collect_diagnostics();
1040        let is_valid = !diagnostics
1041            .iter()
1042            .any(|d| matches!(d.severity, Severity::Error));
1043        ValidationReport {
1044            diagnostics,
1045            is_valid,
1046        }
1047    }
1048
1049    /// Save the workspace back to disk. **Step 4 will implement this.**
1050    pub fn save(&mut self) -> Result<SaveResult, SaveError> {
1051        Err(SaveError::Inconsistent {
1052            reason: "Workspace::save is a Step-4 feature; not implemented in 5.1.0"
1053                .to_owned(),
1054        })
1055    }
1056
1057    /// Root config file as a `ConfigFileView`, if it can be rendered.
1058    fn root_file_nodes(&self) -> Option<ConfigFileView> {
1059        let mut nodes = Vec::new();
1060
1061        if let Some(root_id) = self.ids.id_for(NodeAddress::Root) {
1062            nodes.push(ConfigNodeView {
1063                id: root_id,
1064                source_file: self.root_path.clone(),
1065                toml_path: String::new(),
1066                display_name: "apimock.toml".to_owned(),
1067                kind: NodeKind::RootSetting,
1068                validation: NodeValidation::ok(),
1069            });
1070        }
1071
1072        if let Some(fb_id) = self.ids.id_for(NodeAddress::FallbackRespondDir) {
1073            nodes.push(ConfigNodeView {
1074                id: fb_id,
1075                source_file: self.root_path.clone(),
1076                toml_path: "service.fallback_respond_dir".to_owned(),
1077                display_name: self.config.service.fallback_respond_dir.clone(),
1078                kind: NodeKind::FileNode,
1079                validation: NodeValidation::ok(),
1080            });
1081        }
1082
1083        Some(ConfigFileView {
1084            path: self.root_path.clone(),
1085            display_name: file_basename(&self.root_path),
1086            kind: ConfigFileKind::Root,
1087            nodes,
1088        })
1089    }
1090
1091    fn rule_set_file_view(&self, rs_idx: usize, rule_set: &RuleSet) -> ConfigFileView {
1092        let file_path = PathBuf::from(rule_set.file_path.as_str());
1093        let mut nodes: Vec<ConfigNodeView> = Vec::new();
1094
1095        // Rule-set itself.
1096        if let Some(rs_id) = self
1097            .ids
1098            .id_for(NodeAddress::RuleSet { rule_set: rs_idx })
1099        {
1100            nodes.push(ConfigNodeView {
1101                id: rs_id,
1102                source_file: file_path.clone(),
1103                toml_path: String::new(),
1104                display_name: file_basename(&file_path),
1105                kind: NodeKind::RuleSet,
1106                validation: NodeValidation::ok(),
1107            });
1108        }
1109
1110        // Rules inside.
1111        for (rule_idx, rule) in rule_set.rules.iter().enumerate() {
1112            if let Some(rule_id) = self.ids.id_for(NodeAddress::Rule {
1113                rule_set: rs_idx,
1114                rule: rule_idx,
1115            }) {
1116                let url_path_label = rule
1117                    .when
1118                    .request
1119                    .url_path
1120                    .as_ref()
1121                    .map(|u| u.value.as_str())
1122                    .unwrap_or_default();
1123                let display = if url_path_label.is_empty() {
1124                    format!("Rule #{}", rule_idx + 1)
1125                } else {
1126                    url_path_label.to_owned()
1127                };
1128                nodes.push(ConfigNodeView {
1129                    id: rule_id,
1130                    source_file: file_path.clone(),
1131                    toml_path: format!("rules[{}]", rule_idx),
1132                    display_name: display,
1133                    kind: NodeKind::Rule,
1134                    validation: NodeValidation::ok(),
1135                });
1136            }
1137
1138            if let Some(resp_id) = self.ids.id_for(NodeAddress::Respond {
1139                rule_set: rs_idx,
1140                rule: rule_idx,
1141            }) {
1142                nodes.push(ConfigNodeView {
1143                    id: resp_id,
1144                    source_file: file_path.clone(),
1145                    toml_path: format!("rules[{}].respond", rule_idx),
1146                    display_name: summarise_respond(&rule.respond),
1147                    kind: NodeKind::Respond,
1148                    validation: respond_node_validation(&rule.respond, rule_set, rule_idx, rs_idx),
1149                });
1150            }
1151        }
1152
1153        ConfigFileView {
1154            path: file_path.clone(),
1155            display_name: file_basename(&file_path),
1156            kind: ConfigFileKind::RuleSet,
1157            nodes,
1158        }
1159    }
1160
1161    fn resolve_relative(&self, rel: &str) -> PathBuf {
1162        match self.config.current_dir_to_parent_dir_relative_path() {
1163            Ok(dir) => Path::new(&dir).join(rel),
1164            Err(_) => PathBuf::from(rel),
1165        }
1166    }
1167
    /// Access the underlying `Config` (read-only).
    ///
    /// Intended for embedders that need to build a running `Server`
    /// from the same workspace. Edit via `apply()` instead of touching
    /// `Config` directly — changes made through this reference are
    /// invisible to the ID index.
    pub fn config(&self) -> &Config {
        &self.config
    }
1175
    /// Path of the root config file this workspace was loaded from.
    /// Primarily for diagnostics.
    pub fn root_path(&self) -> &Path {
        &self.root_path
    }
1180}
1181
1182/// Collapse a `Respond` into a one-line display label.
1183fn summarise_respond(respond: &apimock_routing::Respond) -> String {
1184    if let Some(p) = respond.file_path.as_ref() {
1185        return format!("file: {}", p);
1186    }
1187    if let Some(t) = respond.text.as_ref() {
1188        const LIMIT: usize = 40;
1189        if t.chars().count() > LIMIT {
1190            let truncated: String = t.chars().take(LIMIT).collect();
1191            return format!("text: {}…", truncated);
1192        }
1193        return format!("text: {}", t);
1194    }
1195    if let Some(s) = respond.status.as_ref() {
1196        return format!("status: {}", s);
1197    }
1198    "(empty)".to_owned()
1199}
1200
1201fn respond_node_validation(
1202    respond: &apimock_routing::Respond,
1203    rule_set: &RuleSet,
1204    rule_idx: usize,
1205    rs_idx: usize,
1206) -> NodeValidation {
1207    // `Respond::validate` logs errors but returns a bool. For 5.1
1208    // per-node validation we want structured messages — so we replicate
1209    // the specific checks here rather than piping through the logger.
1210    let mut issues: Vec<ValidationIssue> = Vec::new();
1211
1212    let any = respond.file_path.is_some() || respond.text.is_some() || respond.status.is_some();
1213    if !any {
1214        issues.push(ValidationIssue {
1215            severity: Severity::Error,
1216            message: "response requires at least one of file_path, text, or status".to_owned(),
1217        });
1218    }
1219    if respond.file_path.is_some() && respond.text.is_some() {
1220        issues.push(ValidationIssue {
1221            severity: Severity::Error,
1222            message: "file_path and text cannot both be set".to_owned(),
1223        });
1224    }
1225    if respond.file_path.is_some() && respond.status.is_some() {
1226        issues.push(ValidationIssue {
1227            severity: Severity::Error,
1228            message: "status cannot be combined with file_path (only with text)".to_owned(),
1229        });
1230    }
1231
1232    // file-existence validation: this is the same behaviour the old
1233    // `Respond::validate(dir_prefix, …)` performed. We don't call it
1234    // directly because it writes to `log::error!`, which would flood
1235    // the console during every GUI snapshot.
1236    if let Some(file_path) = respond.file_path.as_ref() {
1237        let dir_prefix = rule_set.dir_prefix();
1238        let p = Path::new(dir_prefix.as_str()).join(file_path);
1239        if !p.exists() {
1240            issues.push(ValidationIssue {
1241                severity: Severity::Error,
1242                message: format!(
1243                    "file not found: {} (rule #{} in rule set #{})",
1244                    p.to_string_lossy(),
1245                    rule_idx + 1,
1246                    rs_idx + 1,
1247                ),
1248            });
1249        }
1250    }
1251
1252    NodeValidation {
1253        ok: issues.is_empty(),
1254        issues,
1255    }
1256}
1257
/// File-name component of `path`, or the whole path rendered lossily
/// when it has no final component (e.g. `/` or an empty path).
fn file_basename(path: &Path) -> String {
    match path.file_name() {
        Some(name) => name.to_string_lossy().into_owned(),
        None => path.to_string_lossy().into_owned(),
    }
}
1263
1264// --- Payload → model helpers used by the apply layer --------------
1265
1266fn build_rule_from_payload(
1267    payload: crate::view::RulePayload,
1268    rule_set: &apimock_routing::RuleSet,
1269    rs_idx: usize,
1270) -> Result<apimock_routing::Rule, ApplyError> {
1271    use apimock_routing::rule_set::rule::Rule;
1272    use apimock_routing::rule_set::rule::when::When;
1273    use apimock_routing::rule_set::rule::when::request::{
1274        Request, http_method::HttpMethod, url_path::UrlPathConfig,
1275    };
1276
1277    // Build the Request shape from the simple payload. We use the
1278    // simple UrlPath variant (Simple(String)) because the payload's
1279    // url_path is a plain string; the richer variants (op, etc.) are
1280    // out of scope for 5.1 — a GUI can round-trip them once Step-5
1281    // exposes richer form controls.
1282    let url_path_config = payload.url_path.as_ref().map(|s| UrlPathConfig::Simple(s.clone()));
1283
1284    let http_method = match payload.method.as_deref() {
1285        Some("GET") | Some("get") => Some(HttpMethod::Get),
1286        Some("POST") | Some("post") => Some(HttpMethod::Post),
1287        Some("PUT") | Some("put") => Some(HttpMethod::Put),
1288        Some("DELETE") | Some("delete") => Some(HttpMethod::Delete),
1289        Some(other) => {
1290            return Err(ApplyError::InvalidPayload {
1291                reason: format!(
1292                    "unsupported HTTP method `{}` — supported: GET, POST, PUT, DELETE",
1293                    other
1294                ),
1295            });
1296        }
1297        None => None,
1298    };
1299
1300    let request = Request {
1301        url_path_config,
1302        url_path: None, // derived below
1303        http_method,
1304        headers: None,
1305        body: None,
1306    };
1307
1308    let rule = Rule {
1309        when: When { request },
1310        respond: build_respond_from_payload(payload.respond),
1311    };
1312
1313    // compute_derived_fields normalises the URL path with the rule
1314    // set's prefix and validates the status code. Running it here means
1315    // the freshly-created rule is ready for matching without a second
1316    // pass.
1317    //
1318    // `rule_idx` at this point is whatever position the rule will
1319    // occupy after being pushed — use `rule_set.rules.len()` because
1320    // the push happens immediately after.
1321    Ok(rule.compute_derived_fields(rule_set, rule_set.rules.len(), rs_idx))
1322}
1323
1324fn build_respond_from_payload(payload: crate::view::RespondPayload) -> apimock_routing::Respond {
1325    apimock_routing::Respond {
1326        file_path: payload.file_path,
1327        csv_records_key: None,
1328        text: payload.text,
1329        status: payload.status,
1330        status_code: None, // derived later
1331        headers: None,
1332        delay_response_milliseconds: payload.delay_milliseconds,
1333    }
1334}
1335
1336fn value_as_string(value: &EditValue) -> Result<String, ApplyError> {
1337    match value {
1338        EditValue::String(s) => Ok(s.clone()),
1339        EditValue::Enum(s) => Ok(s.clone()),
1340        other => Err(ApplyError::InvalidPayload {
1341            reason: format!("expected a string, got {:?}", other),
1342        }),
1343    }
1344}
1345
1346fn value_as_integer(value: &EditValue) -> Result<i64, ApplyError> {
1347    match value {
1348        EditValue::Integer(n) => Ok(*n),
1349        other => Err(ApplyError::InvalidPayload {
1350            reason: format!("expected an integer, got {:?}", other),
1351        }),
1352    }
1353}
1354
1355/// Wrap a ConfigError produced inside an apply command as an
1356/// `ApplyError::InvalidPayload`. Apply uses anyhow-ish flattening
1357/// because the caller doesn't care whether the root cause was a
1358/// read-fail or a parse-fail — they all surface as "edit couldn't
1359/// be applied" from the GUI's point of view.
1360fn internal_path_err(err: ConfigError) -> ApplyError {
1361    ApplyError::InvalidPayload {
1362        reason: format!("internal path resolution failed: {}", err),
1363    }
1364}
1365
1366fn resolve_root(root: &Path) -> Result<PathBuf, WorkspaceError> {
1367    if root.is_file() {
1368        return Ok(root.to_path_buf());
1369    }
1370    if root.is_dir() {
1371        let candidate = root.join("apimock.toml");
1372        if candidate.is_file() {
1373            return Ok(candidate);
1374        }
1375        return Err(WorkspaceError::InvalidRoot {
1376            path: root.to_path_buf(),
1377            reason: "directory does not contain apimock.toml".to_owned(),
1378        });
1379    }
1380    Err(WorkspaceError::InvalidRoot {
1381        path: root.to_path_buf(),
1382        reason: "path does not exist".to_owned(),
1383    })
1384}
1385
1386// Convert a raw `RoutingError` sneaked into the load path; normally
1387// `ConfigError` wraps it, but the explicit conversion keeps the
1388// apply-layer clean when it needs to materialise one.
1389#[allow(dead_code)]
1390fn routing_to_config(err: RoutingError) -> ConfigError {
1391    ConfigError::from(err)
1392}
1393
1394#[cfg(test)]
1395mod tests;