// ito_core/backend_coordination.rs
//! Backend coordination use-cases for CLI commands.
//!
//! Provides the business logic for claim, release, allocate, and sync
//! operations that the CLI adapter calls. Each function accepts trait
//! objects for the backend clients so the CLI can inject the concrete
//! implementation.

use std::path::Path;

use ito_domain::backend::{
    AllocateResult, ArchiveResult, ArtifactBundle, BackendArchiveClient, BackendLeaseClient,
    BackendSyncClient, ClaimResult, PushResult, ReleaseResult,
};
use ito_domain::modules::ModuleRepository as DomainModuleRepository;

use crate::backend_sync::map_backend_error;
use crate::errors::{CoreError, CoreResult};
19/// Claim a change lease through the backend.
20pub fn claim_change(
21    lease_client: &dyn BackendLeaseClient,
22    change_id: &str,
23) -> CoreResult<ClaimResult> {
24    lease_client
25        .claim(change_id)
26        .map_err(|e| map_backend_error(e, "claim"))
27}
28
29/// Release a change lease through the backend.
30pub fn release_change(
31    lease_client: &dyn BackendLeaseClient,
32    change_id: &str,
33) -> CoreResult<ReleaseResult> {
34    lease_client
35        .release(change_id)
36        .map_err(|e| map_backend_error(e, "release"))
37}
38
39/// Allocate the next available change through the backend.
40pub fn allocate_change(lease_client: &dyn BackendLeaseClient) -> CoreResult<AllocateResult> {
41    lease_client
42        .allocate()
43        .map_err(|e| map_backend_error(e, "allocate"))
44}
45
46/// Pull artifacts from the backend for a change.
47pub fn sync_pull(
48    sync_client: &dyn BackendSyncClient,
49    ito_path: &std::path::Path,
50    change_id: &str,
51    backup_dir: &std::path::Path,
52) -> CoreResult<ArtifactBundle> {
53    crate::backend_sync::pull_artifacts(sync_client, ito_path, change_id, backup_dir)
54}
55
56/// Push local artifacts to the backend for a change.
57pub fn sync_push(
58    sync_client: &dyn BackendSyncClient,
59    ito_path: &std::path::Path,
60    change_id: &str,
61    backup_dir: &std::path::Path,
62) -> CoreResult<PushResult> {
63    crate::backend_sync::push_artifacts(sync_client, ito_path, change_id, backup_dir)
64}
65
/// Result of a backend-mode archive orchestration.
///
/// Produced by [`archive_with_backend`] after both the local archive move
/// and the backend acknowledgement have completed.
#[derive(Debug)]
pub struct BackendArchiveOutcome {
    /// Spec IDs that were copied to the main specs tree.
    /// Empty when the caller requested `skip_specs`.
    pub specs_updated: Vec<String>,
    /// The archive folder name (date-prefixed).
    pub archive_name: String,
    /// Backend archive result with timestamp.
    pub backend_result: ArchiveResult,
}

77/// Orchestrate a backend-mode archive for a change.
78///
79/// The flow is:
80/// 1. Pull the final artifact bundle from the backend.
81/// 2. Copy spec deltas to the main specs tree (unless `skip_specs`).
82/// 3. Move the change to the archive directory.
83/// 4. Mark the change as archived on the backend.
84///
85/// If step 4 fails, the local archive is already committed — the caller
86/// should report the backend error but NOT roll back the local archive
87/// (the local state is correct; the backend can be retried).
88pub fn archive_with_backend(
89    sync_client: &dyn BackendSyncClient,
90    archive_client: &dyn BackendArchiveClient,
91    module_repo: &impl DomainModuleRepository,
92    ito_path: &Path,
93    change_id: &str,
94    backup_dir: &Path,
95    skip_specs: bool,
96) -> CoreResult<BackendArchiveOutcome> {
97    // Step 1: Pull final artifacts from backend
98    crate::backend_sync::pull_artifacts(sync_client, ito_path, change_id, backup_dir)?;
99
100    // Step 2: Copy spec deltas to main specs tree
101    let specs_updated = if skip_specs {
102        Vec::new()
103    } else {
104        let spec_names = crate::archive::discover_change_specs(ito_path, change_id)?;
105        crate::archive::copy_specs_to_main(ito_path, change_id, &spec_names)?
106    };
107
108    // Step 3: Move to archive
109    let archive_name = crate::archive::generate_archive_name(change_id);
110    crate::archive::move_to_archive(module_repo, ito_path, change_id, &archive_name)?;
111
112    // Step 4: Mark archived on backend
113    let backend_result = archive_client
114        .mark_archived(change_id)
115        .map_err(|e| map_backend_error(e, "archive"))?;
116
117    Ok(BackendArchiveOutcome {
118        specs_updated,
119        archive_name,
120        backend_result,
121    })
122}
123
124/// Check whether the given `CoreError` represents a backend availability failure.
125///
126/// The CLI can use this to suggest fallback to filesystem mode.
127pub fn is_backend_unavailable(err: &CoreError) -> bool {
128    match err {
129        CoreError::Process(msg) => msg.contains("Backend unavailable"),
130        _ => false,
131    }
132}
133
#[cfg(test)]
mod tests {
    use super::*;
    use ito_domain::backend::{
        ArchiveResult, BackendError, ClaimResult, LeaseConflict, ReleaseResult,
    };

    /// Fake lease client: each trait method returns its pre-seeded result.
    struct FakeLeaseClient {
        claim_result: Result<ClaimResult, BackendError>,
        release_result: Result<ReleaseResult, BackendError>,
        allocate_result: Result<AllocateResult, BackendError>,
    }

    impl FakeLeaseClient {
        /// All operations succeed; claim/allocate report holder "me".
        fn success() -> Self {
            Self {
                claim_result: Ok(ClaimResult {
                    change_id: "test".to_string(),
                    holder: "me".to_string(),
                    expires_at: None,
                }),
                release_result: Ok(ReleaseResult {
                    change_id: "test".to_string(),
                }),
                allocate_result: Ok(AllocateResult {
                    claim: Some(ClaimResult {
                        change_id: "test".to_string(),
                        holder: "me".to_string(),
                        expires_at: None,
                    }),
                }),
            }
        }

        /// Claim conflicts with holder "other"; allocate finds no work.
        fn conflict() -> Self {
            Self {
                claim_result: Err(BackendError::LeaseConflict(LeaseConflict {
                    change_id: "test".to_string(),
                    holder: "other".to_string(),
                    expires_at: None,
                })),
                release_result: Ok(ReleaseResult {
                    change_id: "test".to_string(),
                }),
                allocate_result: Ok(AllocateResult { claim: None }),
            }
        }
    }

    impl BackendLeaseClient for FakeLeaseClient {
        fn claim(&self, _change_id: &str) -> Result<ClaimResult, BackendError> {
            self.claim_result.clone()
        }

        fn release(&self, _change_id: &str) -> Result<ReleaseResult, BackendError> {
            self.release_result.clone()
        }

        fn allocate(&self) -> Result<AllocateResult, BackendError> {
            self.allocate_result.clone()
        }
    }

    #[test]
    fn claim_success() {
        let client = FakeLeaseClient::success();
        let result = claim_change(&client, "test").unwrap();
        assert_eq!(result.change_id, "test");
        assert_eq!(result.holder, "me");
    }

    #[test]
    fn claim_conflict() {
        let client = FakeLeaseClient::conflict();
        let err = claim_change(&client, "test").unwrap_err();
        let msg = err.to_string();
        // The mapped error must surface both the conflict kind and the holder.
        assert!(msg.contains("Lease conflict"), "msg: {msg}");
        assert!(msg.contains("other"), "msg: {msg}");
    }

    #[test]
    fn release_success() {
        let client = FakeLeaseClient::success();
        let result = release_change(&client, "test").unwrap();
        assert_eq!(result.change_id, "test");
    }

    #[test]
    fn allocate_with_work() {
        let client = FakeLeaseClient::success();
        let result = allocate_change(&client).unwrap();
        assert!(result.claim.is_some());
        assert_eq!(result.claim.unwrap().change_id, "test");
    }

    #[test]
    fn allocate_no_work() {
        // conflict() seeds allocate with Ok(claim: None), i.e. nothing to do.
        let client = FakeLeaseClient::conflict();
        let result = allocate_change(&client).unwrap();
        assert!(result.claim.is_none());
    }

    #[test]
    fn is_backend_unavailable_detects_process_error() {
        let err = CoreError::process("Backend unavailable during pull: timeout");
        assert!(is_backend_unavailable(&err));

        let err = CoreError::validation("some other error");
        assert!(!is_backend_unavailable(&err));
    }

    // ── Archive orchestration tests ────────────────────────────────

    use ito_domain::backend::BackendSyncClient;
    use std::cell::Cell;

    /// Fake sync client that always serves the same artifact bundle on pull.
    struct FakeSyncClient {
        bundle: ArtifactBundle,
    }

    impl FakeSyncClient {
        fn new(change_id: &str) -> Self {
            Self {
                bundle: ArtifactBundle {
                    change_id: change_id.to_string(),
                    proposal: Some("# Proposal\nTest content".to_string()),
                    design: None,
                    tasks: Some("- [x] Task 1\n".to_string()),
                    specs: vec![(
                        "test-cap".to_string(),
                        "## ADDED Requirements\n".to_string(),
                    )],
                    revision: "rev-final".to_string(),
                },
            }
        }
    }

    impl BackendSyncClient for FakeSyncClient {
        fn pull(&self, _change_id: &str) -> Result<ArtifactBundle, BackendError> {
            Ok(self.bundle.clone())
        }

        fn push(
            &self,
            _change_id: &str,
            _bundle: &ArtifactBundle,
        ) -> Result<PushResult, BackendError> {
            Ok(PushResult {
                change_id: self.bundle.change_id.clone(),
                new_revision: "rev-new".to_string(),
            })
        }
    }

    /// Fake archive client that counts calls and can be configured to fail.
    struct FakeArchiveClient {
        should_fail: bool,
        // Cell gives interior mutability through the &self trait method.
        call_count: Cell<usize>,
    }

    impl FakeArchiveClient {
        fn success() -> Self {
            Self {
                should_fail: false,
                call_count: Cell::new(0),
            }
        }

        fn failing() -> Self {
            Self {
                should_fail: true,
                call_count: Cell::new(0),
            }
        }

        /// Number of times `mark_archived` was invoked.
        fn calls(&self) -> usize {
            self.call_count.get()
        }
    }

    impl BackendArchiveClient for FakeArchiveClient {
        fn mark_archived(&self, change_id: &str) -> Result<ArchiveResult, BackendError> {
            self.call_count.set(self.call_count.get() + 1);
            if self.should_fail {
                return Err(BackendError::Unavailable(
                    "backend offline during archive".to_string(),
                ));
            }
            Ok(ArchiveResult {
                change_id: change_id.to_string(),
                archived_at: "2026-02-28T12:00:00Z".to_string(),
            })
        }
    }

    /// Module repository stub: no modules exist.
    struct FakeModuleRepo;

    impl ito_domain::modules::ModuleRepository for FakeModuleRepo {
        fn list(
            &self,
        ) -> ito_domain::errors::DomainResult<Vec<ito_domain::modules::ModuleSummary>> {
            Ok(Vec::new())
        }

        fn get(&self, _id: &str) -> ito_domain::errors::DomainResult<ito_domain::modules::Module> {
            Err(ito_domain::errors::DomainError::not_found("module", "none"))
        }

        fn exists(&self, _id: &str) -> bool {
            false
        }
    }

    /// Lay out a minimal on-disk change (proposal, tasks, one spec delta)
    /// under `<ito_path>/changes/<change_id>`.
    fn setup_change_on_disk(ito_path: &std::path::Path, change_id: &str) {
        let change_dir = ito_path.join("changes").join(change_id);
        std::fs::create_dir_all(change_dir.join("specs/test-cap")).unwrap();
        std::fs::write(change_dir.join("proposal.md"), "# Proposal").unwrap();
        std::fs::write(change_dir.join("tasks.md"), "- [x] Done").unwrap();
        std::fs::write(
            change_dir.join("specs/test-cap/spec.md"),
            "## ADDED Requirements\n",
        )
        .unwrap();
    }

    #[test]
    fn archive_with_backend_happy_path() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::success();
        let module_repo = FakeModuleRepo;

        let outcome = archive_with_backend(
            &sync_client,
            &archive_client,
            &module_repo,
            &ito_path,
            change_id,
            &backup_dir,
            false,
        )
        .unwrap();

        // Verify specs were updated
        assert_eq!(outcome.specs_updated, vec!["test-cap"]);

        // Verify archive name contains the change id
        assert!(outcome.archive_name.contains(change_id));

        // Verify backend was called
        assert_eq!(archive_client.calls(), 1);
        assert_eq!(outcome.backend_result.change_id, change_id);

        // Verify the change is in the archive directory
        let archive_dir = ito_path
            .join("changes")
            .join("archive")
            .join(&outcome.archive_name);
        assert!(archive_dir.exists(), "archive directory should exist");

        // Verify original change dir is gone
        let original_dir = ito_path.join("changes").join(change_id);
        assert!(!original_dir.exists(), "original change dir should be gone");

        // Verify spec was copied to main specs tree
        let main_spec = ito_path.join("specs").join("test-cap").join("spec.md");
        assert!(main_spec.exists(), "main spec should exist");
    }

    #[test]
    fn archive_with_backend_skip_specs() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::success();
        let module_repo = FakeModuleRepo;

        let outcome = archive_with_backend(
            &sync_client,
            &archive_client,
            &module_repo,
            &ito_path,
            change_id,
            &backup_dir,
            true, // skip_specs
        )
        .unwrap();

        // Specs should not have been updated
        assert!(outcome.specs_updated.is_empty());

        // But archive should still succeed
        let archive_dir = ito_path
            .join("changes")
            .join("archive")
            .join(&outcome.archive_name);
        assert!(archive_dir.exists());

        // Main spec should NOT exist
        let main_spec = ito_path.join("specs").join("test-cap").join("spec.md");
        assert!(
            !main_spec.exists(),
            "main spec should not be created when skip_specs is true"
        );
    }

    #[test]
    fn archive_with_backend_backend_unavailable() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::failing();
        let module_repo = FakeModuleRepo;

        let err = archive_with_backend(
            &sync_client,
            &archive_client,
            &module_repo,
            &ito_path,
            change_id,
            &backup_dir,
            false,
        )
        .unwrap_err();

        let msg = err.to_string();
        assert!(
            msg.contains("Backend unavailable"),
            "should report backend unavailability: {msg}"
        );

        // The local archive still happened (the move completed before mark_archived failed)
        // This is by design — the local state is correct; backend can be retried.
    }
}
487}