Skip to main content

ito_core/
backend_coordination.rs

1//! Backend coordination use-cases for CLI commands.
2//!
3//! Provides the business logic for claim, release, allocate, and sync
4//! operations that the CLI adapter calls. Each function accepts trait
5//! objects for the backend clients so the CLI can inject the concrete
6//! implementation.
7
8use std::path::Path;
9
10use ito_domain::backend::{
11    AllocateResult, ArchiveResult, ArtifactBundle, BackendArchiveClient, BackendLeaseClient,
12    BackendSyncClient, ClaimResult, PushResult, ReleaseResult,
13};
14
15use crate::backend_sync::map_backend_error;
16use crate::errors::{CoreError, CoreResult};
17
18/// Claim a change lease through the backend.
19pub fn claim_change(
20    lease_client: &dyn BackendLeaseClient,
21    change_id: &str,
22) -> CoreResult<ClaimResult> {
23    lease_client
24        .claim(change_id)
25        .map_err(|e| map_backend_error(e, "claim"))
26}
27
28/// Release a change lease through the backend.
29pub fn release_change(
30    lease_client: &dyn BackendLeaseClient,
31    change_id: &str,
32) -> CoreResult<ReleaseResult> {
33    lease_client
34        .release(change_id)
35        .map_err(|e| map_backend_error(e, "release"))
36}
37
38/// Allocate the next available change through the backend.
39pub fn allocate_change(lease_client: &dyn BackendLeaseClient) -> CoreResult<AllocateResult> {
40    lease_client
41        .allocate()
42        .map_err(|e| map_backend_error(e, "allocate"))
43}
44
45/// Pull artifacts from the backend for a change.
46pub fn sync_pull(
47    sync_client: &dyn BackendSyncClient,
48    ito_path: &std::path::Path,
49    change_id: &str,
50    backup_dir: &std::path::Path,
51) -> CoreResult<ArtifactBundle> {
52    crate::backend_sync::pull_artifacts(sync_client, ito_path, change_id, backup_dir)
53}
54
55/// Push local artifacts to the backend for a change.
56pub fn sync_push(
57    sync_client: &dyn BackendSyncClient,
58    ito_path: &std::path::Path,
59    change_id: &str,
60    backup_dir: &std::path::Path,
61) -> CoreResult<PushResult> {
62    crate::backend_sync::push_artifacts(sync_client, ito_path, change_id, backup_dir)
63}
64
/// Result of a backend-mode archive orchestration.
///
/// Produced by [`archive_with_backend`] after all four steps succeed; the
/// CLI uses it to report what changed locally and on the backend.
#[derive(Debug)]
pub struct BackendArchiveOutcome {
    /// Spec IDs that were copied to the main specs tree.
    /// Empty when the caller passed `skip_specs = true`.
    pub specs_updated: Vec<String>,
    /// The archive folder name (date-prefixed).
    pub archive_name: String,
    /// Backend archive result with timestamp.
    pub backend_result: ArchiveResult,
}
75
76/// Orchestrate a backend-mode archive for a change.
77///
78/// The flow is:
79/// 1. Pull the final artifact bundle from the backend.
80/// 2. Copy spec deltas to the main specs tree (unless `skip_specs`).
81/// 3. Move the change to the archive directory.
82/// 4. Mark the change as archived on the backend.
83///
84/// If step 4 fails, the local archive is already committed — the caller
85/// should report the backend error but NOT roll back the local archive
86/// (the local state is correct; the backend can be retried).
87pub fn archive_with_backend(
88    sync_client: &dyn BackendSyncClient,
89    archive_client: &dyn BackendArchiveClient,
90    ito_path: &Path,
91    change_id: &str,
92    backup_dir: &Path,
93    skip_specs: bool,
94) -> CoreResult<BackendArchiveOutcome> {
95    // Step 1: Pull final artifacts from backend
96    crate::backend_sync::pull_artifacts(sync_client, ito_path, change_id, backup_dir)?;
97
98    // Step 2: Copy spec deltas to main specs tree
99    let specs_updated = if skip_specs {
100        Vec::new()
101    } else {
102        let spec_names = crate::archive::discover_change_specs(ito_path, change_id)?;
103        crate::archive::copy_specs_to_main(ito_path, change_id, &spec_names)?
104    };
105
106    // Step 3: Move to archive
107    let archive_name = crate::archive::generate_archive_name(change_id);
108    crate::archive::move_to_archive(ito_path, change_id, &archive_name)?;
109
110    // Step 4: Mark archived on backend
111    let backend_result = archive_client
112        .mark_archived(change_id)
113        .map_err(|e| map_backend_error(e, "archive"))?;
114
115    Ok(BackendArchiveOutcome {
116        specs_updated,
117        archive_name,
118        backend_result,
119    })
120}
121
122/// Check whether the given `CoreError` represents a backend availability failure.
123///
124/// The CLI can use this to suggest fallback to filesystem mode.
125pub fn is_backend_unavailable(err: &CoreError) -> bool {
126    match err {
127        CoreError::Process(msg) => msg.contains("Backend unavailable"),
128        _ => false,
129    }
130}
131
#[cfg(test)]
mod tests {
    use super::*;
    use ito_domain::backend::{
        ArchiveResult, BackendError, ClaimResult, LeaseConflict, ReleaseResult,
    };

    /// Canned lease client: each operation returns a preconfigured result,
    /// so tests can exercise both success and conflict paths without a
    /// real backend.
    struct FakeLeaseClient {
        claim_result: Result<ClaimResult, BackendError>,
        release_result: Result<ReleaseResult, BackendError>,
        allocate_result: Result<AllocateResult, BackendError>,
    }

    impl FakeLeaseClient {
        /// All operations succeed; allocate hands out a claim on "test".
        fn success() -> Self {
            Self {
                claim_result: Ok(ClaimResult {
                    change_id: "test".to_string(),
                    holder: "me".to_string(),
                    expires_at: None,
                }),
                release_result: Ok(ReleaseResult {
                    change_id: "test".to_string(),
                }),
                allocate_result: Ok(AllocateResult {
                    claim: Some(ClaimResult {
                        change_id: "test".to_string(),
                        holder: "me".to_string(),
                        expires_at: None,
                    }),
                }),
            }
        }

        /// Claim fails with a lease conflict held by "other"; release still
        /// succeeds and allocate reports no work available.
        fn conflict() -> Self {
            Self {
                claim_result: Err(BackendError::LeaseConflict(LeaseConflict {
                    change_id: "test".to_string(),
                    holder: "other".to_string(),
                    expires_at: None,
                })),
                release_result: Ok(ReleaseResult {
                    change_id: "test".to_string(),
                }),
                allocate_result: Ok(AllocateResult { claim: None }),
            }
        }
    }

    impl BackendLeaseClient for FakeLeaseClient {
        fn claim(&self, _change_id: &str) -> Result<ClaimResult, BackendError> {
            self.claim_result.clone()
        }

        fn release(&self, _change_id: &str) -> Result<ReleaseResult, BackendError> {
            self.release_result.clone()
        }

        fn allocate(&self) -> Result<AllocateResult, BackendError> {
            self.allocate_result.clone()
        }
    }

    #[test]
    fn claim_success() {
        let client = FakeLeaseClient::success();
        let result = claim_change(&client, "test").unwrap();
        assert_eq!(result.change_id, "test");
        assert_eq!(result.holder, "me");
    }

    #[test]
    fn claim_conflict() {
        let client = FakeLeaseClient::conflict();
        let err = claim_change(&client, "test").unwrap_err();
        // The mapped error message should surface both the conflict and the
        // competing holder so the CLI can show a useful diagnostic.
        let msg = err.to_string();
        assert!(msg.contains("Lease conflict"), "msg: {msg}");
        assert!(msg.contains("other"), "msg: {msg}");
    }

    #[test]
    fn release_success() {
        let client = FakeLeaseClient::success();
        let result = release_change(&client, "test").unwrap();
        assert_eq!(result.change_id, "test");
    }

    #[test]
    fn allocate_with_work() {
        let client = FakeLeaseClient::success();
        let result = allocate_change(&client).unwrap();
        assert!(result.claim.is_some());
        assert_eq!(result.claim.unwrap().change_id, "test");
    }

    #[test]
    fn allocate_no_work() {
        // An empty allocation (claim == None) is a success, not an error.
        let client = FakeLeaseClient::conflict();
        let result = allocate_change(&client).unwrap();
        assert!(result.claim.is_none());
    }

    #[test]
    fn is_backend_unavailable_detects_process_error() {
        let err = CoreError::process("Backend unavailable during pull: timeout");
        assert!(is_backend_unavailable(&err));

        let err = CoreError::validation("some other error");
        assert!(!is_backend_unavailable(&err));
    }

    // ── Archive orchestration tests ────────────────────────────────

    use ito_domain::backend::BackendSyncClient;
    use std::cell::Cell;

    /// Canned sync client that always serves the same artifact bundle on
    /// pull and acknowledges pushes with a fixed new revision.
    struct FakeSyncClient {
        bundle: ArtifactBundle,
    }

    impl FakeSyncClient {
        /// Build a bundle with a proposal, tasks, one spec delta, and no
        /// design document — enough to drive the archive flow.
        fn new(change_id: &str) -> Self {
            Self {
                bundle: ArtifactBundle {
                    change_id: change_id.to_string(),
                    proposal: Some("# Proposal\nTest content".to_string()),
                    design: None,
                    tasks: Some("- [x] Task 1\n".to_string()),
                    specs: vec![(
                        "test-cap".to_string(),
                        "## ADDED Requirements\n".to_string(),
                    )],
                    revision: "rev-final".to_string(),
                },
            }
        }
    }

    impl BackendSyncClient for FakeSyncClient {
        fn pull(&self, _change_id: &str) -> Result<ArtifactBundle, BackendError> {
            Ok(self.bundle.clone())
        }

        fn push(
            &self,
            _change_id: &str,
            _bundle: &ArtifactBundle,
        ) -> Result<PushResult, BackendError> {
            Ok(PushResult {
                change_id: self.bundle.change_id.clone(),
                new_revision: "rev-new".to_string(),
            })
        }
    }

    /// Archive client that records how many times `mark_archived` was
    /// called and can be configured to fail with an Unavailable error.
    struct FakeArchiveClient {
        should_fail: bool,
        // Cell gives interior mutability through the &self trait method.
        call_count: Cell<usize>,
    }

    impl FakeArchiveClient {
        fn success() -> Self {
            Self {
                should_fail: false,
                call_count: Cell::new(0),
            }
        }

        fn failing() -> Self {
            Self {
                should_fail: true,
                call_count: Cell::new(0),
            }
        }

        /// Number of `mark_archived` invocations observed so far.
        fn calls(&self) -> usize {
            self.call_count.get()
        }
    }

    impl BackendArchiveClient for FakeArchiveClient {
        fn mark_archived(&self, change_id: &str) -> Result<ArchiveResult, BackendError> {
            self.call_count.set(self.call_count.get() + 1);
            if self.should_fail {
                return Err(BackendError::Unavailable(
                    "backend offline during archive".to_string(),
                ));
            }
            Ok(ArchiveResult {
                change_id: change_id.to_string(),
                archived_at: "2026-02-28T12:00:00Z".to_string(),
            })
        }
    }

    /// Lay out a minimal on-disk change (proposal, tasks, one spec) under
    /// `<ito_path>/changes/<change_id>` for the archive tests.
    fn setup_change_on_disk(ito_path: &std::path::Path, change_id: &str) {
        let change_dir = ito_path.join("changes").join(change_id);
        std::fs::create_dir_all(change_dir.join("specs/test-cap")).unwrap();
        std::fs::write(change_dir.join("proposal.md"), "# Proposal").unwrap();
        std::fs::write(change_dir.join("tasks.md"), "- [x] Done").unwrap();
        std::fs::write(
            change_dir.join("specs/test-cap/spec.md"),
            "## ADDED Requirements\n",
        )
        .unwrap();
    }

    #[test]
    fn archive_with_backend_happy_path() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::success();

        let outcome = archive_with_backend(
            &sync_client,
            &archive_client,
            &ito_path,
            change_id,
            &backup_dir,
            false,
        )
        .unwrap();

        // Verify specs were updated
        assert_eq!(outcome.specs_updated, vec!["test-cap"]);

        // Verify archive name contains the change id
        assert!(outcome.archive_name.contains(change_id));

        // Verify backend was called
        assert_eq!(archive_client.calls(), 1);
        assert_eq!(outcome.backend_result.change_id, change_id);

        // Verify the change is in the archive directory
        let archive_dir = ito_path
            .join("changes")
            .join("archive")
            .join(&outcome.archive_name);
        assert!(archive_dir.exists(), "archive directory should exist");

        // Verify original change dir is gone
        let original_dir = ito_path.join("changes").join(change_id);
        assert!(!original_dir.exists(), "original change dir should be gone");

        // Verify spec was copied to main specs tree
        let main_spec = ito_path.join("specs").join("test-cap").join("spec.md");
        assert!(main_spec.exists(), "main spec should exist");
    }

    #[test]
    fn archive_with_backend_skip_specs() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::success();

        let outcome = archive_with_backend(
            &sync_client,
            &archive_client,
            &ito_path,
            change_id,
            &backup_dir,
            true, // skip_specs
        )
        .unwrap();

        // Specs should not have been updated
        assert!(outcome.specs_updated.is_empty());

        // But archive should still succeed
        let archive_dir = ito_path
            .join("changes")
            .join("archive")
            .join(&outcome.archive_name);
        assert!(archive_dir.exists());

        // Main spec should NOT exist
        let main_spec = ito_path.join("specs").join("test-cap").join("spec.md");
        assert!(
            !main_spec.exists(),
            "main spec should not be created when skip_specs is true"
        );
    }

    #[test]
    fn archive_with_backend_backend_unavailable() {
        let tmp = tempfile::TempDir::new().unwrap();
        let ito_path = tmp.path().join(".ito");
        let backup_dir = tmp.path().join("backups");
        std::fs::create_dir_all(&ito_path).unwrap();

        let change_id = "test-change";
        setup_change_on_disk(&ito_path, change_id);

        let sync_client = FakeSyncClient::new(change_id);
        let archive_client = FakeArchiveClient::failing();

        let err = archive_with_backend(
            &sync_client,
            &archive_client,
            &ito_path,
            change_id,
            &backup_dir,
            false,
        )
        .unwrap_err();

        let msg = err.to_string();
        assert!(
            msg.contains("Backend unavailable"),
            "should report backend unavailability: {msg}"
        );

        // The local archive still happened (the move completed before mark_archived failed)
        // This is by design — the local state is correct; backend can be retried.
    }
}
461}