1use crate::constants::limits::MAX_QUEUE_BACKUP_FILES;
16use anyhow::{Context, Result};
17use std::path::Path;
18use std::path::PathBuf;
19
// File-name prefix shared by `backup_queue` (which appends a filesystem-safe
// RFC 3339 timestamp) and `cleanup_queue_backups` (which matches on it when
// pruning old backups).
const QUEUE_BACKUP_PREFIX: &str = "queue.json.backup.";
21
22pub fn backup_queue(path: &Path, backup_dir: &Path) -> Result<std::path::PathBuf> {
25 std::fs::create_dir_all(backup_dir)?;
26 let timestamp = crate::timeutil::now_utc_rfc3339_or_fallback().replace([':', '.'], "-");
27 let backup_name = format!("{QUEUE_BACKUP_PREFIX}{timestamp}");
28 let backup_path = backup_dir.join(backup_name);
29
30 std::fs::copy(path, &backup_path)
31 .with_context(|| format!("backup queue to {}", backup_path.display()))?;
32
33 match cleanup_queue_backups(backup_dir, MAX_QUEUE_BACKUP_FILES) {
34 Ok(removed) if removed > 0 => {
35 log::debug!(
36 "pruned {} stale queue backup(s); retaining latest {}",
37 removed,
38 MAX_QUEUE_BACKUP_FILES
39 );
40 }
41 Ok(_) => {}
42 Err(err) => {
43 log::warn!(
44 "failed to prune queue backups in {}: {:#}",
45 backup_dir.display(),
46 err
47 );
48 }
49 }
50
51 Ok(backup_path)
52}
53
54pub(crate) fn cleanup_queue_backups(backup_dir: &Path, max_backups: usize) -> Result<usize> {
55 if max_backups == 0 || !backup_dir.exists() {
56 return Ok(0);
57 }
58
59 let mut backup_paths: Vec<PathBuf> = Vec::new();
60 for entry in std::fs::read_dir(backup_dir)
61 .with_context(|| format!("read backup directory {}", backup_dir.display()))?
62 {
63 let entry = entry
64 .with_context(|| format!("read backup directory entry in {}", backup_dir.display()))?;
65
66 let file_type = entry
67 .file_type()
68 .with_context(|| format!("read file type {}", entry.path().display()))?;
69 if !file_type.is_file() {
70 continue;
71 }
72
73 let file_name = entry.file_name();
74 let file_name = file_name.to_string_lossy();
75 if file_name.starts_with(QUEUE_BACKUP_PREFIX) {
76 backup_paths.push(entry.path());
77 }
78 }
79
80 if backup_paths.len() <= max_backups {
81 return Ok(0);
82 }
83
84 backup_paths.sort_unstable_by_key(|path| {
85 path.file_name()
86 .map(|name| name.to_string_lossy().into_owned())
87 .unwrap_or_default()
88 });
89
90 let mut removed = 0usize;
91 let to_remove = backup_paths.len().saturating_sub(max_backups);
92 for backup_path in backup_paths.into_iter().take(to_remove) {
93 std::fs::remove_file(&backup_path)
94 .with_context(|| format!("remove queue backup {}", backup_path.display()))?;
95 removed += 1;
96 }
97
98 Ok(removed)
99}
100
#[cfg(test)]
mod tests {
    use super::*;
    use crate::contracts::{QueueFile, Task, TaskStatus};
    use crate::fsutil;
    use std::collections::HashMap;
    use tempfile::TempDir;

    /// Minimal task fixture; only the id varies between tests.
    fn task(id: &str) -> Task {
        Task {
            id: id.to_owned(),
            status: TaskStatus::Todo,
            title: "Test task".to_owned(),
            description: None,
            priority: Default::default(),
            tags: vec!["code".to_owned()],
            scope: vec!["crates/ralph".to_owned()],
            evidence: vec!["observed".to_owned()],
            plan: vec!["do thing".to_owned()],
            notes: vec![],
            request: Some("test request".to_owned()),
            agent: None,
            created_at: Some("2026-01-18T00:00:00Z".to_owned()),
            updated_at: Some("2026-01-18T00:00:00Z".to_owned()),
            completed_at: None,
            started_at: None,
            scheduled_start: None,
            depends_on: vec![],
            blocks: vec![],
            relates_to: vec![],
            duplicates: None,
            custom_fields: HashMap::new(),
            parent_id: None,
            estimated_minutes: None,
            actual_minutes: None,
        }
    }

    /// Serializes `queue` as pretty JSON and writes it atomically to `path`.
    fn save_queue(path: &Path, queue: &QueueFile) -> Result<()> {
        let json = serde_json::to_string_pretty(queue).context("serialize queue JSON")?;
        fsutil::write_atomic(path, json.as_bytes())
            .with_context(|| format!("write queue JSON {}", path.display()))?;
        Ok(())
    }

    #[test]
    fn backup_queue_creates_backup_file() -> Result<()> {
        let temp = TempDir::new()?;
        let queue_path = temp.path().join("queue.json");
        let backup_dir = temp.path().join("backups");

        let queue = QueueFile {
            version: 1,
            tasks: vec![task("RQ-0001")],
        };
        save_queue(&queue_path, &queue)?;

        let backup_path = backup_queue(&queue_path, &backup_dir)?;

        // The backup must exist and round-trip to the same queue contents.
        assert!(backup_path.exists());
        let restored: QueueFile = serde_json::from_str(&std::fs::read_to_string(&backup_path)?)?;
        assert_eq!(restored.tasks.len(), 1);
        assert_eq!(restored.tasks[0].id, "RQ-0001");

        Ok(())
    }

    #[test]
    fn cleanup_queue_backups_removes_oldest_files() -> Result<()> {
        let temp = TempDir::new()?;
        let backup_dir = temp.path().join("backups");
        std::fs::create_dir_all(&backup_dir)?;

        let named = |suffix: &str| backup_dir.join(format!("{QUEUE_BACKUP_PREFIX}{suffix}"));
        for suffix in ["0001", "0002", "0003"] {
            std::fs::write(named(suffix), "{}")?;
        }

        // With a retention of two, only the lexicographically smallest
        // (oldest) backup should be deleted.
        assert_eq!(cleanup_queue_backups(&backup_dir, 2)?, 1);
        assert!(!named("0001").exists());
        assert!(named("0002").exists());
        assert!(named("0003").exists());

        Ok(())
    }

    #[test]
    fn backup_queue_prunes_backups_to_retention_limit() -> Result<()> {
        let temp = TempDir::new()?;
        let queue_path = temp.path().join("queue.json");
        let backup_dir = temp.path().join("backups");
        std::fs::create_dir_all(&backup_dir)?;

        let queue = QueueFile {
            version: 1,
            tasks: vec![task("RQ-0001")],
        };
        save_queue(&queue_path, &queue)?;

        // Pre-seed more backups than the retention limit allows.
        for idx in 0..(MAX_QUEUE_BACKUP_FILES + 2) {
            std::fs::write(
                backup_dir.join(format!("{QUEUE_BACKUP_PREFIX}0000-{idx:04}")),
                "{}",
            )?;
        }

        backup_queue(&queue_path, &backup_dir)?;

        // After the backup-and-prune cycle, exactly the retention limit's
        // worth of backup files should remain.
        let remaining = std::fs::read_dir(&backup_dir)?
            .flatten()
            .filter(|entry| {
                entry
                    .file_name()
                    .to_string_lossy()
                    .starts_with(QUEUE_BACKUP_PREFIX)
            })
            .count();
        assert_eq!(remaining, MAX_QUEUE_BACKUP_FILES);

        Ok(())
    }
}