1use crate::constants::limits::MAX_QUEUE_BACKUP_FILES;
16use anyhow::{Context, Result};
17use std::path::Path;
18use std::path::PathBuf;
19
/// Filename prefix shared by all queue backup files; used both when
/// naming new backups and when scanning the backup directory for pruning.
const QUEUE_BACKUP_PREFIX: &str = "queue.json.backup.";
21
22pub fn backup_queue(path: &Path, backup_dir: &Path) -> Result<std::path::PathBuf> {
25 std::fs::create_dir_all(backup_dir)?;
26 let timestamp = crate::timeutil::now_utc_rfc3339_or_fallback().replace([':', '.'], "-");
27 let backup_name = format!("{QUEUE_BACKUP_PREFIX}{timestamp}");
28 let backup_path = backup_dir.join(backup_name);
29
30 std::fs::copy(path, &backup_path)
31 .with_context(|| format!("backup queue to {}", backup_path.display()))?;
32
33 match cleanup_queue_backups(backup_dir, MAX_QUEUE_BACKUP_FILES) {
34 Ok(removed) if removed > 0 => {
35 log::debug!(
36 "pruned {} stale queue backup(s); retaining latest {}",
37 removed,
38 MAX_QUEUE_BACKUP_FILES
39 );
40 }
41 Ok(_) => {
42 }
44 Err(err) => {
45 log::warn!(
46 "failed to prune queue backups in {}: {:#}",
47 backup_dir.display(),
48 err
49 );
50 }
51 }
52
53 Ok(backup_path)
54}
55
56pub(crate) fn cleanup_queue_backups(backup_dir: &Path, max_backups: usize) -> Result<usize> {
57 if max_backups == 0 || !backup_dir.exists() {
58 return Ok(0);
59 }
60
61 let mut backup_paths: Vec<PathBuf> = Vec::new();
62 for entry in std::fs::read_dir(backup_dir)
63 .with_context(|| format!("read backup directory {}", backup_dir.display()))?
64 {
65 let entry = entry
66 .with_context(|| format!("read backup directory entry in {}", backup_dir.display()))?;
67
68 let file_type = entry
69 .file_type()
70 .with_context(|| format!("read file type {}", entry.path().display()))?;
71 if !file_type.is_file() {
72 continue;
73 }
74
75 let file_name = entry.file_name();
76 let file_name = file_name.to_string_lossy();
77 if file_name.starts_with(QUEUE_BACKUP_PREFIX) {
78 backup_paths.push(entry.path());
79 }
80 }
81
82 if backup_paths.len() <= max_backups {
83 return Ok(0);
84 }
85
86 backup_paths.sort_unstable_by_key(|path| {
87 path.file_name()
88 .map(|name| name.to_string_lossy().into_owned())
89 .unwrap_or_default()
90 });
91
92 let mut removed = 0usize;
93 let to_remove = backup_paths.len().saturating_sub(max_backups);
94 for backup_path in backup_paths.into_iter().take(to_remove) {
95 std::fs::remove_file(&backup_path)
96 .with_context(|| format!("remove queue backup {}", backup_path.display()))?;
97 removed += 1;
98 }
99
100 Ok(removed)
101}
102
#[cfg(test)]
mod tests {
    use super::*;
    use crate::contracts::{QueueFile, Task, TaskStatus};
    use crate::fsutil;
    use std::collections::HashMap;
    use tempfile::TempDir;

    /// Minimal task fixture carrying the given id and fixed timestamps.
    fn task(id: &str) -> Task {
        Task {
            id: id.to_string(),
            status: TaskStatus::Todo,
            title: "Test task".to_string(),
            description: None,
            priority: Default::default(),
            tags: vec!["code".to_string()],
            scope: vec!["crates/ralph".to_string()],
            evidence: vec!["observed".to_string()],
            plan: vec!["do thing".to_string()],
            notes: vec![],
            request: Some("test request".to_string()),
            agent: None,
            created_at: Some("2026-01-18T00:00:00Z".to_string()),
            updated_at: Some("2026-01-18T00:00:00Z".to_string()),
            completed_at: None,
            started_at: None,
            scheduled_start: None,
            depends_on: vec![],
            blocks: vec![],
            relates_to: vec![],
            duplicates: None,
            custom_fields: HashMap::new(),
            parent_id: None,
            estimated_minutes: None,
            actual_minutes: None,
        }
    }

    /// Serializes `queue` as pretty JSON and writes it atomically to `path`.
    fn save_queue(path: &Path, queue: &QueueFile) -> Result<()> {
        let json = serde_json::to_string_pretty(queue).context("serialize queue JSON")?;
        fsutil::write_atomic(path, json.as_bytes())
            .with_context(|| format!("write queue JSON {}", path.display()))?;
        Ok(())
    }

    #[test]
    fn backup_queue_creates_backup_file() -> Result<()> {
        let temp = TempDir::new()?;
        let queue_path = temp.path().join("queue.json");
        let backup_dir = temp.path().join("backups");

        let queue = QueueFile {
            version: 1,
            tasks: vec![task("RQ-0001")],
        };
        save_queue(&queue_path, &queue)?;

        let backup_path = backup_queue(&queue_path, &backup_dir)?;
        assert!(backup_path.exists());

        // The backup must round-trip to the same single task.
        let restored: QueueFile =
            serde_json::from_str(&std::fs::read_to_string(&backup_path)?)?;
        assert_eq!(restored.tasks.len(), 1);
        assert_eq!(restored.tasks[0].id, "RQ-0001");

        Ok(())
    }

    #[test]
    fn cleanup_queue_backups_removes_oldest_files() -> Result<()> {
        let temp = TempDir::new()?;
        let backup_dir = temp.path().join("backups");
        std::fs::create_dir_all(&backup_dir)?;

        for suffix in ["0001", "0002", "0003"] {
            std::fs::write(
                backup_dir.join(format!("{QUEUE_BACKUP_PREFIX}{suffix}")),
                "{}",
            )?;
        }

        // With a limit of two, only the lexicographically smallest
        // (i.e. oldest) backup should be deleted.
        assert_eq!(cleanup_queue_backups(&backup_dir, 2)?, 1);

        let exists = |suffix: &str| {
            backup_dir
                .join(format!("{QUEUE_BACKUP_PREFIX}{suffix}"))
                .exists()
        };
        assert!(!exists("0001"));
        assert!(exists("0002"));
        assert!(exists("0003"));

        Ok(())
    }

    #[test]
    fn backup_queue_prunes_backups_to_retention_limit() -> Result<()> {
        let temp = TempDir::new()?;
        let queue_path = temp.path().join("queue.json");
        let backup_dir = temp.path().join("backups");
        std::fs::create_dir_all(&backup_dir)?;

        let queue = QueueFile {
            version: 1,
            tasks: vec![task("RQ-0001")],
        };
        save_queue(&queue_path, &queue)?;

        // Seed more pre-existing backups than the retention limit allows.
        for idx in 0..(MAX_QUEUE_BACKUP_FILES + 2) {
            std::fs::write(
                backup_dir.join(format!("{QUEUE_BACKUP_PREFIX}0000-{idx:04}")),
                "{}",
            )?;
        }

        let _ = backup_queue(&queue_path, &backup_dir)?;

        let backup_count = std::fs::read_dir(&backup_dir)?
            .flatten()
            .filter(|entry| {
                entry
                    .file_name()
                    .to_string_lossy()
                    .starts_with(QUEUE_BACKUP_PREFIX)
            })
            .count();
        assert_eq!(backup_count, MAX_QUEUE_BACKUP_FILES);

        Ok(())
    }
}