// ralph/commands/run/parallel/state.rs

use crate::fsutil;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
/// Current schema version of the on-disk parallel state file.
/// Bump when `ParallelStateFile`'s layout changes incompatibly; on load,
/// `migrate_state` discards worker records from older versions.
pub const PARALLEL_STATE_SCHEMA_VERSION: u32 = 3;

/// Lifecycle stage of a single parallel worker, persisted in the state file.
/// Serialized as snake_case strings (e.g. `"blocked_push"`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]
#[serde(rename_all = "snake_case")]
pub enum WorkerLifecycle {
    /// Worker is actively running its task. Also the default for records
    /// deserialized without a lifecycle field.
    #[default]
    Running,
    /// Worker's result is being integrated (entered via `start_integration`).
    Integrating,
    /// Terminal: worker finished successfully.
    Completed,
    /// Terminal: worker failed; details in `WorkerRecord::last_error`.
    Failed,
    /// Terminal: a push was blocked; details in `WorkerRecord::last_error`.
    BlockedPush,
}
54
/// Persistent record of one parallel worker, stored in `ParallelStateFile`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkerRecord {
    /// Identifier of the task this worker is executing; unique within the
    /// worker list (enforced by `ParallelStateFile::upsert_worker`).
    pub task_id: String,
    /// Filesystem path of this worker's workspace.
    pub workspace_path: PathBuf,
    /// Current lifecycle stage; missing field deserializes as `Running`.
    #[serde(default)]
    pub lifecycle: WorkerLifecycle,
    /// Timestamp string recorded when the worker started.
    pub started_at: String,
    /// Timestamp string set when the worker reaches a terminal stage;
    /// omitted from JSON while the worker is still active.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<String>,
    /// Number of push attempts made so far; missing field deserializes as 0.
    #[serde(default)]
    pub push_attempts: u32,
    /// Last error message, set on `Failed` / `BlockedPush` transitions.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_error: Option<String>,
}
77
78impl WorkerRecord {
79 pub fn new(task_id: impl Into<String>, workspace_path: PathBuf, started_at: String) -> Self {
80 Self {
81 task_id: task_id.into(),
82 workspace_path,
83 lifecycle: WorkerLifecycle::Running,
84 started_at,
85 completed_at: None,
86 push_attempts: 0,
87 last_error: None,
88 }
89 }
90
91 pub fn start_integration(&mut self) {
93 self.lifecycle = WorkerLifecycle::Integrating;
94 }
95
96 pub fn mark_completed(&mut self, timestamp: String) {
98 self.lifecycle = WorkerLifecycle::Completed;
99 self.completed_at = Some(timestamp);
100 }
101
102 pub fn mark_failed(&mut self, timestamp: String, error: impl Into<String>) {
104 self.lifecycle = WorkerLifecycle::Failed;
105 self.completed_at = Some(timestamp);
106 self.last_error = Some(error.into());
107 }
108
109 pub fn mark_blocked(&mut self, timestamp: String, error: impl Into<String>) {
111 self.lifecycle = WorkerLifecycle::BlockedPush;
112 self.completed_at = Some(timestamp);
113 self.last_error = Some(error.into());
114 }
115
116 pub fn increment_push_attempt(&mut self) {
118 self.push_attempts += 1;
119 }
120
121 pub fn is_terminal(&self) -> bool {
123 matches!(
124 self.lifecycle,
125 WorkerLifecycle::Completed | WorkerLifecycle::Failed | WorkerLifecycle::BlockedPush
126 )
127 }
128}
129
/// On-disk state for a parallel run: schema version, run metadata, and the
/// roster of worker records.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParallelStateFile {
    /// Schema version of this file. Files written before the field existed
    /// deserialize as v1 (see `default_schema_version`) and are migrated
    /// on load by `migrate_state`.
    #[serde(default = "default_schema_version")]
    pub schema_version: u32,
    /// Timestamp string recorded when the parallel run started.
    #[serde(default)]
    pub started_at: String,
    /// Target branch for the run — presumably the branch worker results are
    /// integrated into; NOTE(review): confirm against the integration code.
    #[serde(default)]
    pub target_branch: String,
    /// All known workers, logically keyed by `task_id`.
    #[serde(default)]
    pub workers: Vec<WorkerRecord>,
}
150
/// Serde default for `ParallelStateFile::schema_version`.
///
/// Deliberately returns 1 — not `PARALLEL_STATE_SCHEMA_VERSION` — so that
/// state files written before the field existed are treated as schema v1
/// and get migrated (workers discarded) by `migrate_state`.
fn default_schema_version() -> u32 {
    1
}
154
155impl ParallelStateFile {
156 pub fn new(started_at: impl Into<String>, target_branch: impl Into<String>) -> Self {
157 Self {
158 schema_version: PARALLEL_STATE_SCHEMA_VERSION,
159 started_at: started_at.into(),
160 target_branch: target_branch.into(),
161 workers: Vec::new(),
162 }
163 }
164
165 pub fn upsert_worker(&mut self, record: WorkerRecord) {
167 if let Some(existing) = self
168 .workers
169 .iter_mut()
170 .find(|w| w.task_id == record.task_id)
171 {
172 *existing = record;
173 } else {
174 self.workers.push(record);
175 }
176 }
177
178 pub fn remove_worker(&mut self, task_id: &str) {
180 self.workers.retain(|w| w.task_id != task_id);
181 }
182
183 pub fn get_worker(&self, task_id: &str) -> Option<&WorkerRecord> {
185 self.workers.iter().find(|w| w.task_id == task_id)
186 }
187
188 pub fn get_worker_mut(&mut self, task_id: &str) -> Option<&mut WorkerRecord> {
190 self.workers.iter_mut().find(|w| w.task_id == task_id)
191 }
192
193 pub fn has_worker(&self, task_id: &str) -> bool {
195 self.workers.iter().any(|w| w.task_id == task_id)
196 }
197
198 pub fn workers_by_lifecycle(
200 &self,
201 lifecycle: WorkerLifecycle,
202 ) -> impl Iterator<Item = &WorkerRecord> {
203 self.workers
204 .iter()
205 .filter(move |w| w.lifecycle == lifecycle)
206 }
207
208 pub fn active_worker_count(&self) -> usize {
210 self.workers.iter().filter(|w| !w.is_terminal()).count()
211 }
212
213 pub fn blocked_worker_count(&self) -> usize {
215 self.workers_by_lifecycle(WorkerLifecycle::BlockedPush)
216 .count()
217 }
218}
219
/// Location of the parallel state file inside `repo_root`
/// (`.ralph/cache/parallel/state.json`).
pub fn state_file_path(repo_root: &Path) -> PathBuf {
    repo_root
        .join(".ralph")
        .join("cache")
        .join("parallel")
        .join("state.json")
}
223
224fn migrate_state(mut state: ParallelStateFile) -> ParallelStateFile {
230 if state.schema_version < PARALLEL_STATE_SCHEMA_VERSION {
231 log::info!(
232 "Migrating parallel state from schema v{} to v{}",
233 state.schema_version,
234 PARALLEL_STATE_SCHEMA_VERSION
235 );
236 state.schema_version = PARALLEL_STATE_SCHEMA_VERSION;
239 state.workers.clear();
240 }
241 state
242}
243
244pub fn load_state(path: &Path) -> Result<Option<ParallelStateFile>> {
245 if !path.exists() {
246 return Ok(None);
247 }
248 let raw = std::fs::read_to_string(path)
249 .with_context(|| format!("read parallel state {}", path.display()))?;
250 let state: ParallelStateFile =
251 crate::jsonc::parse_jsonc::<ParallelStateFile>(&raw, "parallel state")?;
252
253 let state = migrate_state(state);
255
256 Ok(Some(state))
257}
258
259pub fn save_state(path: &Path, state: &ParallelStateFile) -> Result<()> {
260 if let Some(parent) = path.parent() {
261 std::fs::create_dir_all(parent)
262 .with_context(|| format!("create parallel state dir {}", parent.display()))?;
263 }
264 let rendered = serde_json::to_string_pretty(state).context("serialize parallel state")?;
265 fsutil::write_atomic(path, rendered.as_bytes())
266 .with_context(|| format!("write parallel state {}", path.display()))?;
267 Ok(())
268}
269
270#[cfg(test)]
271#[path = "state/tests.rs"]
272mod tests;