forge_runtime/jobs/executor.rs

use std::sync::Arc;
use std::time::Duration;

use forge_core::CircuitBreakerClient;
use forge_core::job::{JobContext, ProgressUpdate};
use tokio::time::timeout;

use super::queue::{JobQueue, JobRecord};
use super::registry::{JobEntry, JobRegistry};

/// Executes jobs with timeout and retry handling.
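///
/// A minimal usage sketch from a worker loop. The `claim_next` call below is
/// hypothetical (claiming is not defined in this module); it only illustrates
/// how a caller might drive [`JobExecutor::execute`]:
///
/// ```ignore
/// let executor = JobExecutor::new(queue.clone(), registry, db_pool);
/// // `claim_next` is a placeholder for however jobs are claimed elsewhere.
/// while let Some(job) = queue.claim_next().await? {
///     match executor.execute(&job).await {
///         ExecutionResult::Completed { .. } => { /* output already persisted by the queue */ }
///         result if result.should_retry() => { /* queue re-delivers after the backoff */ }
///         _ => { /* terminal failure or cancellation */ }
///     }
/// }
/// ```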
pub struct JobExecutor {
    queue: JobQueue,
    registry: Arc<JobRegistry>,
    db_pool: sqlx::PgPool,
    http_client: CircuitBreakerClient,
}

impl JobExecutor {
    const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(30);

    /// Create a new job executor.
    pub fn new(queue: JobQueue, registry: JobRegistry, db_pool: sqlx::PgPool) -> Self {
        Self {
            queue,
            registry: Arc::new(registry),
            db_pool,
            http_client: CircuitBreakerClient::with_defaults(reqwest::Client::new()),
        }
    }

    /// Execute a claimed job.
    pub async fn execute(&self, job: &JobRecord) -> ExecutionResult {
        let entry = match self.registry.get(&job.job_type) {
            Some(e) => e,
            None => {
                return ExecutionResult::Failed {
                    error: format!("Unknown job type: {}", job.job_type),
                    retryable: false,
                };
            }
        };

        if matches!(job.status, forge_core::job::JobStatus::Cancelled) {
            return ExecutionResult::Cancelled {
                reason: Self::cancellation_reason(job, "Job cancelled"),
            };
        }

        // Mark job as running
        if let Err(e) = self.queue.start(job.id).await {
            if matches!(e, sqlx::Error::RowNotFound) {
                return ExecutionResult::Cancelled {
                    reason: Self::cancellation_reason(job, "Job cancelled"),
                };
            }
            return ExecutionResult::Failed {
                error: format!("Failed to start job: {}", e),
                retryable: true,
            };
        }

        // Set up progress channel
        let (progress_tx, progress_rx) = std::sync::mpsc::channel::<ProgressUpdate>();

        // Spawn task to consume progress updates and save to database.
        // Use try_recv() with async sleep to avoid blocking the tokio runtime.
        let progress_queue = self.queue.clone();
        let progress_job_id = job.id;
        tokio::spawn(async move {
            loop {
                match progress_rx.try_recv() {
                    Ok(update) => {
                        if let Err(e) = progress_queue
                            .update_progress(
                                progress_job_id,
                                update.percentage as i32,
                                &update.message,
                            )
                            .await
                        {
                            tracing::debug!(job_id = %progress_job_id, error = %e, "Failed to update job progress");
                        }
                    }
                    Err(std::sync::mpsc::TryRecvError::Empty) => {
                        // No message yet, sleep briefly and check again
                        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
                    }
                    Err(std::sync::mpsc::TryRecvError::Disconnected) => {
                        // Sender dropped (job finished), exit loop
                        break;
                    }
                }
            }
        });

        // Create job context with progress channel
        let ctx = JobContext::new(
            job.id,
            job.job_type.clone(),
            job.attempts as u32,
            job.max_attempts as u32,
            self.db_pool.clone(),
            self.http_client.inner().clone(),
        )
        .with_saved(job.job_context.clone())
        .with_progress(progress_tx);

        // Keepalive heartbeat prevents stale cleanup from reclaiming healthy long jobs.
        let heartbeat_queue = self.queue.clone();
        let heartbeat_job_id = job.id;
        let (heartbeat_stop_tx, mut heartbeat_stop_rx) = tokio::sync::watch::channel(false);
        let heartbeat_task = tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = tokio::time::sleep(Self::HEARTBEAT_INTERVAL) => {
                        if let Err(e) = heartbeat_queue.heartbeat(heartbeat_job_id).await {
                            tracing::debug!(job_id = %heartbeat_job_id, error = %e, "Failed to update job heartbeat");
                        }
                    }
                    changed = heartbeat_stop_rx.changed() => {
                        if changed.is_err() || *heartbeat_stop_rx.borrow() {
                            break;
                        }
                    }
                }
            }
        });

        // Execute with timeout
        let job_timeout = entry.info.timeout;
        let result = timeout(job_timeout, self.run_handler(&entry, &ctx, &job.input)).await;

        let _ = heartbeat_stop_tx.send(true);
        let _ = heartbeat_task.await;

        let ttl = entry.info.ttl;

        match result {
            Ok(Ok(output)) => {
                // Job completed successfully
                if let Err(e) = self.queue.complete(job.id, output.clone(), ttl).await {
                    tracing::debug!(job_id = %job.id, error = %e, "Failed to mark job as complete");
                }
                ExecutionResult::Completed { output }
            }
            Ok(Err(e)) => {
                // Job failed
                let error_msg = e.to_string();
                // Treat either an explicit cancellation error or a late cancellation request as a cancellation.
                let cancel_requested = match ctx.is_cancel_requested().await {
                    Ok(value) => value,
                    Err(err) => {
                        tracing::debug!(job_id = %job.id, error = %err, "Failed to check cancellation status");
                        false
                    }
                };
                if matches!(e, forge_core::ForgeError::JobCancelled(_)) || cancel_requested {
                    let reason = Self::cancellation_reason(job, "Job cancellation requested");
                    if let Err(e) = self.queue.cancel(job.id, Some(&reason), ttl).await {
                        tracing::debug!(job_id = %job.id, error = %e, "Failed to cancel job");
                    }
                    if let Err(e) = self
                        .run_compensation(&entry, &ctx, &job.input, &reason)
                        .await
                    {
                        tracing::warn!(job_id = %job.id, error = %e, "Job compensation failed");
                    }
                    return ExecutionResult::Cancelled { reason };
                }
                let should_retry = job.attempts < job.max_attempts;

                let retry_delay = if should_retry {
                    Some(entry.info.retry.calculate_backoff(job.attempts as u32))
                } else {
                    None
                };

                let chrono_delay = retry_delay.map(|d| {
                    chrono::Duration::from_std(d).unwrap_or(chrono::Duration::seconds(60))
                });

                if let Err(e) = self.queue.fail(job.id, &error_msg, chrono_delay, ttl).await {
                    tracing::debug!(job_id = %job.id, error = %e, "Failed to record job failure");
                }

                ExecutionResult::Failed {
                    error: error_msg,
                    retryable: should_retry,
                }
            }
            Err(_) => {
                // Timeout
                let error_msg = format!("Job timed out after {:?}", job_timeout);
                let should_retry = job.attempts < job.max_attempts;

                let retry_delay = if should_retry {
                    Some(chrono::Duration::seconds(60))
                } else {
                    None
                };

                if let Err(e) = self.queue.fail(job.id, &error_msg, retry_delay, ttl).await {
                    tracing::debug!(job_id = %job.id, error = %e, "Failed to record job timeout");
                }

                ExecutionResult::TimedOut {
                    retryable: should_retry,
                }
            }
        }
    }

    /// Run the job handler.
    async fn run_handler(
        &self,
        entry: &Arc<JobEntry>,
        ctx: &JobContext,
        input: &serde_json::Value,
    ) -> forge_core::Result<serde_json::Value> {
        (entry.handler)(ctx, input.clone()).await
    }

    /// Run the compensation handler for a cancelled job.
    async fn run_compensation(
        &self,
        entry: &Arc<JobEntry>,
        ctx: &JobContext,
        input: &serde_json::Value,
        reason: &str,
    ) -> forge_core::Result<()> {
        (entry.compensation)(ctx, input.clone(), reason).await
    }

    /// Prefer the reason recorded on the job, falling back to the given message.
    fn cancellation_reason(job: &JobRecord, fallback: &str) -> String {
        job.cancel_reason
            .clone()
            .unwrap_or_else(|| fallback.to_string())
    }
}

/// Result of job execution.
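///
/// Callers usually branch on the variant, or use [`ExecutionResult::should_retry`]
/// when they only care about re-delivery. A minimal sketch:
///
/// ```ignore
/// match result {
///     ExecutionResult::Completed { output } => tracing::info!(?output, "job done"),
///     other if other.should_retry() => { /* the queue will re-deliver after the backoff */ }
///     other => tracing::warn!(?other, "job finished without success"),
/// }
/// ```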
#[derive(Debug)]
pub enum ExecutionResult {
    /// Job completed successfully.
    Completed { output: serde_json::Value },
    /// Job failed.
    Failed { error: String, retryable: bool },
    /// Job timed out.
    TimedOut { retryable: bool },
    /// Job cancelled.
    Cancelled { reason: String },
}

impl ExecutionResult {
    /// Check if execution was successful.
    pub fn is_success(&self) -> bool {
        matches!(self, Self::Completed { .. })
    }

    /// Check if the job should be retried.
    pub fn should_retry(&self) -> bool {
        match self {
            Self::Failed { retryable, .. } => *retryable,
            Self::TimedOut { retryable } => *retryable,
            _ => false,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_execution_result_success() {
        let result = ExecutionResult::Completed {
            output: serde_json::json!({}),
        };
        assert!(result.is_success());
        assert!(!result.should_retry());
    }

    #[test]
    fn test_execution_result_failed_retryable() {
        let result = ExecutionResult::Failed {
            error: "test error".to_string(),
            retryable: true,
        };
        assert!(!result.is_success());
        assert!(result.should_retry());
    }

    #[test]
    fn test_execution_result_failed_not_retryable() {
        let result = ExecutionResult::Failed {
            error: "test error".to_string(),
            retryable: false,
        };
        assert!(!result.is_success());
        assert!(!result.should_retry());
    }

    #[test]
    fn test_execution_result_timeout() {
        let result = ExecutionResult::TimedOut { retryable: true };
        assert!(!result.is_success());
        assert!(result.should_retry());
    }

    #[test]
    fn test_execution_result_cancelled() {
        let result = ExecutionResult::Cancelled {
            reason: "user request".to_string(),
        };
        assert!(!result.is_success());
        assert!(!result.should_retry());
    }
}