//! Daemon execution context.
//!
//! Source path: forge_core/daemon/context.rs

1use std::sync::Arc;
2
3use tokio::sync::{Mutex, watch};
4use tracing::Span;
5use uuid::Uuid;
6
7use crate::env::{EnvAccess, EnvProvider, RealEnvProvider};
8use crate::function::{JobDispatch, WorkflowDispatch};
9
/// Context available to daemon handlers.
///
/// Bundles the shared resources a long-running daemon needs: a database
/// pool, an HTTP client, optional job/workflow dispatchers, an environment
/// provider, and a shutdown signal for graceful termination.
pub struct DaemonContext {
    /// Daemon name.
    pub daemon_name: String,
    /// Unique instance ID for this daemon execution.
    pub instance_id: Uuid,
    /// Database pool.
    db_pool: sqlx::PgPool,
    /// HTTP client for external calls.
    http_client: reqwest::Client,
    /// Shutdown signal receiver (wrapped in a tokio `Mutex` so `&self`
    /// methods can poll it or await changes on it).
    shutdown_rx: Mutex<watch::Receiver<bool>>,
    /// Job dispatcher for background jobs; `None` until set via
    /// `with_job_dispatch`.
    job_dispatch: Option<Arc<dyn JobDispatch>>,
    /// Workflow dispatcher for starting workflows; `None` until set via
    /// `with_workflow_dispatch`.
    workflow_dispatch: Option<Arc<dyn WorkflowDispatch>>,
    /// Environment variable provider (defaults to `RealEnvProvider`).
    env_provider: Arc<dyn EnvProvider>,
    /// Parent span for trace propagation.
    span: Span,
}
31
32impl DaemonContext {
33    /// Create a new daemon context.
34    pub fn new(
35        daemon_name: String,
36        instance_id: Uuid,
37        db_pool: sqlx::PgPool,
38        http_client: reqwest::Client,
39        shutdown_rx: watch::Receiver<bool>,
40    ) -> Self {
41        Self {
42            daemon_name,
43            instance_id,
44            db_pool,
45            http_client,
46            shutdown_rx: Mutex::new(shutdown_rx),
47            job_dispatch: None,
48            workflow_dispatch: None,
49            env_provider: Arc::new(RealEnvProvider::new()),
50            span: Span::current(),
51        }
52    }
53
54    /// Set job dispatcher.
55    pub fn with_job_dispatch(mut self, dispatcher: Arc<dyn JobDispatch>) -> Self {
56        self.job_dispatch = Some(dispatcher);
57        self
58    }
59
60    /// Set workflow dispatcher.
61    pub fn with_workflow_dispatch(mut self, dispatcher: Arc<dyn WorkflowDispatch>) -> Self {
62        self.workflow_dispatch = Some(dispatcher);
63        self
64    }
65
66    /// Set environment provider.
67    pub fn with_env_provider(mut self, provider: Arc<dyn EnvProvider>) -> Self {
68        self.env_provider = provider;
69        self
70    }
71
72    pub fn db(&self) -> &sqlx::PgPool {
73        &self.db_pool
74    }
75
76    /// Returns a `DbConn` wrapping the pool for shared helper functions.
77    pub fn db_conn(&self) -> crate::function::DbConn<'_> {
78        crate::function::DbConn::Pool(&self.db_pool)
79    }
80
81    pub fn http(&self) -> &reqwest::Client {
82        &self.http_client
83    }
84
85    /// Dispatch a background job.
86    pub async fn dispatch_job<T: serde::Serialize>(
87        &self,
88        job_type: &str,
89        args: T,
90    ) -> crate::Result<Uuid> {
91        let dispatcher = self.job_dispatch.as_ref().ok_or_else(|| {
92            crate::error::ForgeError::Internal("Job dispatch not available".to_string())
93        })?;
94
95        let args_json = serde_json::to_value(args)?;
96        dispatcher.dispatch_by_name(job_type, args_json, None).await
97    }
98
99    /// Start a workflow.
100    pub async fn start_workflow<T: serde::Serialize>(
101        &self,
102        workflow_name: &str,
103        input: T,
104    ) -> crate::Result<Uuid> {
105        let dispatcher = self.workflow_dispatch.as_ref().ok_or_else(|| {
106            crate::error::ForgeError::Internal("Workflow dispatch not available".to_string())
107        })?;
108
109        let input_json = serde_json::to_value(input)?;
110        dispatcher
111            .start_by_name(workflow_name, input_json, None)
112            .await
113    }
114
115    /// Check if shutdown has been requested.
116    pub fn is_shutdown_requested(&self) -> bool {
117        // Use try_lock to avoid blocking; if can't lock, assume not shutdown
118        self.shutdown_rx
119            .try_lock()
120            .map(|rx| *rx.borrow())
121            .unwrap_or(false)
122    }
123
124    /// Wait for shutdown signal.
125    ///
126    /// Use this in a `tokio::select!` to handle graceful shutdown:
127    ///
128    /// ```ignore
129    /// tokio::select! {
130    ///     _ = tokio::time::sleep(Duration::from_secs(60)) => {}
131    ///     _ = ctx.shutdown_signal() => break,
132    /// }
133    /// ```
134    pub async fn shutdown_signal(&self) {
135        let mut rx = self.shutdown_rx.lock().await;
136        // Wait until the value becomes true
137        while !*rx.borrow_and_update() {
138            if rx.changed().await.is_err() {
139                // Channel closed, treat as shutdown
140                break;
141            }
142        }
143    }
144
145    /// Send heartbeat to indicate daemon is alive.
146    pub async fn heartbeat(&self) -> crate::Result<()> {
147        tracing::trace!(daemon.name = %self.daemon_name, "Sending heartbeat");
148
149        sqlx::query(
150            r#"
151            UPDATE forge_daemons
152            SET last_heartbeat = NOW()
153            WHERE name = $1 AND instance_id = $2
154            "#,
155        )
156        .bind(&self.daemon_name)
157        .bind(self.instance_id)
158        .execute(&self.db_pool)
159        .await
160        .map_err(|e| crate::ForgeError::Database(e.to_string()))?;
161
162        Ok(())
163    }
164
165    /// Get the trace ID for this daemon execution.
166    ///
167    /// Returns the instance_id as a correlation ID.
168    pub fn trace_id(&self) -> String {
169        self.instance_id.to_string()
170    }
171
172    /// Get the parent span for trace propagation.
173    ///
174    /// Use this to create child spans within daemon handlers.
175    pub fn span(&self) -> &Span {
176        &self.span
177    }
178}
179
180impl EnvAccess for DaemonContext {
181    fn env_provider(&self) -> &dyn EnvProvider {
182        self.env_provider.as_ref()
183    }
184}
185
#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::indexing_slicing)]
mod tests {
    use super::*;

    /// Build a context named "test_daemon" backed by a lazy pool that never
    /// actually connects.
    fn make_ctx(instance_id: Uuid, shutdown_rx: watch::Receiver<bool>) -> DaemonContext {
        let pool = sqlx::postgres::PgPoolOptions::new()
            .max_connections(1)
            .connect_lazy("postgres://localhost/nonexistent")
            .expect("Failed to create mock pool");
        DaemonContext::new(
            "test_daemon".to_string(),
            instance_id,
            pool,
            reqwest::Client::new(),
            shutdown_rx,
        )
    }

    #[tokio::test]
    async fn test_daemon_context_creation() {
        let (shutdown_tx, shutdown_rx) = watch::channel(false);
        let instance_id = Uuid::new_v4();
        let ctx = make_ctx(instance_id, shutdown_rx);

        assert_eq!(ctx.daemon_name, "test_daemon");
        assert_eq!(ctx.instance_id, instance_id);
        assert!(!ctx.is_shutdown_requested());

        // Flip the watch value and confirm the context observes it.
        shutdown_tx.send(true).unwrap();
        assert!(ctx.is_shutdown_requested());
    }

    #[tokio::test]
    async fn test_shutdown_signal() {
        let (shutdown_tx, shutdown_rx) = watch::channel(false);
        let ctx = make_ctx(Uuid::new_v4(), shutdown_rx);

        // Signal shutdown from a background task after a short delay.
        tokio::spawn(async move {
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
            shutdown_tx.send(true).unwrap();
        });

        // `shutdown_signal` must resolve once the signal fires.
        tokio::time::timeout(std::time::Duration::from_millis(200), ctx.shutdown_signal())
            .await
            .expect("Shutdown signal should complete");

        assert!(ctx.is_shutdown_requested());
    }
}