// durable/lib.rs

1pub mod ctx;
2pub mod error;
3pub mod executor;
4
5pub use ctx::Ctx;
6pub use ctx::RetryPolicy;
7pub use ctx::{TaskQuery, TaskSort, TaskSummary};
8pub use durable_db::entity::sea_orm_active_enums::TaskStatus;
9pub use durable_macros::{step, workflow};
10pub use error::DurableError;
11pub use executor::{Executor, HeartbeatConfig, RecoveredTask};
12pub use sea_orm::DatabaseTransaction;
13
14// Re-export so macro-generated code can reference `durable::inventory::submit!`
15pub use inventory;
16
17use sea_orm::{ConnectOptions, Database, DatabaseConnection};
18use sea_orm_migration::MigratorTrait;
19use std::future::Future;
20use std::pin::Pin;
21use std::sync::RwLock;
22
/// Global executor ID set by [`init`]. Read by [`Ctx::start`] to tag tasks
/// so heartbeat-based recovery can find them after a crash.
static EXECUTOR_ID: RwLock<Option<String>> = RwLock::new(None);

/// Returns the executor ID set by [`init`], or `None` if `init` was not called.
///
/// A poisoned lock (a writer panicked while holding the guard) is recovered
/// rather than treated as "unset": the stored `Option<String>` is still a
/// valid value, and reporting `None` here would wrongly hide an ID that was
/// successfully set.
pub fn executor_id() -> Option<String> {
    EXECUTOR_ID
        .read()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
        .clone()
}
31
32// ── Workflow auto-registration ──────────────────────────────────
/// A compiled-in registration of a workflow function for automatic crash recovery.
///
/// Produced by `#[durable::workflow]` and collected at link time by `inventory`.
/// Only workflows with a single `ctx: Ctx` parameter are registered.
/// Looked up by name via [`find_workflow`] when recovered root tasks are
/// auto-resumed during [`init`].
pub struct WorkflowRegistration {
    /// The workflow name — must match the `name` passed to `Ctx::start`.
    pub name: &'static str,
    /// Resumes the workflow given a `Ctx`. Return type is erased; during
    /// recovery we only care about driving the workflow to completion.
    pub resume_fn: fn(Ctx) -> Pin<Box<dyn Future<Output = Result<(), DurableError>> + Send>>,
}
45
46inventory::collect!(WorkflowRegistration);
47
48/// Look up a registered workflow by name.
49pub fn find_workflow(name: &str) -> Option<&'static WorkflowRegistration> {
50    inventory::iter::<WorkflowRegistration>().find(|r| r.name == name)
51}
52
53// ── Initialization ──────────────────────────────────────────────
54
55/// Initialize durable: connect to Postgres, run migrations, start heartbeat,
56/// recover stale tasks, and auto-resume registered workflows.
57///
58/// After this call, [`Ctx::start`] automatically tags tasks with the executor
59/// ID for crash recovery. Recovered root workflows that have a matching
60/// `#[durable::workflow]` registration are automatically spawned as tokio
61/// tasks.
62///
63/// Uses [`HeartbeatConfig::default()`] (60 s heartbeat, 180 s staleness).
64/// For custom intervals use [`init_with_config`].
65///
66/// ```ignore
67/// let (db, recovered) = durable::init("postgres://localhost/mydb").await?;
68/// ```
69pub async fn init(
70    database_url: &str,
71) -> Result<(DatabaseConnection, Vec<RecoveredTask>), DurableError> {
72    init_with_config(database_url, HeartbeatConfig::default()).await
73}
74
75/// Like [`init`] but with a custom [`HeartbeatConfig`].
76pub async fn init_with_config(
77    database_url: &str,
78    config: HeartbeatConfig,
79) -> Result<(DatabaseConnection, Vec<RecoveredTask>), DurableError> {
80    let mut opt = ConnectOptions::new(database_url);
81    opt.set_schema_search_path("public,durable");
82    let db = Database::connect(opt).await?;
83
84    // Run migrations
85    durable_db::Migrator::up(&db, None).await?;
86
87    // Create executor with a unique ID for this process
88    let eid = format!("exec-{}-{}", std::process::id(), uuid::Uuid::new_v4());
89    if let Ok(mut guard) = EXECUTOR_ID.write() {
90        *guard = Some(eid.clone());
91    }
92    let executor = Executor::new(db.clone(), eid);
93
94    // Write initial heartbeat so other workers know we're alive
95    executor.heartbeat().await?;
96
97    let mut all_recovered = Vec::new();
98
99    // Recover stale tasks: timeout/deadline-based
100    let recovered = executor.recover().await?;
101    if !recovered.is_empty() {
102        tracing::info!(
103            "recovered {} stale tasks (timeout/deadline)",
104            recovered.len()
105        );
106    }
107    all_recovered.extend(recovered);
108
109    // Recover stale tasks: heartbeat-based (dead workers, unknown executors, orphaned NULL)
110    let recovered = executor
111        .recover_stale_tasks(config.staleness_threshold)
112        .await?;
113    if !recovered.is_empty() {
114        tracing::info!(
115            "recovered {} stale tasks from dead/unknown workers",
116            recovered.len()
117        );
118    }
119    all_recovered.extend(recovered);
120
121    // Set recovered tasks back to RUNNING and auto-dispatch registered workflows
122    dispatch_recovered(&db, &all_recovered);
123
124    // Start background heartbeat loop
125    executor.start_heartbeat(&config);
126
127    // Start recovery loop that also auto-dispatches
128    start_recovery_dispatch_loop(
129        db.clone(),
130        executor.executor_id().to_string(),
131        config.staleness_threshold,
132    );
133
134    tracing::info!("durable initialized (executor={})", executor.executor_id());
135    Ok((db, all_recovered))
136}
137
138/// Spawn registered workflow functions for recovered root tasks.
139/// Tasks are already RUNNING (claimed atomically by the recovery SQL).
140fn dispatch_recovered(db: &DatabaseConnection, recovered: &[RecoveredTask]) {
141    for task in recovered {
142        // Only auto-dispatch root workflows (children are driven by their parent)
143        if task.parent_id.is_some() {
144            continue;
145        }
146
147        if let Some(reg) = find_workflow(&task.name) {
148            let db_inner = db.clone();
149            let task_id = task.id;
150            let task_name = task.name.clone();
151            let resume = reg.resume_fn;
152            tokio::spawn(async move {
153                tracing::info!(
154                    workflow = %task_name,
155                    id = %task_id,
156                    "auto-resuming recovered workflow"
157                );
158                match Ctx::from_id(&db_inner, task_id).await {
159                    Ok(ctx) => {
160                        if let Err(e) = (resume)(ctx).await {
161                            tracing::error!(
162                                workflow = %task_name,
163                                id = %task_id,
164                                error = %e,
165                                "recovered workflow failed"
166                            );
167                        }
168                    }
169                    Err(e) => {
170                        tracing::error!(
171                            workflow = %task_name,
172                            id = %task_id,
173                            error = %e,
174                            "failed to attach to recovered workflow"
175                        );
176                    }
177                }
178            });
179        } else {
180            tracing::warn!(
181                workflow = %task.name,
182                id = %task.id,
183                "no registered handler for recovered task — use Ctx::from_id() to resume manually"
184            );
185        }
186    }
187}
188
189/// Spawn a background loop that recovers stale tasks AND auto-dispatches them.
190fn start_recovery_dispatch_loop(
191    db: DatabaseConnection,
192    executor_id: String,
193    staleness_threshold: std::time::Duration,
194) {
195    tokio::spawn(async move {
196        let executor = Executor::new(db.clone(), executor_id);
197        let mut ticker = tokio::time::interval(staleness_threshold);
198        loop {
199            ticker.tick().await;
200
201            // Timeout-based recovery
202            match executor.recover().await {
203                Ok(ref recovered) if !recovered.is_empty() => {
204                    tracing::info!(
205                        "recovered {} stale tasks (timeout/deadline)",
206                        recovered.len()
207                    );
208                    dispatch_recovered(&db, recovered);
209                }
210                Err(e) => tracing::warn!("timeout recovery failed: {e}"),
211                _ => {}
212            }
213
214            // Heartbeat-based recovery
215            match executor.recover_stale_tasks(staleness_threshold).await {
216                Ok(ref recovered) if !recovered.is_empty() => {
217                    tracing::info!(
218                        "recovered {} stale tasks from dead workers",
219                        recovered.len()
220                    );
221                    dispatch_recovered(&db, recovered);
222                }
223                Err(e) => tracing::warn!("heartbeat recovery failed: {e}"),
224                _ => {}
225            }
226        }
227    });
228}
229
230/// Initialize durable: connect to Postgres and run migrations only.
231///
232/// Does **not** start heartbeat or recovery loops, and does **not** set the
233/// global executor ID. Use this for tests, migrations-only scripts, or when
234/// you manage the [`Executor`] yourself.
235///
236/// ```ignore
237/// let db = durable::init_db("postgres://localhost/mydb").await?;
238/// ```
239pub async fn init_db(database_url: &str) -> Result<DatabaseConnection, DurableError> {
240    let mut opt = ConnectOptions::new(database_url);
241    opt.set_schema_search_path("public,durable");
242    let db = Database::connect(opt).await?;
243    durable_db::Migrator::up(&db, None).await?;
244    tracing::info!("durable initialized (db only)");
245    Ok(db)
246}