1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
use chrono::{DateTime, Utc};
use serde::Serialize;
use sha2::{Digest, Sha256};
use crate::db::{ConnExt, ConnQueryExt, Database};
use crate::error::{Error, Result};
/// Result of an idempotent enqueue operation.
///
/// Returned by [`Enqueuer::enqueue_unique`] and
/// [`Enqueuer::enqueue_unique_with`].
///
/// Uniqueness is decided by a SHA-256 hash of the job name and its JSON
/// payload (see `compute_payload_hash`), checked against jobs that are
/// still `pending` or `running`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EnqueueResult {
/// A new job was inserted; contains its ID.
Created(String),
/// A job with the same name and payload is already pending or running;
/// contains the ID of the existing job.
Duplicate(String),
}
/// Options for customising a job enqueue operation.
///
/// Construct via struct-update syntax from [`Default`]:
/// `EnqueueOptions { queue: "emails".into(), ..Default::default() }`.
// Derive `Debug` so callers can log/inspect options (Rust API guideline
// C-COMMON-TRAITS for public types); both fields already implement it.
#[derive(Debug, Clone)]
pub struct EnqueueOptions {
    /// Name of the queue to place the job in. Defaults to `"default"`.
    pub queue: String,
    /// When to make the job eligible for execution. Defaults to now (immediate).
    pub run_at: Option<DateTime<Utc>>,
}
impl Default for EnqueueOptions {
fn default() -> Self {
Self {
queue: "default".to_string(),
run_at: None,
}
}
}
/// Enqueues jobs into the `jobs` SQLite table.
///
/// Constructed via [`Enqueuer::new`]. Cheaply cloneable — the underlying
/// database handle is `Arc`-wrapped.
#[derive(Clone)]
pub struct Enqueuer {
// Handle to the database holding the `jobs` table; all queries go
// through `db.conn()`.
db: Database,
}
impl Enqueuer {
/// Create a new `Enqueuer` using the given database handle.
pub fn new(db: Database) -> Self {
Self { db }
}
/// Enqueue a job on the default queue for immediate execution.
///
/// Returns the new job's ID on success.
///
/// # Errors
///
/// Returns an error if the payload cannot be serialized to JSON or if the
/// database insert fails.
pub async fn enqueue<T: Serialize>(&self, name: &str, payload: &T) -> Result<String> {
self.enqueue_with(name, payload, EnqueueOptions::default())
.await
}
/// Enqueue a job on the default queue to run at a specific time.
///
/// Returns the new job's ID on success.
///
/// # Errors
///
/// Returns an error if the payload cannot be serialized to JSON or if the
/// database insert fails.
pub async fn enqueue_at<T: Serialize>(
&self,
name: &str,
payload: &T,
run_at: DateTime<Utc>,
) -> Result<String> {
self.enqueue_with(
name,
payload,
EnqueueOptions {
run_at: Some(run_at),
..Default::default()
},
)
.await
}
/// Enqueue a job with full control over queue and schedule.
///
/// Returns the new job's ID on success.
///
/// # Errors
///
/// Returns an error if the payload cannot be serialized to JSON or if the
/// database insert fails.
pub async fn enqueue_with<T: Serialize>(
&self,
name: &str,
payload: &T,
options: EnqueueOptions,
) -> Result<String> {
let id = crate::id::ulid();
let payload_json = serde_json::to_string(payload)
.map_err(|e| Error::internal(format!("serialize job payload: {e}")))?;
let now = Utc::now();
let run_at = options.run_at.unwrap_or(now);
let now_str = now.to_rfc3339();
let run_at_str = run_at.to_rfc3339();
self.db
.conn()
.execute_raw(
"INSERT INTO jobs (id, name, queue, payload, status, attempt, run_at, created_at, updated_at) \
VALUES (?1, ?2, ?3, ?4, 'pending', 0, ?5, ?6, ?7)",
libsql::params![id.as_str(), name, options.queue.as_str(), payload_json.as_str(), run_at_str.as_str(), now_str.as_str(), now_str.as_str()],
)
.await
.map_err(|e| Error::internal(format!("enqueue job: {e}")))?;
Ok(id)
}
/// Enqueue a job only if no pending or running job with the same name and
/// payload already exists (idempotent enqueue on the default queue).
///
/// The uniqueness key is a SHA-256 hash of `name + "\0" + payload_json`.
///
/// # Errors
///
/// Returns an error if the payload cannot be serialized to JSON or if a
/// database operation fails (other than the expected unique-constraint
/// violation).
pub async fn enqueue_unique<T: Serialize>(
&self,
name: &str,
payload: &T,
) -> Result<EnqueueResult> {
self.enqueue_unique_with(name, payload, EnqueueOptions::default())
.await
}
/// Enqueue a job only if no pending or running job with the same name and
/// payload already exists, with full queue and schedule options.
///
/// The uniqueness key is a SHA-256 hash of `name + "\0" + payload_json`.
///
/// # Errors
///
/// Returns an error if the payload cannot be serialized to JSON or if a
/// database operation fails (other than the expected unique-constraint
/// violation).
pub async fn enqueue_unique_with<T: Serialize>(
&self,
name: &str,
payload: &T,
options: EnqueueOptions,
) -> Result<EnqueueResult> {
let payload_json = serde_json::to_string(payload)
.map_err(|e| Error::internal(format!("serialize job payload: {e}")))?;
let hash = compute_payload_hash(name, &payload_json);
let id = crate::id::ulid();
let now = Utc::now();
let run_at = options.run_at.unwrap_or(now);
let now_str = now.to_rfc3339();
let run_at_str = run_at.to_rfc3339();
match self
.db
.conn()
.execute_raw(
"INSERT INTO jobs (id, name, queue, payload, payload_hash, status, attempt, run_at, created_at, updated_at) \
VALUES (?1, ?2, ?3, ?4, ?5, 'pending', 0, ?6, ?7, ?8)",
libsql::params![id.as_str(), name, options.queue.as_str(), payload_json.as_str(), hash.as_str(), run_at_str.as_str(), now_str.as_str(), now_str.as_str()],
)
.await
{
Ok(_) => Ok(EnqueueResult::Created(id)),
Err(ref e) if is_unique_violation(e) => {
let existing_id: String = self
.db
.conn()
.query_one_map(
"SELECT id FROM jobs WHERE payload_hash = ?1 AND status IN ('pending', 'running') LIMIT 1",
libsql::params![hash.as_str()],
|row| {
use crate::db::FromValue;
let val = row.get_value(0).map_err(crate::Error::from)?;
String::from_value(val)
},
)
.await
.map_err(|e| Error::internal(format!("fetch duplicate job id: {e}")))?;
Ok(EnqueueResult::Duplicate(existing_id))
}
Err(e) => Err(Error::internal(format!("enqueue unique job: {e}"))),
}
}
/// Cancel a pending job by ID.
///
/// Returns `true` if the job was found and cancelled, `false` if it was
/// not found or was already past the `pending` state.
///
/// # Errors
///
/// Returns an error if the database update fails.
pub async fn cancel(&self, id: &str) -> Result<bool> {
let now_str = Utc::now().to_rfc3339();
let affected = self
.db
.conn()
.execute_raw(
"UPDATE jobs SET status = 'cancelled', updated_at = ?1 WHERE id = ?2 AND status = 'pending'",
libsql::params![now_str.as_str(), id],
)
.await
.map_err(|e| Error::internal(format!("cancel job: {e}")))?;
Ok(affected > 0)
}
}
/// Check if a libsql error is a unique constraint violation.
fn is_unique_violation(err: &libsql::Error) -> bool {
matches!(err, libsql::Error::SqliteFailure(2067 | 1555, _))
}
/// Hex-encoded SHA-256 of `name`, a NUL separator byte, and `payload_json`.
///
/// The NUL byte keeps the key unambiguous so distinct (name, payload)
/// pairs such as ("ab", "c") and ("a", "bc") cannot collide by
/// concatenation.
fn compute_payload_hash(name: &str, payload_json: &str) -> String {
    // Assemble the exact byte sequence to hash, then feed it in one call;
    // SHA-256 over the same bytes yields the same digest as streaming.
    let mut keyed = Vec::with_capacity(name.len() + 1 + payload_json.len());
    keyed.extend_from_slice(name.as_bytes());
    keyed.push(0);
    keyed.extend_from_slice(payload_json.as_bytes());
    let mut hasher = Sha256::new();
    hasher.update(&keyed);
    crate::encoding::hex::encode(&hasher.finalize())
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn payload_hash_is_deterministic() {
        assert_eq!(
            compute_payload_hash("test", r#"{"a":1}"#),
            compute_payload_hash("test", r#"{"a":1}"#)
        );
    }

    #[test]
    fn payload_hash_differs_by_name() {
        assert_ne!(
            compute_payload_hash("job_a", r#"{"a":1}"#),
            compute_payload_hash("job_b", r#"{"a":1}"#)
        );
    }

    #[test]
    fn payload_hash_differs_by_payload() {
        assert_ne!(
            compute_payload_hash("test", r#"{"a":1}"#),
            compute_payload_hash("test", r#"{"a":2}"#)
        );
    }

    #[test]
    fn payload_hash_no_boundary_collision() {
        // The NUL separator must keep ("ab","c") and ("a","bc") distinct.
        assert_ne!(
            compute_payload_hash("ab", "c"),
            compute_payload_hash("a", "bc")
        );
    }
}