turul_a2a/storage/error.rs
1use turul_a2a_types::{A2aTypeError, TaskState};
2
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum A2aStorageError {
    /// No task with the given id exists in the store.
    #[error("Task not found: {0}")]
    TaskNotFound(String),

    /// The task state machine rejected moving from `current` to
    /// `requested`.
    #[error("Invalid state transition: {current:?} -> {requested:?}")]
    InvalidTransition {
        current: TaskState,
        requested: TaskState,
    },

    /// The task is already in the given terminal state and cannot accept
    /// further transitions. This is the raw state-machine signal; see
    /// [`Self::TerminalStateAlreadySet`] for the CAS-race flavor.
    #[error("Task is in terminal state: {0:?}")]
    TerminalState(TaskState),

    /// The atomic store's single-terminal-writer invariant rejected a
    /// terminal write because the task was already in a terminal state
    /// at the time of the CAS. Distinct from [`Self::TerminalState`]
    /// which is the raw state-machine signal: `TerminalStateAlreadySet`
    /// is specifically the "you lost the race" signal emitted by
    /// [`crate::storage::A2aAtomicStore::update_task_status_with_events`].
    /// Callers typically translate this to HTTP 409 `TaskNotCancelable` /
    /// JSON-RPC `-32002` on the wire, and to `EventSink::closed` semantics
    /// when the caller is an executor sink.
    ///
    /// The `current_state` field carries the wire enum name (e.g.
    /// `"TASK_STATE_COMPLETED"`) so log/telemetry consumers can see
    /// exactly which terminal won the race.
    #[error("task {task_id} already in terminal state {current_state} (CAS loser)")]
    TerminalStateAlreadySet {
        task_id: String,
        current_state: String,
    },

    /// The caller is not the recorded owner of the task.
    #[error("Owner mismatch for task: {task_id}")]
    OwnerMismatch { task_id: String },

    /// The caller's tenant does not match the tenant the task belongs to.
    #[error("Tenant mismatch for task: {task_id}")]
    TenantMismatch { task_id: String },

    /// Optimistic-concurrency conflict; the payload is a pre-rendered,
    /// human-readable description of what was modified concurrently.
    #[error("Concurrent modification: {0}")]
    ConcurrentModification(String),

    /// No push notification config with the given id exists.
    #[error("Push notification config not found: {0}")]
    PushConfigNotFound(String),

    /// The push delivery claim for a `(tenant, task_id, event_sequence,
    /// config_id)` tuple is already held by another instance whose claim
    /// has not yet expired, OR the tuple has already reached a terminal
    /// outcome (`Succeeded`, `GaveUp`, `Abandoned`) and cannot be
    /// re-claimed regardless of expiry.
    ///
    /// Returned only by
    /// [`crate::push::A2aPushDeliveryStore::claim_delivery`]. Callers
    /// treat this as "skip delivery on this instance" — the event is
    /// already being (or has already been) handled.
    #[error(
        "push delivery claim already held: tenant={tenant} task_id={task_id} \
        event_sequence={event_sequence} config_id={config_id}"
    )]
    ClaimAlreadyHeld {
        tenant: String,
        task_id: String,
        event_sequence: u64,
        config_id: String,
    },

    /// The claim identity passed to
    /// [`crate::push::A2aPushDeliveryStore::record_delivery_outcome`]
    /// does not match the currently-stored claim for this tuple.
    /// Two causes: the claim expired and another instance re-claimed
    /// (generation advanced), or the same instance's prior process
    /// died and the restarted process holds a different `claimant`
    /// identifier. Either way, the stale caller's outcome is
    /// dropped so it cannot overwrite a terminal state committed by
    /// the current claimant.
    ///
    /// Workers that receive this error MUST abort their retry loop
    /// for the affected tuple — the current claimant (or whoever
    /// re-claims next) owns the remaining lifecycle.
    #[error(
        "stale push delivery claim: tenant={tenant} task_id={task_id} \
        event_sequence={event_sequence} config_id={config_id} — recorded \
        outcome dropped because the claim was re-acquired by another \
        claimant or generation"
    )]
    StaleDeliveryClaim {
        tenant: String,
        task_id: String,
        event_sequence: u64,
        config_id: String,
    },

    /// §6.4: `create_config` exhausted its bounded
    /// retry budget (default 5 attempts with 10/50/250/1000 ms
    /// backoff) while its CAS against `a2a_tasks.latest_event_sequence`
    /// kept losing to concurrent event commits. The operator should
    /// retry the create from the handler; in practice this surfaces
    /// only under pathological event-burst workloads against a single
    /// task.
    ///
    /// NOTE(review): the "§6.4" citation appears to have lost the name of
    /// the document it refers to — confirm and restore the source spec.
    #[error(
        "create_config CAS exhausted retries for tenant={tenant} task_id={task_id}: \
        concurrent event commits kept advancing latest_event_sequence"
    )]
    CreateConfigCasTimeout { tenant: String, task_id: String },

    /// Backend database failure, carried as a pre-rendered message
    /// (the backend's own error type is erased at this boundary).
    #[error("Database error: {0}")]
    DatabaseError(String),

    /// Failure to (de)serialize stored data, carried as a pre-rendered
    /// message.
    #[error("Serialization error: {0}")]
    SerializationError(String),

    /// Conversion/validation error bubbled up from the
    /// `turul_a2a_types` layer; converts automatically via `#[from]`,
    /// so `?` on an `A2aTypeError` result works in storage code.
    #[error("Type error: {0}")]
    TypeError(#[from] A2aTypeError),

    /// Catch-all for storage errors that have no dedicated variant.
    #[error("Generic storage error: {0}")]
    Generic(String),
}