ralph/queue/operations/batch/update.rs
//! Batch update operations for tasks.
//!
//! Responsibilities:
//! - Batch set status for multiple tasks.
//! - Batch set custom fields for multiple tasks.
//! - Batch apply task edits for multiple tasks.
//!
//! Does not handle:
//! - Task creation or deletion (see generate.rs and delete.rs).
//! - Task filtering/selection (see filters.rs).
//!
//! Assumptions/invariants:
//! - All operations support atomic mode (fail on first error) or continue-on-error mode.
//! - Task IDs are deduplicated before processing.

16use crate::contracts::{QueueFile, TaskStatus};
17use crate::queue;
18use crate::queue::TaskEditKey;
19use anyhow::Result;
20
21use super::{
22 BatchOperationResult, BatchResultCollector, preprocess_batch_ids, validate_task_ids_exist,
23};
24
25/// Batch set status for multiple tasks.
26///
27/// # Arguments
28/// * `queue` - The queue file to modify
29/// * `task_ids` - List of task IDs to update
30/// * `status` - The new status to set
31/// * `now_rfc3339` - Current timestamp for updated_at
32/// * `note` - Optional note to append to each task
33/// * `continue_on_error` - If true, continue processing on individual failures
34///
35/// # Returns
36/// A `BatchOperationResult` with details of successes and failures.
37pub fn batch_set_status(
38 queue: &mut QueueFile,
39 task_ids: &[String],
40 status: TaskStatus,
41 now_rfc3339: &str,
42 note: Option<&str>,
43 continue_on_error: bool,
44) -> Result<BatchOperationResult> {
45 let unique_ids = preprocess_batch_ids(task_ids, "status update")?;
46
47 // In atomic mode, validate all IDs exist first
48 if !continue_on_error {
49 validate_task_ids_exist(queue, &unique_ids)?;
50 }
51
52 let mut collector =
53 BatchResultCollector::new(unique_ids.len(), continue_on_error, "status update");
54
55 for task_id in &unique_ids {
56 match queue::set_status(queue, task_id, status, now_rfc3339, note) {
57 Ok(()) => {
58 collector.record_success(task_id.clone(), Vec::new());
59 }
60 Err(e) => {
61 let error_msg = e.to_string();
62 collector.record_failure(task_id.clone(), error_msg)?;
63 }
64 }
65 }
66
67 Ok(collector.finish())
68}
69
70/// Batch set custom field for multiple tasks.
71///
72/// # Arguments
73/// * `queue` - The queue file to modify
74/// * `task_ids` - List of task IDs to update
75/// * `key` - The custom field key
76/// * `value` - The custom field value
77/// * `now_rfc3339` - Current timestamp for updated_at
78/// * `continue_on_error` - If true, continue processing on individual failures
79///
80/// # Returns
81/// A `BatchOperationResult` with details of successes and failures.
82pub fn batch_set_field(
83 queue: &mut QueueFile,
84 task_ids: &[String],
85 key: &str,
86 value: &str,
87 now_rfc3339: &str,
88 continue_on_error: bool,
89) -> Result<BatchOperationResult> {
90 let unique_ids = preprocess_batch_ids(task_ids, "field update")?;
91
92 // In atomic mode, validate all IDs exist first
93 if !continue_on_error {
94 validate_task_ids_exist(queue, &unique_ids)?;
95 }
96
97 let mut collector =
98 BatchResultCollector::new(unique_ids.len(), continue_on_error, "field update");
99
100 for task_id in &unique_ids {
101 match queue::set_field(queue, task_id, key, value, now_rfc3339) {
102 Ok(()) => {
103 collector.record_success(task_id.clone(), Vec::new());
104 }
105 Err(e) => {
106 let error_msg = e.to_string();
107 collector.record_failure(task_id.clone(), error_msg)?;
108 }
109 }
110 }
111
112 Ok(collector.finish())
113}
114
115/// Batch edit field for multiple tasks.
116///
117/// # Arguments
118/// * `queue` - The queue file to modify
119/// * `done` - Optional done file for validation
120/// * `task_ids` - List of task IDs to update
121/// * `key` - The field to edit
122/// * `value` - The new value
123/// * `now_rfc3339` - Current timestamp for updated_at
124/// * `id_prefix` - Task ID prefix for validation
125/// * `id_width` - Task ID width for validation
126/// * `max_dependency_depth` - Maximum dependency depth for validation
127/// * `continue_on_error` - If true, continue processing on individual failures
128///
129/// # Returns
130/// A `BatchOperationResult` with details of successes and failures.
131#[allow(clippy::too_many_arguments)]
132pub fn batch_apply_edit(
133 queue: &mut QueueFile,
134 done: Option<&QueueFile>,
135 task_ids: &[String],
136 key: TaskEditKey,
137 value: &str,
138 now_rfc3339: &str,
139 id_prefix: &str,
140 id_width: usize,
141 max_dependency_depth: u8,
142 continue_on_error: bool,
143) -> Result<BatchOperationResult> {
144 let unique_ids = preprocess_batch_ids(task_ids, "edit")?;
145
146 // In atomic mode, validate all IDs exist first
147 if !continue_on_error {
148 validate_task_ids_exist(queue, &unique_ids)?;
149 }
150
151 let mut collector = BatchResultCollector::new(unique_ids.len(), continue_on_error, "edit");
152
153 for task_id in &unique_ids {
154 match queue::apply_task_edit(
155 queue,
156 done,
157 task_id,
158 key,
159 value,
160 now_rfc3339,
161 id_prefix,
162 id_width,
163 max_dependency_depth,
164 ) {
165 Ok(()) => {
166 collector.record_success(task_id.clone(), Vec::new());
167 }
168 Err(e) => {
169 let error_msg = e.to_string();
170 collector.record_failure(task_id.clone(), error_msg)?;
171 }
172 }
173 }
174
175 Ok(collector.finish())
176}