use crate::contracts::{QueueFile, TaskStatus};
use crate::queue;
use crate::queue::operations::{CloneTaskOptions, SplitTaskOptions, suggest_new_task_insert_index};
use anyhow::{Result, bail};
use super::{BatchOperationResult, BatchResultCollector, preprocess_batch_ids};
/// Clones each task in `task_ids` into `queue`, inserting every clone at the
/// queue's suggested insert index.
///
/// Behavior depends on `continue_on_error`:
/// * `false` (strict/atomic): every id is validated up front against the
///   active queue and, when provided, the `done` queue; a snapshot of `queue`
///   is taken, and the first mid-batch failure restores that snapshot before
///   bailing with a hint to use `--continue-on-error`.
/// * `true` (best-effort): failures are recorded per task via the collector
///   and the remaining ids are still processed.
///
/// `now_rfc3339`, `id_prefix`, and `id_width` feed new-id generation;
/// `max_dependency_depth` bounds dependency traversal during cloning.
///
/// # Errors
/// Fails on duplicate/unknown source ids (strict mode), on a failed clone
/// operation in strict mode, or when the collector itself reports an error.
#[allow(clippy::too_many_arguments)]
pub fn batch_clone_tasks(
    queue: &mut QueueFile,
    done: Option<&QueueFile>,
    task_ids: &[String],
    status: TaskStatus,
    title_prefix: Option<&str>,
    now_rfc3339: &str,
    id_prefix: &str,
    id_width: usize,
    max_dependency_depth: u8,
    continue_on_error: bool,
) -> Result<BatchOperationResult> {
    let unique_ids = preprocess_batch_ids(task_ids, "clone")?;
    // Strict mode: reject the whole batch before any mutation if a source id
    // exists in neither the active queue nor the done queue.
    if !continue_on_error {
        for task_id in &unique_ids {
            let exists_in_active = queue.tasks.iter().any(|t| t.id == *task_id);
            let exists_in_done = done.is_some_and(|d| d.tasks.iter().any(|t| t.id == *task_id));
            if !exists_in_active && !exists_in_done {
                bail!(
                    "{}",
                    crate::error_messages::source_task_not_found(task_id, true)
                );
            }
        }
    }
    // Snapshot for rollback; only taken in strict (atomic) mode.
    let mut original_queue = if continue_on_error {
        None
    } else {
        Some(queue.clone())
    };
    let mut collector = BatchResultCollector::new(unique_ids.len(), continue_on_error, "clone");
    for task_id in &unique_ids {
        let opts = CloneTaskOptions::new(task_id, status, now_rfc3339, id_prefix, id_width)
            .with_title_prefix(title_prefix)
            .with_max_depth(max_dependency_depth);
        match queue::operations::clone_task(queue, done, &opts) {
            Ok((new_id, cloned_task)) => {
                let insert_idx = suggest_new_task_insert_index(queue);
                queue.tasks.insert(insert_idx, cloned_task);
                collector.record_success(task_id.clone(), vec![new_id]);
            }
            Err(e) => {
                let error_msg = e.to_string();
                if !continue_on_error {
                    // Roll back by moving the snapshot back in place. `take()`
                    // avoids a second deep clone — we bail immediately after,
                    // so the snapshot is never needed again.
                    if let Some(original) = original_queue.take() {
                        *queue = original;
                    }
                    bail!(
                        "Batch clone failed at task {}: {}. Use --continue-on-error to process remaining tasks.",
                        task_id,
                        error_msg
                    );
                }
                collector.record_failure(task_id.clone(), error_msg)?;
            }
        }
    }
    Ok(collector.finish())
}
/// Splits each task in `task_ids` into `number` child tasks, replacing the
/// source task in place and inserting the children directly after it.
///
/// Behavior depends on `continue_on_error`:
/// * `false` (strict/atomic): every id is validated up front against the
///   active queue, a snapshot of `queue` is taken, and the first mid-batch
///   failure restores that snapshot before bailing with a hint to use
///   `--continue-on-error`.
/// * `true` (best-effort): failures are recorded per task via the collector
///   and the remaining ids are still processed.
///
/// `distribute_plan` forwards the split option of the same name;
/// `now_rfc3339`, `id_prefix`, and `id_width` feed child-id generation;
/// `max_dependency_depth` bounds dependency traversal during splitting.
///
/// # Errors
/// Fails when `number < 2`, on duplicate/unknown source ids (strict mode),
/// on a failed split operation in strict mode, or when the collector itself
/// reports an error.
#[allow(clippy::too_many_arguments)]
pub fn batch_split_tasks(
    queue: &mut QueueFile,
    task_ids: &[String],
    number: usize,
    status: TaskStatus,
    title_prefix: Option<&str>,
    distribute_plan: bool,
    now_rfc3339: &str,
    id_prefix: &str,
    id_width: usize,
    max_dependency_depth: u8,
    continue_on_error: bool,
) -> Result<BatchOperationResult> {
    // A split into fewer than two children is not a split at all.
    if number < 2 {
        bail!("Number of child tasks must be at least 2");
    }
    let unique_ids = preprocess_batch_ids(task_ids, "split")?;
    // Strict mode: reject the whole batch before any mutation if a source id
    // is missing from the active queue (done tasks cannot be split).
    if !continue_on_error {
        for task_id in &unique_ids {
            if !queue.tasks.iter().any(|t| t.id == *task_id) {
                bail!(
                    "{}",
                    crate::error_messages::source_task_not_found(task_id, false)
                );
            }
        }
    }
    // Snapshot for rollback; only taken in strict (atomic) mode.
    let mut original_queue = if continue_on_error {
        None
    } else {
        Some(queue.clone())
    };
    let mut collector = BatchResultCollector::new(unique_ids.len(), continue_on_error, "split");
    for task_id in &unique_ids {
        let opts = SplitTaskOptions::new(task_id, number, status, now_rfc3339, id_prefix, id_width)
            .with_title_prefix(title_prefix)
            .with_distribute_plan(distribute_plan)
            .with_max_depth(max_dependency_depth);
        match queue::operations::split_task(queue, None, &opts) {
            Ok((updated_source, child_tasks)) => {
                // Re-locate the source: split_task may have mutated the queue,
                // so the original position cannot be assumed.
                if let Some(idx) = queue.tasks.iter().position(|t| t.id == *task_id) {
                    queue.tasks[idx] = updated_source;
                    let child_ids: Vec<String> = child_tasks.iter().map(|t| t.id.clone()).collect();
                    // Insert children immediately after the source, preserving order.
                    for (i, child) in child_tasks.into_iter().enumerate() {
                        queue.tasks.insert(idx + 1 + i, child);
                    }
                    collector.record_success(task_id.clone(), child_ids);
                } else {
                    let err_msg = "Source task disappeared during split".to_string();
                    if !continue_on_error {
                        // Roll back by moving the snapshot back in place.
                        // `take()` avoids a second deep clone — we bail
                        // immediately after.
                        if let Some(original) = original_queue.take() {
                            *queue = original;
                        }
                        bail!("{}", err_msg);
                    }
                    collector.record_failure(task_id.clone(), err_msg)?;
                }
            }
            Err(e) => {
                let error_msg = e.to_string();
                if !continue_on_error {
                    // Same rollback-by-move as above: no redundant clone.
                    if let Some(original) = original_queue.take() {
                        *queue = original;
                    }
                    bail!(
                        "Batch split failed at task {}: {}. Use --continue-on-error to process remaining tasks.",
                        task_id,
                        error_msg
                    );
                }
                collector.record_failure(task_id.clone(), error_msg)?;
            }
        }
    }
    Ok(collector.finish())
}