#[cfg(test)]
mod tests {
use crate::config::mapreduce::parse_mapreduce_workflow;
use crate::cook::workflow::{ExtendedWorkflowConfig, WorkflowMode};
use tempfile::TempDir;
#[tokio::test]
async fn test_setup_phase_execution() {
    // A workflow with a two-command setup phase should parse with both
    // commands preserved.
    //
    // Fix: the items JSON previously used doubled braces (`{{`) — a leftover
    // from a `format!` template — but this is a plain raw string, so the
    // doubled braces would be emitted literally and produce invalid JSON.
    let yaml = r#"
name: test-mapreduce
mode: mapreduce
setup:
  - shell: "echo 'setup1' > setup1.txt"
  - shell: 'echo ''[{"id": 1}, {"id": 2}]'' > items.json'
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 2
  agent_template:
    commands:
      - shell: "echo 'processing item'"
reduce:
  commands:
    - shell: "echo 'reduce done'"
"#;
    let config = parse_mapreduce_workflow(yaml).unwrap();
    assert_eq!(config.name, "test-mapreduce");
    assert!(config.setup.is_some());
    assert_eq!(config.setup.as_ref().unwrap().commands.len(), 2);
}
#[tokio::test]
async fn test_empty_items_handling() {
    // A setup phase that produces an empty item list must still parse; the
    // map/reduce phases are simply expected to do nothing at execution time.
    let workflow_yaml = r#"
name: test-empty
mode: mapreduce
setup:
  - shell: "echo '[]' > empty.json"
map:
  input: empty.json
  json_path: "$[*]"
  max_parallel: 2
  agent_template:
    commands:
      - shell: "echo 'should not run'"
reduce:
  commands:
    - shell: "echo 'should not run either'"
"#;
    let parsed = parse_mapreduce_workflow(workflow_yaml).unwrap();
    assert!(parsed.setup.is_some());
}
#[tokio::test]
async fn test_reduce_phase_variable_substitution() {
    // The ${map.*} placeholders must survive parsing verbatim — variable
    // substitution happens at execution time, not at parse time.
    let yaml = r#"
name: test-variable-substitution
mode: mapreduce
map:
  input: test-items.json
  json_path: "$[*]"
  max_parallel: 2
  agent_template:
    commands:
      - shell: "echo 'Processing ${item.id}'"
reduce:
  commands:
    - shell: "echo 'Total: ${map.total}, Success: ${map.successful}, Failed: ${map.failed}'"
    - shell: |
        git commit -m "Processed ${map.successful} items
        Total items: ${map.total}
        Failed items: ${map.failed}"
"#;
    let config = parse_mapreduce_workflow(yaml).unwrap();
    assert!(config.reduce.is_some());
    let reduce = config.reduce.as_ref().unwrap();
    assert_eq!(reduce.commands.len(), 2);

    // Both reduce commands must be shell steps containing every aggregate
    // placeholder untouched.
    let placeholders = ["${map.total}", "${map.successful}", "${map.failed}"];
    for command in &reduce.commands {
        let shell_text = command
            .shell
            .as_ref()
            .expect("reduce command should be a shell step");
        for placeholder in &placeholders {
            assert!(shell_text.contains(placeholder));
        }
    }
}
#[tokio::test]
async fn test_mapreduce_variable_interpolation_integration() {
    // End-to-end check of the interpolation engine against a realistic
    // map-phase result set: braced ${map.*} variables must be substituted
    // in shell, commit-message, and claude command templates.
    use crate::cook::execution::interpolation::{InterpolationContext, InterpolationEngine};
    use serde_json::json;

    let map_results = json!([
        {
            "item_id": "file1.rs",
            "status": "Success",
            "output": "Refactored file1.rs successfully",
            "commits": ["abc123"]
        },
        {
            "item_id": "file2.rs",
            "status": "Success",
            "output": "Refactored file2.rs successfully",
            "commits": ["def456"]
        },
        {
            "item_id": "file3.rs",
            "status": "Failed",
            "output": null,
            "commits": []
        }
    ]);

    let mut context = InterpolationContext::new();
    context.set(
        "map",
        json!({
            "successful": 2,
            "failed": 1,
            "total": 3
        }),
    );
    // NOTE(review): this registers a key literally named "map.results"
    // (not a nested path) — confirm that is the intended context shape.
    context.set("map.results", map_results);

    // Non-strict engine: unresolved variables should not hard-fail.
    let mut engine = InterpolationEngine::new(false);

    // Single-line shell template.
    let shell_template = "echo 'Processed ${map.total} items: ${map.successful} successful, ${map.failed} failed'";
    let shell_result = engine.interpolate(shell_template, &context).unwrap();
    assert_eq!(
        shell_result,
        "echo 'Processed 3 items: 2 successful, 1 failed'"
    );

    // Multi-line commit message template.
    let commit_template = r#"git commit -m "Refactoring complete
Processed ${map.successful} of ${map.total} files successfully
${map.failed} files failed processing""#;
    let commit_result = engine.interpolate(commit_template, &context).unwrap();
    assert!(commit_result.contains("Processed 2 of 3 files successfully"));
    assert!(commit_result.contains("1 files failed processing"));

    // Claude command template.
    let claude_template = "claude: Summarize the refactoring results: ${map.successful} successful, ${map.failed} failed out of ${map.total} total files";
    let claude_result = engine.interpolate(claude_template, &context).unwrap();
    assert_eq!(
        claude_result,
        "claude: Summarize the refactoring results: 2 successful, 1 failed out of 3 total files"
    );

    // Fix: the original assertion here was `is_ok() || is_err()`, which is a
    // tautology and could never fail. Instead, verify that when a template
    // mixes unbraced `$map.total` with braced syntax, the braced variable is
    // still substituted whenever interpolation succeeds.
    let unbraced_template = "Total files: $map.total, Success rate: ${map.successful}";
    if let Ok(unbraced_result) = engine.interpolate(unbraced_template, &context) {
        assert!(unbraced_result.contains("Success rate: 2"));
    }
}
#[tokio::test]
async fn test_debtmap_workflow_parsing() {
    // Parses a realistic debtmap workflow and checks every phase-level
    // field: setup commands, map tuning knobs (filter/sort/max_items), and
    // the reduce command list.
    // NOTE(review): `capture_output: None` below parses as the YAML string
    // "None", not a null — confirm that is what the config type expects.
    let yaml = r#"
name: debtmap-parallel-elimination
mode: mapreduce
# Setup phase: Analyze the codebase and generate debt items
setup:
  - shell: "just coverage-lcov"
  - shell: "debtmap analyze . --lcov target/coverage/info.lcov --output debtmap.json --format json && git add debtmap.json && git commit -m 'Add debtmap.json'"
    commit_required: true
# Map phase: Process each debt item in parallel
map:
  input: debtmap.json
  json_path: "$.items[*]"
  agent_template:
    commands:
      - claude: "/fix-debt-item --file ${item.location.file}"
        capture_output: true
        timeout: 300
      - shell: "just test"
        on_failure:
          claude: "/prodigy-debug-test-failure --output '${shell.output}'"
          max_attempts: 2
          fail_workflow: false
  max_parallel: 5
  filter: "unified_score.final_score >= 5"
  sort_by: "unified_score.final_score DESC"
  max_items: 10
# Reduce phase: Aggregate results and finalize
reduce:
  commands:
    - shell: "just test"
      on_failure:
        claude: "/prodigy-debug-test-failure --output '${shell.output}'"
        max_attempts: 3
        fail_workflow: true
    - shell: "just fmt && just lint"
      capture_output: None
"#;
    let config = parse_mapreduce_workflow(yaml).unwrap();

    // Setup phase: two commands, the second requiring a commit.
    let setup = config.setup.as_ref().expect("setup phase should be present");
    assert_eq!(setup.commands.len(), 2);
    assert_eq!(
        setup.commands[0].shell,
        Some("just coverage-lcov".to_string())
    );
    assert!(setup.commands[1].commit_required);

    // Map phase: input source and tuning knobs.
    let map = &config.map;
    assert_eq!(map.input, "debtmap.json");
    assert_eq!(map.json_path, "$.items[*]");
    assert_eq!(map.max_parallel, "5");
    assert_eq!(map.max_items, Some(10));
    assert_eq!(map.filter.as_deref(), Some("unified_score.final_score >= 5"));
    assert_eq!(map.sort_by.as_deref(), Some("unified_score.final_score DESC"));

    // Reduce phase: two commands.
    let reduce = config.reduce.as_ref().expect("reduce phase should be present");
    assert_eq!(reduce.commands.len(), 2);
}
#[tokio::test]
async fn test_skip_reduce_on_no_success() {
    // A workflow whose only agent command always fails should still parse;
    // skipping the reduce phase is runtime behavior, not parse behavior.
    //
    // Fix: the items JSON previously used doubled braces (`{{`), a leftover
    // from a `format!` template; in this plain raw string they would be
    // emitted literally, making the generated items.json invalid JSON.
    let yaml = r#"
name: test-skip-reduce
mode: mapreduce
setup:
  - shell: 'echo ''[{"id": 1}]'' > items.json'
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 1
  agent_template:
    commands:
      - shell: "exit 1" # Fail on purpose
reduce:
  commands:
    - shell: "echo 'should be skipped'"
"#;
    let _config = parse_mapreduce_workflow(yaml).unwrap();
}
#[tokio::test]
async fn test_map_variable_interpolation() {
    // ${item.*} placeholders in both shell and claude agent commands must
    // survive parsing untouched.
    let yaml = r#"
name: test-interpolation
mode: mapreduce
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 1
  agent_template:
    commands:
      - shell: "echo 'Processing ${item.id} with score ${item.score}'"
      - claude: "/process --file ${item.file} --line ${item.line}"
"#;
    let parsed = parse_mapreduce_workflow(yaml).unwrap();
    let agent_commands = &parsed.map.agent_template.commands;

    let shell_step = agent_commands[0]
        .shell
        .as_ref()
        .expect("first command should be a shell step");
    assert!(shell_step.contains("${item.id}"));

    let claude_step = agent_commands[1]
        .claude
        .as_ref()
        .expect("second command should be a claude step");
    assert!(claude_step.contains("${item.file}"));
}
#[tokio::test]
async fn test_command_timeout() {
    // A per-command `timeout` value must round-trip through the parser.
    let workflow_yaml = r#"
name: test-timeout
mode: mapreduce
map:
  input: test.json
  agent_template:
    commands:
      - shell: "echo test"
        timeout: 300
"#;
    let parsed = parse_mapreduce_workflow(workflow_yaml).unwrap();
    let first_command = &parsed.map.agent_template.commands[0];
    assert_eq!(first_command.timeout, Some(300));
}
#[tokio::test]
async fn test_extended_workflow_conversion() {
    // A parsed MapReduce config must convert into an ExtendedWorkflowConfig
    // with setup steps carried over and both map and reduce phases present.
    let yaml = r#"
name: test-conversion
mode: mapreduce
setup:
  - shell: "echo setup"
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 3
  agent_template:
    commands:
      - shell: "echo map"
reduce:
  commands:
    - shell: "echo reduce"
"#;
    let mapreduce_config = parse_mapreduce_workflow(yaml).unwrap();

    // Setup commands become the workflow's top-level steps.
    let setup_steps = match mapreduce_config.setup.as_ref() {
        Some(setup) => setup.commands.clone(),
        None => Vec::new(),
    };

    let extended_workflow = ExtendedWorkflowConfig {
        name: mapreduce_config.name.clone(),
        mode: WorkflowMode::MapReduce,
        steps: setup_steps,
        setup_phase: mapreduce_config.to_setup_phase().unwrap(),
        map_phase: Some(mapreduce_config.to_map_phase().unwrap()),
        reduce_phase: mapreduce_config.to_reduce_phase(),
        max_iterations: 1,
        iterate: false,
        retry_defaults: None,
        environment: None,
    };

    assert_eq!(extended_workflow.name, "test-conversion");
    assert_eq!(extended_workflow.mode, WorkflowMode::MapReduce);
    assert_eq!(extended_workflow.steps.len(), 1);
    assert!(extended_workflow.map_phase.is_some());
    assert!(extended_workflow.reduce_phase.is_some());
}
#[tokio::test]
async fn test_setup_runs_in_main_worktree() {
    // A setup command that writes to an absolute path (built at test time
    // via format!) must parse; the doubled braces in the format string are
    // format! escapes producing literal single braces in the JSON.
    let workdir = TempDir::new().unwrap();
    let marker_path = workdir.path().join("setup_marker.txt");
    let workflow_yaml = format!(
        r#"
name: test-setup-location
mode: mapreduce
setup:
  - shell: "echo 'setup complete' > {}"
  - shell: 'echo ''[{{"id": 1}}]'' > items.json'
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 1
  agent_template:
    commands:
      - shell: "echo 'map phase'"
"#,
        marker_path.display()
    );
    let _parsed = parse_mapreduce_workflow(&workflow_yaml).unwrap();
}
#[tokio::test]
async fn test_missing_input_file_error() {
    // Referencing an input file that no setup phase creates is a runtime
    // concern — parsing alone must still succeed.
    let workflow_yaml = r#"
name: test-missing-input
mode: mapreduce
# No setup phase to create the file
map:
  input: nonexistent.json
  json_path: "$[*]"
  max_parallel: 1
  agent_template:
    commands:
      - shell: "echo test"
"#;
    let _parsed = parse_mapreduce_workflow(workflow_yaml).unwrap();
}
#[tokio::test]
async fn test_map_on_failure_handlers() {
    // An agent command with an on_failure handler (claude recovery command,
    // bounded retries, non-fatal) must parse.
    //
    // Fix: the items JSON previously used doubled braces (`{{`), a leftover
    // from a `format!` template; in this plain raw string they would be
    // emitted literally, making the generated items.json invalid JSON.
    let yaml = r#"
name: test-on-failure
mode: mapreduce
setup:
  - shell: 'echo ''[{"id": 1}]'' > items.json'
map:
  input: items.json
  json_path: "$[*]"
  max_parallel: 1
  agent_template:
    commands:
      - shell: "exit 1" # Fail intentionally
        on_failure:
          claude: "/fix-error --output '${shell.output}'"
          max_attempts: 2
          fail_workflow: false
"#;
    let _config = parse_mapreduce_workflow(yaml).unwrap();
}
#[tokio::test]
async fn test_reduce_phase_on_failure_with_retries() {
    // A reduce command's on_failure block must expose its retry policy:
    // 3 attempts, retrying enabled, workflow not failed on exhaustion.
    let yaml = r#"
name: test-reduce-retry
mode: mapreduce
setup:
  - shell: 'echo ''[{"id": 1}, {"id": 2}]'' > items.json'
map:
  input: items.json
  json_path: "$[*]"
  agent_template:
    commands:
      - shell: "echo 'Processing item ${item.id}'"
reduce:
  commands:
    - shell: "exit 1" # Fail intentionally to trigger retry
      on_failure:
        claude: "/fix-error"
        max_attempts: 3 # Should retry up to 3 times
        fail_workflow: false
"#;
    let config = parse_mapreduce_workflow(yaml).unwrap();
    assert!(config.reduce.is_some());

    let reduce = config.reduce.unwrap();
    assert_eq!(reduce.commands.len(), 1);

    let handler = reduce.commands[0]
        .on_failure
        .as_ref()
        .expect("reduce command should carry an on_failure handler");
    assert_eq!(handler.max_retries(), 3);
    assert!(handler.should_retry());
    assert!(!handler.should_fail_workflow());
}
#[tokio::test]
async fn test_reduce_phase_implicit_retry() {
    // Deserializing a bare on_failure block: a positive max_attempts
    // implies retrying, and the handler command must be preserved.
    use crate::cook::workflow::OnFailureConfig;
    let handler_yaml = r#"
claude: "/debug-test"
max_attempts: 2
fail_workflow: true
"#;
    let handler: OnFailureConfig = serde_yaml::from_str(handler_yaml).unwrap();
    assert_eq!(handler.max_retries(), 2);
    assert!(handler.should_retry());
    assert!(handler.should_fail_workflow());
    assert!(handler.handler().is_some());
}
#[tokio::test]
async fn test_reduce_phase_retry_exhaustion() {
    // When fail_workflow is true, the handler reports that exhausting its
    // retries should fail the whole workflow.
    let yaml = r#"
name: test-reduce-exhaustion
mode: mapreduce
map:
  input: items.json
  json_path: "$[*]"
  agent_template:
    commands:
      - shell: "echo 'ok'"
reduce:
  commands:
    - shell: "false" # Always fails
      on_failure:
        shell: "echo 'Attempting recovery'"
        max_attempts: 2
        fail_workflow: true # Should fail workflow after retries exhausted
"#;
    let parsed = parse_mapreduce_workflow(yaml).unwrap();
    let reduce_phase = parsed.reduce.unwrap();
    let handler = reduce_phase.commands[0].on_failure.as_ref().unwrap();

    assert!(handler.should_retry());
    assert!(handler.should_fail_workflow());
    assert_eq!(handler.max_retries(), 2);
}
#[tokio::test]
async fn test_reduce_phase_no_retry() {
    // max_attempts of zero means no retries and, with fail_workflow false,
    // a failure that neither retries nor aborts the workflow.
    use crate::cook::workflow::OnFailureConfig;
    let handler_yaml = r#"
claude: "/debug-test"
max_attempts: 0
fail_workflow: false
"#;
    let handler: OnFailureConfig = serde_yaml::from_str(handler_yaml).unwrap();
    assert_eq!(handler.max_retries(), 0);
    assert!(!handler.should_retry());
    assert!(!handler.should_fail_workflow());
}
}