mod common;
use common::{
ServerProcess, create_diamond_workflow, run_cli_command, run_jobs_cli_command, start_server,
};
use rstest::rstest;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use torc::client::apis;
use torc::models;
/// End-to-end run of the diamond workflow (preprocess -> work1/work2 -> postprocess).
///
/// Parameterized over the scheduling mode: `Some(2)` runs the CLI with
/// `--max-parallel-jobs 2`; `None` runs with explicit `--num-cpus`/`--memory-gb`
/// resource limits instead. Afterwards, verifies that deleting a workflow
/// cascade-deletes its jobs while a second, unrelated workflow is left intact.
#[rstest]
#[case(None)]
#[case(Some(2))]
fn test_diamond_workflow(start_server: &ServerProcess, #[case] max_parallel_jobs: Option<i64>) {
    // Sanity check that the fixture spawned a live server process.
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let work_dir = temp_dir.path().to_path_buf();
    // Create the (uninitialized) diamond workflow and recover its workflow id
    // through the preprocess job.
    let jobs = create_diamond_workflow(config, false, &work_dir);
    let preprocess = jobs.get("preprocess").expect("preprocess job not found");
    let workflow_id = preprocess.workflow_id;
    // Seed the initial input file (f1.json) consumed by the workflow.
    create_input_file(&work_dir);
    let mut cli_args = vec![
        workflow_id.to_string(),
        "--output-dir".to_string(),
        work_dir.to_str().unwrap().to_string(),
        "--poll-interval".to_string(),
        "0.1".to_string(),
    ];
    if let Some(max_jobs) = max_parallel_jobs {
        cli_args.push("--max-parallel-jobs".to_string());
        cli_args.push(max_jobs.to_string());
    } else {
        // Resource-based scheduling: cap by CPU count and memory instead.
        cli_args.push("--num-cpus".to_string());
        cli_args.push("4".to_string());
        cli_args.push("--memory-gb".to_string());
        cli_args.push("8.0".to_string());
    }
    let cli_args_refs: Vec<&str> = cli_args.iter().map(|s| s.as_str()).collect();
    run_jobs_cli_command(&cli_args_refs, start_server).expect("Failed to run jobs");
    verify_diamond_workflow_completion(config, workflow_id, &work_dir);
    // Build a second, initialized diamond workflow in its own directory.
    let temp_dir2 = tempfile::tempdir().expect("Failed to create temp dir");
    let work_dir2 = temp_dir2.path();
    let jobs2 = create_diamond_workflow(config, true, work_dir2);
    check_diamond_workflow_init_job_statuses(config, &jobs2);
    // Deleting the first workflow must cascade-delete its jobs ...
    apis::workflows_api::delete_workflow(config, workflow_id).expect("Failed to delete workflow");
    for (name, job) in &jobs {
        let result = apis::jobs_api::get_job(config, job.id.unwrap());
        assert!(
            result.is_err(),
            "Expected job {} to be deleted with workflow",
            name
        );
    }
    // ... while leaving the second workflow's jobs untouched.
    check_diamond_workflow_init_job_statuses(config, &jobs2);
}
/// Seed the workflow's initial input file (`f1.json`) in `work_dir`.
fn create_input_file(work_dir: &Path) {
    let target = work_dir.join("f1.json");
    fs::write(&target, r#"{"data": "initial input", "value": 42}"#)
        .expect("Failed to write f1.json");
}
/// Assert that every job of `workflow_id` reached Completed, every result has
/// a zero return code, and all diamond output files exist in `work_dir`.
fn verify_diamond_workflow_completion(
    config: &torc::client::Configuration,
    workflow_id: i64,
    work_dir: &Path,
) {
    // Every job must have reached the Completed state.
    let listed_jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs");
    for job in listed_jobs.items {
        assert_eq!(
            job.status.unwrap(),
            models::JobStatus::Completed,
            "Job {} should be completed. actual status: {:?}",
            job.name,
            job.status
        );
    }
    // Every recorded result must carry a zero return code.
    let listed_results = apis::results_api::list_results(
        config, workflow_id, None, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list results");
    for result in listed_results.items {
        assert_eq!(
            result.return_code, 0,
            "Job ID {} should have return code 0, but got {}",
            result.job_id, result.return_code
        );
    }
    // Each stage of the diamond writes one output file; all must exist.
    for file_name in ["f2.json", "f3.json", "f4.json", "f5.json", "f6.json"] {
        assert!(
            work_dir.join(file_name).exists(),
            "{} should exist",
            file_name
        );
    }
    let f6_content = fs::read_to_string(work_dir.join("f6.json")).expect("Failed to read f6.json");
    println!("Final output (f6.json): {}", f6_content);
}
/// Verify job creation and dependency wiring: a dependent job can be created
/// with `depends_on_job_ids`, and a freshly created job starts Uninitialized.
///
/// NOTE(review): the original test mutated `job2.status` and `bystander.status`
/// on the *local* model structs after the assertions; those writes were never
/// sent to the server and never read again, so they were dead stores. They have
/// been removed and replaced with assertions that the jobs were persisted.
#[rstest]
fn test_uninitialize_blocked_jobs(start_server: &ServerProcess) {
    // Sanity check that the fixture spawned a live server process.
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let name = "test_workflow".to_string();
    let user = "test_user".to_string();
    let workflow = models::WorkflowModel::new(name.clone(), user.clone());
    let created_workflow =
        apis::workflows_api::create_workflow(config, workflow).expect("Failed to create workflow");
    let workflow_id = created_workflow.id.unwrap();
    // job1 has no dependencies.
    let job1 = apis::jobs_api::create_job(
        config,
        models::JobModel::new(
            workflow_id as i64,
            "job1".to_string(),
            "command".to_string(),
        ),
    )
    .expect("Failed to create job1");
    // job2 depends on job1.
    let mut job2_pre = models::JobModel::new(
        workflow_id as i64,
        "job2".to_string(),
        "command".to_string(),
    );
    job2_pre.depends_on_job_ids = Some(vec![job1.id.unwrap()]);
    let job2 = apis::jobs_api::create_job(config, job2_pre).expect("Failed to create job2");
    // bystander is independent of both jobs.
    let bystander = apis::jobs_api::create_job(
        config,
        models::JobModel::new(
            workflow_id as i64,
            "bystander".to_string(),
            "command".to_string(),
        ),
    )
    .expect("Failed to create bystander");
    // A job created in an uninitialized workflow starts Uninitialized.
    assert_eq!(job1.status, Some(models::JobStatus::Uninitialized));
    // Both other jobs were persisted and assigned server ids.
    assert!(job2.id.is_some(), "job2 should have been assigned an id");
    assert!(bystander.id.is_some(), "bystander should have been assigned an id");
}
/// Delete every job of a diamond workflow one by one and confirm each job is
/// no longer retrievable afterwards.
#[rstest]
fn test_remove_job(start_server: &ServerProcess) {
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let jobs = create_diamond_workflow(config, true, temp_dir.path());
    for (name, job) in &jobs {
        let removed =
            apis::jobs_api::delete_job(config, job.id.unwrap()).expect("Failed to delete job");
        // Fetching a deleted job must fail.
        let lookup = apis::jobs_api::get_job(config, removed.id.unwrap());
        assert!(lookup.is_err(), "Expected job {} to be deleted", name);
    }
}
/// Exercise the events API: create two events, list them, delete both, and
/// confirm the workflow's event list ends up empty.
#[rstest]
fn test_events(start_server: &ServerProcess) {
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let workflow = models::WorkflowModel::new(
        "test_event_workflow".to_string(),
        "test_user".to_string(),
    );
    let created =
        apis::workflows_api::create_workflow(config, workflow).expect("Failed to create workflow");
    let workflow_id = created.id.unwrap();
    // Small helpers so the create/list call shapes appear only once.
    let create = |payload: serde_json::Value| {
        apis::events_api::create_event(
            config,
            models::EventModel::new(workflow_id as i64, payload),
        )
        .expect("Failed to create event")
    };
    let list = || {
        apis::events_api::list_events(
            config, workflow_id as i64, None, None, None, None, None, None,
        )
        .expect("Failed to list events")
    };
    let event1 = create(serde_json::json!({"key1": 1, "key2": 2}));
    let event2 = create(serde_json::json!({"key3": 3, "key4": 4}));
    let listed = list();
    assert_eq!(listed.items.len(), 2);
    // The second event's payload comes back intact.
    assert_eq!(
        listed.items[1].data,
        serde_json::json!({"key3": 3, "key4": 4})
    );
    apis::events_api::delete_event(config, event1.id.unwrap()).expect("Failed to delete event");
    apis::events_api::delete_event(config, event2.id.unwrap()).expect("Failed to delete event");
    assert!(list().items.is_empty());
}
/// Check the post-initialization statuses of a freshly created diamond
/// workflow: the root job is Ready and every downstream job is Blocked.
fn check_diamond_workflow_init_job_statuses(
    config: &torc::client::Configuration,
    jobs: &HashMap<String, models::JobModel>,
) {
    let expected = [
        ("preprocess", models::JobStatus::Ready),
        ("work1", models::JobStatus::Blocked),
        ("work2", models::JobStatus::Blocked),
        ("postprocess", models::JobStatus::Blocked),
    ];
    for (name, status) in expected {
        let job = jobs
            .get(name)
            .unwrap_or_else(|| panic!("{} job not found", name));
        // Re-fetch from the server so we see the persisted status, not the
        // local model's copy.
        let fetched = apis::jobs_api::get_job(config, job.id.unwrap())
            .unwrap_or_else(|_| panic!("Failed to get {}", name));
        assert_eq!(fetched.status.unwrap(), status);
    }
}
/// Run a workflow of 30 jobs generated from a `parameters` template
/// (`job_001` .. `job_030`) end to end and verify jobs and results.
///
/// Parameterized over the scheduling mode: `Some(10)` caps concurrency with
/// `--max-parallel-jobs`; `None` uses `--num-cpus`/`--memory-gb` limits.
#[rstest]
#[case(None)]
#[case(Some(10))]
fn test_many_jobs_parameterized(
    start_server: &ServerProcess,
    #[case] max_parallel_jobs: Option<i64>,
) {
    // Sanity check that the fixture spawned a live server process.
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let work_dir = temp_dir.path().to_path_buf();
    // Unique workflow name per case so the later name-based lookup is unambiguous.
    let workflow_name = format!(
        "many_jobs_test_{}",
        max_parallel_jobs
            .map(|n| n.to_string())
            .unwrap_or("resources".to_string())
    );
    // {{i:03d}} / {{i}} are Rust-format-escaped braces: the written YAML
    // contains {i:03d} / {i}, and `i: "1:30"` expands them into 30 jobs
    // (matching the count checked below).
    let yaml_content = format!(
        r#"name: {}
user: test_user
description: Test workflow with 30 parameterized jobs
jobs:
- name: job_{{i:03d}}
command: echo {{i}}
resource_requirements: minimal
parameters:
i: "1:30"
resource_requirements:
- name: minimal
num_cpus: 1
num_gpus: 0
num_nodes: 1
memory: 1m
runtime: P0DT1M
"#,
        workflow_name
    );
    let yaml_path = work_dir.join("hundred_jobs_test.yaml");
    fs::write(&yaml_path, yaml_content).expect("Failed to write YAML file");
    let mut cli_args = vec![yaml_path.to_str().unwrap(), "--poll-interval", "0.1"];
    // Declared outside the `if` so the &str pushed into cli_args lives long enough.
    let max_jobs_str;
    if let Some(max_jobs) = max_parallel_jobs {
        max_jobs_str = max_jobs.to_string();
        cli_args.push("--max-parallel-jobs");
        cli_args.push(&max_jobs_str);
    } else {
        // Resource-based scheduling: cap by CPU count and memory instead.
        cli_args.push("--num-cpus");
        cli_args.push("12");
        cli_args.push("--memory-gb");
        cli_args.push("64.0");
    }
    run_jobs_cli_command(&cli_args, start_server).expect("Failed to run jobs");
    // The CLI created the workflow from YAML; recover its id by name.
    let workflows = apis::workflows_api::list_workflows(
        config,
        None,
        None,
        None,
        None,
        Some(&workflow_name),
        None,
        None,
        None,
    )
    .expect("Failed to list workflows");
    let workflow = workflows.items.first().expect("Workflow not found");
    let workflow_id = workflow.id.unwrap();
    verify_many_jobs_completion(config, workflow_id, 30);
    apis::workflows_api::delete_workflow(config, workflow_id).expect("Failed to delete workflow");
}
/// Assert that `workflow_id` holds exactly `num_jobs` jobs, all Completed,
/// and exactly `num_jobs` results, all with a zero return code.
fn verify_many_jobs_completion(
    config: &torc::client::Configuration,
    workflow_id: i64,
    num_jobs: usize,
) {
    let listed_jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs");
    let job_items = listed_jobs.items;
    assert_eq!(
        job_items.len(),
        num_jobs,
        "Expected {} jobs, but got {}",
        num_jobs,
        job_items.len()
    );
    // Every job must have reached the Completed state.
    for job in &job_items {
        assert_eq!(
            job.status.unwrap(),
            models::JobStatus::Completed,
            "Job {} should be completed. actual status: {:?}",
            job.name,
            job.status
        );
    }
    let listed_results = apis::results_api::list_results(
        config, workflow_id, None, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list results");
    let result_items = listed_results.items;
    assert_eq!(
        result_items.len(),
        num_jobs,
        "Expected {} results, but got {}",
        num_jobs,
        result_items.len()
    );
    // Every recorded result must carry a zero return code.
    for result in &result_items {
        assert_eq!(
            result.return_code, 0,
            "Job ID {} should have return code 0, but got {}",
            result.job_id, result.return_code
        );
    }
}
/// Verify that a workflow with one deliberately failing job can be reset with
/// `workflows reset-status --failed-only --reinitialize` and re-run to
/// completion once the cause of the failure (a flag file) is removed.
///
/// Flow: first run (work_fail exits 1 because the flag file exists; finalize
/// is canceled) -> reset failed jobs -> delete the flag file -> second run ->
/// every job completes.
#[rstest]
fn test_workflow_reinitialization_after_failure(start_server: &ServerProcess) {
    // Sanity check that the fixture spawned a live server process.
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let work_dir = temp_dir.path().to_path_buf();
    // While this flag file exists, the work_fail job's shell command exits 1.
    let fail_flag_path = work_dir.join("should_fail.flag");
    fs::write(&fail_flag_path, "fail").expect("Failed to write fail flag file");
    // The {} placeholder below is filled with the flag file's path.
    let yaml_content = format!(
        r#"name: restart_test_workflow
user: test_user
description: Test workflow restart after failure
jobs:
# Stage 1: Setup job (no dependencies)
- name: setup
command: echo "Setup complete"
resource_requirements: minimal
# Stage 2: Three parallel jobs that depend on setup
- name: work_a
command: echo "Work A complete"
depends_on:
- setup
resource_requirements: minimal
- name: work_b
command: echo "Work B complete"
depends_on:
- setup
resource_requirements: minimal
- name: work_fail
command: 'if [ -f "{}" ]; then echo "Intentional failure"; exit 1; else echo "Work fail job succeeds"; exit 0; fi'
depends_on:
- setup
resource_requirements: minimal
# Stage 3: Finalize job that depends on all Stage 2 jobs
# Note: cancel_on_blocking_job_failure defaults to true, so finalize will be
# automatically canceled if any of its dependencies fail
- name: finalize
command: echo "Finalize complete"
depends_on:
- work_a
- work_b
- work_fail
resource_requirements: minimal
resource_requirements:
- name: minimal
num_cpus: 1
num_gpus: 0
num_nodes: 1
memory: 1m
runtime: P0DT1M
"#,
        fail_flag_path.display()
    );
    let yaml_path = work_dir.join("restart_test.yaml");
    fs::write(&yaml_path, &yaml_content).expect("Failed to write YAML file");
    // First run: the CLI command itself succeeds even though work_fail fails.
    run_jobs_cli_command(
        &[
            yaml_path.to_str().unwrap(),
            "--poll-interval",
            "0.1",
            "--max-parallel-jobs",
            "4",
        ],
        start_server,
    )
    .expect("First run command should succeed (workflow completes, checking job statuses)");
    // The CLI created the workflow from YAML; recover its id by name.
    let workflows = apis::workflows_api::list_workflows(
        config,
        None,
        None,
        None,
        None,
        Some("restart_test_workflow"),
        None,
        None,
        None,
    )
    .expect("Failed to list workflows");
    let workflow = workflows.items.first().expect("Workflow not found");
    let workflow_id = workflow.id.unwrap();
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs");
    let job_items = jobs.items;
    let job_statuses: HashMap<String, models::JobStatus> = job_items
        .iter()
        .map(|j| (j.name.clone(), j.status.unwrap()))
        .collect();
    // After the first run: setup/work_a/work_b completed, work_fail failed,
    // and finalize was canceled because a blocking dependency failed.
    assert_eq!(
        job_statuses.get("setup").unwrap(),
        &models::JobStatus::Completed,
        "setup should be completed"
    );
    assert_eq!(
        job_statuses.get("work_a").unwrap(),
        &models::JobStatus::Completed,
        "work_a should be completed"
    );
    assert_eq!(
        job_statuses.get("work_b").unwrap(),
        &models::JobStatus::Completed,
        "work_b should be completed"
    );
    assert_eq!(
        job_statuses.get("work_fail").unwrap(),
        &models::JobStatus::Failed,
        "work_fail should be failed"
    );
    assert_eq!(
        job_statuses.get("finalize").unwrap(),
        &models::JobStatus::Canceled,
        "finalize should be canceled due to failed dependency"
    );
    let results = apis::results_api::list_results(
        config, workflow_id, None, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list results");
    let result_items = results.items;
    // Four jobs actually ran (finalize was canceled before running).
    assert_eq!(
        result_items.len(),
        4,
        "Expected 4 results (finalize was canceled)"
    );
    // Find work_fail's result via its job id and confirm the non-zero exit.
    let work_fail_result = result_items
        .iter()
        .find(|r| {
            let job = job_items
                .iter()
                .find(|j| j.id.unwrap() == r.job_id)
                .unwrap();
            job.name == "work_fail"
        })
        .expect("work_fail result not found");
    assert_eq!(
        work_fail_result.return_code, 1,
        "work_fail should have return code 1"
    );
    // Reset only the failed job and reinitialize downstream job states.
    run_cli_command(
        &[
            "workflows",
            "reset-status",
            &workflow_id.to_string(),
            "--failed-only",
            "--reinitialize",
            "--no-prompts",
        ],
        start_server,
        None,
    )
    .expect("Failed to reset workflow status");
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs after reset");
    let job_items = jobs.items;
    let job_statuses: HashMap<String, models::JobStatus> = job_items
        .iter()
        .map(|j| (j.name.clone(), j.status.unwrap()))
        .collect();
    // After the reset: completed jobs keep their status, work_fail is Ready
    // again, and finalize is Blocked behind it.
    assert_eq!(
        job_statuses.get("setup").unwrap(),
        &models::JobStatus::Completed,
        "setup should still be completed after reset"
    );
    assert_eq!(
        job_statuses.get("work_a").unwrap(),
        &models::JobStatus::Completed,
        "work_a should still be completed after reset"
    );
    assert_eq!(
        job_statuses.get("work_b").unwrap(),
        &models::JobStatus::Completed,
        "work_b should still be completed after reset"
    );
    assert_eq!(
        job_statuses.get("work_fail").unwrap(),
        &models::JobStatus::Ready,
        "work_fail should be ready after reset"
    );
    assert_eq!(
        job_statuses.get("finalize").unwrap(),
        &models::JobStatus::Blocked,
        "finalize should be blocked after reset"
    );
    // Remove the failure trigger so work_fail succeeds on the next run.
    fs::remove_file(&fail_flag_path).expect("Failed to remove flag file");
    run_jobs_cli_command(
        &[
            &workflow_id.to_string(),
            "--poll-interval",
            "0.1",
            "--max-parallel-jobs",
            "4",
        ],
        start_server,
    )
    .expect("Second run should succeed");
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs after second run");
    let job_items = jobs.items;
    for job in &job_items {
        assert_eq!(
            job.status.unwrap(),
            models::JobStatus::Completed,
            "Job {} should be completed after second run, got {:?}",
            job.name,
            job.status
        );
    }
    // NOTE(review): Some(true) here appears to request results across all
    // runs/attempts — confirm the parameter's meaning against list_results.
    let results = apis::results_api::list_results(
        config, workflow_id, None, None, None, None, None, None, None, None, Some(true), None,
    )
    .expect("Failed to list all results");
    let result_items = results.items;
    let work_fail_job = job_items.iter().find(|j| j.name == "work_fail").unwrap();
    // The latest attempt (max run_id, then attempt_id) of work_fail must now
    // have succeeded.
    let work_fail_latest = result_items
        .iter()
        .filter(|r| r.job_id == work_fail_job.id.unwrap())
        .max_by_key(|r| (r.run_id, r.attempt_id.unwrap_or(1)))
        .expect("work_fail should have results");
    assert_eq!(
        work_fail_latest.return_code, 0,
        "work_fail latest run should have return code 0"
    );
    // finalize ran this time and must have a successful result.
    let finalize_job = job_items.iter().find(|j| j.name == "finalize").unwrap();
    let finalize_result = result_items
        .iter()
        .find(|r| r.job_id == finalize_job.id.unwrap())
        .expect("finalize should have a result after second run");
    assert_eq!(
        finalize_result.return_code, 0,
        "finalize should have return code 0"
    );
    apis::workflows_api::delete_workflow(config, workflow_id)
        .expect("Failed to delete restart_test workflow");
}
/// Verify that `workflows reinit` makes a failed job runnable again after its
/// declared input file is fixed, and that a subsequent run completes the
/// workflow.
///
/// Flow: first run (work_fail exits 1 because config.json has valid=false;
/// finalize is canceled) -> rewrite config.json with valid=true ->
/// `workflows reinit` -> second run -> every job completes.
#[rstest]
fn test_workflow_reinitialize_after_fixing_input(start_server: &ServerProcess) {
    // Sanity check that the fixture spawned a live server process.
    assert!(start_server.child.id() > 0);
    let config = &start_server.config;
    let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
    let work_dir = temp_dir.path().to_path_buf();
    // Initial, invalid input: work_fail greps for "valid": true and exits 1.
    let input_file_path = work_dir.join("config.json");
    let bad_input = r#"{"valid": false, "message": "This input should cause failure"}"#;
    fs::write(&input_file_path, bad_input).expect("Failed to write config.json");
    // {config_path} below is filled with the config.json path; the file is
    // also declared as a workflow `files:` entry and as work_fail's input.
    let yaml_content = format!(
        r#"name: reinitialize_test_workflow
user: test_user
description: Test workflow reinitialize after fixing input file
files:
- name: config_file
path: {config_path}
jobs:
# Stage 1: Setup job (no dependencies)
- name: setup
command: echo "Setup complete"
resource_requirements: minimal
# Stage 2: Three parallel jobs that depend on setup
- name: work_a
command: echo "Work A complete"
depends_on:
- setup
resource_requirements: minimal
- name: work_b
command: echo "Work B complete"
depends_on:
- setup
resource_requirements: minimal
# This job reads the config file and fails if valid=false
- name: work_fail
command: 'if grep -q "\"valid\": true" {config_path}; then echo "Input valid, job succeeds"; exit 0; else echo "Input invalid, job fails"; exit 1; fi'
depends_on:
- setup
input_files:
- config_file
resource_requirements: minimal
# Stage 3: Finalize job that depends on all Stage 2 jobs
- name: finalize
command: echo "Finalize complete"
depends_on:
- work_a
- work_b
- work_fail
resource_requirements: minimal
resource_requirements:
- name: minimal
num_cpus: 1
num_gpus: 0
num_nodes: 1
memory: 1m
runtime: P0DT1M
"#,
        config_path = input_file_path.display()
    );
    let yaml_path = work_dir.join("reinitialize_test.yaml");
    fs::write(&yaml_path, &yaml_content).expect("Failed to write YAML file");
    // First run: the CLI command itself succeeds even though work_fail fails.
    run_jobs_cli_command(
        &[
            yaml_path.to_str().unwrap(),
            "--poll-interval",
            "0.1",
            "--max-parallel-jobs",
            "4",
        ],
        start_server,
    )
    .expect("First run command should succeed (workflow completes, checking job statuses)");
    // The CLI created the workflow from YAML; recover its id by name.
    let workflows = apis::workflows_api::list_workflows(
        config,
        None,
        None,
        None,
        None,
        Some("reinitialize_test_workflow"),
        None,
        None,
        None,
    )
    .expect("Failed to list workflows");
    let workflow = workflows.items.first().expect("Workflow not found");
    let workflow_id = workflow.id.unwrap();
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs");
    let job_items = jobs.items;
    let job_statuses: HashMap<String, models::JobStatus> = job_items
        .iter()
        .map(|j| (j.name.clone(), j.status.unwrap()))
        .collect();
    // After the first run: setup/work_a/work_b completed, work_fail failed on
    // the bad input, and finalize was canceled behind it.
    assert_eq!(
        job_statuses.get("setup").unwrap(),
        &models::JobStatus::Completed,
        "setup should be completed"
    );
    assert_eq!(
        job_statuses.get("work_a").unwrap(),
        &models::JobStatus::Completed,
        "work_a should be completed"
    );
    assert_eq!(
        job_statuses.get("work_b").unwrap(),
        &models::JobStatus::Completed,
        "work_b should be completed"
    );
    assert_eq!(
        job_statuses.get("work_fail").unwrap(),
        &models::JobStatus::Failed,
        "work_fail should be failed"
    );
    assert_eq!(
        job_statuses.get("finalize").unwrap(),
        &models::JobStatus::Canceled,
        "finalize should be canceled due to failed dependency"
    );
    // NOTE(review): short sleep before rewriting the file — presumably so the
    // rewritten config.json gets a distinguishable timestamp for reinit's
    // change detection; confirm against the reinit implementation.
    std::thread::sleep(std::time::Duration::from_millis(100));
    let good_input = r#"{"valid": true, "message": "This input should succeed"}"#;
    fs::write(&input_file_path, good_input).expect("Failed to write fixed config.json");
    // Reinitialize the workflow so jobs whose inputs changed become runnable.
    run_cli_command(
        &["workflows", "reinit", &workflow_id.to_string()],
        start_server,
        None,
    )
    .expect("Failed to reinitialize workflow");
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs after reinitialize");
    let job_items = jobs.items;
    let job_statuses: HashMap<String, models::JobStatus> = job_items
        .iter()
        .map(|j| (j.name.clone(), j.status.unwrap()))
        .collect();
    // After reinit: completed jobs keep their status, work_fail is Ready
    // again, and finalize is Blocked behind it.
    assert_eq!(
        job_statuses.get("setup").unwrap(),
        &models::JobStatus::Completed,
        "setup should still be completed after reinitialize"
    );
    assert_eq!(
        job_statuses.get("work_a").unwrap(),
        &models::JobStatus::Completed,
        "work_a should still be completed after reinitialize"
    );
    assert_eq!(
        job_statuses.get("work_b").unwrap(),
        &models::JobStatus::Completed,
        "work_b should still be completed after reinitialize"
    );
    assert_eq!(
        job_statuses.get("work_fail").unwrap(),
        &models::JobStatus::Ready,
        "work_fail should be ready after reinitialize (input file changed)"
    );
    assert_eq!(
        job_statuses.get("finalize").unwrap(),
        &models::JobStatus::Blocked,
        "finalize should be blocked after reinitialize"
    );
    // Second run: with valid input, everything should complete.
    run_jobs_cli_command(
        &[
            &workflow_id.to_string(),
            "--poll-interval",
            "0.1",
            "--max-parallel-jobs",
            "4",
        ],
        start_server,
    )
    .expect("Second run should succeed");
    let jobs = apis::jobs_api::list_jobs(
        config, workflow_id, None, None, None, None, None, None, None, None, None,
    )
    .expect("Failed to list jobs after second run");
    let job_items = jobs.items;
    for job in &job_items {
        assert_eq!(
            job.status.unwrap(),
            models::JobStatus::Completed,
            "Job {} should be completed after second run, got {:?}",
            job.name,
            job.status
        );
    }
    // NOTE(review): Some(true) here appears to request results across all
    // runs/attempts — confirm the parameter's meaning against list_results.
    let results = apis::results_api::list_results(
        config, workflow_id, None, None, None, None, None, None, None, None, Some(true), None,
    )
    .expect("Failed to list all results");
    let result_items = results.items;
    let work_fail_job = job_items.iter().find(|j| j.name == "work_fail").unwrap();
    // The latest attempt (max run_id, then attempt_id) of work_fail must now
    // have succeeded.
    let work_fail_latest = result_items
        .iter()
        .filter(|r| r.job_id == work_fail_job.id.unwrap())
        .max_by_key(|r| (r.run_id, r.attempt_id.unwrap_or(1)))
        .expect("work_fail should have results");
    assert_eq!(
        work_fail_latest.return_code, 0,
        "work_fail latest run should have return code 0"
    );
    // finalize ran this time and must have a successful result.
    let finalize_job = job_items.iter().find(|j| j.name == "finalize").unwrap();
    let finalize_result = result_items
        .iter()
        .find(|r| r.job_id == finalize_job.id.unwrap())
        .expect("finalize should have a result after second run");
    assert_eq!(
        finalize_result.return_code, 0,
        "finalize should have return code 0"
    );
    apis::workflows_api::delete_workflow(config, workflow_id)
        .expect("Failed to delete reinitialize_test workflow");
}