// battlecommand_forge/swebench_eval.rs
//! SWE-bench evaluation and reporting.
//!
//! Generates markdown reports from run results, aggregates per-repo stats,
//! and compares against published baselines.

use crate::swebench::InstanceResult;

use anyhow::Result;
use std::collections::BTreeMap;
7
8pub fn generate_report(output_dir: &str) -> Result<()> {
9    let results_path = format!("{}/swebench_results.jsonl", output_dir);
10    if !std::path::Path::new(&results_path).exists() {
11        return Err(anyhow::anyhow!(
12            "No results found at {}. Run `swebench run` first.",
13            results_path
14        ));
15    }
16
17    let data = std::fs::read_to_string(&results_path)?;
18    let results: Vec<InstanceResult> = data
19        .lines()
20        .filter(|l| !l.is_empty())
21        .filter_map(|l| serde_json::from_str(l).ok())
22        .collect();
23    if results.is_empty() {
24        return Err(anyhow::anyhow!("Results file is empty"));
25    }
26
27    let model = results[0].model.clone();
28    let total = results.len();
29    let resolved: usize = results.iter().filter(|r| r.resolved).count();
30    let errors: usize = results.iter().filter(|r| r.error.is_some()).count();
31    let rate = resolved as f64 / total as f64 * 100.0;
32    let avg_turns: f64 = results.iter().map(|r| r.turns_used as f64).sum::<f64>() / total as f64;
33    let avg_duration: f64 = results.iter().map(|r| r.duration_secs).sum::<f64>() / total as f64;
34    let total_tokens: u64 = results.iter().map(|r| r.tokens_used).sum();
35    let total_duration: f64 = results.iter().map(|r| r.duration_secs).sum();
36
37    let mut by_repo: BTreeMap<String, Vec<&InstanceResult>> = BTreeMap::new();
38    for r in &results {
39        by_repo.entry(r.repo.clone()).or_default().push(r);
40    }
41
42    let mut report = format!(
43        "# SWE-bench Results — {}\n\nGenerated: {}\n\n",
44        model,
45        chrono::Utc::now().format("%Y-%m-%d %H:%M UTC")
46    );
47
48    report.push_str("## Summary\n\n| Metric | Value |\n|--------|-------|\n");
49    report.push_str(&format!("| Instances | {} |\n", total));
50    report.push_str(&format!(
51        "| Resolved | {}/{} ({:.1}%) |\n",
52        resolved, total, rate
53    ));
54    report.push_str(&format!("| Errors | {} |\n", errors));
55    report.push_str(&format!("| Avg turns | {:.1} |\n", avg_turns));
56    report.push_str(&format!("| Avg duration | {:.0}s |\n", avg_duration));
57    report.push_str(&format!("| Total tokens | {} |\n", total_tokens));
58    report.push_str(&format!(
59        "| Total time | {:.0}s ({:.1}h) |\n\n",
60        total_duration,
61        total_duration / 3600.0
62    ));
63
64    report.push_str("## Per-Repository Breakdown\n\n| Repository | Instances | Resolved | Rate | Avg Turns | Avg Duration |\n|------------|-----------|----------|------|-----------|-------------|\n");
65    for (repo, repo_results) in &by_repo {
66        let rt = repo_results.len();
67        let rr = repo_results.iter().filter(|r| r.resolved).count();
68        let rrate = rr as f64 / rt as f64 * 100.0;
69        let rat = repo_results
70            .iter()
71            .map(|r| r.turns_used as f64)
72            .sum::<f64>()
73            / rt as f64;
74        let rad = repo_results.iter().map(|r| r.duration_secs).sum::<f64>() / rt as f64;
75        report.push_str(&format!(
76            "| {} | {} | {} | {:.1}% | {:.1} | {:.0}s |\n",
77            repo, rt, rr, rrate, rat, rad
78        ));
79    }
80
81    report.push_str("\n## Comparison with Published Results\n\n| Agent | Dataset | Resolve Rate | Cost/Instance |\n|-------|---------|:------------:|:-------------:|\n");
82    report.push_str(&format!(
83        "| **BattleCommand Forge ({})** | **this run** | **{:.1}%** | **~$0.30** |\n",
84        model, rate
85    ));
86    report.push_str("| OpenHands (Claude 3.5) | Verified | 53.0% | ~$0.50 |\n");
87    report.push_str("| Moatless Tools (Claude 3.5) | Verified | 38.4% | ~$0.30 |\n");
88    report.push_str("| SWE-agent (GPT-4) | Full | 12.5% | ~$0.50 |\n");
89    report.push_str("| Devin | Lite | 13.8% | — |\n\n");
90
91    report.push_str("## Resolved Instances\n\n");
92    let resolved_results: Vec<&InstanceResult> = results.iter().filter(|r| r.resolved).collect();
93    if resolved_results.is_empty() {
94        report.push_str("None resolved.\n\n");
95    } else {
96        report.push_str("| Instance | Turns | Duration | Files Modified |\n|----------|-------|----------|----------------|\n");
97        for r in &resolved_results {
98            report.push_str(&format!(
99                "| {} | {} | {:.0}s | {} |\n",
100                r.instance_id,
101                r.turns_used,
102                r.duration_secs,
103                r.files_modified.join(", ")
104            ));
105        }
106    }
107
108    let error_results: Vec<&InstanceResult> =
109        results.iter().filter(|r| r.error.is_some()).collect();
110    if !error_results.is_empty() {
111        report.push_str("\n## Errors\n\n| Instance | Error |\n|----------|-------|\n");
112        for r in &error_results {
113            let err = r.error.as_deref().unwrap_or("unknown");
114            let short = if err.len() > 80 { &err[..80] } else { err };
115            report.push_str(&format!("| {} | {} |\n", r.instance_id, short));
116        }
117    }
118
119    let report_path = format!("{}/report.md", output_dir);
120    std::fs::write(&report_path, &report)?;
121    println!("Report written to {}", report_path);
122    println!("\n{}", report);
123    Ok(())
124}