use anyhow::Result;
use cqs::ReviewResult;
use cqs::RiskLevel;
/// Run the `review` subcommand: analyze a diff (from git or piped on stdin)
/// against the indexed store and print changed functions, affected callers,
/// tests to re-run, and a risk summary.
///
/// `base` is the git ref to diff against (ignored when `from_stdin` is set);
/// `max_tokens` optionally trims the output to a token budget. Errors from
/// diff collection, review analysis, or JSON serialization are propagated.
pub(crate) fn cmd_review(
    ctx: &crate::cli::CommandContext,
    base: Option<&str>,
    from_stdin: bool,
    format: &crate::cli::OutputFormat,
    max_tokens: Option<usize>,
) -> Result<()> {
    let _span = tracing::info_span!("cmd_review", ?format, ?max_tokens).entered();

    // Mermaid has no sensible rendering for a review report.
    if matches!(format, crate::cli::OutputFormat::Mermaid) {
        anyhow::bail!("Mermaid output is not supported for review — use text or json");
    }
    let emit_json = matches!(format, crate::cli::OutputFormat::Json);

    // Diff source: either piped in on stdin or produced by `git diff`.
    let diff_text = if from_stdin {
        crate::cli::commands::read_stdin()?
    } else {
        crate::cli::commands::run_git_diff(base)?
    };

    match cqs::review_diff(&ctx.store, &diff_text, &ctx.root)? {
        Some(mut review) => {
            // Optionally trim the review to the token budget before rendering;
            // the budget accounting differs slightly for JSON output.
            let token_count_used =
                max_tokens.map(|budget| apply_token_budget(&mut review, budget, emit_json));
            if emit_json {
                let mut output: serde_json::Value = serde_json::to_value(&review)?;
                if let Some(tokens) = token_count_used {
                    output["token_count"] = serde_json::json!(tokens);
                    output["token_budget"] = serde_json::json!(max_tokens.unwrap_or(0));
                }
                println!("{}", serde_json::to_string_pretty(&output)?);
            } else {
                display_review_text(&review, &ctx.root, token_count_used, max_tokens);
            }
        }
        None => {
            // Nothing indexed was touched: emit a stable empty payload (JSON)
            // or a short notice (text).
            if emit_json {
                println!("{}", serde_json::to_string_pretty(&empty_review_json())?);
            } else {
                println!("No indexed functions affected by this diff.");
            }
        }
    }
    Ok(())
}
/// Crate-visible entry point for token budgeting, delegating to
/// [`apply_token_budget`]. Exists so other command modules can reuse the
/// budgeting logic without this module exposing the private helper directly.
///
/// Trims `review` in place to roughly fit `budget` tokens (`json` selects the
/// per-item overhead used in the estimate) and returns the estimated token
/// count actually used.
pub(crate) fn apply_token_budget_public(
review: &mut ReviewResult,
budget: usize,
json: bool,
) -> usize {
apply_token_budget(review, budget, json)
}
/// Trim `review` in place so its rendered output fits roughly within `budget`
/// tokens, using coarse per-item token estimates.
///
/// Changed functions and notes are never truncated. Of the remaining budget,
/// callers get two thirds and tests get what is left afterwards; truncation
/// always keeps at least one caller and one test when any existed. When
/// anything is dropped, a human-readable warning is appended to
/// `review.warnings`. Returns the estimated token count of the trimmed review.
fn apply_token_budget(review: &mut ReviewResult, budget: usize, json: bool) -> usize {
    let _span = tracing::info_span!("review_token_budget", budget, json).entered();

    // JSON output carries extra structural overhead for every emitted item.
    let per_item_overhead = if json {
        crate::cli::commands::JSON_OVERHEAD_PER_RESULT
    } else {
        0
    };
    let caller_cost: usize = 15 + per_item_overhead;
    let test_cost: usize = 18 + per_item_overhead;
    let function_cost: usize = 12 + per_item_overhead;
    let note_cost: usize = 20 + per_item_overhead;
    const BASE_OVERHEAD: usize = 30;

    // Non-truncatable sections are accounted for first.
    let mut used = BASE_OVERHEAD
        + review.changed_functions.len() * function_cost
        + review.relevant_notes.len() * note_cost;

    // Callers receive two thirds of whatever budget remains.
    let caller_budget = budget.saturating_sub(used) * 2 / 3;
    let caller_cap = caller_budget / caller_cost;
    let original_callers = review.affected_callers.len();
    if original_callers > caller_cap {
        // Floor of 1 so a non-empty list never disappears entirely.
        review.affected_callers.truncate(caller_cap.max(1));
    }
    used += review.affected_callers.len() * caller_cost;

    // Tests get the remainder after callers are counted.
    let test_cap = budget.saturating_sub(used) / test_cost;
    let original_tests = review.affected_tests.len();
    if original_tests > test_cap {
        review.affected_tests.truncate(test_cap.max(1));
    }
    used += review.affected_tests.len() * test_cost;

    let dropped_callers = original_callers - review.affected_callers.len();
    let dropped_tests = original_tests - review.affected_tests.len();
    if dropped_callers > 0 || dropped_tests > 0 {
        tracing::info!(
            budget,
            used,
            truncated_callers = dropped_callers,
            truncated_tests = dropped_tests,
            "Token-budgeted review"
        );
        review.warnings.push(format!(
            "Output truncated to ~{} tokens (budget: {}). {} callers, {} tests omitted (min 1 caller + 1 test guaranteed).",
            used, budget, dropped_callers, dropped_tests
        ));
    }
    used
}
/// JSON skeleton emitted when the diff touches no indexed functions.
///
/// Mirrors the shape of a serialized `ReviewResult` so JSON consumers see the
/// same schema whether or not the diff produced findings.
fn empty_review_json() -> serde_json::Value {
    serde_json::json!({
        "changed_functions": [],
        "affected_callers": [],
        "affected_tests": [],
        "relevant_notes": [],
        "risk_summary": { "high": 0, "medium": 0, "low": 0, "overall": "low" },
        "stale_warning": null,
        // NOTE(review): `ReviewResult` carries a `warnings` field that was
        // missing from this empty payload, making the two schemas diverge.
        // Assumes `warnings` is serialized (no skip attribute visible) — confirm.
        "warnings": []
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use cqs::{CallerDetail, DiffTestInfo, ReviewedFunction, RiskLevel, RiskScore, RiskSummary};
    use std::path::PathBuf;

    /// Construct a synthetic single-function, low-risk review populated with
    /// `num_callers` callers and `num_tests` tests.
    fn make_review(num_callers: usize, num_tests: usize) -> ReviewResult {
        let mut callers = Vec::with_capacity(num_callers);
        for i in 0..num_callers {
            callers.push(CallerDetail {
                name: format!("caller_{}", i),
                file: PathBuf::from(format!("src/c{}.rs", i)),
                line: (i as u32) + 1,
                call_line: (i as u32) + 10,
                snippet: None,
            });
        }

        let mut tests = Vec::with_capacity(num_tests);
        for i in 0..num_tests {
            tests.push(DiffTestInfo {
                name: format!("test_{}", i),
                file: PathBuf::from(format!("tests/t{}.rs", i)),
                line: (i as u32) + 1,
                via: "direct".into(),
                call_depth: 1,
            });
        }

        // Coverage ratio is capped at 1.0; no callers counts as fully covered.
        let test_ratio = if num_callers > 0 {
            (num_tests as f32 / num_callers as f32).min(1.0)
        } else {
            1.0
        };

        ReviewResult {
            changed_functions: vec![ReviewedFunction {
                name: "target_fn".into(),
                file: PathBuf::from("src/lib.rs"),
                line_start: 42,
                risk: RiskScore {
                    caller_count: num_callers,
                    test_count: num_tests,
                    test_ratio,
                    risk_level: RiskLevel::Low,
                    blast_radius: RiskLevel::Low,
                    score: 0.0,
                },
            }],
            affected_callers: callers,
            affected_tests: tests,
            relevant_notes: vec![],
            risk_summary: RiskSummary {
                high: 0,
                medium: 0,
                low: 1,
                overall: RiskLevel::Low,
            },
            stale_warning: None,
            warnings: vec![],
        }
    }

    /// A generous budget must leave the review untouched.
    #[test]
    fn test_apply_token_budget_preserves_when_fits() {
        let mut review = make_review(3, 3);
        let used = apply_token_budget(&mut review, 5000, false);
        assert_eq!(
            review.affected_callers.len(),
            3,
            "All callers should be preserved within budget"
        );
        assert_eq!(
            review.affected_tests.len(),
            3,
            "All tests should be preserved within budget"
        );
        assert!(review.warnings.is_empty(), "No truncation warning expected");
        assert!(used > 0, "Token count should be positive");
    }

    /// A tight budget must truncate both lists — keeping at least one entry in
    /// each — and record a warning.
    #[test]
    fn test_apply_token_budget_truncates_when_over() {
        let mut review = make_review(100, 100);
        let budget = 100;
        let used = apply_token_budget(&mut review, budget, false);

        let caller_count = review.affected_callers.len();
        let test_count = review.affected_tests.len();
        assert!(
            caller_count < 100,
            "Callers should be truncated, got {}",
            caller_count
        );
        assert!(
            test_count < 100,
            "Tests should be truncated, got {}",
            test_count
        );
        assert!(caller_count >= 1, "At least 1 caller guaranteed");
        assert!(test_count >= 1, "At least 1 test guaranteed");
        assert!(
            !review.warnings.is_empty(),
            "Should have a truncation warning"
        );
        assert!(
            used <= budget + 50,
            "Used tokens ({used}) should be near budget ({budget})"
        );
    }
}
/// Pretty-print a review result as colored human-readable text on stdout.
///
/// Stale-index warnings go to stderr so they can be redirected separately from
/// the report. `_root` is accepted for interface stability but currently
/// unused. When both `token_count_used` and `max_tokens` are present, the risk
/// line gets a trailing `[used/budgetT]` marker.
fn display_review_text(
    review: &ReviewResult,
    _root: &std::path::Path,
    token_count_used: Option<usize>,
    max_tokens: Option<usize>,
) {
    use colored::Colorize;

    // Colorize the overall risk directly from the enum — the previous version
    // went through a stringly-typed "red"/"yellow" intermediate and a second
    // match, which the compiler could not check for exhaustiveness.
    let overall_str = format!("{}", review.risk_summary.overall);
    let colored_risk = match review.risk_summary.overall {
        RiskLevel::High => overall_str.red().bold().to_string(),
        RiskLevel::Medium => overall_str.yellow().bold().to_string(),
        RiskLevel::Low => overall_str.green().bold().to_string(),
    };
    let token_info = match (token_count_used, max_tokens) {
        (Some(used), Some(budget)) => format!(" [{}/{}T]", used, budget),
        _ => String::new(),
    };
    println!(
        "{} {} (high: {}, medium: {}, low: {}){}",
        "Risk:".bold(),
        colored_risk,
        review.risk_summary.high,
        review.risk_summary.medium,
        review.risk_summary.low,
        token_info,
    );

    // Staleness is diagnostic, not part of the report proper → stderr.
    if let Some(ref stale) = review.stale_warning {
        eprintln!();
        eprintln!(
            "{} Index is stale for {} file(s):",
            "Warning:".yellow().bold(),
            stale.len()
        );
        for f in stale {
            eprintln!(" {}", f);
        }
    }

    println!();
    println!(
        "{} ({}):",
        "Changed functions".bold(),
        review.changed_functions.len()
    );
    for f in &review.changed_functions {
        let risk_indicator = match f.risk.risk_level {
            RiskLevel::High => format!("[{}]", "HIGH".red()),
            RiskLevel::Medium => format!("[{}]", "MED".yellow()),
            RiskLevel::Low => format!("[{}]", "LOW".green()),
        };
        // Only mention blast radius when it differs from the function's own risk.
        let blast_info = if f.risk.blast_radius != f.risk.risk_level {
            format!(", blast radius: {}", f.risk.blast_radius)
        } else {
            String::new()
        };
        println!(
            " {} {} ({}:{}) — {} callers, {} tests{}",
            risk_indicator,
            f.name,
            f.file.display(),
            f.line_start,
            f.risk.caller_count,
            f.risk.test_count,
            blast_info,
        );
    }

    if review.affected_callers.is_empty() {
        println!();
        println!("{}", "No affected callers.".dimmed());
    } else {
        println!();
        println!(
            "{} ({}):",
            "Affected callers".cyan(),
            review.affected_callers.len()
        );
        for c in &review.affected_callers {
            println!(
                " {} ({}:{}, call at line {})",
                c.name,
                c.file.display(),
                c.line,
                c.call_line
            );
        }
    }

    if review.affected_tests.is_empty() {
        println!();
        println!("{}", "No affected tests.".dimmed());
    } else {
        println!();
        println!(
            "{} ({}):",
            "Tests to re-run".yellow(),
            review.affected_tests.len()
        );
        for t in &review.affected_tests {
            println!(
                " {} ({}:{}) [via {}, depth {}]",
                t.name,
                t.file.display(),
                t.line,
                t.via,
                t.call_depth
            );
        }
    }

    if !review.warnings.is_empty() {
        println!();
        for w in &review.warnings {
            println!("{} {}", "Warning:".yellow().bold(), w);
        }
    }

    if !review.relevant_notes.is_empty() {
        println!();
        println!(
            "{} ({}):",
            "Relevant notes".magenta(),
            review.relevant_notes.len()
        );
        for n in &review.relevant_notes {
            // Sentiment glyph: negative ⚠, positive ✓, neutral ·. Plain &str —
            // no per-note String allocation needed.
            let sentiment_str = match n.sentiment {
                s if s <= -0.5 => "⚠",
                s if s >= 0.5 => "✓",
                _ => "·",
            };
            println!(
                " {} {} ({})",
                sentiment_str,
                n.text,
                n.matching_files.join(", ")
            );
        }
    }
}