use std::collections::HashSet;
use anyhow::Result;
use chrono::Local;
use git2::{BranchType, Repository};
use serde::{Deserialize, Serialize};
use crate::event::GitEvent;
// --- Change-coupling detection (inputs to `calculate_change_coupling`) ---
// Minimum shared-commit evidence and co-change ratio before a file pair is
// reported as coupled.
pub const MIN_COMMITS_FOR_COUPLING: usize = 5;
pub const MIN_COUPLING_RATIO: f64 = 0.3;
// Passed to `calculate_bus_factor` — exact semantics defined in `stats`;
// presumably a minimum-commit cutoff per path (TODO confirm).
const BUS_FACTOR_THRESHOLD: usize = 5;
// Recency window in months passed to `calculate_tech_debt`.
const TECH_DEBT_RECENCY_MONTHS: usize = 3;
// --- List-size caps for pack/review sections ---
const TOP_HOT_FILES_COUNT: usize = 5;
const TOP_COUPLING_COUNT: usize = 5;
const TOP_WARNINGS_COUNT: usize = 5;
const HEALTH_WARNINGS_IN_REVIEW: usize = 3;
const OWNER_CANDIDATE_HOT_FILES: usize = 2;
// --- Signal detection thresholds (see `compute_signals_for_event`) ---
const HIGH_IMPACT_SCORE_THRESHOLD: f64 = 0.7;
const LARGE_CHANGE_LINE_THRESHOLD: usize = 300;
const HOT_FILE_SET_SIZE: usize = 20;
const TECH_DEBT_RISK_THRESHOLD: f64 = 0.7;
const OWNERSHIP_CONCENTRATION_THRESHOLD: f64 = 85.0;
// --- Verdict cut-offs for `verify_patch_risk` (needs_review < high) ---
const RISK_VERDICT_HIGH: f64 = 0.7;
const RISK_VERDICT_NEEDS_REVIEW: f64 = 0.4;
// Impact score at/above which a commit without test-file changes is flagged.
const TEST_GAP_IMPACT_THRESHOLD: f64 = 0.5;
// Health score below which regression checks are recommended.
const LOW_HEALTH_THRESHOLD: u8 = 60;
// --- Risk-score weights (see `compute_risk_score`; result clamped to 1.0) ---
const HEALTH_PENALTY_WEIGHT: f64 = 0.3;
const HIGH_RISK_COUNT_WEIGHT: f64 = 0.15;
const TEST_GAP_MAX_COUNT: usize = 3;
const TEST_GAP_PENALTY_PER_GAP: f64 = 0.1;
const SIGNAL_HIGH_IMPACT_PENALTY: f64 = 0.25;
const SIGNAL_HOT_FILE_PENALTY: f64 = 0.15;
const SIGNAL_KNOWLEDGE_RISK_PENALTY: f64 = 0.25;
// --- Confidence model (see `compute_confidence`) ---
const CONFIDENCE_BASE: f64 = 0.45;
const EVIDENCE_WEIGHT_FACTOR: f64 = 0.35;
const DENSITY_MAX_COUNT: usize = 5;
const DENSITY_BOOST_PER_ITEM: f64 = 0.05;
// --- Per-source weights attached to `EvidenceLink`s ---
const EVIDENCE_WEIGHT_HEALTH: f64 = 0.35;
const EVIDENCE_WEIGHT_SIGNALS: f64 = 0.30;
const EVIDENCE_WEIGHT_HOT_FILES: f64 = 0.20;
const EVIDENCE_WEIGHT_COMMIT: f64 = 0.15;
// Compile-time sanity checks on the tuning values above.
const _: () = assert!(RISK_VERDICT_NEEDS_REVIEW < RISK_VERDICT_HIGH);
const _: () = assert!(SIGNAL_HIGH_IMPACT_PENALTY > 0.0);
const _: () = assert!(SIGNAL_HOT_FILE_PENALTY > 0.0);
const _: () = assert!(SIGNAL_KNOWLEDGE_RISK_PENALTY > 0.0);
const _: () = assert!(CONFIDENCE_BASE > 0.0);
use crate::git::get_commit_files;
use crate::stats::{
calculate_bus_factor, calculate_change_coupling, calculate_file_heatmap,
calculate_impact_scores, calculate_ownership, calculate_project_health,
calculate_quality_scores, calculate_tech_debt, BusFactorAnalysis, CodeOwnership,
CommitImpactAnalysis, FileHeatmap, TechDebtAnalysis,
};
/// Borrowed bundle of per-repository analyses consumed by
/// [`compute_signals_for_event`] when deriving a [`SignalSet`].
pub struct SignalContext<'a> {
    pub heatmap: &'a FileHeatmap,
    pub impact: &'a CommitImpactAnalysis,
    pub ownership: &'a CodeOwnership,
    pub bus_factor: &'a BusFactorAnalysis,
    pub tech_debt: &'a TechDebtAnalysis,
}
/// Per-commit risk flags derived from repository analytics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalSet {
    pub high_impact: bool,
    pub hot_file: bool,
    pub knowledge_risk: bool,
}
impl SignalSet {
    /// Render the three flags as a fixed-width, three-character string:
    /// `!` = high impact, `~` = hot file, `?` = knowledge risk, with `.`
    /// standing in for any unset flag (e.g. "!.?" or "...").
    pub fn glyphs(&self) -> String {
        [
            (self.high_impact, '!'),
            (self.hot_file, '~'),
            (self.knowledge_risk, '?'),
        ]
        .iter()
        .map(|&(set, glyph)| if set { glyph } else { '.' })
        .collect()
    }
}
/// Lightweight branch/worktree status snapshot (see [`build_context_summary`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextSummary {
    // Local timestamp formatted as `%Y-%m-%dT%H:%M:%S%z`.
    pub generated_at: String,
    // Branch shorthand, or "HEAD" when detached/unavailable.
    pub branch: String,
    // Commits ahead of the tracked upstream (0 when there is none).
    pub ahead: usize,
    // Commits behind the tracked upstream (0 when there is none).
    pub behind: usize,
    // Number of entries reported by `repo.statuses(None)`.
    pub dirty_files: usize,
    // Hottest file from the heatmap, if any.
    pub hot_path: Option<String>,
    // Short hash of the selected commit, if any.
    pub selected_hash: Option<String>,
}
/// Full analytics bundle produced by [`build_context_pack`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextPack {
    pub summary: ContextSummary,
    pub selected_commit: Option<PackCommit>,
    // Signals for the selected commit; `None` when no commit was selected.
    pub signals: Option<SignalSet>,
    pub hot_files: Vec<PackHotFile>,
    // Primary owner of the first selected-commit file with ownership data.
    pub ownership: Option<PackOwner>,
    pub coupling: Vec<PackCoupling>,
    pub health: PackHealth,
}
/// Serializable view of a single commit inside a pack.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackCommit {
    pub hash: String,
    pub author: String,
    pub date: String,
    pub message: String,
    pub files_changed: usize,
    // Filled from `GitEvent::files_added` — presumably line insertions
    // despite the field name; TODO confirm in `event`.
    pub insertions: usize,
    // Filled from `GitEvent::files_deleted`; same caveat as `insertions`.
    pub deletions: usize,
    pub impact_score: f64,
}
/// One entry of the hot-file list (heatmap top-N).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackHotFile {
    pub path: String,
    pub changes: usize,
    pub heat: f64,
}
/// Ownership info for a single file path.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackOwner {
    pub path: String,
    pub primary_author: String,
    pub ownership_percent: f64,
}
/// One change-coupling pair relevant to the selected commit.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackCoupling {
    pub file: String,
    pub coupled_file: String,
    pub coupling_percent: f64,
    pub co_changes: usize,
}
/// Condensed project-health section of a pack.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackHealth {
    pub overall_score: u8,
    pub level: String,
    // Up to `TOP_WARNINGS_COUNT` warning/critical alert messages.
    pub warnings: Vec<String>,
    pub confidence: String,
}
/// A single risk finding in a [`ReviewPack`]; `severity` is one of
/// "high" / "medium" / "low" (string-typed for serialization).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReviewRisk {
    pub id: String,
    pub title: String,
    pub severity: String,
    pub details: String,
}
/// Suggested reviewer/owner for a path touched by the change.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReviewOwnerCandidate {
    pub path: String,
    // "unknown" for candidates derived from hot files without ownership data.
    pub author: String,
    pub ownership_percent: f64,
}
/// A recommended follow-up action; `priority` is "P1".."P3".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionRecommendation {
    pub id: String,
    pub title: String,
    pub priority: String,
    pub reason: String,
    // Optional shell command suggestion (e.g. a rebase command).
    pub command_hint: Option<String>,
}
/// Pointer to the data source backing a conclusion, with the weight it
/// contributes to [`compute_confidence`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvidenceLink {
    pub source_type: String,
    pub source_ref: String,
    pub weight: f64,
}
/// Risk-oriented review digest produced by [`build_review_pack`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReviewPack {
    pub repo: String,
    pub branch: String,
    pub head: String,
    pub summary: String,
    // Aggregate risk in [0, 1]; see `compute_risk_score`.
    pub risk_score: f64,
    // Confidence in the verdict, in [0, 1]; see `compute_confidence`.
    pub confidence: f64,
    pub top_risks: Vec<ReviewRisk>,
    pub test_gaps: Vec<String>,
    pub owner_candidates: Vec<ReviewOwnerCandidate>,
    pub recommended_actions: Vec<ActionRecommendation>,
    pub evidence: Vec<EvidenceLink>,
}
/// A [`ReviewPack`] plus a ready-to-use prompt for handing review context to
/// another tool or agent (`target`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffContext {
    pub target: String,
    pub generated_at: String,
    pub review_pack: ReviewPack,
    pub next_actions: Vec<ActionRecommendation>,
    pub prompt: String,
}
/// Build a [`ContextSummary`] for the given repository (discovered from the
/// current directory when `repo` is `None`).
///
/// Falls back to branch "HEAD" and zeroed counters whenever the repository,
/// its head, or its upstream cannot be resolved — this function never fails.
pub fn build_context_summary(
    repo: Option<&Repository>,
    hot_path: Option<String>,
    selected_hash: Option<String>,
) -> ContextSummary {
    let generated_at = Local::now().format("%Y-%m-%dT%H:%M:%S%z").to_string();
    // Keep a discovered repository alive for the duration of this call.
    let owned_repo;
    let repo = match repo {
        Some(r) => Some(r),
        None => {
            owned_repo = Repository::discover(".").ok();
            owned_repo.as_ref()
        }
    };
    let mut branch = String::from("HEAD");
    let mut ahead = 0usize;
    let mut behind = 0usize;
    let mut dirty_files = 0usize;
    if let Some(repo) = repo {
        // Resolve the branch shorthand once; "HEAD" remains for detached heads.
        let head_branch = repo
            .head()
            .ok()
            .and_then(|head| head.shorthand().map(str::to_string));
        if let Some(name) = head_branch {
            branch = name;
            // Ahead/behind only makes sense against a configured upstream.
            if let Ok(local_branch) = repo.find_branch(&branch, BranchType::Local) {
                if let Ok(upstream) = local_branch.upstream() {
                    if let (Some(local_oid), Some(upstream_oid)) =
                        (local_branch.get().target(), upstream.get().target())
                    {
                        if let Ok((a, b)) = repo.graph_ahead_behind(local_oid, upstream_oid) {
                            ahead = a;
                            behind = b;
                        }
                    }
                }
            }
        }
        dirty_files = repo.statuses(None).map(|s| s.len()).unwrap_or(0);
    }
    ContextSummary {
        generated_at,
        branch,
        ahead,
        behind,
        dirty_files,
        hot_path,
        selected_hash,
    }
}
/// Run the full analytics pipeline over `events` and assemble a
/// [`ContextPack`].
///
/// When `selected_hash` matches no event (or is `None`), the first event is
/// used as the selected commit; with no events at all, the commit-specific
/// sections are empty. Errors only propagate from `anyhow`-compatible
/// failures; individual per-commit file lookups degrade to `None`/empty.
pub fn build_context_pack(
    repo: Option<&Repository>,
    events: &[&GitEvent],
    selected_hash: Option<&str>,
) -> Result<ContextPack> {
    // Each analysis re-resolves commit files through `get_commit_files`;
    // failures per commit are swallowed (`.ok()`) so analysis is best-effort.
    let heatmap = calculate_file_heatmap(events, |hash| get_commit_files(hash).ok());
    let impact = calculate_impact_scores(events, |hash| get_commit_files(hash).ok(), &heatmap);
    let ownership = calculate_ownership(events, |hash| get_commit_files(hash).ok());
    let coupling = calculate_change_coupling(
        events,
        |hash| get_commit_files(hash).ok(),
        MIN_COMMITS_FOR_COUPLING,
        MIN_COUPLING_RATIO,
    );
    let quality = calculate_quality_scores(events, |hash| get_commit_files(hash).ok(), &coupling);
    let bus_factor = calculate_bus_factor(
        events,
        |hash| get_commit_files(hash).ok(),
        BUS_FACTOR_THRESHOLD,
    );
    let tech_debt = calculate_tech_debt(
        events,
        |hash| get_commit_files(hash).ok(),
        TECH_DEBT_RECENCY_MONTHS,
    );
    let health = calculate_project_health(
        events,
        |hash| get_commit_files(hash).ok(),
        Some(&quality),
        Some(&bus_factor),
        Some(&tech_debt),
        &heatmap,
    );
    let signal_ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    // Fall back to the newest event when the requested hash is absent.
    let selected_event = selected_hash
        .and_then(|h| events.iter().find(|e| e.short_hash == h).copied())
        .or_else(|| events.first().copied());
    let selected_files = selected_event
        .map(|event| get_commit_files(&event.short_hash).unwrap_or_default())
        .unwrap_or_default();
    let selected_commit = selected_event.map(|event| {
        // Commits missing from the impact analysis get a neutral 0.0 score.
        let impact_score = impact
            .commits
            .iter()
            .find(|c| c.commit_hash == event.short_hash)
            .map(|c| c.score)
            .unwrap_or(0.0);
        PackCommit {
            hash: event.short_hash.clone(),
            author: event.author.clone(),
            date: event.timestamp.format("%Y-%m-%dT%H:%M:%S%z").to_string(),
            message: event.message.clone(),
            files_changed: selected_files.len(),
            // NOTE(review): `files_added`/`files_deleted` feed the
            // insertions/deletions fields — presumably line counts; confirm
            // against `GitEvent`'s producer.
            insertions: event.files_added,
            deletions: event.files_deleted,
            impact_score,
        }
    });
    let selected_signals =
        selected_event.map(|event| compute_signals_for_event(event, &selected_files, &signal_ctx));
    // Heatmap entries are assumed hottest-first; take the top N as-is.
    let hot_files = heatmap
        .files
        .iter()
        .take(TOP_HOT_FILES_COUNT)
        .map(|f| PackHotFile {
            path: f.path.clone(),
            changes: f.change_count,
            heat: f.heat_level(),
        })
        .collect::<Vec<_>>();
    // First selected-commit file that has a (non-directory) ownership entry.
    let owner = selected_event.and_then(|_| {
        selected_files.iter().find_map(|path| {
            ownership
                .entries
                .iter()
                .find(|e| !e.is_directory && e.path == *path)
                .map(|entry| PackOwner {
                    path: entry.path.clone(),
                    primary_author: entry.primary_author.clone(),
                    ownership_percent: entry.ownership_percentage(),
                })
        })
    });
    // Couplings whose source file is part of the selected commit.
    let coupled_files = if selected_event.is_some() {
        let set: HashSet<_> = selected_files.iter().collect();
        coupling
            .couplings
            .iter()
            .filter(|c| set.contains(&c.file))
            .take(TOP_COUPLING_COUNT)
            .map(|c| PackCoupling {
                file: c.file.clone(),
                coupled_file: c.coupled_file.clone(),
                coupling_percent: c.coupling_percent,
                co_changes: c.co_change_count,
            })
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    // Keep only warning/critical alerts, capped at TOP_WARNINGS_COUNT.
    let warnings = health
        .alerts
        .iter()
        .filter(|a| a.severity.is_warning_or_error())
        .map(|a| a.message.clone())
        .take(TOP_WARNINGS_COUNT)
        .collect::<Vec<_>>();
    let hot_path = hot_files.first().map(|f| f.path.clone());
    Ok(ContextPack {
        summary: build_context_summary(
            repo,
            hot_path,
            selected_commit.as_ref().map(|c| c.hash.clone()),
        ),
        selected_commit,
        signals: selected_signals,
        hot_files,
        ownership: owner,
        coupling: coupled_files,
        health: PackHealth {
            overall_score: health.overall_score,
            level: health.level().to_string(),
            warnings,
            confidence: health.confidence.level.as_str().to_string(),
        },
    })
}
pub fn build_review_pack(
repo: Option<&Repository>,
events: &[&GitEvent],
selected_hash: Option<&str>,
) -> Result<ReviewPack> {
let context_pack = build_context_pack(repo, events, selected_hash)?;
let (repo_name, branch, head) = repo_identity(repo);
let mut top_risks = Vec::new();
if let Some(signal) = &context_pack.signals {
if signal.high_impact {
top_risks.push(ReviewRisk {
id: "risk-impact".to_string(),
title: "High-impact commit detected".to_string(),
severity: "high".to_string(),
details: "Changed size/importance indicates elevated review risk".to_string(),
});
}
if signal.hot_file {
top_risks.push(ReviewRisk {
id: "risk-hotspot".to_string(),
title: "Hotspot file involved".to_string(),
severity: "medium".to_string(),
details: "Commit touches frequently changed files".to_string(),
});
}
if signal.knowledge_risk {
top_risks.push(ReviewRisk {
id: "risk-knowledge".to_string(),
title: "Knowledge concentration risk".to_string(),
severity: "high".to_string(),
details: "Ownership/Bus-factor suggests concentrated knowledge".to_string(),
});
}
}
for (idx, warning) in context_pack
.health
.warnings
.iter()
.take(HEALTH_WARNINGS_IN_REVIEW)
.enumerate()
{
top_risks.push(ReviewRisk {
id: format!("risk-health-{}", idx + 1),
title: "Project health warning".to_string(),
severity: "medium".to_string(),
details: warning.clone(),
});
}
if top_risks.is_empty() {
top_risks.push(ReviewRisk {
id: "risk-none".to_string(),
title: "No major risk signals".to_string(),
severity: "low".to_string(),
details: "No high-priority warning was detected from current heuristics".to_string(),
});
}
let mut test_gaps = Vec::new();
if let Some(commit) = &context_pack.selected_commit {
let files = get_commit_files(&commit.hash).unwrap_or_default();
let changed_tests = files
.iter()
.filter(|f| f.contains("test") || f.ends_with("_test.rs") || f.ends_with(".spec.ts"))
.count();
if changed_tests == 0 && commit.impact_score >= TEST_GAP_IMPACT_THRESHOLD {
test_gaps
.push("High-impact change without obvious test file modifications".to_string());
}
}
if context_pack.health.overall_score < LOW_HEALTH_THRESHOLD {
test_gaps.push("Project health below 60; prioritize regression checks".to_string());
}
let mut owner_candidates = Vec::new();
if let Some(owner) = &context_pack.ownership {
owner_candidates.push(ReviewOwnerCandidate {
path: owner.path.clone(),
author: owner.primary_author.clone(),
ownership_percent: owner.ownership_percent,
});
}
owner_candidates.extend(
context_pack
.hot_files
.iter()
.take(OWNER_CANDIDATE_HOT_FILES)
.map(|file| ReviewOwnerCandidate {
path: file.path.clone(),
author: "unknown".to_string(),
ownership_percent: 0.0,
}),
);
let mut recommended_actions = Vec::new();
if context_pack.signals.as_ref().is_some_and(|s| s.high_impact) {
recommended_actions.push(ActionRecommendation {
id: "act-review-owner".to_string(),
title: "Request owner review".to_string(),
priority: "P1".to_string(),
reason: "High-impact change should be validated by area owner".to_string(),
command_hint: None,
});
}
if !test_gaps.is_empty() {
recommended_actions.push(ActionRecommendation {
id: "act-add-tests".to_string(),
title: "Add/expand tests".to_string(),
priority: "P1".to_string(),
reason: "Detected test gap for risky change".to_string(),
command_hint: None,
});
}
if context_pack.summary.behind > 0 {
recommended_actions.push(ActionRecommendation {
id: "act-rebase".to_string(),
title: "Rebase onto latest main".to_string(),
priority: "P2".to_string(),
reason: "Branch is behind upstream".to_string(),
command_hint: Some("git rebase origin/main".to_string()),
});
}
if recommended_actions.is_empty() {
recommended_actions.push(ActionRecommendation {
id: "act-ship".to_string(),
title: "Proceed with normal review".to_string(),
priority: "P3".to_string(),
reason: "No critical signals detected".to_string(),
command_hint: None,
});
}
let mut evidence = vec![
EvidenceLink {
source_type: "health_score".to_string(),
source_ref: format!("overall={}", context_pack.health.overall_score),
weight: EVIDENCE_WEIGHT_HEALTH,
},
EvidenceLink {
source_type: "signals".to_string(),
source_ref: context_pack
.signals
.as_ref()
.map(|s| s.glyphs())
.unwrap_or_else(|| "...".to_string()),
weight: EVIDENCE_WEIGHT_SIGNALS,
},
EvidenceLink {
source_type: "hot_files".to_string(),
source_ref: format!("count={}", context_pack.hot_files.len()),
weight: EVIDENCE_WEIGHT_HOT_FILES,
},
];
if let Some(commit) = &context_pack.selected_commit {
evidence.push(EvidenceLink {
source_type: "commit".to_string(),
source_ref: format!("{} impact={:.2}", commit.hash, commit.impact_score),
weight: EVIDENCE_WEIGHT_COMMIT,
});
}
let risk_score = compute_risk_score(&context_pack, &top_risks, &test_gaps);
let confidence = compute_confidence(&evidence, top_risks.len(), test_gaps.len());
let summary = if let Some(commit) = &context_pack.selected_commit {
format!(
"{} by {} | impact {:.2} | health {}",
commit.message, commit.author, commit.impact_score, context_pack.health.overall_score
)
} else {
format!(
"No selected commit | health {}",
context_pack.health.overall_score
)
};
Ok(ReviewPack {
repo: repo_name,
branch,
head,
summary,
risk_score,
confidence,
top_risks,
test_gaps,
owner_candidates,
recommended_actions,
evidence,
})
}
/// Hand callers an owned copy of the pack's recommended actions so they can
/// reorder or annotate the list freely.
pub fn build_next_actions(review_pack: &ReviewPack) -> Vec<ActionRecommendation> {
    review_pack.recommended_actions.to_vec()
}
/// Produce a one-line, human-readable explanation for the action with the
/// given `id`, or `None` when the pack holds no such action. The line also
/// carries the optional command hint and the pack's primary risk title.
pub fn explain_recommendation(review_pack: &ReviewPack, id: &str) -> Option<String> {
    let action = review_pack
        .recommended_actions
        .iter()
        .find(|candidate| candidate.id == id)?;
    let mut segments = vec![format!(
        "{} ({}): {}",
        action.title, action.priority, action.reason
    )];
    if let Some(hint) = &action.command_hint {
        segments.push(format!(" | hint: {}", hint));
    }
    if let Some(primary_risk) = review_pack.top_risks.first() {
        segments.push(format!(" | primary risk: {}", primary_risk.title));
    }
    Some(segments.concat())
}
/// Classify the pack's risk score into a verdict and return a JSON report
/// with the score, confidence, high-severity risk count, and action ids.
pub fn verify_patch_risk(review_pack: &ReviewPack) -> serde_json::Value {
    let verdict = match review_pack.risk_score {
        score if score >= RISK_VERDICT_HIGH => "high_risk",
        score if score >= RISK_VERDICT_NEEDS_REVIEW => "needs_review",
        _ => "low_risk",
    };
    let critical_risks = review_pack
        .top_risks
        .iter()
        .filter(|risk| risk.severity == "high")
        .count();
    let action_ids: Vec<String> = review_pack
        .recommended_actions
        .iter()
        .map(|action| action.id.clone())
        .collect();
    serde_json::json!({
        "verdict": verdict,
        "risk_score": review_pack.risk_score,
        "confidence": review_pack.confidence,
        "critical_risks": critical_risks,
        "recommended_actions": action_ids
    })
}
/// Package a [`ReviewPack`] for handoff to another tool/agent, including a
/// pre-built prompt that lists the recommended action ids in priority order.
pub fn build_handoff_context(review_pack: &ReviewPack, target: &str) -> HandoffContext {
    let next_actions = build_next_actions(review_pack);
    let action_ids: Vec<&str> = next_actions.iter().map(|action| action.id.as_str()).collect();
    let prompt = format!(
        "You are reviewing {} on {}. Prioritize actions: {}. Explain risks with evidence.",
        review_pack.head,
        review_pack.branch,
        action_ids.join(", ")
    );
    HandoffContext {
        target: target.to_string(),
        generated_at: Local::now().format("%Y-%m-%dT%H:%M:%S%z").to_string(),
        review_pack: review_pack.clone(),
        next_actions,
        prompt,
    }
}
/// Resolve `(repo_name, branch, head_oid)` for display, falling back to
/// `("unknown", "HEAD", "unknown")` when the repository (or any part of it)
/// cannot be resolved. Never fails.
fn repo_identity(repo: Option<&Repository>) -> (String, String, String) {
    // Keep a discovered repository alive for the duration of this call.
    let owned_repo;
    let repo = match repo {
        Some(r) => Some(r),
        None => {
            owned_repo = Repository::discover(".").ok();
            owned_repo.as_ref()
        }
    };
    let Some(repo) = repo else {
        return (
            "unknown".to_string(),
            "HEAD".to_string(),
            "unknown".to_string(),
        );
    };
    let repo_name = repo
        .workdir()
        .and_then(|p| p.file_name())
        .and_then(|n| n.to_str())
        .unwrap_or("unknown")
        .to_string();
    // Resolve HEAD once and reuse it for both branch name and target OID
    // (the original resolved it twice).
    let head_ref = repo.head().ok();
    let branch = head_ref
        .as_ref()
        .and_then(|h| h.shorthand())
        .unwrap_or("HEAD")
        .to_string();
    let head = head_ref
        .as_ref()
        .and_then(|h| h.target())
        .map(|oid| oid.to_string())
        .unwrap_or_else(|| "unknown".to_string());
    (repo_name, branch, head)
}
/// Aggregate a risk score in [0, 1] from health shortfall, high-severity
/// risk count, capped test-gap penalties, and active commit signals.
fn compute_risk_score(
    context_pack: &ContextPack,
    top_risks: &[ReviewRisk],
    test_gaps: &[String],
) -> f64 {
    // Missing health points translate directly into risk (0..=1).
    let health_penalty =
        f64::from(100_u8.saturating_sub(context_pack.health.overall_score)) / 100.0;
    let high_risk_count = top_risks
        .iter()
        .filter(|risk| risk.severity == "high")
        .count() as f64;
    // Each test gap adds a fixed penalty, capped at TEST_GAP_MAX_COUNT gaps.
    let test_gap_penalty =
        (test_gaps.len().min(TEST_GAP_MAX_COUNT) as f64) * TEST_GAP_PENALTY_PER_GAP;
    // Sum the penalty of every active signal flag.
    let signal_penalty = context_pack.signals.as_ref().map_or(0.0, |signals| {
        let flags = [
            (signals.high_impact, SIGNAL_HIGH_IMPACT_PENALTY),
            (signals.hot_file, SIGNAL_HOT_FILE_PENALTY),
            (signals.knowledge_risk, SIGNAL_KNOWLEDGE_RISK_PENALTY),
        ];
        flags
            .iter()
            .filter(|(active, _)| *active)
            .map(|(_, penalty)| *penalty)
            .sum()
    });
    let total = health_penalty * HEALTH_PENALTY_WEIGHT
        + high_risk_count * HIGH_RISK_COUNT_WEIGHT
        + test_gap_penalty
        + signal_penalty;
    total.min(1.0)
}
/// Confidence in [0, 1]: a fixed base, plus the weighted evidence total,
/// plus a small boost per finding (capped at DENSITY_MAX_COUNT findings).
fn compute_confidence(evidence: &[EvidenceLink], risk_count: usize, test_gap_count: usize) -> f64 {
    let evidence_weight = evidence
        .iter()
        .fold(0.0_f64, |total, link| total + link.weight);
    let finding_count = (risk_count + test_gap_count).min(DENSITY_MAX_COUNT);
    let density_boost = finding_count as f64 * DENSITY_BOOST_PER_ITEM;
    f64::min(
        CONFIDENCE_BASE + evidence_weight * EVIDENCE_WEIGHT_FACTOR + density_boost,
        1.0,
    )
}
/// Derive the three per-commit risk flags from the analysis context.
///
/// - `high_impact`: impact score for this commit meets
///   [`HIGH_IMPACT_SCORE_THRESHOLD`]; when the commit is absent from the
///   impact analysis, falls back to raw change size against
///   [`LARGE_CHANGE_LINE_THRESHOLD`]. (NOTE(review): the fallback sums
///   `files_added + files_deleted` against a *line* threshold — confirm the
///   `files_*` fields actually hold line counts.)
/// - `hot_file`: any touched file appears in the top [`HOT_FILE_SET_SIZE`]
///   heatmap entries.
/// - `knowledge_risk`: any touched file matches — exactly or by mutual path
///   prefix — a low bus-factor path, a high tech-debt path, or an
///   ownership-concentrated file.
pub fn compute_signals_for_event(
    event: &GitEvent,
    files: &[String],
    ctx: &SignalContext,
) -> SignalSet {
    let high_impact = ctx
        .impact
        .commits
        .iter()
        .find(|c| c.commit_hash == event.short_hash)
        .map(|c| c.score >= HIGH_IMPACT_SCORE_THRESHOLD)
        .unwrap_or((event.files_added + event.files_deleted) >= LARGE_CHANGE_LINE_THRESHOLD);
    // Heatmap entries are assumed hottest-first; the first N form the hot set.
    let hot_file_set: HashSet<&str> = ctx
        .heatmap
        .files
        .iter()
        .take(HOT_FILE_SET_SIZE)
        .map(|f| f.path.as_str())
        .collect();
    let hot_file = files.iter().any(|f| hot_file_set.contains(f.as_str()));
    // Union of all paths flagged risky by any of the three analyses.
    let risky_paths: HashSet<&str> = ctx
        .bus_factor
        .entries
        .iter()
        .filter(|e| e.bus_factor <= 1)
        .map(|e| e.path.as_str())
        .chain(
            ctx.tech_debt
                .entries
                .iter()
                .filter(|e| e.score >= TECH_DEBT_RISK_THRESHOLD)
                .map(|e| e.path.as_str()),
        )
        .chain(
            ctx.ownership
                .entries
                .iter()
                .filter(|e| {
                    !e.is_directory && e.ownership_percentage() >= OWNERSHIP_CONCENTRATION_THRESHOLD
                })
                .map(|e| e.path.as_str()),
        )
        .collect();
    // Exact match, or prefix match in either direction so directory entries
    // match the files beneath them (and vice versa).
    let knowledge_risk = files.iter().any(|f| {
        risky_paths.contains(f.as_str())
            || risky_paths
                .iter()
                .any(|p| f.starts_with(*p) || p.starts_with(f))
    });
    SignalSet {
        high_impact,
        hot_file,
        knowledge_risk,
    }
}
/// Render a [`ContextSummary`] as a short Markdown bullet list; missing
/// optional fields are shown as "-".
pub fn summary_to_markdown(summary: &ContextSummary) -> String {
    let hot_path = summary.hot_path.as_deref().unwrap_or("-");
    let selected_hash = summary.selected_hash.as_deref().unwrap_or("-");
    format!(
        "# Context Summary\n\n- **Branch**: {branch}\n- **Ahead/Behind**: ↑{ahead} ↓{behind}\n- **Dirty Files**: {dirty}\n- **Hot Path**: `{hot}`\n- **Selected**: `{selected}`\n",
        branch = summary.branch,
        ahead = summary.ahead,
        behind = summary.behind,
        dirty = summary.dirty_files,
        hot = hot_path,
        selected = selected_hash,
    )
}
/// Render a full [`ContextPack`] as Markdown: a header with branch/health
/// status, then optional "Selected Commit", "Signals", "Hot Files", and
/// "Warnings" sections (each omitted when its data is absent/empty).
pub fn pack_to_markdown(pack: &ContextPack) -> String {
    let mut md = String::new();
    md.push_str("# Insight Pack\n\n");
    md.push_str(&format!(
        "- **Branch**: {}\n- **Ahead/Behind**: ↑{} ↓{}\n- **Dirty Files**: {}\n- **Health**: {} ({})\n\n",
        pack.summary.branch,
        pack.summary.ahead,
        pack.summary.behind,
        pack.summary.dirty_files,
        pack.health.overall_score,
        pack.health.level
    ));
    if let Some(commit) = &pack.selected_commit {
        md.push_str("## Selected Commit\n\n");
        md.push_str(&format!(
            "- `{}` {}\n- Author: {}\n- Impact: {:.2}\n\n",
            commit.hash, commit.message, commit.author, commit.impact_score
        ));
    }
    if let Some(signals) = &pack.signals {
        md.push_str("## Signals\n\n");
        // Glyph legend mirrors SignalSet::glyphs().
        md.push_str(&format!(
            "- `{}` (`!` impact, `~` hot, `?` risk)\n\n",
            signals.glyphs()
        ));
    }
    if !pack.hot_files.is_empty() {
        md.push_str("## Hot Files\n\n");
        for file in &pack.hot_files {
            md.push_str(&format!(
                "- `{}` ({}, {:.2})\n",
                file.path, file.changes, file.heat
            ));
        }
        md.push('\n');
    }
    if !pack.health.warnings.is_empty() {
        md.push_str("## Warnings\n\n");
        for warn in &pack.health.warnings {
            md.push_str(&format!("- {}\n", warn));
        }
        md.push('\n');
    }
    md
}
/// Render a [`ReviewPack`] as Markdown: header stats, "Top Risks", an
/// optional "Test Gaps" section, and "Recommended Actions".
pub fn review_pack_to_markdown(pack: &ReviewPack) -> String {
    let mut md = String::new();
    md.push_str("# Review Pack\n\n");
    md.push_str(&format!(
        "- **Repo/Branch**: {}/{}\n- **Head**: `{}`\n- **Risk Score**: {:.2}\n- **Confidence**: {:.2}\n- **Summary**: {}\n\n",
        pack.repo, pack.branch, pack.head, pack.risk_score, pack.confidence, pack.summary
    ));
    md.push_str("## Top Risks\n\n");
    for risk in &pack.top_risks {
        md.push_str(&format!(
            "- `{}` [{}] {} - {}\n",
            risk.id, risk.severity, risk.title, risk.details
        ));
    }
    md.push('\n');
    if !pack.test_gaps.is_empty() {
        md.push_str("## Test Gaps\n\n");
        for gap in &pack.test_gaps {
            md.push_str(&format!("- {}\n", gap));
        }
        md.push('\n');
    }
    md.push_str("## Recommended Actions\n\n");
    for action in &pack.recommended_actions {
        md.push_str(&format!(
            "- `{}` ({}) {} - {}\n",
            action.id, action.priority, action.title, action.reason
        ));
    }
    md
}
/// Render a list of actions as a Markdown bullet list under "# Next Actions".
pub fn actions_to_markdown(actions: &[ActionRecommendation]) -> String {
    let rows: String = actions
        .iter()
        .map(|action| {
            format!(
                "- `{}` ({}) {} - {}\n",
                action.id, action.priority, action.title, action.reason
            )
        })
        .collect();
    format!("# Next Actions\n\n{}", rows)
}
/// Render a [`HandoffContext`] header (target, timestamp, prompt) as Markdown.
pub fn handoff_to_markdown(handoff: &HandoffContext) -> String {
    let mut md = String::from("# Handoff Context\n\n");
    md.push_str(&format!("- **Target**: {}\n", handoff.target));
    md.push_str(&format!("- **Generated**: {}\n", handoff.generated_at));
    md.push_str(&format!("- **Prompt**: {}\n", handoff.prompt));
    md
}
/// Local convenience for filtering health alerts by severity.
trait AlertSeverityExt {
    /// True for `Warning` or `Critical` alerts. (Name says "error" but the
    /// matched variant is `Critical` — kept for compatibility with callers.)
    fn is_warning_or_error(&self) -> bool;
}
impl AlertSeverityExt for crate::stats::AlertSeverity {
    fn is_warning_or_error(&self) -> bool {
        matches!(
            self,
            crate::stats::AlertSeverity::Warning | crate::stats::AlertSeverity::Critical
        )
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event::{GitEvent, GitEventKind};
use crate::stats::{
AggregationLevel, BusFactorAnalysis, BusFactorEntry, BusFactorRisk, CodeOwnership,
CodeOwnershipEntry, TechDebtAnalysis, TechDebtEntry, TechDebtLevel,
};
use crate::stats::{CommitImpactScore, FileHeatmap, FileHeatmapEntry};
use chrono::Local;
    // Fixture: a small commit event with fixed change stats (10 added / 5 deleted).
    fn make_event(hash: &str, author: &str, message: &str) -> GitEvent {
        GitEvent {
            kind: GitEventKind::Commit,
            short_hash: hash.to_string(),
            message: message.to_string(),
            author: author.to_string(),
            timestamp: Local::now(),
            files_added: 10,
            files_deleted: 5,
            parent_hashes: vec![],
            branch_labels: vec![],
            session_id: None,
            inferred_intent: None,
        }
    }
    // Fixture: an event large enough (200 + 150 >= LARGE_CHANGE_LINE_THRESHOLD)
    // to trip the fallback high-impact check in `compute_signals_for_event`.
    fn make_large_event(hash: &str) -> GitEvent {
        GitEvent {
            kind: GitEventKind::Commit,
            short_hash: hash.to_string(),
            message: "large change".to_string(),
            author: "dev".to_string(),
            timestamp: Local::now(),
            files_added: 200,
            files_deleted: 150,
            parent_hashes: vec![],
            branch_labels: vec![],
            session_id: None,
            inferred_intent: None,
        }
    }
    // Fixture: heatmap with no files.
    fn empty_heatmap() -> FileHeatmap {
        FileHeatmap {
            files: vec![],
            total_files: 0,
            aggregation_level: AggregationLevel::Files,
        }
    }
    // Fixture: heatmap ordered hottest-first (change counts descend with index).
    fn heatmap_with_files(paths: &[&str]) -> FileHeatmap {
        let max = paths.len();
        FileHeatmap {
            files: paths
                .iter()
                .enumerate()
                .map(|(i, p)| FileHeatmapEntry {
                    path: p.to_string(),
                    change_count: max - i,
                    max_changes: max,
                })
                .collect(),
            total_files: paths.len(),
            aggregation_level: AggregationLevel::Files,
        }
    }
    // Fixture: impact analysis with no scored commits.
    fn empty_impact() -> crate::stats::CommitImpactAnalysis {
        crate::stats::CommitImpactAnalysis {
            commits: vec![],
            total_commits: 0,
            avg_score: 0.0,
            max_score: 0.0,
            high_impact_count: 0,
        }
    }
    // Fixture: impact analysis containing exactly one commit with the given score.
    fn impact_with_commit(hash: &str, score: f64) -> crate::stats::CommitImpactAnalysis {
        crate::stats::CommitImpactAnalysis {
            commits: vec![CommitImpactScore {
                commit_hash: hash.to_string(),
                commit_message: "msg".to_string(),
                author: "dev".to_string(),
                date: Local::now(),
                files_changed: 5,
                insertions: 10,
                deletions: 5,
                score,
                file_score: score * 0.4,
                change_score: score * 0.4,
                heat_score: score * 0.2,
            }],
            total_commits: 1,
            avg_score: score,
            max_score: score,
            high_impact_count: if score >= 0.7 { 1 } else { 0 },
        }
    }
    // Fixture: ownership analysis with no entries.
    fn empty_ownership() -> CodeOwnership {
        CodeOwnership {
            entries: vec![],
            total_files: 0,
        }
    }
    // Fixture: one file owned 95% by a single author — above the
    // OWNERSHIP_CONCENTRATION_THRESHOLD of 85%.
    fn ownership_with_concentrated(path: &str) -> CodeOwnership {
        CodeOwnership {
            entries: vec![CodeOwnershipEntry {
                path: path.to_string(),
                primary_author: "solo_dev".to_string(),
                primary_commits: 95,
                total_commits: 100,
                depth: 0,
                is_directory: false,
            }],
            total_files: 1,
        }
    }
    // Fixture: bus-factor analysis with no entries.
    fn empty_bus_factor() -> BusFactorAnalysis {
        BusFactorAnalysis {
            entries: vec![],
            high_risk_count: 0,
            medium_risk_count: 0,
            total_paths_analyzed: 0,
        }
    }
    // Fixture: one path with bus factor 1 (knowledge-risk trigger).
    fn bus_factor_with_risk(path: &str) -> BusFactorAnalysis {
        BusFactorAnalysis {
            entries: vec![BusFactorEntry {
                path: path.to_string(),
                bus_factor: 1,
                contributors: vec![],
                total_commits: 50,
                risk_level: BusFactorRisk::High,
                is_directory: false,
            }],
            high_risk_count: 1,
            medium_risk_count: 0,
            total_paths_analyzed: 1,
        }
    }
    // Fixture: tech-debt analysis with no entries.
    fn empty_tech_debt() -> TechDebtAnalysis {
        TechDebtAnalysis {
            entries: vec![],
            avg_score: 0.0,
            high_debt_count: 0,
            total_files_analyzed: 0,
        }
    }
    // Fixture: one path scoring 0.85 — above TECH_DEBT_RISK_THRESHOLD (0.7).
    fn tech_debt_with_risk(path: &str) -> TechDebtAnalysis {
        TechDebtAnalysis {
            entries: vec![TechDebtEntry {
                path: path.to_string(),
                score: 0.85,
                churn_score: 0.9,
                complexity_score: 0.8,
                age_score: 0.7,
                change_count: 20,
                total_changes: 500,
                debt_level: TechDebtLevel::High,
            }],
            avg_score: 0.85,
            high_debt_count: 1,
            total_files_analyzed: 1,
        }
    }
#[test]
fn signal_set_glyphs_all_false() {
let s = SignalSet {
high_impact: false,
hot_file: false,
knowledge_risk: false,
};
assert_eq!(s.glyphs(), "...");
}
#[test]
fn signal_set_glyphs_all_true() {
let s = SignalSet {
high_impact: true,
hot_file: true,
knowledge_risk: true,
};
assert_eq!(s.glyphs(), "!~?");
}
#[test]
fn signal_set_glyphs_mixed() {
let s = SignalSet {
high_impact: true,
hot_file: false,
knowledge_risk: true,
};
assert_eq!(s.glyphs(), "!.?");
}
#[test]
fn signal_set_glyphs_only_hot_file() {
let s = SignalSet {
high_impact: false,
hot_file: true,
knowledge_risk: false,
};
assert_eq!(s.glyphs(), ".~.");
}
#[test]
fn signals_no_impact_no_hot_no_risk() {
let event = make_event("abc123", "dev", "small fix");
let files = vec!["src/main.rs".to_string()];
let heatmap = empty_heatmap();
let impact = empty_impact();
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(!signals.high_impact);
assert!(!signals.hot_file);
assert!(!signals.knowledge_risk);
}
#[test]
fn signals_high_impact_from_score() {
let event = make_event("abc123", "dev", "big refactor");
let files = vec!["src/main.rs".to_string()];
let heatmap = empty_heatmap();
let impact = impact_with_commit("abc123", 0.8);
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(signals.high_impact);
}
#[test]
fn signals_not_high_impact_below_threshold() {
let event = make_event("abc123", "dev", "minor fix");
let files = vec!["src/main.rs".to_string()];
let heatmap = empty_heatmap();
let impact = impact_with_commit("abc123", 0.5);
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(!signals.high_impact);
}
#[test]
fn signals_high_impact_from_large_change_fallback() {
let event = make_large_event("abc123");
let files = vec!["src/main.rs".to_string()];
let heatmap = empty_heatmap();
let impact = empty_impact(); let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(signals.high_impact);
}
#[test]
fn signals_hot_file_detected() {
let event = make_event("abc123", "dev", "touch hot file");
let files = vec!["src/hot.rs".to_string()];
let heatmap = heatmap_with_files(&["src/hot.rs", "src/other.rs"]);
let impact = empty_impact();
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(signals.hot_file);
}
#[test]
fn signals_hot_file_not_in_top() {
let event = make_event("abc123", "dev", "touch cold file");
let files = vec!["src/cold.rs".to_string()];
let heatmap = heatmap_with_files(&["src/hot.rs"]);
let impact = empty_impact();
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(!signals.hot_file);
}
#[test]
fn signals_knowledge_risk_from_bus_factor() {
let event = make_event("abc123", "dev", "risky");
let files = vec!["src/risky.rs".to_string()];
let heatmap = empty_heatmap();
let impact = empty_impact();
let ownership = empty_ownership();
let bus = bus_factor_with_risk("src/risky.rs");
let debt = empty_tech_debt();
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(signals.knowledge_risk);
}
#[test]
fn signals_knowledge_risk_from_tech_debt() {
let event = make_event("abc123", "dev", "debt");
let files = vec!["src/debt.rs".to_string()];
let heatmap = empty_heatmap();
let impact = empty_impact();
let ownership = empty_ownership();
let bus = empty_bus_factor();
let debt = tech_debt_with_risk("src/debt.rs");
let signals = compute_signals_for_event(
&event,
&files,
&SignalContext {
heatmap: &heatmap,
impact: &impact,
ownership: &ownership,
bus_factor: &bus,
tech_debt: &debt,
},
);
assert!(signals.knowledge_risk);
}
#[test]
fn signals_knowledge_risk_from_ownership_concentration() {
    // Concentrated ownership of the touched file alone should raise the
    // knowledge-risk signal; bus factor and tech debt stay empty.
    let evt = make_event("abc123", "dev", "concentrated");
    let touched = vec!["src/solo.rs".to_string()];
    let heatmap = empty_heatmap();
    let impact = empty_impact();
    let ownership = ownership_with_concentrated("src/solo.rs");
    let bus = empty_bus_factor();
    let debt = empty_tech_debt();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus,
        tech_debt: &debt,
    };
    assert!(compute_signals_for_event(&evt, &touched, &ctx).knowledge_risk);
}
#[test]
fn risk_score_zero_for_perfect_health_no_risks() {
    // A perfectly healthy pack, all signals off, no risks and no test
    // gaps: the computed risk should be non-negative and essentially zero.
    let summary = ContextSummary {
        generated_at: "now".to_string(),
        branch: "main".to_string(),
        ahead: 0,
        behind: 0,
        dirty_files: 0,
        hot_path: None,
        selected_hash: None,
    };
    let health = PackHealth {
        overall_score: 100,
        level: "healthy".to_string(),
        warnings: vec![],
        confidence: "high".to_string(),
    };
    let pack = ContextPack {
        summary,
        selected_commit: None,
        signals: Some(SignalSet {
            high_impact: false,
            hot_file: false,
            knowledge_risk: false,
        }),
        hot_files: vec![],
        ownership: None,
        coupling: vec![],
        health,
    };
    let score = compute_risk_score(&pack, &[], &[]);
    assert!(score >= 0.0);
    assert!(score < 0.01, "Expected near-zero risk, got {}", score);
}
#[test]
fn risk_score_increases_with_low_health() {
    // Two packs identical except for health: the unhealthy one must carry
    // a strictly higher risk score.
    let healthy = ContextPack {
        summary: ContextSummary {
            generated_at: "now".to_string(),
            branch: "main".to_string(),
            ahead: 0,
            behind: 0,
            dirty_files: 0,
            hot_path: None,
            selected_hash: None,
        },
        selected_commit: None,
        signals: None,
        hot_files: vec![],
        ownership: None,
        coupling: vec![],
        health: PackHealth {
            overall_score: 100,
            level: "healthy".to_string(),
            warnings: vec![],
            confidence: "high".to_string(),
        },
    };
    // Struct-update syntax keeps every other field in sync with `healthy`.
    let unhealthy = ContextPack {
        health: PackHealth {
            overall_score: 30,
            level: "poor".to_string(),
            warnings: vec![],
            confidence: "low".to_string(),
        },
        ..healthy.clone()
    };
    let low = compute_risk_score(&healthy, &[], &[]);
    let high = compute_risk_score(&unhealthy, &[], &[]);
    assert!(high > low);
}
#[test]
fn risk_score_increases_with_signals() {
    // Decent health (80) but all three signals raised: the accumulated
    // signal penalties should push the score above 0.5.
    let summary = ContextSummary {
        generated_at: "now".to_string(),
        branch: "main".to_string(),
        ahead: 0,
        behind: 0,
        dirty_files: 0,
        hot_path: None,
        selected_hash: None,
    };
    let pack = ContextPack {
        summary,
        selected_commit: None,
        signals: Some(SignalSet {
            high_impact: true,
            hot_file: true,
            knowledge_risk: true,
        }),
        hot_files: vec![],
        ownership: None,
        coupling: vec![],
        health: PackHealth {
            overall_score: 80,
            level: "good".to_string(),
            warnings: vec![],
            confidence: "high".to_string(),
        },
    };
    let score = compute_risk_score(&pack, &[], &[]);
    assert!(score > 0.5, "Expected > 0.5, got {}", score);
}
#[test]
fn risk_score_capped_at_1() {
    // Pile on every penalty source at once — zero health, all signals,
    // ten high-severity risks, four test gaps — and verify the score is
    // clamped to exactly 1.0.
    let pack = ContextPack {
        summary: ContextSummary {
            generated_at: "now".to_string(),
            branch: "main".to_string(),
            ahead: 0,
            behind: 0,
            dirty_files: 0,
            hot_path: None,
            selected_hash: None,
        },
        selected_commit: None,
        signals: Some(SignalSet {
            high_impact: true,
            hot_file: true,
            knowledge_risk: true,
        }),
        hot_files: vec![],
        ownership: None,
        coupling: vec![],
        health: PackHealth {
            overall_score: 0,
            level: "critical".to_string(),
            warnings: vec![],
            confidence: "low".to_string(),
        },
    };
    let high_risks: Vec<ReviewRisk> = (0..10)
        .map(|i| ReviewRisk {
            id: format!("r{}", i),
            title: "risk".to_string(),
            severity: "high".to_string(),
            details: "d".to_string(),
        })
        .collect();
    let gaps: Vec<String> = (1..=4).map(|i| format!("g{}", i)).collect();
    let score = compute_risk_score(&pack, &high_risks, &gaps);
    assert!(
        (score - 1.0).abs() < f64::EPSILON,
        "Score should be capped at 1.0"
    );
}
#[test]
fn confidence_base_with_no_evidence() {
    // No evidence links and zero risk/gap counts: confidence must land
    // exactly on the CONFIDENCE_BASE floor.
    let no_evidence: Vec<EvidenceLink> = Vec::new();
    let confidence = compute_confidence(&no_evidence, 0, 0);
    assert!((confidence - CONFIDENCE_BASE).abs() < f64::EPSILON);
}
#[test]
fn confidence_increases_with_evidence() {
    // Two weighted evidence links should lift confidence above the base.
    let evidence: Vec<EvidenceLink> = [
        ("health_score", "overall=80", 0.35),
        ("signals", "!~?", 0.30),
    ]
    .iter()
    .map(|&(src, reference, w)| EvidenceLink {
        source_type: src.to_string(),
        source_ref: reference.to_string(),
        weight: w,
    })
    .collect();
    assert!(compute_confidence(&evidence, 0, 0) > CONFIDENCE_BASE);
}
#[test]
fn confidence_capped_at_1() {
    // Saturate with 20 full-weight links plus nonzero risk/gap density;
    // the result must be clamped to exactly 1.0.
    let evidence: Vec<EvidenceLink> = (0..20)
        .map(|idx| EvidenceLink {
            source_type: format!("src{}", idx),
            source_ref: "ref".to_string(),
            weight: 1.0,
        })
        .collect();
    let clamped = compute_confidence(&evidence, 10, 10);
    assert!((clamped - 1.0).abs() < f64::EPSILON);
}
#[test]
fn confidence_density_boost_from_risk_count() {
    // Even with zero evidence links, a nonzero risk count alone should
    // boost confidence above the empty baseline.
    let no_evidence: Vec<EvidenceLink> = Vec::new();
    let baseline = compute_confidence(&no_evidence, 0, 0);
    let boosted = compute_confidence(&no_evidence, 3, 0);
    assert!(boosted > baseline);
}
#[test]
fn verify_patch_risk_low() {
    // A low risk score (0.2) maps to the "low_risk" verdict.
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "summary".to_string(),
        risk_score: 0.2,
        confidence: 0.8,
        top_risks: vec![],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![],
        evidence: vec![],
    };
    assert_eq!(verify_patch_risk(&pack)["verdict"], "low_risk");
}
#[test]
fn verify_patch_risk_needs_review() {
    // A mid-range risk score (0.5) maps to the "needs_review" verdict.
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "summary".to_string(),
        risk_score: 0.5,
        confidence: 0.7,
        top_risks: vec![],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![],
        evidence: vec![],
    };
    assert_eq!(verify_patch_risk(&pack)["verdict"], "needs_review");
}
#[test]
fn verify_patch_risk_high() {
    // A high risk score (0.9) yields the "high_risk" verdict, and the
    // single high-severity risk shows up in the critical-risks count.
    let risk = ReviewRisk {
        id: "r1".to_string(),
        title: "risk".to_string(),
        severity: "high".to_string(),
        details: "d".to_string(),
    };
    let action = ActionRecommendation {
        id: "act-1".to_string(),
        title: "t".to_string(),
        priority: "P1".to_string(),
        reason: "r".to_string(),
        command_hint: None,
    };
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "summary".to_string(),
        risk_score: 0.9,
        confidence: 0.9,
        top_risks: vec![risk],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![action],
        evidence: vec![],
    };
    let result = verify_patch_risk(&pack);
    assert_eq!(result["verdict"], "high_risk");
    assert_eq!(result["critical_risks"], 1);
}
#[test]
fn explain_recommendation_found() {
    // Looking up an existing action id must yield an explanation that
    // mentions the action's title, priority, command hint, and the pack's
    // top risk.
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "s".to_string(),
        risk_score: 0.5,
        confidence: 0.7,
        top_risks: vec![ReviewRisk {
            id: "risk-1".to_string(),
            title: "Primary risk".to_string(),
            severity: "high".to_string(),
            details: "d".to_string(),
        }],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![ActionRecommendation {
            id: "act-review-owner".to_string(),
            title: "Request owner review".to_string(),
            priority: "P1".to_string(),
            reason: "High-impact change".to_string(),
            command_hint: Some("git log".to_string()),
        }],
        evidence: vec![],
    };
    let text = explain_recommendation(&pack, "act-review-owner")
        .expect("known action id must produce an explanation");
    for needle in ["Request owner review", "P1", "git log", "Primary risk"] {
        assert!(text.contains(needle), "missing {:?} in explanation", needle);
    }
}
#[test]
fn explain_recommendation_not_found() {
    // An id that matches no recommended action must yield None.
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "s".to_string(),
        risk_score: 0.5,
        confidence: 0.7,
        top_risks: vec![],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![],
        evidence: vec![],
    };
    let missing = explain_recommendation(&pack, "nonexistent");
    assert!(missing.is_none());
}
#[test]
fn build_next_actions_returns_cloned_actions() {
    // The next-actions builder should hand back the pack's recommended
    // actions unchanged (same count, same ids).
    let action = ActionRecommendation {
        id: "act-1".to_string(),
        title: "Do something".to_string(),
        priority: "P1".to_string(),
        reason: "Because".to_string(),
        command_hint: None,
    };
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "s".to_string(),
        risk_score: 0.0,
        confidence: 0.0,
        top_risks: vec![],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![action],
        evidence: vec![],
    };
    let result = build_next_actions(&pack);
    assert_eq!(result.len(), 1);
    assert_eq!(result[0].id, "act-1");
}
#[test]
fn summary_to_markdown_contains_branch() {
    // The rendered summary must surface the branch name, ahead count,
    // hot path, and selected hash.
    let summary = ContextSummary {
        generated_at: "2025-01-01".to_string(),
        branch: "feature/test".to_string(),
        ahead: 3,
        behind: 1,
        dirty_files: 2,
        hot_path: Some("src/main.rs".to_string()),
        selected_hash: Some("abc123".to_string()),
    };
    let md = summary_to_markdown(&summary);
    for needle in ["feature/test", "3", "src/main.rs", "abc123"] {
        assert!(md.contains(needle), "missing {:?}", needle);
    }
}
#[test]
fn summary_to_markdown_handles_none() {
    // Both optional fields as None: rendering must still succeed and the
    // output must contain a "-" somewhere.
    // NOTE(review): presumably "-" is the None placeholder in the
    // template — the assertion is weak; confirm against summary_to_markdown.
    let summary = ContextSummary {
        generated_at: "now".to_string(),
        branch: "main".to_string(),
        ahead: 0,
        behind: 0,
        dirty_files: 0,
        hot_path: None,
        selected_hash: None,
    };
    assert!(summary_to_markdown(&summary).contains("-"));
}
#[test]
fn pack_to_markdown_contains_sections() {
    // A pack with a selected commit, signals, one hot file, and a warning
    // should emit every major markdown section plus the hot-file path.
    let commit = PackCommit {
        hash: "abc123".to_string(),
        author: "dev".to_string(),
        date: "2025-01-01".to_string(),
        message: "test commit".to_string(),
        files_changed: 3,
        insertions: 10,
        deletions: 5,
        impact_score: 0.6,
    };
    let pack = ContextPack {
        summary: ContextSummary {
            generated_at: "now".to_string(),
            branch: "main".to_string(),
            ahead: 0,
            behind: 0,
            dirty_files: 0,
            hot_path: None,
            selected_hash: None,
        },
        selected_commit: Some(commit),
        signals: Some(SignalSet {
            high_impact: true,
            hot_file: false,
            knowledge_risk: false,
        }),
        hot_files: vec![PackHotFile {
            path: "src/hot.rs".to_string(),
            changes: 10,
            heat: 0.9,
        }],
        ownership: None,
        coupling: vec![],
        health: PackHealth {
            overall_score: 85,
            level: "good".to_string(),
            warnings: vec!["Some warning".to_string()],
            confidence: "high".to_string(),
        },
    };
    let md = pack_to_markdown(&pack);
    for needle in [
        "# Insight Pack",
        "## Selected Commit",
        "## Signals",
        "## Hot Files",
        "## Warnings",
        "src/hot.rs",
    ] {
        assert!(md.contains(needle), "missing {:?}", needle);
    }
}
#[test]
fn review_pack_to_markdown_contains_sections() {
    // The review-pack renderer should include the header, repo name, and
    // a heading for each populated section.
    let pack = ReviewPack {
        repo: "myrepo".to_string(),
        branch: "main".to_string(),
        head: "abc123".to_string(),
        summary: "test summary".to_string(),
        risk_score: 0.5,
        confidence: 0.8,
        top_risks: vec![ReviewRisk {
            id: "r1".to_string(),
            title: "A risk".to_string(),
            severity: "high".to_string(),
            details: "details".to_string(),
        }],
        test_gaps: vec!["gap1".to_string()],
        owner_candidates: vec![],
        recommended_actions: vec![ActionRecommendation {
            id: "a1".to_string(),
            title: "action".to_string(),
            priority: "P1".to_string(),
            reason: "reason".to_string(),
            command_hint: None,
        }],
        evidence: vec![],
    };
    let md = review_pack_to_markdown(&pack);
    for needle in [
        "# Review Pack",
        "myrepo",
        "## Top Risks",
        "## Test Gaps",
        "## Recommended Actions",
    ] {
        assert!(md.contains(needle), "missing {:?}", needle);
    }
}
#[test]
fn actions_to_markdown_output() {
    // Rendered actions carry the heading plus the action's title and
    // priority.
    let actions = vec![ActionRecommendation {
        id: "act-1".to_string(),
        title: "Do X".to_string(),
        priority: "P1".to_string(),
        reason: "Because Y".to_string(),
        command_hint: Some("git cmd".to_string()),
    }];
    let md = actions_to_markdown(&actions);
    for needle in ["# Next Actions", "Do X", "P1"] {
        assert!(md.contains(needle), "missing {:?}", needle);
    }
}
#[test]
fn handoff_to_markdown_output() {
    // A handoff built for the "claude" target renders with the handoff
    // header and echoes the target name.
    let pack = ReviewPack {
        repo: "test".to_string(),
        branch: "main".to_string(),
        head: "abc".to_string(),
        summary: "s".to_string(),
        risk_score: 0.0,
        confidence: 0.0,
        top_risks: vec![],
        test_gaps: vec![],
        owner_candidates: vec![],
        recommended_actions: vec![],
        evidence: vec![],
    };
    let md = handoff_to_markdown(&build_handoff_context(&pack, "claude"));
    assert!(md.contains("# Handoff Context"));
    assert!(md.contains("claude"));
}
#[test]
fn alert_severity_warning_is_warning_or_error() {
    // Warning severity counts as warning-or-error.
    let sev = crate::stats::AlertSeverity::Warning;
    assert!(sev.is_warning_or_error());
}
#[test]
fn alert_severity_critical_is_warning_or_error() {
    // Critical severity counts as warning-or-error.
    let sev = crate::stats::AlertSeverity::Critical;
    assert!(sev.is_warning_or_error());
}
#[test]
fn alert_severity_info_is_not_warning_or_error() {
    // Info severity must NOT count as warning-or-error.
    let sev = crate::stats::AlertSeverity::Info;
    assert!(!sev.is_warning_or_error());
}
/// Convenience fixture: build the full tuple of all-empty analysis inputs
/// needed to assemble a `SignalContext` in tests.
fn make_empty_ctx() -> (
    FileHeatmap,
    crate::stats::CommitImpactAnalysis,
    CodeOwnership,
    BusFactorAnalysis,
    TechDebtAnalysis,
) {
    let heatmap = empty_heatmap();
    let impact = empty_impact();
    let ownership = empty_ownership();
    let bus = empty_bus_factor();
    let debt = empty_tech_debt();
    (heatmap, impact, ownership, bus, debt)
}
#[test]
fn signals_empty_commit_list() {
    // With no commits there is no evidence at all. The original version of
    // this test discarded both results (`let _ = ...`) and asserted
    // nothing; assert the one property we can state from here: the
    // computed confidence stays within [0, 1].
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    // Constructing an all-empty SignalContext must be possible (smoke check).
    let _ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    let conf = compute_confidence(&[], 0, 0);
    assert!(
        (0.0..=1.0).contains(&conf),
        "confidence out of range: {}",
        conf
    );
}
#[test]
fn signals_single_commit() {
    // A single commit with no touched files over all-empty analyses must
    // raise neither the hot-file nor the knowledge-risk signal.
    let evt = make_event("abc1234", "Alice", "initial commit");
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    let signals = compute_signals_for_event(&evt, &[], &ctx);
    assert!(!signals.hot_file);
    assert!(!signals.knowledge_risk);
}
#[test]
fn signals_same_author_all_commits() {
    // Ten commits by the same author over all-empty analyses: none of
    // them should register as touching a hot file.
    let events: Vec<GitEvent> = (0..10)
        .map(|i| make_event(&format!("abc{:04}", i), "Alice", "commit"))
        .collect();
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    // Iterate the events directly; the previous intermediate
    // `Vec<&GitEvent>` was a needless collect (clippy: needless_collect).
    for e in &events {
        let signals = compute_signals_for_event(e, &[], &ctx);
        assert!(!signals.hot_file);
    }
}
#[test]
fn signals_large_file_change() {
    // 500 + 500 changes exceed LARGE_CHANGE_LINE_THRESHOLD (300), so the
    // commit must be flagged high-impact.
    // NOTE(review): `files_added`/`files_deleted` appear to count lines,
    // given the threshold name — confirm against GitEvent's definition.
    let mut evt = make_event("abc1234", "Bob", "large refactor");
    evt.files_added = 500;
    evt.files_deleted = 500;
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    assert!(compute_signals_for_event(&evt, &[], &ctx).high_impact);
}
#[test]
fn signals_binary_file_mixed() {
    // A commit mixing a binary asset with a source file, over an empty
    // heatmap, must not raise the hot-file signal.
    let evt = make_event("abc1234", "Carol", "add image");
    let touched = vec!["assets/logo.png".to_string(), "src/main.rs".to_string()];
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    assert!(!compute_signals_for_event(&evt, &touched, &ctx).hot_file);
}
#[test]
fn signals_long_author_name() {
    // A pathological 500-character author name should not disturb signal
    // computation.
    let author = "A".repeat(500);
    let evt = make_event("abc1234", &author, "commit");
    let (heatmap, impact, ownership, bus_factor, tech_debt) = make_empty_ctx();
    let ctx = SignalContext {
        heatmap: &heatmap,
        impact: &impact,
        ownership: &ownership,
        bus_factor: &bus_factor,
        tech_debt: &tech_debt,
    };
    assert!(!compute_signals_for_event(&evt, &[], &ctx).hot_file);
}
#[test]
fn confidence_capped_at_one() {
    // 100 full-weight evidence links plus saturated risk/gap counts blow
    // far past the cap. Assert the clamp lands on exactly 1.0 — strictly
    // stronger than the previous `<= 1.0 && > 0.0` check, and consistent
    // with `confidence_capped_at_1`, which already proves exact
    // saturation with far less evidence (20 links, counts of 10).
    let evidence: Vec<EvidenceLink> = (0..100)
        .map(|i| EvidenceLink {
            source_type: format!("evidence-{}", i),
            source_ref: format!("src/file{}.rs", i),
            weight: 1.0,
        })
        .collect();
    let conf = compute_confidence(&evidence, 100, 100);
    assert!((conf - 1.0).abs() < f64::EPSILON, "expected exact cap, got {}", conf);
}
}