use console::style;
use std::io::Write;
use super::OutputContext;
use crate::cli::OutputFormat;
/// Prints a progress header like `[2/5] Action` when the output format is
/// text; does nothing for non-text (e.g. JSON) formats.
pub fn show_progress(ctx: &OutputContext, current: usize, total: usize, action: &str) {
    // Guard: progress decoration is only meaningful for human-readable output.
    let OutputFormat::Text = ctx.format else {
        return;
    };
    println!("\n[{current}/{total}] {}", style(action).cyan().bold());
}
/// Renders a short two-line preview (title and labels) in text mode,
/// followed by a blank separator line; no-op for other output formats.
pub fn show_preview(ctx: &OutputContext, title: &str, labels: &[String]) {
    if !matches!(ctx.format, OutputFormat::Text) {
        return;
    }
    println!(" {} {}", style("title:").dim(), style(title).bold());
    // Empty label list renders as a dimmed "none"; otherwise a comma-separated
    // list with each label tinted cyan.
    let rendered = match labels {
        [] => style("none").dim().to_string(),
        _ => {
            let parts: Vec<String> = labels
                .iter()
                .map(|label| style(label).cyan().to_string())
                .collect();
            parts.join(", ")
        }
    };
    println!(" {} {}", style("labels:").dim(), rendered);
    println!();
}
/// Writes `message` styled yellow (plus a trailing newline) to the given
/// writer. Taking `W: Write + ?Sized` keeps the function testable against an
/// in-memory buffer as well as usable with `dyn Write`.
pub fn show_dry_run_message<W: Write + ?Sized>(w: &mut W, message: &str) -> std::io::Result<()> {
    let styled = style(message).yellow();
    writeln!(w, "{styled}")
}
/// Prints fetch timing plus AI-analysis timing and token statistics.
///
/// Output is produced only when the context is verbose *and* the format is
/// text; in all other cases this is a no-op.
///
/// * `fetch_ms` — how long fetching the issue took, in milliseconds.
/// * `model` — model identifier shown alongside the analysis line.
/// * `duration_ms` — analysis wall-clock time, in milliseconds.
/// * `input_tokens` / `output_tokens` — token counts summed for display.
pub fn show_timing(
    ctx: &OutputContext,
    fetch_ms: u128,
    model: &str,
    duration_ms: u64,
    input_tokens: u64,
    output_tokens: u64,
) {
    // Guard clause instead of wrapping the whole body in a conditional.
    if !(ctx.is_verbose() && matches!(ctx.format, OutputFormat::Text)) {
        return;
    }
    println!(
        " {}",
        style(format!("Fetched issue in {fetch_ms}ms")).dim()
    );
    // Milliseconds -> seconds for display; f64 precision loss is fine here.
    #[allow(clippy::cast_precision_loss)]
    let duration_secs = duration_ms as f64 / 1000.0;
    // Saturate instead of `+`: a pathological pair of counts would panic in
    // debug builds; for a display-only total, clamping is the right behavior.
    let total_tokens = input_tokens.saturating_add(output_tokens);
    println!(
        " {} (model: {}) in {:.1}s ({} tokens)",
        style("AI analysis").dim(),
        style(model).cyan(),
        duration_secs,
        total_tokens
    );
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke tests: most of these functions print straight to stdout, so the
    // checks below mainly verify that each format/verbosity combination runs
    // without panicking. Only `show_dry_run_message` asserts on output, since
    // it writes to a caller-supplied buffer.

    #[test]
    fn test_show_progress_text_format() {
        show_progress(
            &OutputContext::from_cli(OutputFormat::Text, false),
            2,
            5,
            "Processing",
        );
    }

    #[test]
    fn test_show_progress_json_format() {
        show_progress(&OutputContext::from_cli(OutputFormat::Json, false), 1, 1, "Test");
    }

    #[test]
    fn test_show_preview_with_labels() {
        let labels: Vec<String> = ["bug", "help wanted"]
            .iter()
            .map(|s| (*s).to_string())
            .collect();
        show_preview(
            &OutputContext::from_cli(OutputFormat::Text, false),
            "Test Issue",
            &labels,
        );
    }

    #[test]
    fn test_show_preview_no_labels() {
        show_preview(
            &OutputContext::from_cli(OutputFormat::Text, false),
            "Test Issue",
            &[],
        );
    }

    #[test]
    fn test_show_preview_json_format() {
        show_preview(
            &OutputContext::from_cli(OutputFormat::Json, false),
            "Test",
            &["label".to_string()],
        );
    }

    #[test]
    fn test_show_dry_run_message() {
        let mut sink = Vec::new();
        show_dry_run_message(&mut sink, "Dry run - comment not posted.").unwrap();
        let rendered = String::from_utf8(sink).unwrap();
        assert!(rendered.contains("Dry run - comment not posted."));
    }

    #[test]
    fn test_show_timing_verbose() {
        show_timing(
            &OutputContext::from_cli(OutputFormat::Text, true),
            150,
            "gpt-4",
            2500,
            100,
            50,
        );
    }

    #[test]
    fn test_show_timing_quiet() {
        show_timing(
            &OutputContext::from_cli(OutputFormat::Json, false),
            150,
            "gpt-4",
            2500,
            100,
            50,
        );
    }

    #[test]
    fn test_show_timing_json_format() {
        show_timing(
            &OutputContext::from_cli(OutputFormat::Json, true),
            150,
            "gpt-4",
            2500,
            100,
            50,
        );
    }
}