cqs 1.25.0

Code intelligence and RAG for AI agents. Semantic search, call graphs, impact analysis, type dependencies, and smart context assembly — in single tool calls. 54 languages + L5X/L5K PLC exports, 91.2% Recall@1 (BGE-large), 0.951 MRR (296 queries). Local ML, GPU-accelerated.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
//! Review command — comprehensive diff review context

use anyhow::Result;

use cqs::ReviewResult;
use cqs::RiskLevel;

/// Runs the `review` command: builds comprehensive review context for a diff.
///
/// The diff comes either from stdin (`from_stdin`) or from `git diff <base>`.
/// Output is text or JSON; Mermaid is rejected up front. When `max_tokens`
/// is set, the caller/test lists are truncated to fit the budget.
pub(crate) fn cmd_review(
    ctx: &crate::cli::CommandContext,
    base: Option<&str>,
    from_stdin: bool,
    format: &crate::cli::OutputFormat,
    max_tokens: Option<usize>,
) -> Result<()> {
    let _span = tracing::info_span!("cmd_review", ?format, ?max_tokens).entered();

    // There is no sensible Mermaid rendering of a review report.
    if matches!(format, crate::cli::OutputFormat::Mermaid) {
        anyhow::bail!("Mermaid output is not supported for review — use text or json");
    }
    let as_json = matches!(format, crate::cli::OutputFormat::Json);

    // 1. Obtain the diff text.
    let diff_text = if from_stdin {
        crate::cli::commands::read_stdin()?
    } else {
        crate::cli::commands::run_git_diff(base)?
    };

    // 2. Run the review; bail out early when the diff touches nothing indexed.
    let mut review = match cqs::review_diff(&ctx.store, &diff_text, &ctx.root)? {
        Some(review) => review,
        None => {
            if as_json {
                println!("{}", serde_json::to_string_pretty(&empty_review_json())?);
            } else {
                println!("No indexed functions affected by this diff.");
            }
            return Ok(());
        }
    };

    // 3. Apply the optional token budget (truncates callers/tests lists).
    let token_count_used =
        max_tokens.map(|budget| apply_token_budget(&mut review, budget, as_json));

    // 4. Emit.
    if as_json {
        let mut output: serde_json::Value = serde_json::to_value(&review)?;
        if let Some(tokens) = token_count_used {
            output["token_count"] = serde_json::json!(tokens);
            output["token_budget"] = serde_json::json!(max_tokens.unwrap_or(0));
        }
        println!("{}", serde_json::to_string_pretty(&output)?);
    } else {
        display_review_text(&review, &ctx.root, token_count_used, max_tokens);
    }

    Ok(())
}

/// Public entry point for batch mode to apply token budgeting to review output.
///
/// Thin wrapper around [`apply_token_budget`]: truncates the caller and test
/// lists so the rendered review fits within `budget` tokens. Changed functions
/// and the risk summary are always included (small, essential); callers and
/// tests are the variable-size sections that get truncated. `json` adds
/// per-item overhead for JSON field names and structure tokens.
///
/// Returns the estimated total token count used.
pub(crate) fn apply_token_budget_public(
    review: &mut ReviewResult,
    budget: usize,
    json: bool,
) -> usize {
    apply_token_budget(review, budget, json)
}

/// Truncates the review's caller and test lists so the rendered output fits
/// within `budget` estimated tokens, returning the total estimate used.
///
/// Changed functions and notes are always charged in full; callers are given
/// two thirds of the remaining budget (prioritized over tests), tests get
/// whatever is left. When a list is truncated, at least one entry survives
/// and a warning is appended to `review.warnings`.
fn apply_token_budget(review: &mut ReviewResult, budget: usize, json: bool) -> usize {
    let _span = tracing::info_span!("review_token_budget", budget, json).entered();

    // Rough cost model. JSON wrapping adds ~35 tokens per item (field names,
    // paths, structural tokens); text output only pays the per-line estimate.
    let per_item_overhead = if json {
        crate::cli::commands::JSON_OVERHEAD_PER_RESULT
    } else {
        0
    };
    let caller_cost: usize = 15 + per_item_overhead;
    let test_cost: usize = 18 + per_item_overhead;
    let function_cost: usize = 12 + per_item_overhead;
    let note_cost: usize = 20 + per_item_overhead;
    const BASE_OVERHEAD: usize = 30; // risk header, section headers, etc.

    // Essentials first: changed functions and notes are never truncated.
    let mut used = BASE_OVERHEAD
        + review.changed_functions.len() * function_cost
        + review.relevant_notes.len() * note_cost;

    // Callers: 2/3 of the remaining budget (prioritized over tests).
    let original_callers = review.affected_callers.len();
    let max_callers = budget.saturating_sub(used) * 2 / 3 / caller_cost;
    if original_callers > max_callers {
        review.affected_callers.truncate(max_callers.max(1));
    }
    used += review.affected_callers.len() * caller_cost;

    // Tests: whatever budget is left after the callers were charged.
    let original_tests = review.affected_tests.len();
    let max_tests = budget.saturating_sub(used) / test_cost;
    if original_tests > max_tests {
        review.affected_tests.truncate(max_tests.max(1));
    }
    used += review.affected_tests.len() * test_cost;

    let truncated_callers = original_callers - review.affected_callers.len();
    let truncated_tests = original_tests - review.affected_tests.len();
    if truncated_callers > 0 || truncated_tests > 0 {
        tracing::info!(
            budget,
            used,
            truncated_callers,
            truncated_tests,
            "Token-budgeted review"
        );
        review.warnings.push(format!(
            "Output truncated to ~{} tokens (budget: {}). {} callers, {} tests omitted (min 1 caller + 1 test guaranteed).",
            used, budget, truncated_callers, truncated_tests
        ));
    }

    used
}

/// Builds the JSON payload emitted when a diff affects no indexed functions.
///
/// The shape mirrors a serialized [`ReviewResult`] so JSON consumers see a
/// consistent schema whether or not the review found anything. (Previously
/// the `warnings` field was missing here while the non-empty path, which
/// serializes `ReviewResult` directly, always includes it.)
///
/// # Returns
/// A `serde_json::Value` object with:
/// - `changed_functions`, `affected_callers`, `affected_tests`,
///   `relevant_notes`, `warnings`: empty arrays
/// - `risk_summary`: zero counts for high/medium/low, `overall` = `"low"`
/// - `stale_warning`: null
fn empty_review_json() -> serde_json::Value {
    serde_json::json!({
        "changed_functions": [],
        "affected_callers": [],
        "affected_tests": [],
        "relevant_notes": [],
        "risk_summary": { "high": 0, "medium": 0, "low": 0, "overall": "low" },
        "stale_warning": null,
        "warnings": []
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use cqs::{CallerDetail, DiffTestInfo, ReviewedFunction, RiskLevel, RiskScore, RiskSummary};
    use std::path::PathBuf;

    /// Builds a low-risk review fixture: one changed function plus the
    /// requested number of synthetic callers and tests.
    fn make_review(num_callers: usize, num_tests: usize) -> ReviewResult {
        let affected_callers: Vec<CallerDetail> = (0..num_callers)
            .map(|idx| CallerDetail {
                name: format!("caller_{}", idx),
                file: PathBuf::from(format!("src/c{}.rs", idx)),
                line: idx as u32 + 1,
                call_line: idx as u32 + 10,
                snippet: None,
            })
            .collect();

        let affected_tests: Vec<DiffTestInfo> = (0..num_tests)
            .map(|idx| DiffTestInfo {
                name: format!("test_{}", idx),
                file: PathBuf::from(format!("tests/t{}.rs", idx)),
                line: idx as u32 + 1,
                via: "direct".into(),
                call_depth: 1,
            })
            .collect();

        let test_ratio = if num_callers > 0 {
            (num_tests as f32 / num_callers as f32).min(1.0)
        } else {
            1.0
        };

        ReviewResult {
            changed_functions: vec![ReviewedFunction {
                name: "target_fn".into(),
                file: PathBuf::from("src/lib.rs"),
                line_start: 42,
                risk: RiskScore {
                    caller_count: num_callers,
                    test_count: num_tests,
                    test_ratio,
                    risk_level: RiskLevel::Low,
                    blast_radius: RiskLevel::Low,
                    score: 0.0,
                },
            }],
            affected_callers,
            affected_tests,
            relevant_notes: vec![],
            risk_summary: RiskSummary {
                high: 0,
                medium: 0,
                low: 1,
                overall: RiskLevel::Low,
            },
            stale_warning: None,
            warnings: vec![],
        }
    }

    #[test]
    fn test_apply_token_budget_preserves_when_fits() {
        // A generous budget must leave the review untouched.
        let mut review = make_review(3, 3);
        let used = apply_token_budget(&mut review, 5000, false);

        assert_eq!(
            review.affected_callers.len(),
            3,
            "All callers should be preserved within budget"
        );
        assert_eq!(
            review.affected_tests.len(),
            3,
            "All tests should be preserved within budget"
        );
        assert!(review.warnings.is_empty(), "No truncation warning expected");
        assert!(used > 0, "Token count should be positive");
    }

    #[test]
    fn test_apply_token_budget_truncates_when_over() {
        let mut review = make_review(100, 100);
        // Tiny budget: base overhead (30) + 1 function (12) = 42 tokens, leaving very little
        let budget = 100;
        let used = apply_token_budget(&mut review, budget, false);

        let callers_left = review.affected_callers.len();
        let tests_left = review.affected_tests.len();

        assert!(
            callers_left < 100,
            "Callers should be truncated, got {}",
            callers_left
        );
        assert!(
            tests_left < 100,
            "Tests should be truncated, got {}",
            tests_left
        );
        // At least 1 caller and 1 test guaranteed by the max(1) logic
        assert!(callers_left >= 1, "At least 1 caller guaranteed");
        assert!(tests_left >= 1, "At least 1 test guaranteed");
        assert!(
            !review.warnings.is_empty(),
            "Should have a truncation warning"
        );
        assert!(
            used <= budget + 50,
            "Used tokens ({used}) should be near budget ({budget})"
        );
    }
}

/// Renders a review to stdout as colorized, human-readable text.
///
/// Sections, in order: risk summary header (with `[used/budget T]` suffix
/// when both token counts are present), stale-index warning (stderr, so
/// piped stdout stays clean), changed functions with per-function risk,
/// affected callers, tests to re-run, warnings, and relevant notes.
fn display_review_text(
    review: &ReviewResult,
    _root: &std::path::Path,
    token_count_used: Option<usize>,
    max_tokens: Option<usize>,
) {
    use colored::Colorize;

    // Risk summary header. Match directly on the risk level instead of
    // round-tripping through a stringly-typed color name.
    let overall_str = review.risk_summary.overall.to_string();
    let colored_risk = match review.risk_summary.overall {
        RiskLevel::High => overall_str.red().bold().to_string(),
        RiskLevel::Medium => overall_str.yellow().bold().to_string(),
        RiskLevel::Low => overall_str.green().bold().to_string(),
    };
    let token_info = match (token_count_used, max_tokens) {
        (Some(used), Some(budget)) => format!(" [{}/{}T]", used, budget),
        _ => String::new(),
    };
    println!(
        "{} {} (high: {}, medium: {}, low: {}){}",
        "Risk:".bold(),
        colored_risk,
        review.risk_summary.high,
        review.risk_summary.medium,
        review.risk_summary.low,
        token_info,
    );

    // Stale warning goes to stderr so it doesn't pollute piped output.
    if let Some(ref stale) = review.stale_warning {
        eprintln!();
        eprintln!(
            "{} Index is stale for {} file(s):",
            "Warning:".yellow().bold(),
            stale.len()
        );
        for f in stale {
            eprintln!("  {}", f);
        }
    }

    // Changed functions with per-function risk and blast radius.
    println!();
    println!(
        "{} ({}):",
        "Changed functions".bold(),
        review.changed_functions.len()
    );
    for f in &review.changed_functions {
        let risk_indicator = match f.risk.risk_level {
            RiskLevel::High => format!("[{}]", "HIGH".red()),
            RiskLevel::Medium => format!("[{}]", "MED".yellow()),
            RiskLevel::Low => format!("[{}]", "LOW".green()),
        };
        // Only show blast radius when it diverges from the risk level.
        let blast_info = if f.risk.blast_radius != f.risk.risk_level {
            format!(", blast radius: {}", f.risk.blast_radius)
        } else {
            String::new()
        };
        println!(
            "  {} {} ({}:{}) — {} callers, {} tests{}",
            risk_indicator,
            f.name,
            f.file.display(),
            f.line_start,
            f.risk.caller_count,
            f.risk.test_count,
            blast_info,
        );
    }

    // Callers
    if review.affected_callers.is_empty() {
        println!();
        println!("{}", "No affected callers.".dimmed());
    } else {
        println!();
        println!(
            "{} ({}):",
            "Affected callers".cyan(),
            review.affected_callers.len()
        );
        for c in &review.affected_callers {
            println!(
                "  {} ({}:{}, call at line {})",
                c.name,
                c.file.display(),
                c.line,
                c.call_line
            );
        }
    }

    // Tests
    if review.affected_tests.is_empty() {
        println!();
        println!("{}", "No affected tests.".dimmed());
    } else {
        println!();
        println!(
            "{} ({}):",
            "Tests to re-run".yellow(),
            review.affected_tests.len()
        );
        for t in &review.affected_tests {
            println!(
                "  {} ({}:{}) [via {}, depth {}]",
                t.name,
                t.file.display(),
                t.line,
                t.via,
                t.call_depth
            );
        }
    }

    // Warnings (e.g. token-budget truncation)
    if !review.warnings.is_empty() {
        println!();
        for w in &review.warnings {
            println!("{} {}", "Warning:".yellow().bold(), w);
        }
    }

    // Notes
    if !review.relevant_notes.is_empty() {
        println!();
        println!(
            "{} ({}):",
            "Relevant notes".magenta(),
            review.relevant_notes.len()
        );
        for n in &review.relevant_notes {
            // NOTE(review): the negative/positive arms render empty strings.
            // These look like sentiment glyphs lost to an encoding issue —
            // confirm the intended characters before changing them.
            let sentiment_str = match n.sentiment {
                s if s <= -0.5 => "".to_string(),
                s if s >= 0.5 => "".to_string(),
                _ => "·".to_string(),
            };
            println!(
                "  {} {} ({})",
                sentiment_str,
                n.text,
                n.matching_files.join(", ")
            );
        }
    }
}