// git_perf — audit.rs
1use crate::{
2    config,
3    data::{Commit, MeasurementData},
4    defaults,
5    measurement_retrieval::{self, summarize_measurements},
6    stats::{self, DispersionMethod, ReductionFunc, StatsWithUnit, VecAggregation},
7};
8use anyhow::{anyhow, bail, Result};
9use itertools::Itertools;
10use log::info;
11use sparklines::spark;
12use std::cmp::Ordering;
13use std::collections::HashSet;
14use std::iter;
15
/// Formats a z-score for display in audit output.
///
/// Finite values render as a space-prefixed number with two decimal
/// places; infinite and NaN values produce an empty string so they are
/// simply omitted from the report.
fn format_z_score_display(z_score: f64) -> String {
    if !z_score.is_finite() {
        return String::new();
    }
    format!(" {z_score:.2}")
}
26
/// Determines the direction arrow based on comparison of head and tail means.
///
/// Returns "↑" when head is greater, "↓" when smaller, and "→" when equal.
/// NaN inputs make both comparisons false, so they also yield "→" rather
/// than panicking.
fn get_direction_arrow(head_mean: f64, tail_mean: f64) -> &'static str {
    if head_mean > tail_mean {
        "↑"
    } else if head_mean < tail_mean {
        "↓"
    } else {
        "→"
    }
}
37
/// Outcome of auditing a single measurement (or measurement group).
#[derive(Debug, PartialEq)]
struct AuditResult {
    // Human-readable, possibly multi-line report to print for this audit.
    message: String,
    // True when the audit passed or was skipped for lack of data;
    // false when HEAD deviated significantly from the tail.
    passed: bool,
}
43
/// Resolved audit parameters for a specific measurement.
///
/// Produced by `resolve_audit_params`, which applies the precedence
/// CLI option -> measurement-specific config -> global config -> built-in default.
#[derive(Debug, PartialEq)]
pub(crate) struct ResolvedAuditParams {
    /// Minimum number of historical measurements required before the audit runs.
    pub min_count: u16,
    /// Reduction function used to summarize measurements per commit.
    pub summarize_by: ReductionFunc,
    /// z-score threshold beyond which a deviation counts as significant.
    pub sigma: f64,
    /// Dispersion statistic (stddev or MAD) used when computing the z-score.
    pub dispersion_method: DispersionMethod,
}
52
53/// Resolves audit parameters for a specific measurement with proper precedence:
54/// CLI option -> measurement-specific config -> global config -> built-in default
55///
56/// Note: When CLI provides min_count, the caller (audit_multiple) uses the same
57/// value for all measurements. When CLI is None, this function reads per-measurement config.
58pub(crate) fn resolve_audit_params(
59    measurement: &str,
60    cli_min_count: Option<u16>,
61    cli_summarize_by: Option<ReductionFunc>,
62    cli_sigma: Option<f64>,
63    cli_dispersion_method: Option<DispersionMethod>,
64) -> ResolvedAuditParams {
65    let min_count = cli_min_count
66        .or_else(|| config::audit_min_measurements(measurement))
67        .unwrap_or(defaults::DEFAULT_MIN_MEASUREMENTS);
68
69    let summarize_by = cli_summarize_by
70        .or_else(|| config::audit_aggregate_by(measurement).map(ReductionFunc::from))
71        .unwrap_or(ReductionFunc::Min);
72
73    let sigma = cli_sigma
74        .or_else(|| config::audit_sigma(measurement))
75        .unwrap_or(defaults::DEFAULT_SIGMA);
76
77    let dispersion_method = cli_dispersion_method
78        .or_else(|| {
79            Some(DispersionMethod::from(config::audit_dispersion_method(
80                measurement,
81            )))
82        })
83        .unwrap_or(DispersionMethod::StandardDeviation);
84
85    ResolvedAuditParams {
86        min_count,
87        summarize_by,
88        sigma,
89        dispersion_method,
90    }
91}
92
93/// Discovers all unique measurement names from commits that match the filters and selectors.
94/// This is used to efficiently find which measurements to audit when filters are provided.
95fn discover_matching_measurements(
96    commits: &[Result<Commit>],
97    filters: &[regex::Regex],
98    selectors: &[(String, String)],
99) -> Vec<String> {
100    let mut unique_measurements = HashSet::new();
101
102    for commit in commits.iter().flatten() {
103        for measurement in &commit.measurements {
104            // Check if measurement name matches any filter
105            if !crate::filter::matches_any_filter(&measurement.name, filters) {
106                continue;
107            }
108
109            // Check if measurement matches selectors
110            if !measurement.key_values_is_superset_of(selectors) {
111                continue;
112            }
113
114            // This measurement matches - add to set
115            unique_measurements.insert(measurement.name.clone());
116        }
117    }
118
119    // Convert to sorted vector for deterministic ordering
120    let mut result: Vec<String> = unique_measurements.into_iter().collect();
121    result.sort();
122    result
123}
124
125/// Compute group value combinations for splitting measurements by metadata keys.
126///
127/// Returns a vector of group values where each inner vector contains the values
128/// for the split keys. If no splits are specified, returns a single empty group.
129///
130/// # Errors
131/// Returns error if separate_by is non-empty but no measurements have all required keys
132fn compute_group_values(
133    commits: &[Result<Commit>],
134    measurement_name: &str,
135    selectors: &[(String, String)],
136    separate_by: &[String],
137) -> Result<Vec<Vec<String>>> {
138    if separate_by.is_empty() {
139        return Ok(vec![vec![]]);
140    }
141
142    let mut unique_groups = HashSet::new();
143
144    for commit in commits.iter().flatten() {
145        for measurement in &commit.measurements {
146            // Only consider measurements that match the name
147            if measurement.name != measurement_name {
148                continue;
149            }
150
151            // Check if measurement matches selectors
152            if !measurement.key_values_is_superset_of(selectors) {
153                continue;
154            }
155
156            // Extract values for separate_by keys
157            let values: Vec<String> = separate_by
158                .iter()
159                .filter_map(|key| measurement.key_values.get(key).cloned())
160                .collect();
161
162            // Only include if all keys are present
163            if values.len() == separate_by.len() {
164                unique_groups.insert(values);
165            }
166        }
167    }
168
169    if unique_groups.is_empty() {
170        bail!(
171            "Measurement '{}': Invalid separator supplied, no measurements have all required keys: {:?}",
172            measurement_name,
173            separate_by
174        );
175    }
176
177    // Convert to sorted vector for deterministic ordering
178    let mut result: Vec<Vec<String>> = unique_groups.into_iter().collect();
179    result.sort();
180    Ok(result)
181}
182
/// Formats a group label from separate_by keys and values.
/// Example: ["os", "arch"] with ["ubuntu", "x64"] -> "os=ubuntu/arch=x64"
fn format_group_label(separate_by: &[String], group_values: &[String]) -> String {
    let mut parts = Vec::with_capacity(separate_by.len());
    for (key, value) in separate_by.iter().zip(group_values) {
        parts.push(format!("{key}={value}"));
    }
    parts.join("/")
}
193
/// Audits every measurement matching `combined_patterns` over the history
/// reachable from `start_commit` (at most `max_count` commits).
///
/// Each matching measurement may be split into independent groups by the
/// metadata keys in `separate_by`; each group is audited on its own and a
/// pass/fail summary is printed when grouping is active. CLI-provided
/// `min_count`/`summarize_by`/`sigma`/`dispersion_method` override
/// per-measurement configuration (see `resolve_audit_params`).
///
/// # Errors
/// Fails on invalid patterns, overlapping selector/separate-by keys, missing
/// commits/measurements, or when any measurement (group) fails its audit.
#[allow(clippy::too_many_arguments)]
pub fn audit_multiple(
    start_commit: &str,
    max_count: usize,
    min_count: Option<u16>,
    selectors: &[(String, String)],
    summarize_by: Option<ReductionFunc>,
    sigma: Option<f64>,
    dispersion_method: Option<DispersionMethod>,
    combined_patterns: &[String],
    separate_by: &[String],
    _no_change_point_warning: bool, // TODO: Implement change point warning in Phase 2
) -> Result<()> {
    // Early return if patterns are empty - nothing to audit
    if combined_patterns.is_empty() {
        return Ok(());
    }

    // Validate that separate_by keys don't overlap with selectors (would produce contradictory filters)
    let selector_keys: std::collections::HashSet<&str> =
        selectors.iter().map(|(k, _)| k.as_str()).collect();
    for key in separate_by {
        if selector_keys.contains(key.as_str()) {
            bail!(
                "separate-by key '{}' already present in selectors; remove it from --selectors or --separate-by",
                key
            );
        }
    }

    // Compile combined regex patterns (measurements as exact matches + filter patterns)
    // early to fail fast on invalid patterns
    let filters = crate::filter::compile_filters(combined_patterns)?;

    // Phase 1: Walk commits ONCE (optimization: scan commits only once)
    // Collect into Vec so we can reuse the data for multiple measurements
    let all_commits: Vec<Result<Commit>> =
        measurement_retrieval::walk_commits_from(start_commit, max_count)?.collect();

    // Phase 2: Discover all measurements that match the combined patterns from the commit data
    // The combined_patterns already include both measurements (as exact regex) and filters (OR behavior)
    let measurements_to_audit = discover_matching_measurements(&all_commits, &filters, selectors);

    // If no measurements were discovered, provide appropriate error message
    if measurements_to_audit.is_empty() {
        // Check if we have any commits at all
        if all_commits.is_empty() {
            bail!("No commit at HEAD");
        }
        // Check if any commits have any measurements at all
        let has_any_measurements = all_commits.iter().any(|commit_result| {
            if let Ok(commit) = commit_result {
                !commit.measurements.is_empty()
            } else {
                false
            }
        });

        if !has_any_measurements {
            // No measurements exist in any commits - specific error for this case
            bail!("No measurement for HEAD");
        }
        // Measurements exist but don't match the patterns
        bail!("No measurements found matching the provided patterns");
    }

    // Overall failure flag plus per-group pass counters for the summary line.
    let mut failed = false;
    let mut total_groups = 0;
    let mut passed_groups = 0;

    // Phase 3: For each measurement, audit using the pre-loaded commit data
    for measurement in measurements_to_audit {
        let params = resolve_audit_params(
            &measurement,
            min_count,
            summarize_by,
            sigma,
            dispersion_method,
        );

        // Warn if max_count limits historical data below min_measurements requirement
        if (max_count as u16) < params.min_count {
            eprintln!(
                "⚠️  Warning: --max_count ({}) is less than min_measurements ({}) for measurement '{}'.",
                max_count, params.min_count, measurement
            );
            eprintln!(
                "   This limits available historical data and may prevent achieving statistical significance."
            );
        }

        // Compute groups for this measurement
        let groups = compute_group_values(&all_commits, &measurement, selectors, separate_by)?;

        // Audit each group independently
        for group_values in &groups {
            // Build combined selectors (original selectors + group selectors)
            let mut group_selectors = selectors.to_vec();
            for (key, value) in separate_by.iter().zip(group_values.iter()) {
                group_selectors.push((key.clone(), value.clone()));
            }

            // Format group label for display
            let group_label = if separate_by.is_empty() {
                String::new()
            } else {
                format!(" ({})", format_group_label(separate_by, group_values))
            };

            let result = audit_with_commits(
                &measurement,
                &all_commits,
                params.min_count,
                &group_selectors,
                params.summarize_by,
                params.sigma,
                params.dispersion_method,
            )?;

            // TODO(Phase 2): Add change point detection warning here
            // If !_no_change_point_warning, detect change points in current epoch
            // and warn if any exist, as they make z-score comparisons unreliable:
            //   ⚠️  WARNING: Change point detected in current epoch at commit a1b2c3d (+23.5%)
            //       Historical z-score comparison may be unreliable due to regime shift.
            //       Consider bumping epoch or investigating the change.
            // See docs/plans/change-point-detection.md for implementation details.

            // Print the result with group label
            if !separate_by.is_empty() {
                // Print header for the group
                println!("Auditing measurement \"{}\"{}:", measurement, group_label);
                // Indent the result message
                for line in result.message.lines() {
                    println!("  {}", line);
                }
                println!(); // Add blank line between groups
            } else {
                println!("{}", result.message);
            }

            // Group counters are only meaningful when grouping is active.
            if !separate_by.is_empty() {
                total_groups += 1;
                if result.passed {
                    passed_groups += 1;
                }
            }
            if !result.passed {
                failed = true;
            }
        }
    }

    // Print summary if grouping is active
    if !separate_by.is_empty() {
        if failed {
            println!(
                "Overall: FAILED ({}/{} groups passed)",
                passed_groups, total_groups
            );
        } else {
            println!(
                "Overall: PASSED ({}/{} groups passed)",
                passed_groups, total_groups
            );
        }
    }

    if failed {
        bail!("One or more measurements failed audit.");
    }

    Ok(())
}
367
/// Audits a measurement using pre-loaded commit data.
/// This is more efficient than the old `audit` function when auditing multiple measurements,
/// as it reuses the same commit data instead of walking commits multiple times.
///
/// Only measurements named `measurement` whose key/values are a superset of
/// `selectors` are considered. The first aggregate (newest commit) becomes
/// HEAD; remaining aggregates form the historical tail.
fn audit_with_commits(
    measurement: &str,
    commits: &[Result<Commit>],
    min_count: u16,
    selectors: &[(String, String)],
    summarize_by: ReductionFunc,
    sigma: f64,
    dispersion_method: DispersionMethod,
) -> Result<AuditResult> {
    // Convert Vec<Result<Commit>> into an iterator of Result<Commit> by cloning references
    // This is necessary because summarize_measurements expects an iterator of Result<Commit>
    let commits_iter = commits.iter().map(|r| match r {
        Ok(commit) => Ok(Commit {
            commit: commit.commit.clone(),
            title: commit.title.clone(),
            author: commit.author.clone(),
            measurements: commit.measurements.clone(),
        }),
        // anyhow::Error is not Clone; re-wrap the error message instead.
        Err(e) => Err(anyhow::anyhow!("{}", e)),
    });

    // Filter to only this specific measurement with matching selectors
    let filter_by =
        |m: &MeasurementData| m.name == measurement && m.key_values_is_superset_of(selectors);

    // NOTE(review): take_while_same_epoch presumably stops at the first epoch
    // boundary so only same-epoch data is compared — confirm in measurement_retrieval.
    let mut aggregates = measurement_retrieval::take_while_same_epoch(summarize_measurements(
        commits_iter,
        &summarize_by,
        &filter_by,
    ));

    // HEAD must exist and must carry a measurement value.
    let head = aggregates
        .next()
        .ok_or(anyhow!("No commit at HEAD"))
        .and_then(|s| {
            s.and_then(|cs| {
                cs.measurement
                    .map(|m| m.val)
                    .ok_or(anyhow!("No measurement for HEAD."))
            })
        })?;

    // Remaining aggregates with a measurement value form the tail;
    // the first walk error (if any) is propagated by try_collect.
    let tail: Vec<_> = aggregates
        .filter_map_ok(|cs| cs.measurement.map(|m| m.val))
        .try_collect()?;

    audit_with_data(
        measurement,
        head,
        tail,
        min_count,
        sigma,
        dispersion_method,
        summarize_by,
    )
}
427
/// Core audit logic that can be tested with mock data
/// This function contains all the mutation-tested logic paths
///
/// Compares `head` against the historical `tail` values: the audit is
/// skipped (and counted as passed) when fewer than `min_count` tail values
/// exist; otherwise it fails when the head's z-score exceeds `sigma`,
/// unless a configured minimum relative/absolute deviation threshold
/// rescues it.
///
/// # Panics
/// Panics if `min_count < 2` (the CLI enforces this in production).
fn audit_with_data(
    measurement: &str,
    head: f64,
    tail: Vec<f64>,
    min_count: u16,
    sigma: f64,
    dispersion_method: DispersionMethod,
    summarize_by: ReductionFunc,
) -> Result<AuditResult> {
    // Note: CLI enforces min_count >= 2 via clap::value_parser!(u16).range(2..)
    // Tests may use lower values for edge case testing, but production code
    // should never call this with min_count < 2
    assert!(min_count >= 2, "min_count must be at least 2");

    // Get unit for this measurement from config
    let unit = config::measurement_unit(measurement);
    let unit_str = unit.as_deref();

    let head_summary = stats::aggregate_measurements(iter::once(&head));
    let tail_summary = stats::aggregate_measurements(tail.iter());

    // Generate sparkline and calculate range for all measurements - used in both skip and normal paths
    let all_measurements = tail.into_iter().chain(iter::once(head)).collect::<Vec<_>>();

    let mut tail_measurements = all_measurements.clone();
    tail_measurements.pop(); // Remove head to get just tail for median calculation
    let tail_median = tail_measurements.median().unwrap_or_default();

    // Calculate min and max once for use in both branches.
    // Outer unwrap is safe: all_measurements always contains at least `head`.
    // NOTE(review): partial_cmp().unwrap() assumes values are never NaN —
    // confirm upstream parsing guarantees this.
    let min_val = all_measurements
        .iter()
        .min_by(|a, b| a.partial_cmp(b).unwrap())
        .unwrap();
    let max_val = all_measurements
        .iter()
        .max_by(|a, b| a.partial_cmp(b).unwrap())
        .unwrap();

    // Tiered approach for sparkline display:
    // 1. If tail median is non-zero: use median as baseline, show percentages (default behavior)
    // 2. If tail median is zero: show absolute differences instead
    let tail_median_is_zero = tail_median.abs() < f64::EPSILON;

    let sparkline = if tail_median_is_zero {
        // Median is zero - show absolute range
        format!(
            " [{} – {}] {}",
            min_val,
            max_val,
            spark(all_measurements.as_slice())
        )
    } else {
        // MUTATION POINT: / vs % (Line 140)
        // Median is non-zero - use it as baseline for percentage ranges
        let relative_min = min_val / tail_median - 1.0;
        let relative_max = max_val / tail_median - 1.0;

        format!(
            " [{:+.2}% – {:+.2}%] {}",
            (relative_min * 100.0),
            (relative_max * 100.0),
            spark(all_measurements.as_slice())
        )
    };

    // Helper function to build the measurement summary text
    // This is used for both skipped and normal audit results to avoid duplication
    let build_summary = || -> String {
        let mut summary = String::new();

        // Use the length of all_measurements vector for total count
        let total_measurements = all_measurements.len();

        // If only 1 total measurement (head only, no tail), show only head summary
        if total_measurements == 1 {
            let head_display = StatsWithUnit {
                stats: &head_summary,
                unit: unit_str,
            };
            summary.push_str(&format!("Head: {}\n", head_display));
        } else if total_measurements >= 2 {
            // 2+ measurements: show aggregation method, z-score, head, tail, and sparkline
            let direction = get_direction_arrow(head_summary.mean, tail_summary.mean);
            let z_score = head_summary.z_score_with_method(&tail_summary, dispersion_method);
            let z_score_display = format_z_score_display(z_score);
            let method_name = match dispersion_method {
                DispersionMethod::StandardDeviation => "stddev",
                DispersionMethod::MedianAbsoluteDeviation => "mad",
            };

            let head_display = StatsWithUnit {
                stats: &head_summary,
                unit: unit_str,
            };
            let tail_display = StatsWithUnit {
                stats: &tail_summary,
                unit: unit_str,
            };

            summary.push_str(&format!("Aggregation: {summarize_by}\n"));
            summary.push_str(&format!(
                "z-score ({method_name}): {direction}{}\n",
                z_score_display
            ));
            summary.push_str(&format!("Head: {}\n", head_display));
            summary.push_str(&format!("Tail: {}\n", tail_display));
            summary.push_str(&sparkline);
        }
        // If 0 total measurements, return empty summary

        summary
    };

    // MUTATION POINT: < vs == (Line 120)
    // Not enough history: skip the audit and report it as passed.
    if tail_summary.len < min_count.into() {
        let number_measurements = tail_summary.len;
        // MUTATION POINT: > vs < (Line 122)
        let plural_s = if number_measurements == 1 { "" } else { "s" };
        info!("Only {number_measurements} historical measurement{plural_s} found. Less than requested min_measurements of {min_count}. Skipping test.");

        let mut skip_message = format!(
            "⏭️ '{measurement}'\nOnly {number_measurements} historical measurement{plural_s} found. Less than requested min_measurements of {min_count}. Skipping test."
        );

        // Add summary using the same logic as passing/failing cases
        let summary = build_summary();
        if !summary.is_empty() {
            skip_message.push('\n');
            skip_message.push_str(&summary);
        }

        return Ok(AuditResult {
            message: skip_message,
            passed: true,
        });
    }

    // MUTATION POINT: / vs % (Line 150)
    // Calculate relative deviation - naturally handles infinity when tail_median is zero
    let head_relative_deviation = (head / tail_median - 1.0).abs() * 100.0;

    // Calculate absolute deviation
    let head_absolute_deviation = (head - tail_median).abs();

    // Check if we have a minimum relative deviation threshold configured
    let min_relative_deviation = config::audit_min_relative_deviation(measurement);
    let min_absolute_deviation = config::audit_min_absolute_deviation(measurement);

    // MUTATION POINT: < vs == (Line 156)
    let passed_due_to_relative_threshold = min_relative_deviation
        .map(|threshold| head_relative_deviation < threshold)
        .unwrap_or(false);

    let passed_due_to_absolute_threshold = min_absolute_deviation
        .map(|threshold| head_absolute_deviation < threshold)
        .unwrap_or(false);

    // Either threshold alone is enough to rescue a significant deviation.
    let passed_due_to_threshold =
        passed_due_to_relative_threshold || passed_due_to_absolute_threshold;

    let text_summary = build_summary();

    // MUTATION POINT: > vs >= (Line 178)
    let z_score_exceeds_sigma =
        head_summary.is_significant(&tail_summary, sigma, dispersion_method);

    // MUTATION POINT: ! removal (Line 181)
    let passed = !z_score_exceeds_sigma || passed_due_to_threshold;

    // Add threshold information to output if applicable
    // Only show note when the audit would have failed without the threshold
    let threshold_note = if z_score_exceeds_sigma {
        let mut notes = Vec::new();
        if passed_due_to_relative_threshold {
            // unwrap is safe: this branch implies min_relative_deviation is Some
            notes.push(format!(
                "Note: Passed due to relative deviation ({:.1}%) being below threshold ({:.1}%)",
                head_relative_deviation,
                min_relative_deviation.unwrap()
            ));
        }
        if passed_due_to_absolute_threshold {
            // unwrap is safe: this branch implies min_absolute_deviation is Some
            notes.push(format!(
                "Note: Passed due to absolute deviation ({:.1}) being below threshold ({:.1})",
                head_absolute_deviation,
                min_absolute_deviation.unwrap()
            ));
        }
        if notes.is_empty() {
            String::new()
        } else {
            format!("\n{}", notes.join("\n"))
        }
    } else {
        String::new()
    };

    // MUTATION POINT: ! removal (Line 194)
    if !passed {
        return Ok(AuditResult {
            message: format!(
                "❌ '{measurement}'\nHEAD differs significantly from tail measurements.\n{text_summary}{threshold_note}"
            ),
            passed: false,
        });
    }

    Ok(AuditResult {
        message: format!("✅ '{measurement}'\n{text_summary}{threshold_note}"),
        passed: true,
    })
}
641
642#[cfg(test)]
643mod test {
644    use crate::test_helpers::with_isolated_test_setup;
645
646    use super::*;
647
    #[test]
    fn test_format_z_score_display() {
        // Table-driven cases: (input z-score, expected formatted string).
        // Finite values format as " {:.2}"; non-finite values become "".
        let test_cases = vec![
            (2.5_f64, " 2.50"),
            (0.0_f64, " 0.00"),
            (-1.5_f64, " -1.50"),
            (999.999_f64, " 1000.00"),
            (0.001_f64, " 0.00"),
            (f64::INFINITY, ""),
            (f64::NEG_INFINITY, ""),
            (f64::NAN, ""),
        ];

        for (z_score, expected) in test_cases {
            let result = format_z_score_display(z_score);
            assert_eq!(result, expected, "Failed for z_score: {}", z_score);
        }
    }
667
    #[test]
    fn test_direction_arrows() {
        // Table-driven cases: (head mean, tail mean, expected arrow).
        let test_cases = vec![
            (5.0_f64, 3.0_f64, "↑"), // head > tail
            (1.0_f64, 3.0_f64, "↓"), // head < tail
            (3.0_f64, 3.0_f64, "→"), // head == tail
        ];

        for (head_mean, tail_mean, expected) in test_cases {
            let result = get_direction_arrow(head_mean, tail_mean);
            assert_eq!(
                result, expected,
                "Failed for head_mean: {}, tail_mean: {}",
                head_mean, tail_mean
            );
        }
    }
686
    #[test]
    fn test_audit_with_different_dispersion_methods() {
        // Test that audit produces different results with different dispersion methods

        // Create mock data that would produce different z-scores with stddev vs MAD:
        // the single 100.0 outlier inflates stddev but barely moves the MAD.
        let head_value = 35.0;
        let tail_values = [30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 100.0];

        let head_summary = stats::aggregate_measurements(std::iter::once(&head_value));
        let tail_summary = stats::aggregate_measurements(tail_values.iter());

        // Calculate z-scores with both methods
        let z_score_stddev =
            head_summary.z_score_with_method(&tail_summary, DispersionMethod::StandardDeviation);
        let z_score_mad = head_summary
            .z_score_with_method(&tail_summary, DispersionMethod::MedianAbsoluteDeviation);

        // With the outlier (100.0), stddev should be much larger than MAD
        // So z-score with stddev should be smaller than z-score with MAD
        assert!(
            z_score_stddev < z_score_mad,
            "stddev z-score ({}) should be smaller than MAD z-score ({}) with outlier data",
            z_score_stddev,
            z_score_mad
        );

        // Both should be positive since head > tail mean
        assert!(z_score_stddev > 0.0);
        assert!(z_score_mad > 0.0);
    }
717
    #[test]
    fn test_dispersion_method_conversion() {
        // Test that the conversion from CLI types to stats types works correctly
        // (the `From`/`Into` impls between git_perf_cli_types and stats enums).

        // Test stddev conversion
        let cli_stddev = git_perf_cli_types::DispersionMethod::StandardDeviation;
        let stats_stddev: DispersionMethod = cli_stddev.into();
        assert_eq!(stats_stddev, DispersionMethod::StandardDeviation);

        // Test MAD conversion
        let cli_mad = git_perf_cli_types::DispersionMethod::MedianAbsoluteDeviation;
        let stats_mad: DispersionMethod = cli_mad.into();
        assert_eq!(stats_mad, DispersionMethod::MedianAbsoluteDeviation);
    }
732
    #[test]
    fn test_audit_multiple_with_no_measurements() {
        // This test exercises the actual production audit_multiple function
        // Tests the case where no patterns are provided (empty list)
        // With no patterns, it should succeed (nothing to audit)
        with_isolated_test_setup(|_git_dir, _home_path| {
            let result = audit_multiple(
                "HEAD",
                100,
                Some(1),
                &[],
                Some(ReductionFunc::Mean),
                Some(2.0),
                Some(DispersionMethod::StandardDeviation),
                &[], // Empty combined_patterns
                &[], // Empty separate_by
                false,
            );

            // Should succeed when no measurements need to be audited
            assert!(
                result.is_ok(),
                "audit_multiple should succeed with empty pattern list"
            );
        });
    }
759
760    // MUTATION TESTING COVERAGE TESTS - Exercise actual production code paths
761
    #[test]
    fn test_min_count_boundary_condition() {
        // COVERS MUTATION: tail_summary.len < min_count.into() vs ==
        // Test with exactly min_count measurements (should NOT skip)
        let result = audit_with_data(
            "test_measurement",
            15.0,
            vec![10.0, 11.0, 12.0], // Exactly 3 measurements
            3,                      // min_count = 3
            2.0,
            DispersionMethod::StandardDeviation,
            ReductionFunc::Min,
        );

        assert!(result.is_ok());
        let audit_result = result.unwrap();
        // Should NOT be skipped (would be skipped if < was changed to ==)
        assert!(!audit_result.message.contains("Skipping test"));

        // Test with fewer than min_count (should skip)
        let result = audit_with_data(
            "test_measurement",
            15.0,
            vec![10.0, 11.0], // Only 2 measurements
            3,                // min_count = 3
            2.0,
            DispersionMethod::StandardDeviation,
            ReductionFunc::Min,
        );

        assert!(result.is_ok());
        let audit_result = result.unwrap();
        assert!(audit_result.message.contains("Skipping test"));
        assert!(audit_result.passed); // Skipped tests are marked as passed
    }
797
    #[test]
    fn test_pluralization_logic() {
        // COVERS MUTATION: number_measurements > 1 vs ==
        // Only the exact count of 1 should drop the trailing 's'.
        // Test with 0 measurements (should have 's' - grammatically correct)
        let result = audit_with_data(
            "test_measurement",
            15.0,
            vec![], // 0 measurements
            5,      // min_count > 0 to trigger skip
            2.0,
            DispersionMethod::StandardDeviation,
            ReductionFunc::Min,
        );

        assert!(result.is_ok());
        let message = result.unwrap().message;
        assert!(message.contains("0 historical measurements found")); // Has 's'
        assert!(!message.contains("0 historical measurement found")); // Should not be singular

        // Test with 1 measurement (no 's')
        let result = audit_with_data(
            "test_measurement",
            15.0,
            vec![10.0], // 1 measurement
            5,          // min_count > 1 to trigger skip
            2.0,
            DispersionMethod::StandardDeviation,
            ReductionFunc::Min,
        );

        assert!(result.is_ok());
        let message = result.unwrap().message;
        assert!(message.contains("1 historical measurement found")); // No 's'

        // Test with 2+ measurements (should have 's')
        let result = audit_with_data(
            "test_measurement",
            15.0,
            vec![10.0, 11.0], // 2 measurements
            5,                // min_count > 2 to trigger skip
            2.0,
            DispersionMethod::StandardDeviation,
            ReductionFunc::Min,
        );

        assert!(result.is_ok());
        let message = result.unwrap().message;
        assert!(message.contains("2 historical measurements found")); // Has 's'
    }
847
848    #[test]
849    fn test_skip_with_summaries() {
850        // Test that when audit is skipped, summaries are shown based on TOTAL measurement count
851        // Total measurements = 1 head + N tail
852        // and the format matches passing/failing cases
853
854        // Test with 0 tail measurements (1 total): should show Head only
855        let result = audit_with_data(
856            "test_measurement",
857            15.0,
858            vec![], // 0 tail measurements = 1 total measurement
859            5,      // min_count > 0 to trigger skip
860            2.0,
861            DispersionMethod::StandardDeviation,
862            ReductionFunc::Min,
863        );
864
865        assert!(result.is_ok());
866        let message = result.unwrap().message;
867        assert!(message.contains("Skipping test"));
868        assert!(message.contains("Head:")); // Head summary shown
869        assert!(!message.contains("z-score")); // No z-score (only 1 total measurement)
870        assert!(!message.contains("Tail:")); // No tail
871        assert!(!message.contains("[")); // No sparkline
872
873        // Test with 1 tail measurement (2 total): should show everything
874        let result = audit_with_data(
875            "test_measurement",
876            15.0,
877            vec![10.0], // 1 tail measurement = 2 total measurements
878            5,          // min_count > 1 to trigger skip
879            2.0,
880            DispersionMethod::StandardDeviation,
881            ReductionFunc::Min,
882        );
883
884        assert!(result.is_ok());
885        let message = result.unwrap().message;
886        assert!(message.contains("Skipping test"));
887        assert!(message.contains("z-score (stddev):")); // Z-score with method shown
888        assert!(message.contains("Head:")); // Head summary shown
889        assert!(message.contains("Tail:")); // Tail summary shown
890        assert!(message.contains("[")); // Sparkline shown
891                                        // Verify order: z-score, Head, Tail, sparkline
892        let z_pos = message.find("z-score").unwrap();
893        let head_pos = message.find("Head:").unwrap();
894        let tail_pos = message.find("Tail:").unwrap();
895        let spark_pos = message.find("[").unwrap();
896        assert!(z_pos < head_pos, "z-score should come before Head");
897        assert!(head_pos < tail_pos, "Head should come before Tail");
898        assert!(tail_pos < spark_pos, "Tail should come before sparkline");
899
900        // Test with 2 tail measurements (3 total): should show everything
901        let result = audit_with_data(
902            "test_measurement",
903            15.0,
904            vec![10.0, 11.0], // 2 tail measurements = 3 total measurements
905            5,                // min_count > 2 to trigger skip
906            2.0,
907            DispersionMethod::StandardDeviation,
908            ReductionFunc::Min,
909        );
910
911        assert!(result.is_ok());
912        let message = result.unwrap().message;
913        assert!(message.contains("Skipping test"));
914        assert!(message.contains("z-score (stddev):")); // Z-score with method shown
915        assert!(message.contains("Head:")); // Head summary shown
916        assert!(message.contains("Tail:")); // Tail summary shown
917        assert!(message.contains("[")); // Sparkline shown
918                                        // Verify order: z-score, Head, Tail, sparkline
919        let z_pos = message.find("z-score").unwrap();
920        let head_pos = message.find("Head:").unwrap();
921        let tail_pos = message.find("Tail:").unwrap();
922        let spark_pos = message.find("[").unwrap();
923        assert!(z_pos < head_pos, "z-score should come before Head");
924        assert!(head_pos < tail_pos, "Head should come before Tail");
925        assert!(tail_pos < spark_pos, "Tail should come before sparkline");
926
927        // Test with MAD dispersion method to ensure method name is correct
928        let result = audit_with_data(
929            "test_measurement",
930            15.0,
931            vec![10.0, 11.0], // 2 tail measurements = 3 total measurements
932            5,                // min_count > 2 to trigger skip
933            2.0,
934            DispersionMethod::MedianAbsoluteDeviation,
935            ReductionFunc::Min,
936        );
937
938        assert!(result.is_ok());
939        let message = result.unwrap().message;
940        assert!(message.contains("z-score (mad):")); // MAD method shown
941    }
942
943    #[test]
944    fn test_relative_calculations_division_vs_modulo() {
945        // COVERS MUTATIONS: / vs % in relative_min, relative_max, head_relative_deviation
946        // Use values where division and modulo produce very different results
947        let result = audit_with_data(
948            "test_measurement",
949            25.0,                   // head
950            vec![10.0, 10.0, 10.0], // tail, median = 10.0
951            2,
952            10.0, // High sigma to avoid z-score failures
953            DispersionMethod::StandardDeviation,
954            ReductionFunc::Min,
955        );
956
957        assert!(result.is_ok());
958        let audit_result = result.unwrap();
959
960        // With division:
961        // - relative_min = (10.0 / 10.0 - 1.0) * 100 = 0.0%
962        // - relative_max = (25.0 / 10.0 - 1.0) * 100 = 150.0%
963        // With modulo:
964        // - relative_min = (10.0 % 10.0 - 1.0) * 100 = -100.0% (since 10.0 % 10.0 = 0.0)
965        // - relative_max = (25.0 % 10.0 - 1.0) * 100 = -50.0% (since 25.0 % 10.0 = 5.0)
966
967        // Check that the calculation uses division, not modulo
968        // The range should show [+0.00% – +150.00%], not [-100.00% – -50.00%]
969        assert!(audit_result.message.contains("[+0.00% – +150.00%]"));
970
971        // Ensure the modulo results are NOT present
972        assert!(!audit_result.message.contains("[-100.00% – -50.00%]"));
973        assert!(!audit_result.message.contains("-100.00%"));
974        assert!(!audit_result.message.contains("-50.00%"));
975    }
976
977    #[test]
978    fn test_core_pass_fail_logic() {
979        // COVERS MUTATION: !z_score_exceeds_sigma || passed_due_to_threshold
980        // vs z_score_exceeds_sigma || passed_due_to_threshold
981
982        // Case 1: z_score exceeds sigma, no threshold bypass (should fail)
983        let result = audit_with_data(
984            "test_measurement",                 // No config threshold for this name
985            100.0,                              // Very high head value
986            vec![10.0, 10.0, 10.0, 10.0, 10.0], // Low tail values
987            2,
988            0.5, // Low sigma threshold
989            DispersionMethod::StandardDeviation,
990            ReductionFunc::Min,
991        );
992
993        assert!(result.is_ok());
994        let audit_result = result.unwrap();
995        assert!(!audit_result.passed); // Should fail
996        assert!(audit_result.message.contains("❌"));
997
998        // Case 2: z_score within sigma (should pass)
999        let result = audit_with_data(
1000            "test_measurement",
1001            10.2,                               // Close to tail values
1002            vec![10.0, 10.1, 10.0, 10.1, 10.0], // Some variance to avoid zero stddev
1003            2,
1004            100.0, // Very high sigma threshold
1005            DispersionMethod::StandardDeviation,
1006            ReductionFunc::Min,
1007        );
1008
1009        assert!(result.is_ok());
1010        let audit_result = result.unwrap();
1011        assert!(audit_result.passed); // Should pass
1012        assert!(audit_result.message.contains("✅"));
1013    }
1014
1015    #[test]
1016    fn test_final_result_logic() {
1017        // COVERS MUTATION: if !passed vs if passed
1018        // This tests the final branch that determines success vs failure message
1019
1020        // Test failing case (should get failure message)
1021        let result = audit_with_data(
1022            "test_measurement",
1023            1000.0, // Extreme outlier
1024            vec![10.0, 10.0, 10.0, 10.0, 10.0],
1025            2,
1026            0.1, // Very strict sigma
1027            DispersionMethod::StandardDeviation,
1028            ReductionFunc::Min,
1029        );
1030
1031        assert!(result.is_ok());
1032        let audit_result = result.unwrap();
1033        assert!(!audit_result.passed);
1034        assert!(audit_result.message.contains("❌"));
1035        assert!(audit_result.message.contains("differs significantly"));
1036
1037        // Test passing case (should get success message)
1038        let result = audit_with_data(
1039            "test_measurement",
1040            10.01,                              // Very close to tail
1041            vec![10.0, 10.1, 10.0, 10.1, 10.0], // Varied values to avoid zero variance
1042            2,
1043            100.0, // Very lenient sigma
1044            DispersionMethod::StandardDeviation,
1045            ReductionFunc::Min,
1046        );
1047
1048        assert!(result.is_ok());
1049        let audit_result = result.unwrap();
1050        assert!(audit_result.passed);
1051        assert!(audit_result.message.contains("✅"));
1052        assert!(!audit_result.message.contains("differs significantly"));
1053    }
1054
1055    #[test]
1056    fn test_dispersion_methods_produce_different_results() {
1057        // Test that different dispersion methods work in the production code
1058        let head = 35.0;
1059        let tail = vec![30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 100.0];
1060
1061        let result_stddev = audit_with_data(
1062            "test_measurement",
1063            head,
1064            tail.clone(),
1065            2,
1066            2.0,
1067            DispersionMethod::StandardDeviation,
1068            ReductionFunc::Min,
1069        );
1070
1071        let result_mad = audit_with_data(
1072            "test_measurement",
1073            head,
1074            tail,
1075            2,
1076            2.0,
1077            DispersionMethod::MedianAbsoluteDeviation,
1078            ReductionFunc::Min,
1079        );
1080
1081        assert!(result_stddev.is_ok());
1082        assert!(result_mad.is_ok());
1083
1084        let stddev_result = result_stddev.unwrap();
1085        let mad_result = result_mad.unwrap();
1086
1087        // Both should contain method indicators
1088        assert!(stddev_result.message.contains("stddev"));
1089        assert!(mad_result.message.contains("mad"));
1090    }
1091
1092    #[test]
1093    fn test_head_and_tail_have_units_and_auto_scaling() {
1094        // Test that both head and tail measurements display units with auto-scaling
1095
1096        // First, set up a test environment with a configured unit
1097        use crate::test_helpers::setup_test_env_with_config;
1098
1099        let config_content = r#"
1100[measurement."build_time"]
1101unit = "ms"
1102"#;
1103        let (_temp_dir, _dir_guard) = setup_test_env_with_config(config_content);
1104
1105        // Test with large millisecond values that should auto-scale to seconds
1106        let head = 12_345.67; // Will auto-scale to ~12.35s
1107        let tail = vec![10_000.0, 10_500.0, 11_000.0, 11_500.0, 12_000.0]; // Will auto-scale to 10s, 10.5s, 11s, etc.
1108
1109        let result = audit_with_data(
1110            "build_time",
1111            head,
1112            tail,
1113            2,
1114            10.0, // High sigma to ensure it passes
1115            DispersionMethod::StandardDeviation,
1116            ReductionFunc::Min,
1117        );
1118
1119        assert!(result.is_ok());
1120        let audit_result = result.unwrap();
1121        let message = &audit_result.message;
1122
1123        // Verify Head section exists
1124        assert!(
1125            message.contains("Head:"),
1126            "Message should contain Head section"
1127        );
1128
1129        // With auto-scaling, 12345.67ms should become ~12.35s or 12.3s
1130        // Check that the value is auto-scaled (contains 's' for seconds)
1131        assert!(
1132            message.contains("12.3s") || message.contains("12.35s"),
1133            "Head mean should be auto-scaled to seconds, got: {}",
1134            message
1135        );
1136
1137        let head_section: Vec<&str> = message
1138            .lines()
1139            .filter(|line| line.contains("Head:"))
1140            .collect();
1141
1142        assert!(
1143            !head_section.is_empty(),
1144            "Should find Head section in message"
1145        );
1146
1147        let head_line = head_section[0];
1148
1149        // With auto-scaling, all values (mean, stddev, MAD) get their units auto-scaled
1150        // They should all have units now (not just mean)
1151        assert!(
1152            head_line.contains("μ:") && head_line.contains("σ:") && head_line.contains("MAD:"),
1153            "Head line should contain μ, σ, and MAD labels, got: {}",
1154            head_line
1155        );
1156
1157        // Verify Tail section has units
1158        assert!(
1159            message.contains("Tail:"),
1160            "Message should contain Tail section"
1161        );
1162
1163        let tail_section: Vec<&str> = message
1164            .lines()
1165            .filter(|line| line.contains("Tail:"))
1166            .collect();
1167
1168        assert!(
1169            !tail_section.is_empty(),
1170            "Should find Tail section in message"
1171        );
1172
1173        let tail_line = tail_section[0];
1174
1175        // Tail mean should be auto-scaled to seconds (10000-12000ms → 10-12s)
1176        assert!(
1177            tail_line.contains("11s")
1178                || tail_line.contains("11.")
1179                || tail_line.contains("10.")
1180                || tail_line.contains("12."),
1181            "Tail should contain auto-scaled second values, got: {}",
1182            tail_line
1183        );
1184
1185        // Verify the basic format structure is present
1186        assert!(
1187            tail_line.contains("μ:")
1188                && tail_line.contains("σ:")
1189                && tail_line.contains("MAD:")
1190                && tail_line.contains("n:"),
1191            "Tail line should contain all stat labels, got: {}",
1192            tail_line
1193        );
1194    }
1195
1196    #[test]
1197    fn test_threshold_note_only_shown_when_audit_would_fail() {
1198        // Test that the threshold note is only shown when the audit would have
1199        // failed without the threshold (i.e., when z_score_exceeds_sigma is true)
1200        use crate::test_helpers::setup_test_env_with_config;
1201
1202        let config_content = r#"
1203[measurement."build_time"]
1204min_relative_deviation = 10.0
1205"#;
1206        let (_temp_dir, _dir_guard) = setup_test_env_with_config(config_content);
1207
1208        // Case 1: Low z-score AND low relative deviation (threshold is configured but not needed)
1209        // Should pass without showing the note
1210        let result = audit_with_data(
1211            "build_time",
1212            10.1,                               // Very close to tail values
1213            vec![10.0, 10.1, 10.0, 10.1, 10.0], // Low variance
1214            2,
1215            100.0, // Very high sigma threshold - won't be exceeded
1216            DispersionMethod::StandardDeviation,
1217            ReductionFunc::Min,
1218        );
1219
1220        assert!(result.is_ok());
1221        let audit_result = result.unwrap();
1222        assert!(audit_result.passed);
1223        assert!(audit_result.message.contains("✅"));
1224        // The note should NOT be shown because the audit would have passed anyway
1225        assert!(
1226            !audit_result
1227                .message
1228                .contains("Note: Passed due to relative deviation"),
1229            "Note should not appear when audit passes without needing threshold bypass"
1230        );
1231
1232        // Case 2: High z-score but low relative deviation (threshold saves the audit)
1233        // Should pass and show the note
1234        let result = audit_with_data(
1235            "build_time",
1236            1002.0, // High z-score outlier but low relative deviation
1237            vec![1000.0, 1000.1, 1000.0, 1000.1, 1000.0], // Very low variance
1238            2,
1239            0.5, // Low sigma threshold - will be exceeded
1240            DispersionMethod::StandardDeviation,
1241            ReductionFunc::Min,
1242        );
1243
1244        assert!(result.is_ok());
1245        let audit_result = result.unwrap();
1246        assert!(audit_result.passed);
1247        assert!(audit_result.message.contains("✅"));
1248        // The note SHOULD be shown because the audit would have failed without the threshold
1249        assert!(
1250            audit_result
1251                .message
1252                .contains("Note: Passed due to relative deviation"),
1253            "Note should appear when audit passes due to threshold bypass. Got: {}",
1254            audit_result.message
1255        );
1256
1257        // Case 3: High z-score AND high relative deviation (threshold doesn't help)
1258        // Should fail
1259        let result = audit_with_data(
1260            "build_time",
1261            1200.0, // High z-score AND high relative deviation
1262            vec![1000.0, 1000.1, 1000.0, 1000.1, 1000.0], // Very low variance
1263            2,
1264            0.5, // Low sigma threshold - will be exceeded
1265            DispersionMethod::StandardDeviation,
1266            ReductionFunc::Min,
1267        );
1268
1269        assert!(result.is_ok());
1270        let audit_result = result.unwrap();
1271        assert!(!audit_result.passed);
1272        assert!(audit_result.message.contains("❌"));
1273        // No note shown because the audit still failed
1274        assert!(
1275            !audit_result
1276                .message
1277                .contains("Note: Passed due to relative deviation"),
1278            "Note should not appear when audit fails"
1279        );
1280    }
1281
1282    #[test]
1283    fn test_absolute_threshold_note_and_deviation_value() {
1284        // Tests that:
1285        // 1. The note shows the correct absolute deviation value (catches - vs / mutation)
1286        // 2. The boundary: deviation exactly AT threshold fails (catches < vs <= mutation)
1287        use crate::test_helpers::setup_test_env_with_config;
1288
1289        let config_content = r#"
1290[measurement."build_time"]
1291min_absolute_deviation = 50.0
1292"#;
1293        let (_temp_dir, _dir_guard) = setup_test_env_with_config(config_content);
1294
1295        // Case 1: High z-score but low absolute deviation (threshold saves the audit)
1296        // head=1010, tail values very tightly clustered around 1000
1297        // absolute deviation = |1010 - 1000| = 10 < 50 => should pass
1298        // if - were replaced with /, deviation would be |1010/1000| = 1.01, still < 50 (passes anyway)
1299        // So we need values where subtraction and division give meaningfully different results
1300        // head=1005, tail=1000: subtract=5, divide=1.005; but threshold=50, both < 50
1301        // Let's use head=100, tail_median=10: subtract=90, divide=10; threshold=50
1302        // With threshold=50: subtract(90) >= 50 fails, divide(10) < 50 passes
1303        // This catches the - vs / mutation
1304        let result = audit_with_data(
1305            "build_time",
1306            100.0,                              // head value
1307            vec![10.0, 10.0, 10.0, 10.0, 10.0], // tail values, median=10
1308            2,
1309            0.5, // Low sigma - will be exceeded
1310            DispersionMethod::StandardDeviation,
1311            ReductionFunc::Min,
1312        );
1313
1314        assert!(result.is_ok());
1315        let audit_result = result.unwrap();
1316        // absolute deviation = |100 - 10| = 90, which is > 50 threshold => should FAIL
1317        assert!(
1318            !audit_result.passed,
1319            "Should fail: absolute deviation 90 > threshold 50. Got: {}",
1320            audit_result.message
1321        );
1322
1323        // Case 2: absolute deviation exactly equals threshold => should FAIL (< not <=)
1324        // head=1050, tail_median=1000, absolute_deviation=50, threshold=50
1325        // With < : 50 < 50 is false => fails (correct)
1326        // With <= : 50 <= 50 is true => passes (wrong)
1327        let result = audit_with_data(
1328            "build_time",
1329            1050.0,                                       // head value
1330            vec![1000.0, 1000.0, 1000.0, 1000.0, 1000.0], // tail values, median=1000
1331            2,
1332            0.5, // Low sigma - will be exceeded
1333            DispersionMethod::StandardDeviation,
1334            ReductionFunc::Min,
1335        );
1336
1337        assert!(result.is_ok());
1338        let audit_result = result.unwrap();
1339        // absolute deviation = |1050 - 1000| = 50, which equals threshold 50 => should FAIL
1340        assert!(
1341            !audit_result.passed,
1342            "Should fail: absolute deviation 50 == threshold 50 (not strictly less than). Got: {}",
1343            audit_result.message
1344        );
1345
1346        // Case 3: absolute deviation strictly below threshold => should PASS with note
1347        // head=1049, tail_median=1000, absolute_deviation=49, threshold=50
1348        let result = audit_with_data(
1349            "build_time",
1350            1049.0,                                       // head value
1351            vec![1000.0, 1000.0, 1000.0, 1000.0, 1000.0], // tail values, median=1000
1352            2,
1353            0.5, // Low sigma - will be exceeded
1354            DispersionMethod::StandardDeviation,
1355            ReductionFunc::Min,
1356        );
1357
1358        assert!(result.is_ok());
1359        let audit_result = result.unwrap();
1360        assert!(
1361            audit_result.passed,
1362            "Should pass: absolute deviation 49 < threshold 50. Got: {}",
1363            audit_result.message
1364        );
1365        assert!(
1366            audit_result
1367                .message
1368                .contains("Note: Passed due to absolute deviation"),
1369            "Note should appear when audit passes due to absolute threshold. Got: {}",
1370            audit_result.message
1371        );
1372        // Verify the note contains the correct deviation value (catches - vs / mutation)
1373        // If / were used: |1049/1000| = 1.049, note would say "1.0" not "49.0"
1374        assert!(
1375            audit_result.message.contains("49.0"),
1376            "Note should show absolute deviation 49.0, not 1.0 (which would indicate / instead of -). Got: {}",
1377            audit_result.message
1378        );
1379    }
1380
    // Integration tests that verify per-measurement config determination
    // Each test writes a TOML config into a temp environment (via
    // setup_test_env_with_config) and checks that the config accessors /
    // resolve_audit_params honor the precedence:
    // CLI option -> measurement-specific config -> global config -> built-in default.
    #[cfg(test)]
    mod integration {
        use super::*;
        use crate::config::{
            audit_aggregate_by, audit_dispersion_method, audit_min_measurements, audit_sigma,
        };
        use crate::test_helpers::setup_test_env_with_config;

        // Per-measurement dispersion_method overrides the global [measurement] default.
        #[test]
        fn test_different_dispersion_methods_per_measurement() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement]
dispersion_method = "stddev"

[measurement."build_time"]
dispersion_method = "mad"

[measurement."memory_usage"]
dispersion_method = "stddev"
"#,
            );

            // Verify each measurement gets its own config
            let build_time_method = audit_dispersion_method("build_time");
            let memory_usage_method = audit_dispersion_method("memory_usage");
            let other_method = audit_dispersion_method("other_metric");

            assert_eq!(
                DispersionMethod::from(build_time_method),
                DispersionMethod::MedianAbsoluteDeviation,
                "build_time should use MAD"
            );
            assert_eq!(
                DispersionMethod::from(memory_usage_method),
                DispersionMethod::StandardDeviation,
                "memory_usage should use stddev"
            );
            // Unconfigured measurement falls back to the global [measurement] value
            assert_eq!(
                DispersionMethod::from(other_method),
                DispersionMethod::StandardDeviation,
                "other_metric should use default stddev"
            );
        }

        // Per-measurement min_measurements overrides the global default.
        #[test]
        fn test_different_min_measurements_per_measurement() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement]
min_measurements = 5

[measurement."build_time"]
min_measurements = 10

[measurement."memory_usage"]
min_measurements = 3
"#,
            );

            assert_eq!(
                audit_min_measurements("build_time"),
                Some(10),
                "build_time should require 10 measurements"
            );
            assert_eq!(
                audit_min_measurements("memory_usage"),
                Some(3),
                "memory_usage should require 3 measurements"
            );
            // Unconfigured measurement falls back to the global [measurement] value
            assert_eq!(
                audit_min_measurements("other_metric"),
                Some(5),
                "other_metric should use default 5 measurements"
            );
        }

        // Per-measurement aggregate_by overrides the global default.
        #[test]
        fn test_different_aggregate_by_per_measurement() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement]
aggregate_by = "median"

[measurement."build_time"]
aggregate_by = "max"

[measurement."memory_usage"]
aggregate_by = "mean"
"#,
            );

            assert_eq!(
                audit_aggregate_by("build_time"),
                Some(git_perf_cli_types::ReductionFunc::Max),
                "build_time should use max"
            );
            assert_eq!(
                audit_aggregate_by("memory_usage"),
                Some(git_perf_cli_types::ReductionFunc::Mean),
                "memory_usage should use mean"
            );
            // Unconfigured measurement falls back to the global [measurement] value
            assert_eq!(
                audit_aggregate_by("other_metric"),
                Some(git_perf_cli_types::ReductionFunc::Median),
                "other_metric should use default median"
            );
        }

        // Per-measurement sigma overrides the global default.
        #[test]
        fn test_different_sigma_per_measurement() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement]
sigma = 3.0

[measurement."build_time"]
sigma = 5.5

[measurement."memory_usage"]
sigma = 2.0
"#,
            );

            assert_eq!(
                audit_sigma("build_time"),
                Some(5.5),
                "build_time should use sigma 5.5"
            );
            assert_eq!(
                audit_sigma("memory_usage"),
                Some(2.0),
                "memory_usage should use sigma 2.0"
            );
            // Unconfigured measurement falls back to the global [measurement] value
            assert_eq!(
                audit_sigma("other_metric"),
                Some(3.0),
                "other_metric should use default sigma 3.0"
            );
        }

        // CLI-provided values take precedence over everything in the config file.
        #[test]
        fn test_cli_overrides_config() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement."build_time"]
min_measurements = 10
aggregate_by = "max"
sigma = 5.5
dispersion_method = "mad"
"#,
            );

            // Test that CLI values override config
            let params = super::resolve_audit_params(
                "build_time",
                Some(2),                                   // CLI min
                Some(ReductionFunc::Min),                  // CLI aggregate
                Some(3.0),                                 // CLI sigma
                Some(DispersionMethod::StandardDeviation), // CLI dispersion
            );

            assert_eq!(
                params.min_count, 2,
                "CLI min_measurements should override config"
            );
            assert_eq!(
                params.summarize_by,
                ReductionFunc::Min,
                "CLI aggregate_by should override config"
            );
            assert_eq!(params.sigma, 3.0, "CLI sigma should override config");
            assert_eq!(
                params.dispersion_method,
                DispersionMethod::StandardDeviation,
                "CLI dispersion should override config"
            );
        }

        // With no CLI values, config-file values beat the built-in defaults.
        #[test]
        fn test_config_overrides_defaults() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config(
                r#"
[measurement."build_time"]
min_measurements = 10
aggregate_by = "max"
sigma = 5.5
dispersion_method = "mad"
"#,
            );

            // Test that config values are used when no CLI values provided
            let params = super::resolve_audit_params(
                "build_time",
                None, // No CLI values
                None,
                None,
                None,
            );

            assert_eq!(
                params.min_count, 10,
                "Config min_measurements should override default"
            );
            assert_eq!(
                params.summarize_by,
                ReductionFunc::Max,
                "Config aggregate_by should override default"
            );
            assert_eq!(params.sigma, 5.5, "Config sigma should override default");
            assert_eq!(
                params.dispersion_method,
                DispersionMethod::MedianAbsoluteDeviation,
                "Config dispersion should override default"
            );
        }

        // With neither CLI values nor config entries, the built-in defaults apply
        // (min_count 2, aggregate Min, sigma 4.0, stddev dispersion).
        #[test]
        fn test_uses_defaults_when_no_config_or_cli() {
            let (_temp_dir, _dir_guard) = setup_test_env_with_config("");

            // Test that defaults are used when no CLI or config
            let params = super::resolve_audit_params(
                "non_existent_measurement",
                None, // No CLI values
                None,
                None,
                None,
            );

            assert_eq!(
                params.min_count, 2,
                "Should use default min_measurements of 2"
            );
            assert_eq!(
                params.summarize_by,
                ReductionFunc::Min,
                "Should use default aggregate_by of Min"
            );
            assert_eq!(params.sigma, 4.0, "Should use default sigma of 4.0");
            assert_eq!(
                params.dispersion_method,
                DispersionMethod::StandardDeviation,
                "Should use default dispersion of stddev"
            );
        }
    }
1629
1630    #[test]
1631    fn test_discover_matching_measurements() {
1632        use crate::data::{Commit, MeasurementData};
1633        use std::collections::HashMap;
1634
1635        // Create mock commits with various measurements
1636        let commits = vec![
1637            Ok(Commit {
1638                commit: "abc123".to_string(),
1639                title: "test: commit 1".to_string(),
1640                author: "Test Author".to_string(),
1641                measurements: vec![
1642                    MeasurementData {
1643                        epoch: 0,
1644                        name: "bench_cpu".to_string(),
1645                        timestamp: 1000.0,
1646                        val: 100.0,
1647                        key_values: {
1648                            let mut map = HashMap::new();
1649                            map.insert("os".to_string(), "linux".to_string());
1650                            map
1651                        },
1652                    },
1653                    MeasurementData {
1654                        epoch: 0,
1655                        name: "bench_memory".to_string(),
1656                        timestamp: 1000.0,
1657                        val: 200.0,
1658                        key_values: {
1659                            let mut map = HashMap::new();
1660                            map.insert("os".to_string(), "linux".to_string());
1661                            map
1662                        },
1663                    },
1664                    MeasurementData {
1665                        epoch: 0,
1666                        name: "test_unit".to_string(),
1667                        timestamp: 1000.0,
1668                        val: 50.0,
1669                        key_values: {
1670                            let mut map = HashMap::new();
1671                            map.insert("os".to_string(), "linux".to_string());
1672                            map
1673                        },
1674                    },
1675                ],
1676            }),
1677            Ok(Commit {
1678                commit: "def456".to_string(),
1679                title: "test: commit 2".to_string(),
1680                author: "Test Author".to_string(),
1681                measurements: vec![
1682                    MeasurementData {
1683                        epoch: 0,
1684                        name: "bench_cpu".to_string(),
1685                        timestamp: 1000.0,
1686                        val: 105.0,
1687                        key_values: {
1688                            let mut map = HashMap::new();
1689                            map.insert("os".to_string(), "mac".to_string());
1690                            map
1691                        },
1692                    },
1693                    MeasurementData {
1694                        epoch: 0,
1695                        name: "other_metric".to_string(),
1696                        timestamp: 1000.0,
1697                        val: 75.0,
1698                        key_values: {
1699                            let mut map = HashMap::new();
1700                            map.insert("os".to_string(), "linux".to_string());
1701                            map
1702                        },
1703                    },
1704                ],
1705            }),
1706        ];
1707
1708        // Test 1: Single filter pattern matching "bench_*"
1709        let patterns = vec!["bench_.*".to_string()];
1710        let filters = crate::filter::compile_filters(&patterns).unwrap();
1711        let selectors = vec![];
1712        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1713
1714        assert_eq!(discovered.len(), 2);
1715        assert!(discovered.contains(&"bench_cpu".to_string()));
1716        assert!(discovered.contains(&"bench_memory".to_string()));
1717        assert!(!discovered.contains(&"test_unit".to_string()));
1718        assert!(!discovered.contains(&"other_metric".to_string()));
1719
1720        // Test 2: Multiple filter patterns (OR behavior)
1721        let patterns = vec!["bench_cpu".to_string(), "test_.*".to_string()];
1722        let filters = crate::filter::compile_filters(&patterns).unwrap();
1723        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1724
1725        assert_eq!(discovered.len(), 2);
1726        assert!(discovered.contains(&"bench_cpu".to_string()));
1727        assert!(discovered.contains(&"test_unit".to_string()));
1728        assert!(!discovered.contains(&"bench_memory".to_string()));
1729
1730        // Test 3: Filter with selectors
1731        let patterns = vec!["bench_.*".to_string()];
1732        let filters = crate::filter::compile_filters(&patterns).unwrap();
1733        let selectors = vec![("os".to_string(), "linux".to_string())];
1734        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1735
1736        // bench_cpu and bench_memory both have os=linux (in first commit)
1737        // bench_cpu also has os=mac (in second commit) but selector filters it to only linux
1738        assert_eq!(discovered.len(), 2);
1739        assert!(discovered.contains(&"bench_cpu".to_string()));
1740        assert!(discovered.contains(&"bench_memory".to_string()));
1741
1742        // Test 4: No matches
1743        let patterns = vec!["nonexistent.*".to_string()];
1744        let filters = crate::filter::compile_filters(&patterns).unwrap();
1745        let selectors = vec![];
1746        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1747
1748        assert_eq!(discovered.len(), 0);
1749
1750        // Test 5: Empty filters (should match all)
1751        let filters = vec![];
1752        let selectors = vec![];
1753        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1754
1755        // Empty filters should match nothing based on the logic
1756        // Actually, looking at matches_any_filter, empty filters return true
1757        // So this should discover all measurements
1758        assert_eq!(discovered.len(), 4);
1759        assert!(discovered.contains(&"bench_cpu".to_string()));
1760        assert!(discovered.contains(&"bench_memory".to_string()));
1761        assert!(discovered.contains(&"test_unit".to_string()));
1762        assert!(discovered.contains(&"other_metric".to_string()));
1763
1764        // Test 6: Selector filters out everything
1765        let patterns = vec!["bench_.*".to_string()];
1766        let filters = crate::filter::compile_filters(&patterns).unwrap();
1767        let selectors = vec![("os".to_string(), "windows".to_string())];
1768        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1769
1770        assert_eq!(discovered.len(), 0);
1771
1772        // Test 7: Exact match with anchored regex (simulating -m argument)
1773        let patterns = vec!["^bench_cpu$".to_string()];
1774        let filters = crate::filter::compile_filters(&patterns).unwrap();
1775        let selectors = vec![];
1776        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1777
1778        assert_eq!(discovered.len(), 1);
1779        assert!(discovered.contains(&"bench_cpu".to_string()));
1780
1781        // Test 8: Sorted output (verify deterministic ordering)
1782        let patterns = vec![".*".to_string()]; // Match all
1783        let filters = crate::filter::compile_filters(&patterns).unwrap();
1784        let selectors = vec![];
1785        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1786
1787        // Should be sorted alphabetically
1788        assert_eq!(discovered[0], "bench_cpu");
1789        assert_eq!(discovered[1], "bench_memory");
1790        assert_eq!(discovered[2], "other_metric");
1791        assert_eq!(discovered[3], "test_unit");
1792    }
1793
1794    #[test]
1795    fn test_audit_multiple_with_combined_patterns() {
1796        // This test verifies that combining explicit measurements (-m) and filter patterns (--filter)
1797        // works correctly with OR behavior. Both should be audited.
1798        // Note: This is an integration test that uses actual audit_multiple function,
1799        // but we can't easily test it without a real git repo, so we test the pattern combination
1800        // and discovery logic instead.
1801
1802        use crate::data::{Commit, MeasurementData};
1803        use std::collections::HashMap;
1804
1805        // Create mock commits
1806        let commits = vec![Ok(Commit {
1807            commit: "abc123".to_string(),
1808            title: "test: commit".to_string(),
1809            author: "Test Author".to_string(),
1810            measurements: vec![
1811                MeasurementData {
1812                    epoch: 0,
1813                    name: "timer".to_string(),
1814                    timestamp: 1000.0,
1815                    val: 10.0,
1816                    key_values: HashMap::new(),
1817                },
1818                MeasurementData {
1819                    epoch: 0,
1820                    name: "bench_cpu".to_string(),
1821                    timestamp: 1000.0,
1822                    val: 100.0,
1823                    key_values: HashMap::new(),
1824                },
1825                MeasurementData {
1826                    epoch: 0,
1827                    name: "memory".to_string(),
1828                    timestamp: 1000.0,
1829                    val: 500.0,
1830                    key_values: HashMap::new(),
1831                },
1832            ],
1833        })];
1834
1835        // Simulate combining -m timer with --filter "bench_.*"
1836        // This is what combine_measurements_and_filters does in cli.rs
1837        let measurements = vec!["timer".to_string()];
1838        let filter_patterns = vec!["bench_.*".to_string()];
1839        let combined =
1840            crate::filter::combine_measurements_and_filters(&measurements, &filter_patterns);
1841
1842        // combined should have: ["^timer$", "bench_.*"]
1843        assert_eq!(combined.len(), 2);
1844        assert_eq!(combined[0], "^timer$");
1845        assert_eq!(combined[1], "bench_.*");
1846
1847        // Now compile and discover
1848        let filters = crate::filter::compile_filters(&combined).unwrap();
1849        let selectors = vec![];
1850        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1851
1852        // Should discover both timer (exact match) and bench_cpu (pattern match)
1853        assert_eq!(discovered.len(), 2);
1854        assert!(discovered.contains(&"timer".to_string()));
1855        assert!(discovered.contains(&"bench_cpu".to_string()));
1856        assert!(!discovered.contains(&"memory".to_string())); // Not in -m or filter
1857
1858        // Test with multiple explicit measurements and multiple filters
1859        let measurements = vec!["timer".to_string(), "memory".to_string()];
1860        let filter_patterns = vec!["bench_.*".to_string(), "test_.*".to_string()];
1861        let combined =
1862            crate::filter::combine_measurements_and_filters(&measurements, &filter_patterns);
1863
1864        assert_eq!(combined.len(), 4);
1865
1866        let filters = crate::filter::compile_filters(&combined).unwrap();
1867        let discovered = discover_matching_measurements(&commits, &filters, &selectors);
1868
1869        // Should discover timer, memory, and bench_cpu (no test_* in commits)
1870        assert_eq!(discovered.len(), 3);
1871        assert!(discovered.contains(&"timer".to_string()));
1872        assert!(discovered.contains(&"memory".to_string()));
1873        assert!(discovered.contains(&"bench_cpu".to_string()));
1874    }
1875
1876    #[test]
1877    fn test_audit_with_empty_tail() {
1878        // Test for division by zero bug when tail is empty
1879        // This test reproduces the bug where tail_median is 0.0 when tail is empty,
1880        // causing division by zero in sparkline calculation
1881        let result = audit_with_data(
1882            "test_measurement",
1883            10.0,   // head
1884            vec![], // empty tail - triggers the bug
1885            2,      // min_count
1886            2.0,    // sigma
1887            DispersionMethod::StandardDeviation,
1888            ReductionFunc::Min,
1889        );
1890
1891        // Should succeed and skip (not crash with division by zero)
1892        assert!(result.is_ok(), "Should not crash on empty tail");
1893        let audit_result = result.unwrap();
1894
1895        // Should be skipped due to insufficient measurements
1896        assert!(audit_result.passed);
1897        assert!(audit_result.message.contains("Skipping test"));
1898
1899        // The message should not contain inf or NaN
1900        assert!(!audit_result.message.to_lowercase().contains("inf"));
1901        assert!(!audit_result.message.to_lowercase().contains("nan"));
1902    }
1903
1904    #[test]
1905    fn test_audit_with_all_zero_tail() {
1906        // Test for division by zero when all tail measurements are 0.0
1907        // This tests the edge case where median is 0.0 even with measurements
1908        let result = audit_with_data(
1909            "test_measurement",
1910            5.0,                 // non-zero head
1911            vec![0.0, 0.0, 0.0], // all zeros in tail
1912            2,                   // min_count
1913            2.0,                 // sigma
1914            DispersionMethod::StandardDeviation,
1915            ReductionFunc::Min,
1916        );
1917
1918        // Should succeed (not crash with division by zero)
1919        assert!(result.is_ok(), "Should not crash when tail median is 0.0");
1920        let audit_result = result.unwrap();
1921
1922        // The message should not contain inf or NaN
1923        assert!(!audit_result.message.to_lowercase().contains("inf"));
1924        assert!(!audit_result.message.to_lowercase().contains("nan"));
1925    }
1926
1927    #[test]
1928    fn test_tiered_baseline_approach() {
1929        // Test the tiered approach:
1930        // 1. Non-zero median → use median, show percentages
1931        // 2. Zero median → show absolute values
1932
1933        // Case 1: Median is non-zero - use percentages (default behavior)
1934        let result = audit_with_data(
1935            "test_measurement",
1936            15.0,                   // head
1937            vec![10.0, 11.0, 12.0], // median=11.0 (non-zero)
1938            2,
1939            2.0,
1940            DispersionMethod::StandardDeviation,
1941            ReductionFunc::Min,
1942        );
1943
1944        assert!(result.is_ok());
1945        let audit_result = result.unwrap();
1946        // Should use median as baseline and show percentage
1947        assert!(audit_result.message.contains('%'));
1948        assert!(!audit_result.message.to_lowercase().contains("inf"));
1949
1950        // Case 2: Median is zero with non-zero head - use absolute values
1951        let result = audit_with_data(
1952            "test_measurement",
1953            5.0,                 // head (non-zero)
1954            vec![0.0, 0.0, 0.0], // median=0
1955            2,
1956            2.0,
1957            DispersionMethod::StandardDeviation,
1958            ReductionFunc::Min,
1959        );
1960
1961        assert!(result.is_ok());
1962        let audit_result = result.unwrap();
1963        // Should show absolute values instead of percentages
1964        // The message should contain the sparkline but not percentage symbols
1965        assert!(!audit_result.message.to_lowercase().contains("inf"));
1966        assert!(!audit_result.message.to_lowercase().contains("nan"));
1967        // Check that sparkline exists (contains the dash character)
1968        assert!(audit_result.message.contains('–') || audit_result.message.contains('-'));
1969
1970        // Case 3: Everything is zero - show absolute values [0 - 0]
1971        let result = audit_with_data(
1972            "test_measurement",
1973            0.0,                 // head
1974            vec![0.0, 0.0, 0.0], // median=0
1975            2,
1976            2.0,
1977            DispersionMethod::StandardDeviation,
1978            ReductionFunc::Min,
1979        );
1980
1981        assert!(result.is_ok());
1982        let audit_result = result.unwrap();
1983        // Should show absolute range [0 - 0]
1984        assert!(!audit_result.message.to_lowercase().contains("inf"));
1985        assert!(!audit_result.message.to_lowercase().contains("nan"));
1986    }
1987
1988    #[test]
1989    fn test_min_measurements_two_with_no_tail() {
1990        // Test the minimum allowed min_measurements value (2) with no tail measurements.
1991        // This should skip the audit since we have 0 < 2 tail measurements.
1992        let result = audit_with_data(
1993            "test_measurement",
1994            15.0,   // head
1995            vec![], // no tail measurements
1996            2,      // min_count = 2 (minimum allowed by CLI)
1997            2.0,
1998            DispersionMethod::StandardDeviation,
1999            ReductionFunc::Min,
2000        );
2001
2002        assert!(result.is_ok());
2003        let audit_result = result.unwrap();
2004
2005        // Should pass (skipped) since we have 0 < 2 tail measurements
2006        assert!(audit_result.passed);
2007        assert!(audit_result.message.contains("Skipping test"));
2008        assert!(audit_result
2009            .message
2010            .contains("0 historical measurements found"));
2011        assert!(audit_result
2012            .message
2013            .contains("Less than requested min_measurements of 2"));
2014
2015        // Should show Head summary only (total_measurements = 1)
2016        assert!(audit_result.message.contains("Head:"));
2017        assert!(!audit_result.message.contains("z-score"));
2018        assert!(!audit_result.message.contains("Tail:"));
2019    }
2020
2021    #[test]
2022    fn test_min_measurements_two_with_single_tail() {
2023        // Test the minimum allowed min_measurements value (2) with a single tail measurement.
2024        // This should skip since we have 1 < 2 tail measurements.
2025        let result = audit_with_data(
2026            "test_measurement",
2027            15.0,       // head
2028            vec![10.0], // single tail measurement
2029            2,          // min_count = 2 (minimum allowed by CLI)
2030            2.0,
2031            DispersionMethod::StandardDeviation,
2032            ReductionFunc::Min,
2033        );
2034
2035        assert!(result.is_ok());
2036        let audit_result = result.unwrap();
2037
2038        // Should pass (skipped) since we have 1 < 2 tail measurements
2039        assert!(audit_result.passed);
2040        assert!(audit_result.message.contains("Skipping test"));
2041        assert!(audit_result
2042            .message
2043            .contains("1 historical measurement found"));
2044        assert!(audit_result
2045            .message
2046            .contains("Less than requested min_measurements of 2"));
2047
2048        // Should show both Head and Tail summaries with z-score (total_measurements = 2)
2049        assert!(audit_result.message.contains("Head:"));
2050        assert!(audit_result.message.contains("Tail:"));
2051        assert!(audit_result.message.contains("z-score"));
2052        assert!(audit_result.message.contains("["));
2053    }
2054
2055    #[test]
2056    fn test_aggregation_method_display_min() {
2057        // Test that the aggregation method is displayed correctly with ReductionFunc::Min
2058        let result = audit_with_data(
2059            "test_measurement",
2060            15.0,
2061            vec![10.0, 11.0, 12.0],
2062            2,
2063            2.0,
2064            DispersionMethod::StandardDeviation,
2065            ReductionFunc::Min,
2066        );
2067
2068        assert!(result.is_ok());
2069        let audit_result = result.unwrap();
2070        assert!(audit_result.message.contains("Aggregation: min"));
2071    }
2072
2073    #[test]
2074    fn test_aggregation_method_display_max() {
2075        // Test that the aggregation method is displayed correctly with ReductionFunc::Max
2076        let result = audit_with_data(
2077            "test_measurement",
2078            15.0,
2079            vec![10.0, 11.0, 12.0],
2080            2,
2081            2.0,
2082            DispersionMethod::StandardDeviation,
2083            ReductionFunc::Max,
2084        );
2085
2086        assert!(result.is_ok());
2087        let audit_result = result.unwrap();
2088        assert!(audit_result.message.contains("Aggregation: max"));
2089    }
2090
2091    #[test]
2092    fn test_aggregation_method_display_median() {
2093        // Test that the aggregation method is displayed correctly with ReductionFunc::Median
2094        let result = audit_with_data(
2095            "test_measurement",
2096            15.0,
2097            vec![10.0, 11.0, 12.0],
2098            2,
2099            2.0,
2100            DispersionMethod::StandardDeviation,
2101            ReductionFunc::Median,
2102        );
2103
2104        assert!(result.is_ok());
2105        let audit_result = result.unwrap();
2106        assert!(audit_result.message.contains("Aggregation: median"));
2107    }
2108
2109    #[test]
2110    fn test_aggregation_method_display_mean() {
2111        // Test that the aggregation method is displayed correctly with ReductionFunc::Mean
2112        let result = audit_with_data(
2113            "test_measurement",
2114            15.0,
2115            vec![10.0, 11.0, 12.0],
2116            2,
2117            2.0,
2118            DispersionMethod::StandardDeviation,
2119            ReductionFunc::Mean,
2120        );
2121
2122        assert!(result.is_ok());
2123        let audit_result = result.unwrap();
2124        assert!(audit_result.message.contains("Aggregation: mean"));
2125    }
2126
2127    #[test]
2128    fn test_aggregation_method_not_shown_with_single_measurement() {
2129        // Test that aggregation method is NOT shown when there's only 1 measurement
2130        let result = audit_with_data(
2131            "test_measurement",
2132            15.0,
2133            vec![], // No tail measurements, total = 1
2134            2,
2135            2.0,
2136            DispersionMethod::StandardDeviation,
2137            ReductionFunc::Median,
2138        );
2139
2140        assert!(result.is_ok());
2141        let audit_result = result.unwrap();
2142        // Should NOT show aggregation method (only 1 measurement total)
2143        assert!(!audit_result.message.contains("Aggregation:"));
2144        // But should show Head summary
2145        assert!(audit_result.message.contains("Head:"));
2146    }
2147}