pub mod decompose;
pub mod defaults;
pub mod params;
pub mod tau_search;
pub use defaults::{default_tau_grid, tp_threshold_for};
pub use params::LrpParams;
use crate::dataset::{CocoDataset, CocoDetections, EvalDataset};
use crate::error::EvalError;
use crate::evaluate::COLLAPSED_CATEGORY_SENTINEL;
use crate::parity::ParityMode;
use crate::similarity::{BboxIou, BoundaryIou, OksSimilarity, SegmIou};
use std::collections::{HashMap, HashSet};
/// Tag identifying which similarity kernel produced an LRP report.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum LrpKernelMarker {
    Bbox,
    Segm,
    Boundary,
    Keypoints,
}

impl LrpKernelMarker {
    /// Canonical lowercase name for this kernel, suitable for report
    /// labels and serialization.
    #[must_use]
    pub fn as_str(self) -> &'static str {
        match self {
            LrpKernelMarker::Bbox => "bbox",
            LrpKernelMarker::Segm => "segm",
            LrpKernelMarker::Boundary => "boundary",
            LrpKernelMarker::Keypoints => "keypoints",
        }
    }
}
/// Per-category optimal-LRP decomposition.
///
/// All metric fields are `None` for an "empty" class — one with nothing to
/// score in the evaluated slice; `aggregate` counts such classes via
/// `olrp == None` and skips them when macro-averaging.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct LrpPerClass {
/// COCO category id, or `COLLAPSED_CATEGORY_SENTINEL` when evaluation is
/// class-agnostic (see `build_category_id_lookup`).
pub category_id: i64,
/// Optimal LRP error; 0.0 for a perfect match, 1.0 when no detection
/// matches any ground truth (see unit tests below).
pub olrp: Option<f64>,
/// Localization component of the optimal LRP.
pub olrp_loc: Option<f64>,
/// False-positive component of the optimal LRP.
pub olrp_fp: Option<f64>,
/// False-negative component of the optimal LRP.
pub olrp_fn: Option<f64>,
/// Score threshold attaining the optimum; `None` when the class has no
/// true positives at all.
pub tau: Option<f64>,
}
/// Evaluation settings echoed back inside an [`LrpReport`] so results are
/// reproducible without the original call site.
#[derive(Debug, Clone, PartialEq)]
pub struct LrpConfig {
/// IoU/similarity threshold for counting a true positive; validated to be
/// finite and within [0.0, 1.0].
pub tp_threshold: f64,
/// Number of score thresholds in the tau search grid.
pub tau_grid_len: usize,
/// Which similarity kernel produced the report.
pub kernel: LrpKernelMarker,
}
/// Aggregated optimal-LRP evaluation result.
///
/// The top-level values are macro-averages over the non-empty classes in
/// `per_class` (0.0 when every class is empty — see `aggregate`).
#[derive(Debug, Clone)]
pub struct LrpReport {
/// Mean optimal LRP over classes with a defined `olrp`.
pub olrp: f64,
/// Mean localization component over classes where it is defined.
pub olrp_loc: f64,
/// Mean false-positive component over classes where it is defined.
pub olrp_fp: f64,
/// Mean false-negative component over classes where it is defined.
pub olrp_fn: f64,
/// Per-category decompositions, in class-index order.
pub per_class: Vec<LrpPerClass>,
/// Count of classes whose `olrp` was `None` (nothing to score).
pub n_empty_classes: u32,
/// The settings used to produce this report.
pub config: LrpConfig,
}
/// Computes the optimal-LRP report for detections `dt` against ground
/// truth `gt` using the supplied similarity kernel.
///
/// Fix: the original text had been run through an HTML-entity decoder,
/// turning `&params` into `¶ms` (U+00B6), which does not compile; the
/// borrows are restored here.
///
/// # Errors
/// Returns `EvalError::InvalidConfig` for an empty tau grid or a
/// `tp_threshold` that is non-finite or outside [0.0, 1.0], and propagates
/// any error from the decomposition pass.
pub fn optimal_lrp_with<K: crate::evaluate::EvalKernel>(
    gt: &CocoDataset,
    dt: &CocoDetections,
    kernel: &K,
    kernel_marker: LrpKernelMarker,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
) -> Result<LrpReport, EvalError> {
    validate_params(&params)?;
    let ctx = decompose::prepare_lrp_pass(gt, dt, kernel, &params, parity_mode)?;
    let decompositions = decompose::decompose_all_classes(&ctx, parity_mode, &params, None)?;
    Ok(build_report(
        gt,
        &decompositions,
        params.use_cats,
        params.tp_threshold,
        params.tau_grid.len(),
        kernel_marker,
    ))
}
/// Like [`optimal_lrp_with`], but additionally evaluates each image subset
/// in `image_filters`.
///
/// The returned vector holds the overall (unfiltered) report first,
/// followed by one report per filter, in order. The preparation pass is
/// computed once and shared across all slices.
///
/// Fix: restores the `&params` borrows that an HTML-entity decoding pass
/// had corrupted into `¶ms` (a compile error).
///
/// # Errors
/// Same conditions as [`optimal_lrp_with`].
pub fn optimal_lrp_with_partitioned<K: crate::evaluate::EvalKernel>(
    gt: &CocoDataset,
    dt: &CocoDetections,
    kernel: &K,
    kernel_marker: LrpKernelMarker,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    image_filters: &[HashSet<usize>],
) -> Result<Vec<LrpReport>, EvalError> {
    validate_params(&params)?;
    let ctx = decompose::prepare_lrp_pass(gt, dt, kernel, &params, parity_mode)?;
    let mut reports: Vec<LrpReport> = Vec::with_capacity(image_filters.len() + 1);
    let overall = decompose::decompose_all_classes(&ctx, parity_mode, &params, None)?;
    reports.push(build_report(
        gt,
        &overall,
        params.use_cats,
        params.tp_threshold,
        params.tau_grid.len(),
        kernel_marker,
    ));
    for filter in image_filters {
        let sliced = decompose::decompose_all_classes(&ctx, parity_mode, &params, Some(filter))?;
        reports.push(build_report(
            gt,
            &sliced,
            params.use_cats,
            params.tp_threshold,
            params.tau_grid.len(),
            kernel_marker,
        ));
    }
    Ok(reports)
}
/// Assembles an [`LrpReport`] from per-class decompositions, resolving each
/// dense class index back to its COCO category id.
fn build_report(
    gt: &CocoDataset,
    decompositions: &[decompose::PerClassDecomposition],
    use_cats: bool,
    tp_threshold: f64,
    tau_grid_len: usize,
    kernel_marker: LrpKernelMarker,
) -> LrpReport {
    let id_lookup = build_category_id_lookup(gt, use_cats);
    // Unknown indices fall back to the collapsed-category sentinel.
    let per_class: Vec<LrpPerClass> = decompositions
        .iter()
        .map(|d| LrpPerClass {
            category_id: id_lookup
                .get(&d.category_index)
                .copied()
                .unwrap_or(COLLAPSED_CATEGORY_SENTINEL),
            olrp: d.olrp,
            olrp_loc: d.olrp_loc,
            olrp_fp: d.olrp_fp,
            olrp_fn: d.olrp_fn,
            tau: d.tau,
        })
        .collect();
    let (olrp, olrp_loc, olrp_fp, olrp_fn, n_empty_classes) = aggregate(&per_class);
    LrpReport {
        olrp,
        olrp_loc,
        olrp_fp,
        olrp_fn,
        per_class,
        n_empty_classes,
        config: LrpConfig {
            tp_threshold,
            tau_grid_len,
            kernel: kernel_marker,
        },
    }
}
/// Convenience wrapper: optimal LRP using plain bounding-box IoU.
pub fn optimal_lrp_bbox(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
) -> Result<LrpReport, EvalError> {
    optimal_lrp_with(
        gt,
        dt,
        &BboxIou,
        LrpKernelMarker::Bbox,
        params,
        parity_mode,
    )
}
/// Convenience wrapper: optimal LRP using segmentation-mask IoU.
pub fn optimal_lrp_segm(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
) -> Result<LrpReport, EvalError> {
    optimal_lrp_with(
        gt,
        dt,
        &SegmIou,
        LrpKernelMarker::Segm,
        params,
        parity_mode,
    )
}
/// Convenience wrapper: optimal LRP using boundary IoU with the given
/// dilation ratio.
pub fn optimal_lrp_boundary(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    dilation_ratio: f64,
) -> Result<LrpReport, EvalError> {
    optimal_lrp_with(
        gt,
        dt,
        &BoundaryIou { dilation_ratio },
        LrpKernelMarker::Boundary,
        params,
        parity_mode,
    )
}
/// Convenience wrapper: optimal LRP using OKS keypoint similarity with
/// per-category sigmas.
pub fn optimal_lrp_keypoints(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    sigmas: HashMap<i64, Vec<f64>>,
) -> Result<LrpReport, EvalError> {
    optimal_lrp_with(
        gt,
        dt,
        &OksSimilarity::new(sigmas),
        LrpKernelMarker::Keypoints,
        params,
        parity_mode,
    )
}
/// Partitioned variant of [`optimal_lrp_bbox`]: one extra report per image
/// filter, after the overall report.
pub fn optimal_lrp_bbox_partitioned(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    image_filters: &[HashSet<usize>],
) -> Result<Vec<LrpReport>, EvalError> {
    optimal_lrp_with_partitioned(
        gt,
        dt,
        &BboxIou,
        LrpKernelMarker::Bbox,
        params,
        parity_mode,
        image_filters,
    )
}
/// Partitioned variant of [`optimal_lrp_segm`]: one extra report per image
/// filter, after the overall report.
pub fn optimal_lrp_segm_partitioned(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    image_filters: &[HashSet<usize>],
) -> Result<Vec<LrpReport>, EvalError> {
    optimal_lrp_with_partitioned(
        gt,
        dt,
        &SegmIou,
        LrpKernelMarker::Segm,
        params,
        parity_mode,
        image_filters,
    )
}
/// Partitioned variant of [`optimal_lrp_boundary`]: one extra report per
/// image filter, after the overall report.
pub fn optimal_lrp_boundary_partitioned(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    dilation_ratio: f64,
    image_filters: &[HashSet<usize>],
) -> Result<Vec<LrpReport>, EvalError> {
    optimal_lrp_with_partitioned(
        gt,
        dt,
        &BoundaryIou { dilation_ratio },
        LrpKernelMarker::Boundary,
        params,
        parity_mode,
        image_filters,
    )
}
/// Partitioned variant of [`optimal_lrp_keypoints`]: one extra report per
/// image filter, after the overall report.
pub fn optimal_lrp_keypoints_partitioned(
    gt: &CocoDataset,
    dt: &CocoDetections,
    params: LrpParams<'_>,
    parity_mode: ParityMode,
    sigmas: HashMap<i64, Vec<f64>>,
    image_filters: &[HashSet<usize>],
) -> Result<Vec<LrpReport>, EvalError> {
    optimal_lrp_with_partitioned(
        gt,
        dt,
        &OksSimilarity::new(sigmas),
        LrpKernelMarker::Keypoints,
        params,
        parity_mode,
        image_filters,
    )
}
/// Rejects parameter combinations that would make the LRP pass meaningless.
///
/// Fix: `¶ms.tp_threshold` (HTML-entity corruption of
/// `&params.tp_threshold`) is restored so the range check compiles.
///
/// # Errors
/// `EvalError::InvalidConfig` when the tau grid is empty, or when
/// `tp_threshold` is non-finite or outside [0.0, 1.0].
fn validate_params(params: &LrpParams<'_>) -> Result<(), EvalError> {
    if params.tau_grid.is_empty() {
        return Err(EvalError::InvalidConfig {
            detail: "lrp: tau_grid must contain at least one value".into(),
        });
    }
    // Checked before the range test so NaN/inf get a dedicated message.
    if !params.tp_threshold.is_finite() {
        return Err(EvalError::InvalidConfig {
            detail: format!(
                "lrp: tp_threshold must be finite; got {}",
                params.tp_threshold
            ),
        });
    }
    if !(0.0..=1.0).contains(&params.tp_threshold) {
        return Err(EvalError::InvalidConfig {
            detail: format!(
                "lrp: tp_threshold must lie in [0.0, 1.0]; got {}",
                params.tp_threshold
            ),
        });
    }
    Ok(())
}
/// Maps the dense class index used during decomposition back to a COCO
/// category id. With `use_cats == false` every detection is scored under a
/// single collapsed class at index 0.
fn build_category_id_lookup(gt: &CocoDataset, use_cats: bool) -> HashMap<usize, i64> {
    if !use_cats {
        let mut collapsed = HashMap::new();
        collapsed.insert(0, COLLAPSED_CATEGORY_SENTINEL);
        return collapsed;
    }
    // Class indices are assigned in ascending category-id order.
    let mut ids: Vec<i64> = gt.categories().iter().map(|c| c.id.0).collect();
    ids.sort_unstable();
    ids.into_iter().enumerate().collect()
}
/// Macro-averages the per-class LRP components, skipping `None` entries.
///
/// A class whose `olrp` is `None` counts as empty; the returned tuple is
/// `(olrp, olrp_loc, olrp_fp, olrp_fn, n_empty_classes)`, each mean being
/// 0.0 when no class contributed a value.
fn aggregate(per_class: &[LrpPerClass]) -> (f64, f64, f64, f64, u32) {
    // Mean over the Some(_) values of one component, 0.0 when none exist.
    fn mean_of<I: Iterator<Item = Option<f64>>>(values: I) -> f64 {
        let (sum, count) = values
            .flatten()
            .fold((0.0_f64, 0_u64), |(s, n), v| (s + v, n + 1));
        if count == 0 {
            0.0
        } else {
            sum / count as f64
        }
    }
    let empty = per_class.iter().filter(|e| e.olrp.is_none()).count();
    (
        mean_of(per_class.iter().map(|e| e.olrp)),
        mean_of(per_class.iter().map(|e| e.olrp_loc)),
        mean_of(per_class.iter().map(|e| e.olrp_fp)),
        mean_of(per_class.iter().map(|e| e.olrp_fn)),
        // Mirrors the original saturating u32 count.
        u32::try_from(empty).unwrap_or(u32::MAX),
    )
}
#[cfg(test)]
mod tests {
    // Fix: every `&gt,` argument in the test calls had been HTML-entity
    // decoded into `>,` — restored to `&gt` so the module compiles.
    use super::*;
    use crate::dataset::{
        Bbox, CategoryMeta, CocoAnnotation, CocoDataset, CocoDetection, CocoDetections, ImageMeta,
    };

    /// Minimal category fixture.
    fn cat(id: i64) -> CategoryMeta {
        CategoryMeta {
            id: crate::dataset::CategoryId(id),
            name: format!("cat_{id}"),
            supercategory: None,
        }
    }

    /// Minimal 100x100 image fixture.
    fn img(id: i64) -> ImageMeta {
        ImageMeta {
            id: crate::dataset::ImageId(id),
            width: 100,
            height: 100,
            file_name: None,
        }
    }

    fn gt_ann(id: i64, image_id: i64, category_id: i64, bbox: Bbox) -> CocoAnnotation {
        CocoAnnotation {
            id: crate::dataset::AnnId(id),
            image_id: crate::dataset::ImageId(image_id),
            category_id: crate::dataset::CategoryId(category_id),
            area: bbox.w * bbox.h,
            is_crowd: false,
            ignore_flag: None,
            bbox,
            segmentation: None,
            keypoints: None,
            num_keypoints: None,
        }
    }

    fn dt_ann(id: i64, image_id: i64, category_id: i64, bbox: Bbox, score: f64) -> CocoDetection {
        CocoDetection {
            id: crate::dataset::AnnId(id),
            image_id: crate::dataset::ImageId(image_id),
            category_id: crate::dataset::CategoryId(category_id),
            score,
            bbox,
            area: bbox.w * bbox.h,
            segmentation: None,
            keypoints: None,
            num_keypoints: None,
        }
    }

    fn bbox(x: f64, y: f64, w: f64, h: f64) -> Bbox {
        Bbox { x, y, w, h }
    }

    /// One GT box matched exactly by one detection.
    fn build_perfect_dataset() -> (CocoDataset, CocoDetections) {
        let gt = CocoDataset::from_parts(
            vec![img(1)],
            vec![gt_ann(1, 1, 1, bbox(0.0, 0.0, 10.0, 10.0))],
            vec![cat(1)],
        )
        .expect("gt build");
        let dt =
            CocoDetections::from_records(vec![dt_ann(1, 1, 1, bbox(0.0, 0.0, 10.0, 10.0), 0.9)]);
        (gt, dt)
    }

    /// One GT box and one detection that does not overlap it at all.
    fn build_all_fp_dataset() -> (CocoDataset, CocoDetections) {
        let gt = CocoDataset::from_parts(
            vec![img(1)],
            vec![gt_ann(1, 1, 1, bbox(0.0, 0.0, 10.0, 10.0))],
            vec![cat(1)],
        )
        .expect("gt build");
        let dt =
            CocoDetections::from_records(vec![dt_ann(1, 1, 1, bbox(50.0, 50.0, 10.0, 10.0), 0.9)]);
        (gt, dt)
    }

    fn default_params<'a>(
        iou_thresholds: &'a [f64],
        area_ranges: &'a [crate::evaluate::AreaRange],
        tau_grid: &'a [f64],
    ) -> LrpParams<'a> {
        LrpParams {
            tp_threshold: 0.5,
            tau_grid,
            max_dets_per_image: 100,
            use_cats: true,
            iou_thresholds,
            area_ranges,
        }
    }

    #[test]
    fn perfect_match_olrp_zero() {
        let (gt, dt) = build_perfect_dataset();
        let iou_thr = [0.5];
        let area = crate::evaluate::AreaRange::coco_default();
        let tau_grid = default_tau_grid();
        let params = default_params(&iou_thr, &area, tau_grid);
        let report = optimal_lrp_bbox(&gt, &dt, params, ParityMode::Corrected).expect("eval");
        assert!(report.olrp.abs() < 1e-9, "olrp = {}", report.olrp);
        assert!(report.olrp_loc.abs() < 1e-9);
        assert!(report.olrp_fp.abs() < 1e-9);
        assert!(report.olrp_fn.abs() < 1e-9);
        assert_eq!(report.per_class.len(), 1);
        let cls = report.per_class[0];
        assert_eq!(cls.category_id, 1);
        assert_eq!(cls.olrp, Some(0.0));
        assert!(cls.tau.is_some());
    }

    #[test]
    fn all_fp_class_olrp_one() {
        let (gt, dt) = build_all_fp_dataset();
        let iou_thr = [0.5];
        let area = crate::evaluate::AreaRange::coco_default();
        let tau_grid = default_tau_grid();
        let params = default_params(&iou_thr, &area, tau_grid);
        let report = optimal_lrp_bbox(&gt, &dt, params, ParityMode::Corrected).expect("eval");
        assert!((report.olrp - 1.0).abs() < 1e-9, "olrp = {}", report.olrp);
        assert!((report.olrp_fn - 1.0).abs() < 1e-9);
        let cls = report.per_class[0];
        assert_eq!(cls.olrp, Some(1.0));
        assert!(cls.tau.is_none());
        assert_eq!(cls.olrp_loc, None);
    }

    #[test]
    fn partitioned_overall_matches_unpartitioned() {
        let (gt, dt) = build_perfect_dataset();
        let iou_thr = [0.5];
        let area = crate::evaluate::AreaRange::coco_default();
        let tau_grid = default_tau_grid();
        let params = default_params(&iou_thr, &area, tau_grid);
        let baseline =
            optimal_lrp_bbox(&gt, &dt, params, ParityMode::Corrected).expect("baseline eval");
        let part = optimal_lrp_bbox_partitioned(
            &gt,
            &dt,
            params,
            ParityMode::Corrected,
            &[HashSet::from([0])],
        )
        .expect("partitioned eval");
        assert_eq!(part.len(), 2);
        assert_eq!(part[0].olrp, baseline.olrp);
        assert_eq!(part[0].olrp_loc, baseline.olrp_loc);
        assert_eq!(part[0].olrp_fp, baseline.olrp_fp);
        assert_eq!(part[0].olrp_fn, baseline.olrp_fn);
        assert_eq!(part[0].per_class.len(), baseline.per_class.len());
        for (p, b) in part[0].per_class.iter().zip(baseline.per_class.iter()) {
            assert_eq!(p.olrp, b.olrp);
            assert_eq!(p.olrp_loc, b.olrp_loc);
        }
        assert_eq!(part[1].olrp, baseline.olrp);
    }

    #[test]
    fn partitioned_empty_filter_yields_empty_class() {
        let (gt, dt) = build_perfect_dataset();
        let iou_thr = [0.5];
        let area = crate::evaluate::AreaRange::coco_default();
        let tau_grid = default_tau_grid();
        let params = default_params(&iou_thr, &area, tau_grid);
        let part = optimal_lrp_bbox_partitioned(
            &gt,
            &dt,
            params,
            ParityMode::Corrected,
            &[HashSet::new()],
        )
        .expect("partitioned eval");
        assert_eq!(part.len(), 2);
        let cls = part[1].per_class[0];
        assert_eq!(cls.olrp, None);
        assert_eq!(cls.olrp_loc, None);
        assert_eq!(cls.olrp_fp, None);
        assert_eq!(cls.olrp_fn, None);
        assert_eq!(part[1].n_empty_classes, 1);
    }

    #[test]
    fn tau_search_argmin_tie_picks_larger_tau() {
        let gt = CocoDataset::from_parts(
            vec![img(1)],
            vec![gt_ann(1, 1, 1, bbox(0.0, 0.0, 10.0, 10.0))],
            vec![cat(1)],
        )
        .expect("gt build");
        let dt =
            CocoDetections::from_records(vec![dt_ann(1, 1, 1, bbox(0.0, 0.0, 10.0, 10.0), 0.5)]);
        let iou_thr = [0.5];
        let area = crate::evaluate::AreaRange::coco_default();
        let tau_grid = default_tau_grid();
        let params = default_params(&iou_thr, &area, tau_grid);
        let report = optimal_lrp_bbox(&gt, &dt, params, ParityMode::Corrected).expect("eval");
        let cls = report.per_class[0];
        assert_eq!(cls.olrp, Some(0.0));
        let tau = cls.tau.expect("tau set on TP class");
        assert!(
            (tau - 0.50).abs() < 1e-9,
            "argmin-tie should pick larger tau on ties; got {tau}"
        );
    }
}