// locus_core/decoder.rs
1//! Tag decoding, homography computation, and bit sampling.
2//!
3//! This module handles the final stage of the pipeline:
4//! 1. **Homography**: Computing the projection from canonical tag space to image pixels.
5//! 2. **Bit Sampling**: Bilinear interpolation of intensities at grid points.
6//! 3. **Error Correction**: Correcting bit flips using tag-family specific Hamming distances.
7
8#![allow(unsafe_code, clippy::cast_sign_loss)]
9use crate::config;
10use multiversion::multiversion;
11use nalgebra::{SMatrix, SVector};
12
/// A 3x3 homography matrix mapping canonical tag coordinates to image pixels.
pub struct Homography {
    /// The 3x3 homography matrix. The constructors in this module normalize
    /// it so that `h[(2, 2)] == 1.0`.
    pub h: SMatrix<f64, 3, 3>,
}
18
19impl Homography {
20    /// Compute homography from 4 source points to 4 destination points using DLT.
21    /// Points are [x, y].
22    /// Compute homography from 4 source points to 4 destination points using DLT.
23    /// Points are [x, y].
24    #[must_use]
25    pub fn from_pairs(src: &[[f64; 2]; 4], dst: &[[f64; 2]; 4]) -> Option<Self> {
26        let mut a = SMatrix::<f64, 8, 9>::zeros();
27
28        for i in 0..4 {
29            let sx = src[i][0];
30            let sy = src[i][1];
31            let dx = dst[i][0];
32            let dy = dst[i][1];
33
34            a[(i * 2, 0)] = -sx;
35            a[(i * 2, 1)] = -sy;
36            a[(i * 2, 2)] = -1.0;
37            a[(i * 2, 6)] = sx * dx;
38            a[(i * 2, 7)] = sy * dx;
39            a[(i * 2, 8)] = dx;
40
41            a[(i * 2 + 1, 3)] = -sx;
42            a[(i * 2 + 1, 4)] = -sy;
43            a[(i * 2 + 1, 5)] = -1.0;
44            a[(i * 2 + 1, 6)] = sx * dy;
45            a[(i * 2 + 1, 7)] = sy * dy;
46            a[(i * 2 + 1, 8)] = dy;
47        }
48
49        let mut b = SVector::<f64, 8>::zeros();
50        let mut m = SMatrix::<f64, 8, 8>::zeros();
51        for i in 0..8 {
52            for j in 0..8 {
53                m[(i, j)] = a[(i, j)];
54            }
55            b[i] = -a[(i, 8)];
56        }
57
58        m.lu().solve(&b).and_then(|h_vec| {
59            let mut h = SMatrix::<f64, 3, 3>::identity();
60            h[(0, 0)] = h_vec[0];
61            h[(0, 1)] = h_vec[1];
62            h[(0, 2)] = h_vec[2];
63            h[(1, 0)] = h_vec[3];
64            h[(1, 1)] = h_vec[4];
65            h[(1, 2)] = h_vec[5];
66            h[(2, 0)] = h_vec[6];
67            h[(2, 1)] = h_vec[7];
68            h[(2, 2)] = 1.0;
69            let res = Self { h };
70            for i in 0..4 {
71                let p_proj = res.project(src[i]);
72                let err_sq = (p_proj[0] - dst[i][0]).powi(2) + (p_proj[1] - dst[i][1]).powi(2);
73                if !err_sq.is_finite() || err_sq > 1e-4 {
74                    return None;
75                }
76            }
77            Some(res)
78        })
79    }
80
81    /// Optimized homography computation from canonical unit square to a quad.
82    /// Source points are assumed to be: `[(-1,-1), (1,-1), (1,1), (-1,1)]`.
83    #[must_use]
84    pub fn square_to_quad(dst: &[[f64; 2]; 4]) -> Option<Self> {
85        let mut b = SVector::<f64, 8>::zeros();
86        let mut m = SMatrix::<f64, 8, 8>::zeros();
87
88        // Hardcoded coefficients for src = [(-1,-1), (1,-1), (1,1), (-1,1)]
89        // Point 0: (-1, -1) -> (x0, y0)
90        let x0 = dst[0][0];
91        let y0 = dst[0][1];
92        // h0 + h1 - h2 - x0*h6 - x0*h7 = -x0  =>  1, 1, -1, ..., -x0, -x0
93        m[(0, 0)] = 1.0;
94        m[(0, 1)] = 1.0;
95        m[(0, 2)] = -1.0;
96        m[(0, 6)] = -x0;
97        m[(0, 7)] = -x0;
98        b[0] = -x0;
99        // h3 + h4 - h5 - y0*h6 - y0*h7 = -y0  =>  ..., 1, 1, -1, -y0, -y0
100        m[(1, 3)] = 1.0;
101        m[(1, 4)] = 1.0;
102        m[(1, 5)] = -1.0;
103        m[(1, 6)] = -y0;
104        m[(1, 7)] = -y0;
105        b[1] = -y0;
106
107        // Point 1: (1, -1) -> (x1, y1)
108        let x1 = dst[1][0];
109        let y1 = dst[1][1];
110        // -h0 + h1 + h2 + x1*h6 - x1*h7 = -x1
111        m[(2, 0)] = -1.0;
112        m[(2, 1)] = 1.0;
113        m[(2, 2)] = -1.0;
114        m[(2, 6)] = x1;
115        m[(2, 7)] = -x1;
116        b[2] = -x1;
117        m[(3, 3)] = -1.0;
118        m[(3, 4)] = 1.0;
119        m[(3, 5)] = -1.0;
120        m[(3, 6)] = y1;
121        m[(3, 7)] = -y1;
122        b[3] = -y1;
123
124        // Point 2: (1, 1) -> (x2, y2)
125        let x2 = dst[2][0];
126        let y2 = dst[2][1];
127        // -h0 - h1 + h2 + x2*h6 + x2*h7 = -x2
128        m[(4, 0)] = -1.0;
129        m[(4, 1)] = -1.0;
130        m[(4, 2)] = -1.0;
131        m[(4, 6)] = x2;
132        m[(4, 7)] = x2;
133        b[4] = -x2;
134        m[(5, 3)] = -1.0;
135        m[(5, 4)] = -1.0;
136        m[(5, 5)] = -1.0;
137        m[(5, 6)] = y2;
138        m[(5, 7)] = y2;
139        b[5] = -y2;
140
141        // Point 3: (-1, 1) -> (x3, y3)
142        let x3 = dst[3][0];
143        let y3 = dst[3][1];
144        // h0 - h1 + h2 - x3*h6 + x3*h7 = -x3
145        m[(6, 0)] = 1.0;
146        m[(6, 1)] = -1.0;
147        m[(6, 2)] = -1.0;
148        m[(6, 6)] = -x3;
149        m[(6, 7)] = x3;
150        b[6] = -x3;
151        m[(7, 3)] = 1.0;
152        m[(7, 4)] = -1.0;
153        m[(7, 5)] = -1.0;
154        m[(7, 6)] = -y3;
155        m[(7, 7)] = y3;
156        b[7] = -y3;
157
158        m.lu().solve(&b).and_then(|h_vec| {
159            let mut h = SMatrix::<f64, 3, 3>::identity();
160            h[(0, 0)] = h_vec[0];
161            h[(0, 1)] = h_vec[1];
162            h[(0, 2)] = h_vec[2];
163            h[(1, 0)] = h_vec[3];
164            h[(1, 1)] = h_vec[4];
165            h[(1, 2)] = h_vec[5];
166            h[(2, 0)] = h_vec[6];
167            h[(2, 1)] = h_vec[7];
168            h[(2, 2)] = 1.0;
169            let res = Self { h };
170            let src_unit = [[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0]];
171            for i in 0..4 {
172                let p_proj = res.project(src_unit[i]);
173                let err_sq = (p_proj[0] - dst[i][0]).powi(2) + (p_proj[1] - dst[i][1]).powi(2);
174                if err_sq > 1e-4 {
175                    return None;
176                }
177            }
178            Some(res)
179        })
180    }
181
182    /// Project a point using the homography.
183    #[must_use]
184    pub fn project(&self, p: [f64; 2]) -> [f64; 2] {
185        let res = self.h * SVector::<f64, 3>::new(p[0], p[1], 1.0);
186        let w = res[2];
187        [res[0] / w, res[1] / w]
188    }
189}
190
191/// Refine corner positions using edge-based optimization with the homography.
192///
193/// After successful decoding, we fit lines to each edge using gradient-weighted
194/// least squares, then compute corners as line intersections. This provides
195/// more accurate corner localization than the initial detection.
196#[must_use]
197pub fn refine_corners_with_homography(
198    img: &crate::image::ImageView,
199    corners: &[[f64; 2]; 4],
200    _homography: &Homography,
201) -> [[f64; 2]; 4] {
202    // Fit a line to each of the 4 edges
203    let mut lines = [(0.0f64, 0.0f64, 0.0f64); 4]; // (a, b, c) where ax + by + c = 0
204    let mut line_valid = [false; 4];
205
206    for i in 0..4 {
207        let next = (i + 1) % 4;
208        let p1 = corners[i];
209        let p2 = corners[next];
210
211        let dx = p2[0] - p1[0];
212        let dy = p2[1] - p1[1];
213        let len = (dx * dx + dy * dy).sqrt();
214
215        if len < 4.0 {
216            continue;
217        }
218
219        // Normal direction (perpendicular to edge)
220        let nx = -dy / len;
221        let ny = dx / len;
222
223        // Collect weighted samples along the edge
224        let mut sum_w = 0.0;
225        let mut sum_d = 0.0;
226        let n_samples = (len as usize).clamp(5, 20);
227
228        for s in 1..=n_samples {
229            let t = s as f64 / (n_samples + 1) as f64;
230            let px = p1[0] + dx * t;
231            let py = p1[1] + dy * t;
232
233            // Search for gradient peak perpendicular to edge
234            let mut best_pos = (px, py);
235            let mut best_mag = 0.0;
236
237            for step in -3..=3 {
238                let sx = px + nx * f64::from(step);
239                let sy = py + ny * f64::from(step);
240
241                if sx < 1.0
242                    || sx >= (img.width - 2) as f64
243                    || sy < 1.0
244                    || sy >= (img.height - 2) as f64
245                {
246                    continue;
247                }
248
249                let g = img.sample_gradient_bilinear(sx, sy);
250                let mag = (g[0] * g[0] + g[1] * g[1]).sqrt();
251
252                if mag > best_mag {
253                    best_mag = mag;
254                    best_pos = (sx, sy);
255                }
256            }
257
258            if best_mag > 5.0 {
259                // Distance from best_pos to original line
260                let d = nx * best_pos.0 + ny * best_pos.1;
261                sum_w += best_mag;
262                sum_d += d * best_mag;
263            }
264        }
265
266        if sum_w > 1.0 {
267            // Line equation: nx * x + ny * y + c = 0
268            let c = -sum_d / sum_w;
269            lines[i] = (nx, ny, c);
270            line_valid[i] = true;
271        }
272    }
273
274    // If we don't have all 4 lines, return original corners
275    if !line_valid.iter().all(|&v| v) {
276        return *corners;
277    }
278
279    // Compute corner intersections
280    let mut refined = *corners;
281    for i in 0..4 {
282        let prev = (i + 3) % 4;
283        let (a1, b1, c1) = lines[prev];
284        let (a2, b2, c2) = lines[i];
285
286        let det = a1 * b2 - a2 * b1;
287        if det.abs() > 1e-6 {
288            let x = (b1 * c2 - b2 * c1) / det;
289            let y = (a2 * c1 - a1 * c2) / det;
290
291            // Sanity check: intersection should be near original corner
292            let dist_sq = (x - corners[i][0]).powi(2) + (y - corners[i][1]).powi(2);
293            if dist_sq < 4.0 {
294                refined[i] = [x, y];
295            }
296        }
297    }
298
299    refined
300}
301
302/// Refine corner positions using GridFit optimization.
303///
304/// This method optimizes the homography by adjusting corners to maximize the
305/// contrast between expected black and white cells in the decoded grid.
306/// This minimizes photometric error in the tag's coordinate system.
307pub fn refine_corners_gridfit(
308    img: &crate::image::ImageView,
309    corners: &[[f64; 2]; 4],
310    decoder: &(impl TagDecoder + ?Sized),
311    bits: u64,
312) -> [[f64; 2]; 4] {
313    let mut current_corners = *corners;
314    let mut best_corners = *corners;
315    let mut best_contrast = -1.0;
316
317    // Optimization parameters
318    let step_sizes = [0.5, 0.25, 0.125]; // Coarse to fine
319    // let _window = 1;
320
321    // Compute initial contrast
322    if let Some(h) = Homography::square_to_quad(&current_corners)
323        && let Some(contrast) = compute_grid_contrast(img, &h, decoder, bits)
324    {
325        best_contrast = contrast;
326    }
327
328    // We assume the initial decode was valid, so we should have a valid contrast.
329    // If not, just return original corners.
330    if best_contrast < 0.0 {
331        return *corners;
332    }
333
334    // Coordinate Descent Optimization
335    // Iteratively perturb each coordinate of each corner
336    for &step in &step_sizes {
337        let mut improved = true;
338        let mut iters = 0;
339
340        while improved && iters < 5 {
341            improved = false;
342            iters += 1;
343
344            for i in 0..4 {
345                for axis in 0..2 {
346                    // Try moving -step, 0, +step
347                    let original_val = current_corners[i][axis];
348
349                    let candidate_vals = [original_val - step, original_val + step];
350
351                    for &val in &candidate_vals {
352                        current_corners[i][axis] = val;
353
354                        // Check hypothesis
355                        let mut valid = false;
356                        if let Some(h) = Homography::square_to_quad(&current_corners)
357                            && let Some(contrast) = compute_grid_contrast(img, &h, decoder, bits)
358                            && contrast > best_contrast
359                        {
360                            best_contrast = contrast;
361                            best_corners = current_corners;
362                            improved = true;
363                            valid = true;
364                        }
365
366                        if valid {
367                            // Greedy update: accept improvement and continue.
368                            break;
369                        }
370                        // Revert if worse or invalid
371                        current_corners[i][axis] = original_val;
372                    }
373
374                    // Ensure current matches best before moving to next coordinate
375                    current_corners = best_corners;
376                }
377            }
378        }
379    }
380
381    best_corners
382}
383
384/// Compute the contrast of the grid given a homography and the expected bit pattern.
385/// Returns (mean_white - mean_black).
386fn compute_grid_contrast(
387    img: &crate::image::ImageView,
388    h: &Homography,
389    decoder: &(impl TagDecoder + ?Sized),
390    bits: u64,
391) -> Option<f64> {
392    let points = decoder.sample_points();
393    let _n = points.len();
394
395    // Pre-calculate homography terms for speed
396    let h00 = h.h[(0, 0)];
397    let h01 = h.h[(0, 1)];
398    let h02 = h.h[(0, 2)];
399    let h10 = h.h[(1, 0)];
400    let h11 = h.h[(1, 1)];
401    let h12 = h.h[(1, 2)];
402    let h20 = h.h[(2, 0)];
403    let h21 = h.h[(2, 1)];
404    let h22 = h.h[(2, 2)];
405
406    let mut sum_white = 0.0;
407    let mut cnt_white = 0;
408    let mut sum_black = 0.0;
409    let mut cnt_black = 0;
410
411    for (i, &p) in points.iter().enumerate() {
412        // Project
413        let wz = h20 * p.0 + h21 * p.1 + h22;
414        if wz.abs() < 1e-6 {
415            return None;
416        }
417        let img_x = (h00 * p.0 + h01 * p.1 + h02) / wz;
418        let img_y = (h10 * p.0 + h11 * p.1 + h12) / wz;
419
420        // Check bounds
421        if img_x < 0.0
422            || img_x >= (img.width - 1) as f64
423            || img_y < 0.0
424            || img_y >= (img.height - 1) as f64
425        {
426            return None;
427        }
428
429        // Bilinear sample
430        let xf = img_x.floor();
431        let yf = img_y.floor();
432        let ix = xf as usize;
433        let iy = yf as usize;
434        let dx = img_x - xf;
435        let dy = img_y - yf;
436
437        let val = unsafe {
438            let row0 = img.get_row_unchecked(iy);
439            let row1 = img.get_row_unchecked(iy + 1);
440            let v00 = f64::from(*row0.get_unchecked(ix));
441            let v10 = f64::from(*row0.get_unchecked(ix + 1));
442            let v01 = f64::from(*row1.get_unchecked(ix));
443            let v11 = f64::from(*row1.get_unchecked(ix + 1));
444            let top = v00 + dx * (v10 - v00);
445            let bot = v01 + dx * (v11 - v01);
446            top + dy * (bot - top)
447        };
448
449        let expected_bit = (bits >> i) & 1;
450        if expected_bit == 1 {
451            sum_white += val;
452            cnt_white += 1;
453        } else {
454            sum_black += val;
455            cnt_black += 1;
456        }
457    }
458
459    if cnt_white == 0 || cnt_black == 0 {
460        return None;
461    }
462
463    let mean_white = sum_white / f64::from(cnt_white);
464    let mean_black = sum_black / f64::from(cnt_black);
465
466    Some(mean_white - mean_black)
467}
468
469/// Refine corners using "Erf-Fit" (Gaussian fit to intensity profile).
470///
471/// This assumes the edge intensity profile is an Error Function (convolution of step edge with Gaussian PSF).
472/// We minimize the photometric error between the image and the ERF model using Gauss-Newton.
473pub fn refine_corners_erf(
474    arena: &bumpalo::Bump,
475    img: &crate::image::ImageView,
476    corners: &[[f64; 2]; 4],
477    sigma: f64,
478) -> [[f64; 2]; 4] {
479    let mut lines = [(0.0f64, 0.0f64, 0.0f64); 4];
480    let mut line_valid = [false; 4];
481
482    // Sub-pixel edge refinement for each of the 4 edges
483    for i in 0..4 {
484        let next = (i + 1) % 4;
485        let p1 = corners[i];
486        let p2 = corners[next];
487
488        if let Some((nx, ny, d)) = fit_edge_erf(arena, img, p1, p2, sigma) {
489            lines[i] = (nx, ny, d);
490            line_valid[i] = true;
491        }
492    }
493
494    if !line_valid.iter().all(|&v| v) {
495        return *corners;
496    }
497
498    // Intersect lines to get refined corners
499    let mut refined = *corners;
500    for i in 0..4 {
501        let prev = (i + 3) % 4;
502        let (a1, b1, c1) = lines[prev];
503        let (a2, b2, c2) = lines[i];
504        let det = a1 * b2 - a2 * b1;
505        if det.abs() > 1e-6 {
506            let x = (b1 * c2 - b2 * c1) / det;
507            let y = (a2 * c1 - a1 * c2) / det;
508
509            // Sanity check
510            let dist_sq = (x - corners[i][0]).powi(2) + (y - corners[i][1]).powi(2);
511            if dist_sq < 4.0 {
512                refined[i] = [x, y];
513            }
514        }
515    }
516    refined
517}
518
/// Helper for ERF edge fitting.
///
/// Holds the geometry of one quad edge plus the evolving line estimate
/// `nx*x + ny*y + d = 0` that the fitting routines update in place.
struct EdgeFitter<'a> {
    /// Image being sampled.
    img: &'a crate::image::ImageView<'a>,
    /// First endpoint of the edge, in image pixels.
    p1: [f64; 2],
    /// Edge vector components (p2 - p1).
    dx: f64,
    dy: f64,
    /// Edge length in pixels.
    len: f64,
    /// Unit normal to the edge.
    nx: f64,
    ny: f64,
    /// Line offset: the fitted line is `nx*x + ny*y + d = 0`.
    d: f64,
}
530
impl<'a> EdgeFitter<'a> {
    /// Build a fitter for the edge running from `p1` to `p2`.
    ///
    /// Returns `None` for edges shorter than 4 px, which carry too little
    /// signal to fit reliably.
    fn new(img: &'a crate::image::ImageView<'a>, p1: [f64; 2], p2: [f64; 2]) -> Option<Self> {
        let dx = p2[0] - p1[0];
        let dy = p2[1] - p1[1];
        let len = (dx * dx + dy * dy).sqrt();
        if len < 4.0 {
            return None;
        }
        // Unit normal, perpendicular to the edge direction.
        let nx = -dy / len;
        let ny = dx / len;
        // Initial d from input corners (p1 lies on the line nx*x + ny*y + d = 0)
        let d = -(nx * p1[0] + ny * p1[1]);

        Some(Self {
            img,
            p1,
            dx,
            dy,
            len,
            nx,
            ny,
            d,
        })
    }

    /// Coarse scan for the line offset `d`.
    ///
    /// Tries 13 candidate offsets (-2.4..=2.4 px, 0.4 px apart) and keeps the
    /// one whose 1-px band around the shifted line accumulates the largest
    /// total gradient component along the edge normal.
    fn scan_initial_d(&mut self) {
        let window = 2.5;
        let (x0, x1, y0, y1) = self.get_scan_bounds(window);

        let mut best_offset = 0.0;
        let mut best_grad = 0.0;

        for k in -6..=6 {
            let offset = f64::from(k) * 0.4;
            let mut sum_g = 0.0;
            let mut count = 0;
            let scan_d = self.d + offset;

            for py in y0..=y1 {
                for px in x0..=x1 {
                    let x = px as f64;
                    let y = py as f64;
                    let dist = self.nx * x + self.ny * y + scan_d;
                    // Only pixels within 1 px of the candidate line contribute.
                    if dist.abs() < 1.0 {
                        let g = self.img.sample_gradient_bilinear(x, y);
                        // Gradient projected onto the normal, sign-agnostic.
                        sum_g += (g[0] * self.nx + g[1] * self.ny).abs();
                        count += 1;
                    }
                }
            }

            if count > 0 && sum_g > best_grad {
                best_grad = sum_g;
                best_offset = offset;
            }
        }
        self.d += best_offset;
    }

    /// Collect `(x, y, intensity)` samples inside a `window`-wide band around
    /// the current line estimate, restricted to the along-edge range
    /// t in [-0.1, 1.1] so pixels beyond the edge endpoints are excluded.
    fn collect_samples(
        &self,
        arena: &'a bumpalo::Bump,
    ) -> bumpalo::collections::Vec<'a, (f64, f64, f64)> {
        let window = 2.5;
        let (x0, x1, y0, y1) = self.get_scan_bounds(window);

        let mut samples = bumpalo::collections::Vec::with_capacity_in(128, arena);

        for py in y0..=y1 {
            for px in x0..=x1 {
                let x = px as f64;
                let y = py as f64;

                // Signed distance to the current line estimate.
                let dist = self.nx * x + self.ny * y + self.d;
                if dist.abs() > window {
                    continue;
                }

                // Normalized position along the edge (0 at p1, 1 at p2).
                let t = ((x - self.p1[0]) * self.dx + (y - self.p1[1]) * self.dy)
                    / (self.len * self.len);

                if (-0.1..=1.1).contains(&t) {
                    let val = f64::from(self.img.get_pixel(px, py));
                    samples.push((x, y, val));
                }
            }
        }
        samples
    }

    /// Refine the line offset `d` with Gauss-Newton against an ERF edge model.
    ///
    /// The intensity at signed distance `dist` from the line is modeled as
    /// `(a + b)/2 + (b - a)/2 * erf(dist / sigma)`, where `a`/`b` are the
    /// dark/bright plateau levels. Each iteration re-estimates `a` and `b`
    /// from samples more than 1 px away from the line, then takes one damped
    /// Gauss-Newton step in `d` (clamped to +/-0.5 px). Runs at most 15
    /// iterations; stops early on weak contrast, a degenerate normal system,
    /// or convergence.
    ///
    /// Note: the stored sample intensity is ignored here; values are
    /// re-sampled bilinearly at (x, y) each iteration.
    fn refine(&mut self, samples: &[(f64, f64, f64)], sigma: f64) {
        if samples.len() < 10 {
            return;
        }
        // Plateau estimates (dark `a`, bright `b`), seeded at mid-gray.
        let mut a = 128.0;
        let mut b = 128.0;
        let inv_sigma = 1.0 / sigma;
        let sqrt_pi = std::f64::consts::PI.sqrt();

        for _ in 0..15 {
            let mut dark_sum = 0.0;
            let mut dark_weight = 0.0;
            let mut light_sum = 0.0;
            let mut light_weight = 0.0;

            // Re-estimate plateau intensities from samples clearly on either
            // side of the line (|dist| > 1 px), weighting samples further
            // from the edge more heavily (weights clamped to [0.1, 2.0]).
            for &(x, y, _) in samples {
                let dist = self.nx * x + self.ny * y + self.d;
                let val = self.img.sample_bilinear(x, y);
                if dist < -1.0 {
                    let w = (-dist - 0.5).clamp(0.1, 2.0);
                    dark_sum += val * w;
                    dark_weight += w;
                } else if dist > 1.0 {
                    let w = (dist - 0.5).clamp(0.1, 2.0);
                    light_sum += val * w;
                    light_weight += w;
                }
            }

            if dark_weight > 0.0 && light_weight > 0.0 {
                a = dark_sum / dark_weight;
                b = light_sum / light_weight;
            }

            // Too little contrast between the two sides: nothing reliable to fit.
            if (b - a).abs() < 5.0 {
                break;
            }

            // Gauss-Newton on the scalar offset d. The model derivative is
            // d(model)/d(d) = (b - a) / (sqrt(pi) * sigma) * exp(-s^2).
            let mut sum_jtj = 0.0;
            let mut sum_jt_res = 0.0;
            let k = (b - a) / (sqrt_pi * sigma);

            for &(x, y, _) in samples {
                let dist = self.nx * x + self.ny * y + self.d;
                let s = dist * inv_sigma;
                // Beyond ~3 sigma the Jacobian is negligible; skip the sample.
                if s.abs() > 3.0 {
                    continue;
                }
                let val = self.img.sample_bilinear(x, y);
                let model = (a + b) * 0.5 + (b - a) * 0.5 * crate::quad::erf_approx(s);
                let residual = val - model;
                let jac = k * (-s * s).exp();
                sum_jtj += jac * jac;
                sum_jt_res += jac * residual;
            }

            // Degenerate normal equations: no usable samples near the edge.
            if sum_jtj < 1e-6 {
                break;
            }
            let step = sum_jt_res / sum_jtj;
            // Damp the update to +/-0.5 px per iteration for stability.
            self.d += step.clamp(-0.5, 0.5);
            if step.abs() < 1e-4 {
                break;
            }
        }
    }

    /// Axis-aligned bounding box of the edge, expanded by `window` + 0.5 px
    /// and clamped to coordinates safe for bilinear/gradient sampling.
    #[allow(clippy::cast_sign_loss)]
    fn get_scan_bounds(&self, window: f64) -> (usize, usize, usize, usize) {
        let p2_0 = self.p1[0] + self.dx;
        let p2_1 = self.p1[1] + self.dy;

        // Clamp to valid image coordinates (padding of 1 pixel)
        let w_limit = (self.img.width - 2) as f64;
        let h_limit = (self.img.height - 2) as f64;

        let x0 = (self.p1[0].min(p2_0) - window - 0.5).clamp(1.0, w_limit) as usize;
        let x1 = (self.p1[0].max(p2_0) + window + 0.5).clamp(1.0, w_limit) as usize;
        let y0 = (self.p1[1].min(p2_1) - window - 0.5).clamp(1.0, h_limit) as usize;
        let y1 = (self.p1[1].max(p2_1) + window + 0.5).clamp(1.0, h_limit) as usize;
        (x0, x1, y0, y1)
    }
}
704
705/// Fit a line (nx*x + ny*y + d = 0) to an edge using the ERF intensity model.
706fn fit_edge_erf(
707    arena: &bumpalo::Bump,
708    img: &crate::image::ImageView,
709    p1: [f64; 2],
710    p2: [f64; 2],
711    sigma: f64,
712) -> Option<(f64, f64, f64)> {
713    let mut fitter = EdgeFitter::new(img, p1, p2)?;
714    fitter.scan_initial_d();
715    let samples = fitter.collect_samples(arena);
716    if samples.len() < 10 {
717        return None;
718    }
719    fitter.refine(&samples, sigma);
720    Some((fitter.nx, fitter.ny, fitter.d))
721}
722
/// Returns the threshold that maximizes inter-class variance.
///
/// A coarse Otsu search: 15 evenly spaced candidate thresholds strictly
/// inside the sample range are scored by the (unnormalized) between-class
/// variance, and the best-scoring candidate wins. Empty input falls back to
/// mid-gray (128); near-flat input returns the range midpoint.
pub fn compute_otsu_threshold(values: &[f64]) -> f64 {
    if values.is_empty() {
        return 128.0;
    }

    let count = values.len() as f64;
    let grand_total: f64 = values.iter().sum();

    // Sample range defines the candidate search interval.
    let (lo, hi) = values
        .iter()
        .fold((f64::MAX, f64::MIN), |(lo, hi), &v| (lo.min(v), hi.max(v)));

    // Nearly flat data: any split is meaningless, return the midpoint.
    if (hi - lo) < 1.0 {
        return f64::midpoint(lo, hi);
    }

    // Track (threshold, score); seeded with the midpoint and a zero score.
    let mut best = (f64::midpoint(lo, hi), 0.0f64);

    // Evaluate 15 evenly spaced candidates in (lo, hi).
    for step in 1..16 {
        let candidate = lo + (hi - lo) * (f64::from(step) / 16.0);

        // Class 0: samples at or below the candidate threshold.
        let (below_n, below_sum) = values
            .iter()
            .filter(|&&v| v <= candidate)
            .fold((0.0f64, 0.0f64), |(n, s), &v| (n + 1.0, s + v));

        let above_n = count - below_n;
        if below_n < 1.0 || above_n < 1.0 {
            continue;
        }

        let mean_below = below_sum / below_n;
        let mean_above = (grand_total - below_sum) / above_n;

        // Unnormalized between-class variance.
        let spread = mean_below - mean_above;
        let score = below_n * above_n * spread * spread;

        if score > best.1 {
            best = (candidate, score);
        }
    }

    best.0
}
777
778/// Optimized grid sampling kernel.
779#[multiversion(targets(
780    "x86_64+avx2+bmi1+bmi2+popcnt+lzcnt",
781    "x86_64+avx512f+avx512bw+avx512dq+avx512vl",
782    "aarch64+neon"
783))]
784fn sample_grid_values_simd(
785    img: &crate::image::ImageView,
786    h: &Homography,
787    points: &[(f64, f64)],
788    intensities: &mut [f64],
789    n: usize,
790) -> bool {
791    let h00 = h.h[(0, 0)];
792    let h01 = h.h[(0, 1)];
793    let h02 = h.h[(0, 2)];
794    let h10 = h.h[(1, 0)];
795    let h11 = h.h[(1, 1)];
796    let h12 = h.h[(1, 2)];
797    let h20 = h.h[(2, 0)];
798    let h21 = h.h[(2, 1)];
799    let h22 = h.h[(2, 2)];
800
801    let w_limit = (img.width - 1) as f64;
802    let h_limit = (img.height - 1) as f64;
803
804    for (i, &p) in points.iter().take(n).enumerate() {
805        let wz = h20 * p.0 + h21 * p.1 + h22;
806        let img_x = (h00 * p.0 + h01 * p.1 + h02) / wz;
807        let img_y = (h10 * p.0 + h11 * p.1 + h12) / wz;
808
809        if img_x < 0.0 || img_x >= w_limit || img_y < 0.0 || img_y >= h_limit {
810            return false;
811        }
812
813        let xf = img_x.floor();
814        let yf = img_y.floor();
815        let ix = xf as usize;
816        let iy = yf as usize;
817        let dx = img_x - xf;
818        let dy = img_y - yf;
819
820        // SAFETY: Bounds checked above.
821        let val = unsafe {
822            let row0 = img.get_row_unchecked(iy);
823            let row1 = img.get_row_unchecked(iy + 1);
824            let v00 = f64::from(*row0.get_unchecked(ix));
825            let v10 = f64::from(*row0.get_unchecked(ix + 1));
826            let v01 = f64::from(*row1.get_unchecked(ix));
827            let v11 = f64::from(*row1.get_unchecked(ix + 1));
828
829            let top = v00 + dx * (v10 - v00);
830            let bot = v01 + dx * (v11 - v01);
831            top + dy * (bot - top)
832        };
833        intensities[i] = val;
834    }
835    true
836}
837
838/// Sample the bit grid from the image using the homography and decoder points.
839///
840/// Uses bilinear interpolation for sampling and a spatially adaptive threshold
841/// (based on min/max stats of the grid) to determine bit values.
842///
843/// # Parameters
844/// - `min_contrast`: Minimum contrast range for Otsu-based classification.
845///   Default is 20.0. Lower values (e.g., 10.0) improve recall on small/blurry tags.
846///
847/// This computes the intensities at sample points and the adaptive thresholds,
848/// then delegates to the strategy to produce the code.
849#[allow(clippy::cast_sign_loss, clippy::too_many_lines)]
850pub fn sample_grid_generic<S: crate::strategy::DecodingStrategy>(
851    img: &crate::image::ImageView,
852    homography: &Homography,
853    decoder: &(impl TagDecoder + ?Sized),
854) -> Option<S::Code> {
855    let points = decoder.sample_points();
856    // Stack-allocated buffer for up to 64 sample points (covers all standard tag families)
857    let mut intensities = [0.0f64; 64];
858    let n = points.len().min(64);
859
860    if !sample_grid_values_simd(img, homography, points, &mut intensities, n) {
861        return None;
862    }
863
864    // Quadrant-based Adaptive Thresholding
865    // Combines global Otsu (robust for bimodal) with local quadrant averages (robust for shadows)
866    let global_threshold = compute_otsu_threshold(&intensities[..n]);
867
868    let mut quad_sums = [0.0; 4];
869    let mut quad_counts = [0; 4];
870    for (i, &p) in points.iter().take(n).enumerate() {
871        let qi = if p.0 < 0.0 {
872            usize::from(p.1 >= 0.0)
873        } else {
874            2 + usize::from(p.1 >= 0.0)
875        };
876        quad_sums[qi] += intensities[i];
877        quad_counts[qi] += 1;
878    }
879
880    let mut thresholds = [0.0f64; 64];
881
882    for (i, &p) in points.iter().take(n).enumerate() {
883        let qi = if p.0 < 0.0 {
884            usize::from(p.1 >= 0.0)
885        } else {
886            2 + usize::from(p.1 >= 0.0)
887        };
888        let quad_avg = if quad_counts[qi] > 0 {
889            quad_sums[qi] / f64::from(quad_counts[qi])
890        } else {
891            global_threshold
892        };
893
894        // Blend global Otsu and local mean (0.7 / 0.3 weighting is common for fiducials)
895        let effective_threshold = 0.7 * global_threshold + 0.3 * quad_avg;
896        thresholds[i] = effective_threshold;
897    }
898
899    Some(S::from_intensities(&intensities[..n], &thresholds[..n]))
900}
901
902/// Sample the bit grid from the image (Legacy/Hard wrapper).
903#[allow(clippy::cast_sign_loss, clippy::too_many_lines)]
904pub fn sample_grid(
905    img: &crate::image::ImageView,
906    homography: &Homography,
907    decoder: &(impl TagDecoder + ?Sized),
908    _min_contrast: f64,
909) -> Option<u64> {
910    sample_grid_generic::<crate::strategy::HardStrategy>(img, homography, decoder)
911}
912
/// A trait for decoding binary payloads from extracted tags.
///
/// Implementations wrap a tag-family dictionary and expose its geometry
/// (sample points, grid dimension) plus error-correcting code lookup.
pub trait TagDecoder: Send + Sync {
    /// Returns the name of the decoder family (e.g., "AprilTag36h11").
    fn name(&self) -> &str;
    /// Returns the dimension of the tag grid (e.g., 6 for 36h11).
    fn dimension(&self) -> usize;
    /// Returns the ideal sample points in canonical coordinates [-1, 1].
    fn sample_points(&self) -> &[(f64, f64)];
    /// Decodes the extracted bits into a tag ID, hamming distance, and rotation count.
    ///
    /// Returns `Some((id, hamming, rotation))` if decoding is successful, `None` otherwise.
    /// `rotation` is 0-3, representing 90-degree CW increments.
    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)>;
    /// Decodes with custom maximum hamming distance (number of correctable bit errors).
    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)>;
    /// Get the original code for a given ID (useful for testing/simulation).
    fn get_code(&self, id: u16) -> Option<u64>;
    /// Returns the total number of codes in the dictionary.
    fn num_codes(&self) -> usize;
    /// Returns all rotated versions of all codes in the dictionary: (bits, id, rotation)
    fn rotated_codes(&self) -> &[(u64, u16, u8)];
}
935
936/// Decoder for the AprilTag 36h11 family.
937pub struct AprilTag36h11;
938
939impl TagDecoder for AprilTag36h11 {
940    fn name(&self) -> &'static str {
941        "36h11"
942    }
943    fn dimension(&self) -> usize {
944        6
945    } // 6x6 grid of bits (excluding border)
946
947    fn sample_points(&self) -> &[(f64, f64)] {
948        &crate::dictionaries::APRILTAG_36H11.sample_points
949    }
950
951    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)> {
952        // Use the full 587-code dictionary with O(1) exact match + hamming search
953        crate::dictionaries::APRILTAG_36H11
954            .decode(bits, 4) // Allow up to 4 bit errors for maximum recall
955            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
956    }
957
958    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)> {
959        crate::dictionaries::APRILTAG_36H11
960            .decode(bits, max_hamming)
961            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
962    }
963
964    fn get_code(&self, id: u16) -> Option<u64> {
965        crate::dictionaries::APRILTAG_36H11.get_code(id)
966    }
967
968    fn num_codes(&self) -> usize {
969        crate::dictionaries::APRILTAG_36H11.len()
970    }
971
972    fn rotated_codes(&self) -> &[(u64, u16, u8)] {
973        crate::dictionaries::APRILTAG_36H11.rotated_codes()
974    }
975}
976
977/// Decoder for the AprilTag 16h5 family.
978pub struct AprilTag16h5;
979
980impl TagDecoder for AprilTag16h5 {
981    fn name(&self) -> &'static str {
982        "16h5"
983    }
984    fn dimension(&self) -> usize {
985        4
986    } // 4x4 grid of bits (excluding border)
987
988    fn sample_points(&self) -> &[(f64, f64)] {
989        &crate::dictionaries::APRILTAG_16H5.sample_points
990    }
991
992    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)> {
993        crate::dictionaries::APRILTAG_16H5
994            .decode(bits, 1) // Allow up to 1 bit error (16h5 has lower hamming distance)
995            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
996    }
997
998    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)> {
999        crate::dictionaries::APRILTAG_16H5
1000            .decode(bits, max_hamming)
1001            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1002    }
1003
1004    fn get_code(&self, id: u16) -> Option<u64> {
1005        crate::dictionaries::APRILTAG_16H5.get_code(id)
1006    }
1007
1008    fn num_codes(&self) -> usize {
1009        crate::dictionaries::APRILTAG_16H5.len()
1010    }
1011
1012    fn rotated_codes(&self) -> &[(u64, u16, u8)] {
1013        crate::dictionaries::APRILTAG_16H5.rotated_codes()
1014    }
1015}
1016
1017/// Decoder for the ArUco 4x4_50 family.
1018pub struct ArUco4x4_50;
1019
1020impl TagDecoder for ArUco4x4_50 {
1021    fn name(&self) -> &'static str {
1022        "4X4_50"
1023    }
1024    fn dimension(&self) -> usize {
1025        4
1026    }
1027
1028    fn sample_points(&self) -> &[(f64, f64)] {
1029        &crate::dictionaries::ARUCO_4X4_50.sample_points
1030    }
1031
1032    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)> {
1033        crate::dictionaries::ARUCO_4X4_50
1034            .decode(bits, 1)
1035            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1036    }
1037
1038    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)> {
1039        crate::dictionaries::ARUCO_4X4_50
1040            .decode(bits, max_hamming)
1041            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1042    }
1043
1044    fn get_code(&self, id: u16) -> Option<u64> {
1045        crate::dictionaries::ARUCO_4X4_50.get_code(id)
1046    }
1047
1048    fn num_codes(&self) -> usize {
1049        crate::dictionaries::ARUCO_4X4_50.len()
1050    }
1051
1052    fn rotated_codes(&self) -> &[(u64, u16, u8)] {
1053        crate::dictionaries::ARUCO_4X4_50.rotated_codes()
1054    }
1055}
1056
1057/// Decoder for the ArUco 4x4_100 family.
1058pub struct ArUco4x4_100;
1059
1060impl TagDecoder for ArUco4x4_100 {
1061    fn name(&self) -> &'static str {
1062        "4X4_100"
1063    }
1064    fn dimension(&self) -> usize {
1065        4
1066    }
1067
1068    fn sample_points(&self) -> &[(f64, f64)] {
1069        &crate::dictionaries::ARUCO_4X4_100.sample_points
1070    }
1071
1072    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)> {
1073        crate::dictionaries::ARUCO_4X4_100
1074            .decode(bits, 1)
1075            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1076    }
1077
1078    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)> {
1079        crate::dictionaries::ARUCO_4X4_100
1080            .decode(bits, max_hamming)
1081            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1082    }
1083
1084    fn get_code(&self, id: u16) -> Option<u64> {
1085        crate::dictionaries::ARUCO_4X4_100.get_code(id)
1086    }
1087
1088    fn num_codes(&self) -> usize {
1089        crate::dictionaries::ARUCO_4X4_100.len()
1090    }
1091
1092    fn rotated_codes(&self) -> &[(u64, u16, u8)] {
1093        crate::dictionaries::ARUCO_4X4_100.rotated_codes()
1094    }
1095}
1096
1097/// Generic decoder for any TagDictionary (static or custom).
1098pub struct GenericDecoder {
1099    dict: std::sync::Arc<crate::dictionaries::TagDictionary>,
1100}
1101
1102impl GenericDecoder {
1103    /// Create a new generic decoder from a dictionary.
1104    #[must_use]
1105    pub fn new(dict: crate::dictionaries::TagDictionary) -> Self {
1106        Self {
1107            dict: std::sync::Arc::new(dict),
1108        }
1109    }
1110}
1111
1112impl TagDecoder for GenericDecoder {
1113    fn name(&self) -> &str {
1114        &self.dict.name
1115    }
1116
1117    fn dimension(&self) -> usize {
1118        self.dict.dimension
1119    }
1120
1121    fn sample_points(&self) -> &[(f64, f64)] {
1122        &self.dict.sample_points
1123    }
1124
1125    fn decode(&self, bits: u64) -> Option<(u32, u32, u8)> {
1126        self.dict
1127            .decode(bits, self.dict.hamming_distance as u32)
1128            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1129    }
1130
1131    fn decode_full(&self, bits: u64, max_hamming: u32) -> Option<(u32, u32, u8)> {
1132        self.dict
1133            .decode(bits, max_hamming)
1134            .map(|(id, hamming, rot)| (u32::from(id), hamming, rot))
1135    }
1136
1137    fn get_code(&self, id: u16) -> Option<u64> {
1138        self.dict.get_code(id)
1139    }
1140
1141    fn num_codes(&self) -> usize {
1142        self.dict.len()
1143    }
1144
1145    fn rotated_codes(&self) -> &[(u64, u16, u8)] {
1146        self.dict.rotated_codes()
1147    }
1148}
1149
1150/// Convert a TagFamily enum to a boxed decoder instance.
1151#[must_use]
1152pub fn family_to_decoder(family: config::TagFamily) -> Box<dyn TagDecoder + Send + Sync> {
1153    match family {
1154        config::TagFamily::AprilTag36h11 => Box::new(AprilTag36h11),
1155        config::TagFamily::AprilTag16h5 => Box::new(AprilTag16h5),
1156        config::TagFamily::ArUco4x4_50 => Box::new(ArUco4x4_50),
1157        config::TagFamily::ArUco4x4_100 => Box::new(ArUco4x4_100),
1158    }
1159}
1160
1161#[cfg(test)]
1162#[allow(clippy::unwrap_used)]
1163mod tests {
1164    use super::*;
1165    use crate::dictionaries::rotate90;
1166    use proptest::prelude::*;
1167
1168    proptest! {
1169        #[test]
1170        fn test_rotation_invariants(bits in 0..u64::MAX) {
1171            let dim = 6;
1172            let r1 = rotate90(bits, dim);
1173            let r2 = rotate90(r1, dim);
1174            let r3 = rotate90(r2, dim);
1175            let r4 = rotate90(r3, dim);
1176
1177            // Mask to dim*dim bits to avoid noise in upper bits
1178            let mask = (1u64 << (dim * dim)) - 1;
1179            prop_assert_eq!(bits & mask, r4 & mask);
1180        }
1181
1182        #[test]
1183        fn test_hamming_robustness(
1184            id_idx in 0usize..10,
1185            rotation in 0..4usize,
1186            flip1 in 0..36usize,
1187            flip2 in 0..36usize
1188        ) {
1189            let decoder = AprilTag36h11;
1190            let dict = &*crate::dictionaries::APRILTAG_36H11;
1191            let orig_id = id_idx as u16;
1192            let orig_code = dict.get_code(orig_id).expect("valid ID");
1193
1194            // Apply rotation
1195            let mut test_bits = orig_code;
1196            for _ in 0..rotation {
1197                test_bits = rotate90(test_bits, 6);
1198            }
1199
1200            // Flip bits
1201            test_bits ^= 1 << flip1;
1202            test_bits ^= 1 << flip2;
1203
1204            let result = decoder.decode(test_bits);
1205            prop_assert!(result.is_some());
1206            let (decoded_id, _, _) = result.expect("Should decode valid pattern");
1207            prop_assert_eq!(decoded_id, u32::from(orig_id));
1208        }
1209
1210        #[test]
1211        fn test_false_positive_resistance(bits in 0..u64::MAX) {
1212            let decoder = AprilTag36h11;
1213            // Random bitstreams should rarely match any of the 587 codes
1214            if let Some((_id, hamming, _rot)) = decoder.decode(bits) {
1215                // If it decodes, it must have low hamming distance
1216                prop_assert!(hamming <= 4);
1217            }
1218        }
1219
1220        #[test]
1221        fn prop_homography_projection(
1222            src in prop::collection::vec((-100.0..100.0, -100.0..100.0), 4),
1223            dst in prop::collection::vec((0.0..1000.0, 0.0..1000.0), 4)
1224        ) {
1225            let src_pts = [
1226                [src[0].0, src[0].1],
1227                [src[1].0, src[1].1],
1228                [src[2].0, src[2].1],
1229                [src[3].0, src[3].1],
1230            ];
1231            let dst_pts = [
1232                [dst[0].0, dst[0].1],
1233                [dst[1].0, dst[1].1],
1234                [dst[2].0, dst[2].1],
1235                [dst[3].0, dst[3].1],
1236            ];
1237
1238            if let Some(h) = Homography::from_pairs(&src_pts, &dst_pts) {
1239                for i in 0..4 {
1240                    let p = h.project(src_pts[i]);
1241                    // Check for reasonable accuracy. 1e-4 is conservative for float precision
1242                    // issues in near-singular cases where from_pairs still returns Some.
1243                    prop_assert!((p[0] - dst_pts[i][0]).abs() < 1e-3,
1244                        "Point {}: project({:?}) -> {:?}, expected {:?}", i, src_pts[i], p, dst_pts[i]);
1245                    prop_assert!((p[1] - dst_pts[i][1]).abs() < 1e-3);
1246                }
1247            }
1248        }
1249    }
1250
1251    #[test]
1252    fn test_all_codes_decode() {
1253        let decoder = AprilTag36h11;
1254        for id in 0..587u16 {
1255            let code = crate::dictionaries::APRILTAG_36H11
1256                .get_code(id)
1257                .expect("valid ID");
1258            let result = decoder.decode(code);
1259            assert!(result.is_some());
1260            let (id_out, hamming_out, rot_out) = result.unwrap();
1261            assert_eq!(id_out, u32::from(id));
1262            assert_eq!(hamming_out, 0);
1263            assert_eq!(rot_out, 0);
1264        }
1265    }
1266
1267    #[test]
1268    fn test_grid_sampling() {
1269        let width = 64;
1270        let height = 64;
1271        let stride = 64;
1272        let mut data = vec![0u8; width * height];
1273
1274        // Draw a simulated 36h6 tag (6x6 bits)
1275        // Center at 32, 32. Tag size 36x36 pixels.
1276        // Each bit is 6x6 pixels.
1277        // We will set bit 0 (top-left) to 255 (white), others 0 (black).
1278        // Plus a white border for contrast calculation.
1279        // We need min/max to be different by > 20.
1280
1281        // Fill background with gray = 100
1282        data.fill(100);
1283
1284        // Set bit 0 region to 200 (White)
1285        // Canonical (-0.625, -0.625) -> Image coords.
1286        // Assume identity homography mapping:
1287        // -1 -> 14, +1 -> 50 (width 36 centered at 32)
1288        // scale = 18. offset = 32.
1289
1290        // Bit 0 center is -0.625.
1291        // Image x = 32 + 18 * -0.625 = 32 - 11.25 = 20.75
1292        // Let's paint a blob at 21, 21.
1293        for y in 18..24 {
1294            for x in 18..24 {
1295                data[y * width + x] = 200;
1296            }
1297        }
1298
1299        // Set another region (last bit) to 50 (Black) to ensure dynamic range
1300        // Last bit 35 is at (0.625, 0.625).
1301        // Image x = 32 + 18 * 0.625 = 43.25
1302        for y in 40..46 {
1303            for x in 40..46 {
1304                data[y * width + x] = 50;
1305            }
1306        }
1307
1308        let img = crate::image::ImageView::new(&data, width, height, stride).unwrap();
1309
1310        // Construct Homography that maps canonical [-1, 1] to image [14, 50]
1311        // This is a simple scaling matrix:
1312        // [ sx  0  tx ]
1313        // [ 0  sy  ty ]
1314        // [ 0   0   1 ]
1315        // sx = 18, tx = 32.
1316        let mut h = SMatrix::<f64, 3, 3>::identity();
1317        h[(0, 0)] = 18.0;
1318        h[(0, 2)] = 32.0;
1319        h[(1, 1)] = 18.0;
1320        h[(1, 2)] = 32.0;
1321        let homography = Homography { h };
1322
1323        let decoder = AprilTag36h11;
1324        let bits =
1325            sample_grid(&img, &homography, &decoder, 20.0).expect("Should sample successfully");
1326
1327        // bit 0 should be 1 (high intensity)
1328        assert_eq!(bits & 1, 1, "Bit 0 should be 1");
1329        // bit 35 should be 0 (low intensity)
1330        assert_eq!((bits >> 35) & 1, 0, "Bit 35 should be 0");
1331    }
1332
1333    #[test]
1334    fn test_homography_dlt() {
1335        let src = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]];
1336        let dst = [[10.0, 10.0], [20.0, 11.0], [19.0, 21.0], [9.0, 20.0]];
1337
1338        let h = Homography::from_pairs(&src, &dst).expect("DLT should succeed");
1339        for i in 0..4 {
1340            let p = h.project(src[i]);
1341            assert!((p[0] - dst[i][0]).abs() < 1e-6);
1342            assert!((p[1] - dst[i][1]).abs() < 1e-6);
1343        }
1344    }
1345
1346    // ========================================================================
1347    // END-TO-END DECODER ROBUSTNESS TESTS
1348    // ========================================================================
1349
1350    use crate::config::TagFamily;
1351    use crate::image::ImageView;
1352    use crate::quad::extract_quads_fast;
1353    use crate::segmentation::label_components_with_stats;
1354    use crate::test_utils::{TestImageParams, generate_test_image_with_params};
1355    use crate::threshold::ThresholdEngine;
1356    use bumpalo::Bump;
1357
1358    /// Run full pipeline from image to decoded tags.
1359    fn run_full_pipeline(tag_size: usize, canvas_size: usize, tag_id: u16) -> Vec<(u32, u32)> {
1360        let params = TestImageParams {
1361            family: TagFamily::AprilTag36h11,
1362            id: tag_id,
1363            tag_size,
1364            canvas_size,
1365            ..Default::default()
1366        };
1367
1368        let (data, _corners) = generate_test_image_with_params(&params);
1369        let img = ImageView::new(&data, canvas_size, canvas_size, canvas_size).unwrap();
1370
1371        let arena = Bump::new();
1372        let engine = ThresholdEngine::new();
1373        let stats = engine.compute_tile_stats(&arena, &img);
1374        let mut binary = vec![0u8; canvas_size * canvas_size];
1375        engine.apply_threshold(&arena, &img, &stats, &mut binary);
1376        let label_result =
1377            label_components_with_stats(&arena, &binary, canvas_size, canvas_size, true);
1378        let detections = extract_quads_fast(&arena, &img, &label_result);
1379
1380        let decoder = AprilTag36h11;
1381        let mut results = Vec::new();
1382
1383        for quad in &detections {
1384            let dst = [
1385                [quad.corners[0][0], quad.corners[0][1]],
1386                [quad.corners[1][0], quad.corners[1][1]],
1387                [quad.corners[2][0], quad.corners[2][1]],
1388                [quad.corners[3][0], quad.corners[3][1]],
1389            ];
1390
1391            if let Some(h) = Homography::square_to_quad(&dst)
1392                && let Some(bits) = sample_grid(&img, &h, &decoder, 20.0)
1393                && let Some((id, hamming, _rot)) = decoder.decode(bits)
1394            {
1395                results.push((id, hamming));
1396            }
1397        }
1398
1399        results
1400    }
1401
1402    /// Test E2E pipeline decodes correctly at varying sizes.
1403    #[test]
1404    fn test_e2e_decoding_at_varying_sizes() {
1405        let canvas_size = 640;
1406        let tag_sizes = [64, 100, 150, 200, 300];
1407        let test_id: u16 = 42;
1408
1409        for tag_size in tag_sizes {
1410            let decoded = run_full_pipeline(tag_size, canvas_size, test_id);
1411            let found = decoded.iter().any(|(id, _)| *id == u32::from(test_id));
1412
1413            if tag_size >= 64 {
1414                assert!(found, "Tag size {tag_size}: ID {test_id} not found");
1415            }
1416
1417            if found {
1418                let (_, hamming) = decoded
1419                    .iter()
1420                    .find(|(id, _)| *id == u32::from(test_id))
1421                    .unwrap();
1422                println!("Tag size {tag_size:>3}px: ID {test_id} with hamming {hamming}");
1423            }
1424        }
1425    }
1426
1427    /// Test that multiple tag IDs decode correctly.
1428    #[test]
1429    fn test_e2e_multiple_ids() {
1430        let canvas_size = 400;
1431        let tag_size = 150;
1432        let test_ids: [u16; 5] = [0, 42, 100, 200, 500];
1433
1434        for &test_id in &test_ids {
1435            let decoded = run_full_pipeline(tag_size, canvas_size, test_id);
1436            let found = decoded.iter().any(|(id, _)| *id == u32::from(test_id));
1437            assert!(found, "ID {test_id} not decoded");
1438
1439            let (_, hamming) = decoded
1440                .iter()
1441                .find(|(id, _)| *id == u32::from(test_id))
1442                .unwrap();
1443            assert_eq!(*hamming, 0, "ID {test_id} should have 0 hamming");
1444            println!("ID {test_id:>3}: Decoded with hamming {hamming}");
1445        }
1446    }
1447
1448    /// Test decoding with edge ID values.
1449    #[test]
1450    fn test_e2e_edge_ids() {
1451        let canvas_size = 400;
1452        let tag_size = 150;
1453        let edge_ids: [u16; 2] = [0, 586];
1454
1455        for &test_id in &edge_ids {
1456            let decoded = run_full_pipeline(tag_size, canvas_size, test_id);
1457            let found = decoded.iter().any(|(id, _)| *id == u32::from(test_id));
1458            assert!(found, "Edge ID {test_id} not decoded");
1459            println!("Edge ID {test_id}: Decoded");
1460        }
1461    }
1462}