Skip to main content

locus_core/
pose.rs

//! 3D Pose Estimation (PnP) for fiducial markers.
//!
//! This module recovers the 6-DOF transformation between the camera and the tag.
//! It supports:
//! - **Fast Mode (IPPE)**: Infinitesimal Plane-Based Pose Estimation for low latency.
//! - **Accurate Mode (Weighted LM)**: Iterative refinement weighted by sub-pixel uncertainty.

8#![allow(clippy::many_single_char_names, clippy::similar_names)]
9use crate::batch::{DetectionBatch, Pose6D};
10use crate::config::PoseEstimationMode;
11use crate::image::ImageView;
12use nalgebra::{Matrix3, Matrix6, UnitQuaternion, Vector3, Vector6};
13
/// Pinhole camera intrinsic parameters (focal lengths and principal point;
/// no lens-distortion terms are modeled here).
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CameraIntrinsics {
    /// Focal length in x (pixels).
    pub fx: f64,
    /// Focal length in y (pixels).
    pub fy: f64,
    /// Principal point x (pixels).
    pub cx: f64,
    /// Principal point y (pixels).
    pub cy: f64,
}
27
28impl CameraIntrinsics {
29    /// Create new intrinsics.
30    #[must_use]
31    pub fn new(fx: f64, fy: f64, cx: f64, cy: f64) -> Self {
32        Self { fx, fy, cx, cy }
33    }
34
35    /// Convert to a 3x3 matrix.
36    #[must_use]
37    pub fn as_matrix(&self) -> Matrix3<f64> {
38        Matrix3::new(self.fx, 0.0, self.cx, 0.0, self.fy, self.cy, 0.0, 0.0, 1.0)
39    }
40
41    /// Get inverse matrix.
42    #[must_use]
43    pub fn inv_matrix(&self) -> Matrix3<f64> {
44        Matrix3::new(
45            1.0 / self.fx,
46            0.0,
47            -self.cx / self.fx,
48            0.0,
49            1.0 / self.fy,
50            -self.cy / self.fy,
51            0.0,
52            0.0,
53            1.0,
54        )
55    }
56}
57
/// A 3D rigid-body pose: rotation and translation mapping tag/world
/// coordinates into the camera frame (see [`Pose::project`]).
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Pose {
    /// 3x3 Rotation matrix.
    pub rotation: Matrix3<f64>,
    /// 3x1 Translation vector.
    pub translation: Vector3<f64>,
}
68
69impl Pose {
70    /// Create a new pose.
71    #[must_use]
72    pub fn new(rotation: Matrix3<f64>, translation: Vector3<f64>) -> Self {
73        Self {
74            rotation,
75            translation,
76        }
77    }
78
79    /// Project a 3D point into the image using this pose and intrinsics.
80    #[must_use]
81    pub fn project(&self, point: &Vector3<f64>, intrinsics: &CameraIntrinsics) -> [f64; 2] {
82        let p_cam = self.rotation * point + self.translation;
83        let x = (p_cam.x / p_cam.z) * intrinsics.fx + intrinsics.cx;
84        let y = (p_cam.y / p_cam.z) * intrinsics.fy + intrinsics.cy;
85        [x, y]
86    }
87}
88
89/// Estimate pose from tag detection using homography decomposition and refinement.
90///
91/// # Arguments
92/// * `intrinsics` - Camera intrinsics.
93/// * `corners` - Detected corners in image coordinates [[x, y]; 4].
94/// * `tag_size` - Physical size of the tag in world units (e.g., meters).
95/// * `img` - Optional image view (required for Accurate mode).
96/// * `mode` - Pose estimation mode (Fast vs Accurate).
97///
98/// # Returns
99/// A tuple containing:
100/// * `Option<Pose>`: The estimated pose (if successful).
101/// * `Option<[[f64; 6]; 6]>`: The estimated covariance matrix (if Accurate mode enabled).
102///
103/// # Panics
104/// Panics if SVD decomposition fails during orthogonalization (extremely rare).
105#[must_use]
106#[allow(clippy::missing_panics_doc)]
107#[tracing::instrument(skip_all, name = "pipeline::estimate_tag_pose")]
108pub fn estimate_tag_pose(
109    intrinsics: &CameraIntrinsics,
110    corners: &[[f64; 2]; 4],
111    tag_size: f64,
112    img: Option<&ImageView>,
113    mode: PoseEstimationMode,
114) -> (Option<Pose>, Option<[[f64; 6]; 6]>) {
115    estimate_tag_pose_with_config(
116        intrinsics,
117        corners,
118        tag_size,
119        img,
120        mode,
121        &crate::config::DetectorConfig::default(),
122    )
123}
124
125/// Estimate pose with explicit configuration for tuning parameters.
126#[must_use]
127#[allow(clippy::missing_panics_doc)]
128#[tracing::instrument(skip_all, name = "pipeline::estimate_tag_pose")]
129pub fn estimate_tag_pose_with_config(
130    intrinsics: &CameraIntrinsics,
131    corners: &[[f64; 2]; 4],
132    tag_size: f64,
133    img: Option<&ImageView>,
134    mode: PoseEstimationMode,
135    config: &crate::config::DetectorConfig,
136) -> (Option<Pose>, Option<[[f64; 6]; 6]>) {
137    // 1. Canonical Homography: Map canonical square [-1,1]x[-1,1] to image pixels.
138    let Some(h_poly) = crate::decoder::Homography::square_to_quad(corners) else {
139        return (None, None);
140    };
141    let h_pixel = h_poly.h;
142
143    // 2. Normalize Homography: H_norm = K_inv * H_pixel
144    let k_inv = intrinsics.inv_matrix();
145    let h_norm = k_inv * h_pixel;
146
147    // 3. Scale to Physical Model (Modern OpenCV 4.6+ Convention: Top-Left Origin)
148    // Legacy mapping: xc = x_obj * (2/s), yc = y_obj * (2/s)
149    // Modern mapping: xc = (2/s)*x_obj - 1, yc = (2/s)*y_obj - 1
150    // P_cam = H_norm * [ (2/s)*x_obj - 1, (2/s)*y_obj - 1, 1 ]^T
151    // P_cam = [ (2/s)*h1, (2/s)*h2, h3 - h1 - h2 ] * [x_obj, y_obj, 1 ]^T
152    let scaler = 2.0 / tag_size;
153    let mut h_metric = h_norm;
154    let h1 = h_norm.column(0).into_owned();
155    let h2 = h_norm.column(1).into_owned();
156    h_metric.column_mut(0).scale_mut(scaler);
157    h_metric.column_mut(1).scale_mut(scaler);
158    h_metric
159        .column_mut(2)
160        .copy_from(&(h_norm.column(2) - h1 - h2));
161
162    // 4. IPPE Core: Decompose Jacobian (first 2 image cols) into 2 potential poses.
163    let Some(candidates) = solve_ippe_square(&h_metric) else {
164        return (None, None);
165    };
166
167    // 5. Disambiguation: Choose the pose with lower reprojection error.
168    let best_pose = find_best_pose(intrinsics, corners, tag_size, &candidates);
169
170    // 6. Refinement: Levenberg-Marquardt (LM)
171    match (mode, img) {
172        (PoseEstimationMode::Accurate, Some(image)) => {
173            // Compute corner uncertainty from structure tensor
174            let uncertainty = crate::pose_weighted::compute_framework_uncertainty(
175                image,
176                corners,
177                &h_poly,
178                config.tikhonov_alpha_max,
179                config.sigma_n_sq,
180                config.structure_tensor_radius,
181            );
182            let (refined_pose, covariance) = crate::pose_weighted::refine_pose_lm_weighted(
183                intrinsics,
184                corners,
185                tag_size,
186                best_pose,
187                &uncertainty,
188            );
189            (Some(refined_pose), Some(covariance))
190        },
191        _ => {
192            // Fast mode (Identity weights)
193            (
194                Some(refine_pose_lm(
195                    intrinsics,
196                    corners,
197                    tag_size,
198                    best_pose,
199                    config.huber_delta_px,
200                )),
201                None,
202            )
203        },
204    }
205}
206
/// Solves the IPPE-Square problem.
/// Returns two possible poses ($R_a, t$) and ($R_b, t$) corresponding to the two minima of the PnP error function.
///
/// This uses an analytical approach derived from the homography Jacobian's SVD.
/// The second solution handles the "Necker reversal" ambiguity inherent in planar pose estimation.
fn solve_ippe_square(h: &Matrix3<f64>) -> Option<[Pose; 2]> {
    // IPPE-Square analytical solution (zero allocation).
    // Jacobian J = [h1, h2] (first two columns of the metric homography).
    let h1 = h.column(0);
    let h2 = h.column(1);

    // 1. Compute B = J^T J (2x2 symmetric matrix)
    //    [ a  c ]
    //    [ c  b ]
    let a = h1.dot(&h1);
    let b = h2.dot(&h2);
    let c = h1.dot(&h2);

    // 2. Eigen Analysis of 2x2 Matrix B
    //    Characteristic eq: lambda^2 - Tr(B)lambda + Det(B) = 0
    //    (max(0.0) guards the discriminant against tiny negative round-off).
    let trace = a + b;
    let det = a * b - c * c;
    let delta = (trace * trace - 4.0 * det).max(0.0).sqrt();

    // lambda1 >= lambda2
    let s1_sq = (trace + delta) * 0.5;
    let s2_sq = (trace - delta) * 0.5;

    // Singular values of J: sigma = sqrt(lambda)
    let s1 = s1_sq.sqrt();
    let s2 = s2_sq.sqrt();

    // Check for Frontal View (Degeneracy: s1 ~= s2)
    // We use a safe threshold relative to the max singular value.
    if (s1 - s2).abs() < 1e-4 * s1 {
        // Degenerate Case: Frontal View (J columns orthogonal & equal length).
        // R = Gram-Schmidt orthonormalization of [h1, h2, h1xh2].

        let mut r1 = h1.clone_owned();
        let scale = 1.0 / r1.norm();
        r1 *= scale;

        // Orthogonalize r2 w.r.t r1
        let mut r2 = h2 - r1 * (h2.dot(&r1));
        r2 = r2.normalize();

        let r3 = r1.cross(&r2);
        let rot = Matrix3::from_columns(&[r1, r2, r3]);

        // Translation: t = h3 * scale.
        // Gamma (homography scale) is recovered from J:
        // gamma * R = J => gamma = ||h1|| (roughly).
        // We use the average of the singular values for robustness.
        let gamma = (s1 + s2) * 0.5;
        if gamma < 1e-8 {
            return None;
        } // Avoid div/0
        let tz = 1.0 / gamma;
        let t = h.column(2) * tz;

        // Frontal view has no Necker ambiguity: both slots get the same pose.
        let pose = Pose::new(rot, t);
        return Some([pose, pose]);
    }

    // 3. Recover Rotation A (Primary)
    // We basically want R such that J ~ R * S_prj.
    // Standard approach: R = U * V^T where J = U S V^T.
    //
    // Analytical 3x2 SVD reconstruction:
    // U = [u1, u2], V = [v1, v2]
    // U_i = J * v_i / s_i

    // Eigenvectors of B (columns of V)
    // For 2x2 matrix [a c; c b]:
    // If c != 0:
    //   v1 = [s1^2 - b, c], normalized
    // else (B already diagonal):
    //   v1 = [1, 0] if a > b else [0, 1]

    let v1 = if c.abs() > 1e-8 {
        let v = nalgebra::Vector2::new(s1_sq - b, c);
        v.normalize()
    } else if a >= b {
        nalgebra::Vector2::new(1.0, 0.0)
    } else {
        nalgebra::Vector2::new(0.0, 1.0)
    };

    // v2 is orthogonal to v1. For 2D, [-v1.y, v1.x]
    let v2 = nalgebra::Vector2::new(-v1.y, v1.x);

    // Compute Left Singular Vectors u1, u2 inside the 3D space
    // u1 = J * v1 / s1
    // u2 = J * v2 / s2
    let j_v1 = h1 * v1.x + h2 * v1.y;
    let j_v2 = h1 * v2.x + h2 * v2.y;

    // Safe division check: s1 ~ 0 means J is numerically rank-zero.
    if s1 < 1e-8 {
        return None;
    }
    let u1 = j_v1 / s1;
    let u2 = j_v2 / s2.max(1e-8); // Avoid div/0 for s2 (rank-1 J)

    // Reconstruct Rotation A
    // R = U * V^T (extended to 3x3)
    // The columns of R are r1, r2, r3.
    // In SVD terms:
    // [r1 r2] = [u1 u2] * [v1 v2]^T
    // r1 = u1 * v1.x + u2 * v2.x
    // r2 = u1 * v1.y + u2 * v2.y
    let r1_a = u1 * v1.x + u2 * v2.x;
    let r2_a = u1 * v1.y + u2 * v2.y;
    let r3_a = r1_a.cross(&r2_a);
    let rot_a = Matrix3::from_columns(&[r1_a, r2_a, r3_a]);

    // Translation A (shared by both candidates; gamma as in the frontal case).
    let gamma = (s1 + s2) * 0.5;
    let tz = 1.0 / gamma;
    let t_a = h.column(2) * tz;
    let pose_a = Pose::new(rot_a, t_a);

    // 4. Recover Rotation B (Second Solution)
    // Necker Reversal: reflect the plane normal across the line of sight,
    // n_b = [-nx, -ny, nz] (approx). This robust normal-reflection method is
    // used instead of the full analytical IPPE dual.
    let n_a = rot_a.column(2);
    // Safe normalize for n_b
    let n_b_raw = Vector3::new(-n_a.x, -n_a.y, n_a.z);
    let n_b = if n_b_raw.norm_squared() > 1e-8 {
        n_b_raw.normalize()
    } else {
        // Fallback (should be impossible for unit vector n_a)
        Vector3::z_axis().into_inner()
    };

    // Construct R_b using Gram-Schmidt from (h1, n_b):
    // the x axis projects roughly along h1, so
    // x_b = (h1 - (h1.n)n).normalize
    let h1_norm = h1.normalize();
    let x_b_raw = h1_norm - n_b * h1_norm.dot(&n_b);
    let x_b = if x_b_raw.norm_squared() > 1e-8 {
        x_b_raw.normalize()
    } else {
        // Fallback: if h1 is parallel to n_b (degenerate), pick any orthogonal
        let tangent = if n_b.x.abs() > 0.9 {
            Vector3::y_axis().into_inner()
        } else {
            Vector3::x_axis().into_inner()
        };
        tangent.cross(&n_b).normalize()
    };

    let y_b = n_b.cross(&x_b);
    let rot_b = Matrix3::from_columns(&[x_b, y_b, n_b]);
    let pose_b = Pose::new(rot_b, t_a);

    Some([pose_a, pose_b])
}
369
/// Use a Manifold-Aware Trust-Region Levenberg-Marquardt solver to refine the pose.
///
/// This upgrades the classic LM recipe to a SOTA production solver with three key improvements:
///
/// 1. **Huber M-Estimator (IRLS):** Wraps the reprojection residual in a Huber loss to
///    dynamically down-weight corners with large errors (e.g. from motion blur or occlusion),
///    preventing a single bad corner from corrupting the entire solution.
///
/// 2. **Marquardt Diagonal Scaling:** Damps each parameter proportionally to its own curvature
///    via `D = diag(J^T W J)`, instead of a uniform identity matrix. This correctly handles the
///    scale mismatch between rotational (radians) and translational (meters) parameters.
///
/// 3. **Nielsen's Gain Ratio Control:** Evaluates step quality using the ratio of actual vs.
///    predicted cost reduction. Good steps shrink `lambda` aggressively (Gauss-Newton speed),
///    while bad steps grow it with doubling-`nu` backoff (gradient descent safety). This is
///    strictly superior to the heuristic `lambda *= 10 / 0.1` approach.
#[allow(clippy::too_many_lines)]
fn refine_pose_lm(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    tag_size: f64,
    initial_pose: Pose,
    huber_delta_px: f64,
) -> Pose {
    // Short alias so the body matches the math notation in the comments.
    let huber_delta = huber_delta_px;

    let mut pose = initial_pose;
    let s = tag_size;
    // Modern OpenCV 4.6+ Convention: Origin at Top-Left, CW winding
    let obj_pts = [
        Vector3::new(0.0, 0.0, 0.0), // 0: Top-Left
        Vector3::new(s, 0.0, 0.0),   // 1: Top-Right
        Vector3::new(s, s, 0.0),     // 2: Bottom-Right
        Vector3::new(0.0, s, 0.0),   // 3: Bottom-Left
    ];

    // Nielsen's trust-region state. Start with small damping to encourage Gauss-Newton steps.
    let mut lambda = 1e-3_f64;
    let mut nu = 2.0_f64;
    let mut current_cost = huber_cost(intrinsics, corners, &obj_pts, &pose, huber_delta);

    // Allow up to 20 iterations; typically exits in 3-6 via the convergence gates below.
    for _ in 0..20 {
        // Build J^T W J (6x6) and J^T W r (6x1) with Huber IRLS weights.
        // Layout: delta = [tx, ty, tz, rx, ry, rz] (translation then rotation in se(3)).
        let mut jtj = Matrix6::<f64>::zeros();
        let mut jtr = Vector6::<f64>::zeros();

        for i in 0..4 {
            let p_world = obj_pts[i];
            let p_cam = pose.rotation * p_world + pose.translation;
            let z_inv = 1.0 / p_cam.z;
            let z_inv2 = z_inv * z_inv;

            // Pinhole projection of the current estimate.
            let u_est = intrinsics.fx * p_cam.x * z_inv + intrinsics.cx;
            let v_est = intrinsics.fy * p_cam.y * z_inv + intrinsics.cy;

            // Residual convention: measured - estimated.
            let res_u = corners[i][0] - u_est;
            let res_v = corners[i][1] - v_est;

            // Huber IRLS weight: w=1 inside the trust region, w=delta/r outside.
            // Applied per 2D point (geometric pixel distance).
            let r_norm = (res_u * res_u + res_v * res_v).sqrt();
            let w = if r_norm <= huber_delta {
                1.0
            } else {
                huber_delta / r_norm
            };

            // Jacobian of projection wrt Camera Point (du/dP_cam) (2x3):
            // [ fx/z   0     -fx*x/z^2 ]
            // [ 0      fy/z  -fy*y/z^2 ]
            let du_dp = Vector3::new(
                intrinsics.fx * z_inv,
                0.0,
                -intrinsics.fx * p_cam.x * z_inv2,
            );
            let dv_dp = Vector3::new(
                0.0,
                intrinsics.fy * z_inv,
                -intrinsics.fy * p_cam.y * z_inv2,
            );

            // Jacobian of Camera Point wrt Pose Update (Lie Algebra) (3x6):
            // d(exp(w)*P)/d(xi) at xi=0 = [ I | -[P]_x ]
            // Indices 3..6 below are du_dp x p_cam (chain rule through -[P]_x).
            let mut row_u = Vector6::zeros();
            row_u[0] = du_dp[0];
            row_u[1] = du_dp[1];
            row_u[2] = du_dp[2];
            row_u[3] = du_dp[1] * p_cam.z - du_dp[2] * p_cam.y;
            row_u[4] = du_dp[2] * p_cam.x - du_dp[0] * p_cam.z;
            row_u[5] = du_dp[0] * p_cam.y - du_dp[1] * p_cam.x;

            let mut row_v = Vector6::zeros();
            row_v[0] = dv_dp[0];
            row_v[1] = dv_dp[1];
            row_v[2] = dv_dp[2];
            row_v[3] = dv_dp[1] * p_cam.z - dv_dp[2] * p_cam.y;
            row_v[4] = dv_dp[2] * p_cam.x - dv_dp[0] * p_cam.z;
            row_v[5] = dv_dp[0] * p_cam.y - dv_dp[1] * p_cam.x;

            // Accumulate J^T W J and J^T W r with Huber weight.
            jtj += w * (row_u * row_u.transpose() + row_v * row_v.transpose());
            jtr += w * (row_u * res_u + row_v * res_v);
        }

        // Gate 1: Gradient convergence — solver is at a stationary point.
        if jtr.amax() < 1e-8 {
            break;
        }

        // Marquardt diagonal scaling: D = diag(J^T W J).
        // Damps each DOF proportionally to its own curvature, correcting for the
        // scale mismatch between rotational and translational gradient magnitudes.
        // The 1e-8 floor guards against a zero diagonal entry.
        let d_diag = Vector6::new(
            jtj[(0, 0)].max(1e-8),
            jtj[(1, 1)].max(1e-8),
            jtj[(2, 2)].max(1e-8),
            jtj[(3, 3)].max(1e-8),
            jtj[(4, 4)].max(1e-8),
            jtj[(5, 5)].max(1e-8),
        );

        // Solve (J^T W J + lambda * D) delta = J^T W r
        let mut jtj_damped = jtj;
        for k in 0..6 {
            jtj_damped[(k, k)] += lambda * d_diag[k];
        }

        let delta = if let Some(chol) = jtj_damped.cholesky() {
            chol.solve(&jtr)
        } else {
            // System is ill-conditioned; increase damping and retry.
            lambda *= 10.0;
            nu = 2.0;
            continue;
        };

        // Nielsen's gain ratio: rho = actual_reduction / predicted_reduction.
        // Predicted reduction from the quadratic model (Madsen et al. eq 3.9):
        // L(0) - L(delta) = 0.5 * delta^T (lambda * D * delta + J^T W r)
        let predicted_reduction = 0.5 * delta.dot(&(lambda * d_diag.component_mul(&delta) + jtr));

        // Evaluate new pose via SE(3) exponential map (manifold-safe update).
        let twist = Vector3::new(delta[3], delta[4], delta[5]);
        let trans_update = Vector3::new(delta[0], delta[1], delta[2]);
        let rot_update = nalgebra::Rotation3::new(twist).matrix().into_owned();
        let new_pose = Pose::new(
            rot_update * pose.rotation,
            rot_update * pose.translation + trans_update,
        );

        let new_cost = huber_cost(intrinsics, corners, &obj_pts, &new_pose, huber_delta);
        let actual_reduction = current_cost - new_cost;

        // Guard against a vanishing (or negative) predicted reduction.
        let rho = if predicted_reduction > 1e-12 {
            actual_reduction / predicted_reduction
        } else {
            0.0
        };

        if rho > 0.0 {
            // Accept step: update state and shrink lambda toward Gauss-Newton regime.
            pose = new_pose;
            current_cost = new_cost;
            // Nielsen's update rule: lambda scales by max(1/3, 1 - (2*rho - 1)^3).
            lambda *= (1.0 - (2.0 * rho - 1.0).powi(3)).max(1.0 / 3.0);
            nu = 2.0;

            // Gate 2: Step size convergence — pose has stopped moving.
            if delta.norm() < 1e-7 {
                break;
            }
        } else {
            // Reject step: grow lambda with doubling backoff to stay within trust region.
            lambda *= nu;
            nu *= 2.0;
        }
    }

    pose
}
552
553/// Computes the total Huber robust cost over all four corner reprojection residuals.
554///
555/// The Huber function is quadratic for `|r| <= delta` (L2 regime) and linear beyond
556/// (L1 regime), providing continuous differentiability at the transition point.
557fn huber_cost(
558    intrinsics: &CameraIntrinsics,
559    corners: &[[f64; 2]; 4],
560    obj_pts: &[Vector3<f64>; 4],
561    pose: &Pose,
562    delta: f64,
563) -> f64 {
564    let mut cost = 0.0;
565    for i in 0..4 {
566        let p = pose.project(&obj_pts[i], intrinsics);
567        let r_u = corners[i][0] - p[0];
568        let r_v = corners[i][1] - p[1];
569        // Huber on the 2D geometric distance, consistent with the IRLS weight computation.
570        let r_norm = (r_u * r_u + r_v * r_v).sqrt();
571        if r_norm <= delta {
572            cost += 0.5 * r_norm * r_norm;
573        } else {
574            cost += delta * (r_norm - 0.5 * delta);
575        }
576    }
577    cost
578}
579
580fn reprojection_error(
581    intrinsics: &CameraIntrinsics,
582    corners: &[[f64; 2]; 4],
583    obj_pts: &[Vector3<f64>; 4],
584    pose: &Pose,
585) -> f64 {
586    let mut err_sq = 0.0;
587    for i in 0..4 {
588        let p = pose.project(&obj_pts[i], intrinsics);
589        err_sq += (p[0] - corners[i][0]).powi(2) + (p[1] - corners[i][1]).powi(2);
590    }
591    err_sq
592}
593
594fn find_best_pose(
595    intrinsics: &CameraIntrinsics,
596    corners: &[[f64; 2]; 4],
597    tag_size: f64,
598    candidates: &[Pose; 2],
599) -> Pose {
600    // Need physical points for reprojection (Modern OpenCV 4.6+ Convention: Top-Left origin)
601    let s = tag_size;
602    let obj_pts = [
603        Vector3::new(0.0, 0.0, 0.0), // 0: Top-Left
604        Vector3::new(s, 0.0, 0.0),   // 1: Top-Right
605        Vector3::new(s, s, 0.0),     // 2: Bottom-Right
606        Vector3::new(0.0, s, 0.0),   // 3: Bottom-Left
607    ];
608
609    let err0 = reprojection_error(intrinsics, corners, &obj_pts, &candidates[0]);
610    let err1 = reprojection_error(intrinsics, corners, &obj_pts, &candidates[1]);
611
612    // Choose the candidate with lower reprojection error.
613    if err1 < err0 {
614        candidates[1]
615    } else {
616        candidates[0]
617    }
618}
619
620/// Refine poses for all valid candidates in the batch using the Structure of Arrays (SoA) layout.
621///
622/// This function operates only on the first `v` candidates in the batch, which must have been
623/// partitioned such that all valid candidates are in the range `[0..v]`.
624#[tracing::instrument(skip_all, name = "pipeline::pose_refinement")]
625pub fn refine_poses_soa(
626    batch: &mut DetectionBatch,
627    v: usize,
628    intrinsics: &CameraIntrinsics,
629    tag_size: f64,
630    img: Option<&ImageView>,
631    mode: PoseEstimationMode,
632) {
633    refine_poses_soa_with_config(
634        batch,
635        v,
636        intrinsics,
637        tag_size,
638        img,
639        mode,
640        &crate::config::DetectorConfig::default(),
641    );
642}
643
644/// Refine poses for all valid candidates with explicit config for tuning parameters.
645#[tracing::instrument(skip_all, name = "pipeline::pose_refinement")]
646pub fn refine_poses_soa_with_config(
647    batch: &mut DetectionBatch,
648    v: usize,
649    intrinsics: &CameraIntrinsics,
650    tag_size: f64,
651    img: Option<&ImageView>,
652    mode: PoseEstimationMode,
653    config: &crate::config::DetectorConfig,
654) {
655    use rayon::prelude::*;
656
657    // Process valid candidates in parallel.
658    // We collect into a temporary Vec to avoid unsafe parallel writes to the batch.
659    let poses: Vec<_> = (0..v)
660        .into_par_iter()
661        .map(|i| {
662            let corners = [
663                [
664                    f64::from(batch.corners[i][0].x),
665                    f64::from(batch.corners[i][0].y),
666                ],
667                [
668                    f64::from(batch.corners[i][1].x),
669                    f64::from(batch.corners[i][1].y),
670                ],
671                [
672                    f64::from(batch.corners[i][2].x),
673                    f64::from(batch.corners[i][2].y),
674                ],
675                [
676                    f64::from(batch.corners[i][3].x),
677                    f64::from(batch.corners[i][3].y),
678                ],
679            ];
680
681            let (pose_opt, _) =
682                estimate_tag_pose_with_config(intrinsics, &corners, tag_size, img, mode, config);
683
684            if let Some(pose) = pose_opt {
685                let q = UnitQuaternion::from_matrix(&pose.rotation);
686                let t = pose.translation;
687
688                // Data layout: [tx, ty, tz, qx, qy, qz, qw]
689                let mut data = [0.0f32; 7];
690                data[0] = t.x as f32;
691                data[1] = t.y as f32;
692                data[2] = t.z as f32;
693                data[3] = q.coords.x as f32;
694                data[4] = q.coords.y as f32;
695                data[5] = q.coords.z as f32;
696                data[6] = q.coords.w as f32;
697                Some(data)
698            } else {
699                None
700            }
701        })
702        .collect();
703
704    for (i, pose_data) in poses.into_iter().enumerate() {
705        if let Some(data) = pose_data {
706            batch.poses[i] = Pose6D { data, padding: 0.0 };
707        } else {
708            batch.poses[i] = Pose6D {
709                data: [0.0; 7],
710                padding: 0.0,
711            };
712        }
713    }
714}
715
#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::*;

    /// Pinhole projection sanity check against hand-computed pixel values.
    #[test]
    fn test_pose_projection() {
        let intrinsics = CameraIntrinsics::new(500.0, 500.0, 320.0, 240.0);
        let rotation = Matrix3::identity();
        let translation = Vector3::new(0.0, 0.0, 2.0); // 2 meters away
        let pose = Pose::new(rotation, translation);

        let p_world = Vector3::new(0.1, 0.1, 0.0);
        let p_img = pose.project(&p_world, &intrinsics);

        // x = (0.1 / 2.0) * 500 + 320 = 0.05 * 500 + 320 = 25 + 320 = 345
        // y = (0.1 / 2.0) * 500 + 240 = 265
        assert!((p_img[0] - 345.0).abs() < 1e-6);
        assert!((p_img[1] - 265.0).abs() < 1e-6);
    }

    /// Round-trip: project a known ground-truth pose, then recover it from
    /// the noiseless projections (Fast mode, no image required).
    #[test]
    fn test_perfect_pose_estimation() {
        let intrinsics = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
        let gt_rot = Matrix3::identity(); // Facing straight
        let gt_t = Vector3::new(0.1, -0.2, 1.5);
        let gt_pose = Pose::new(gt_rot, gt_t);

        let tag_size = 0.16; // 16cm
        let s = tag_size;
        // Modern OpenCV 4.6+ Convention: Top-Left origin
        let obj_pts = [
            Vector3::new(0.0, 0.0, 0.0), // 0: Top-Left
            Vector3::new(s, 0.0, 0.0),   // 1: Top-Right
            Vector3::new(s, s, 0.0),     // 2: Bottom-Right
            Vector3::new(0.0, s, 0.0),   // 3: Bottom-Left
        ];

        let mut img_pts = [[0.0, 0.0]; 4];
        for i in 0..4 {
            img_pts[i] = gt_pose.project(&obj_pts[i], &intrinsics);
        }

        let (est_pose, _) = estimate_tag_pose(
            &intrinsics,
            &img_pts,
            tag_size,
            None,
            PoseEstimationMode::Fast,
        );
        let est_pose = est_pose.expect("Pose estimation failed");

        // Check translation
        assert!((est_pose.translation.x - gt_t.x).abs() < 1e-3);
        assert!((est_pose.translation.y - gt_t.y).abs() < 1e-3);
        assert!((est_pose.translation.z - gt_t.z).abs() < 1e-3);

        // Check rotation (identity)
        let diff_rot = est_pose.rotation - gt_rot;
        assert!(diff_rot.norm() < 1e-3);
    }

    proptest! {
        // K * K^-1 must be the identity for arbitrary valid intrinsics.
        #[test]
        fn prop_intrinsics_inversion(
            fx in 100.0..2000.0f64,
            fy in 100.0..2000.0f64,
            cx in 0.0..1000.0f64,
            cy in 0.0..1000.0f64
        ) {
            let intrinsics = CameraIntrinsics::new(fx, fy, cx, cy);
            let k = intrinsics.as_matrix();
            let k_inv = intrinsics.inv_matrix();
            let identity = k * k_inv;

            let expected = Matrix3::<f64>::identity();
            for i in 0..3 {
                for j in 0..3 {
                    prop_assert!((identity[(i, j)] - expected[(i, j)]).abs() < 1e-9);
                }
            }
        }

        // Pose recovery must stay near ground truth under random viewpoints
        // and small pixel noise.
        #[test]
        fn prop_pose_recovery_stability(
            tx in -0.5..0.5f64,
            ty in -0.5..0.5f64,
            tz in 0.5..5.0f64, // Tag must be in front of camera
            roll in -0.5..0.5f64,
            pitch in -0.5..0.5f64,
            yaw in -0.5..0.5f64,
            noise in 0.0..0.1f64 // pixels of noise
        ) {
            let intrinsics = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
            let translation = Vector3::new(tx, ty, tz);

            // Create rotation from Euler angles using Rotation3
            let r_obj = nalgebra::Rotation3::from_euler_angles(roll, pitch, yaw);
            let rotation = r_obj.matrix().into_owned();
            let gt_pose = Pose::new(rotation, translation);

            let tag_size = 0.16;
            let s = tag_size;
            // Modern OpenCV 4.6+ Convention: Top-Left origin
            let obj_pts = [
                Vector3::new(0.0, 0.0, 0.0), // 0: Top-Left
                Vector3::new(s, 0.0, 0.0),   // 1: Top-Right
                Vector3::new(s, s, 0.0),     // 2: Bottom-Right
                Vector3::new(0.0, s, 0.0),   // 3: Bottom-Left
            ];

            let mut img_pts = [[0.0, 0.0]; 4];
            for i in 0..4 {
                let p = gt_pose.project(&obj_pts[i], &intrinsics);
                // Add tiny bit of noise
                img_pts[i] = [p[0] + noise, p[1] + noise];
            }

            if let (Some(est_pose), _) = estimate_tag_pose(&intrinsics, &img_pts, tag_size, None, PoseEstimationMode::Fast) {
                // Check if recovered pose is reasonably close
                // Note: noise decreases accuracy, so we use a loose threshold
                let t_err = (est_pose.translation - translation).norm();
                prop_assert!(t_err < 0.1 + noise * 0.1, "Translation error {} too high for noise {}", t_err, noise);

                let r_err = (est_pose.rotation - rotation).norm();
                prop_assert!(r_err < 0.1 + noise * 0.1, "Rotation error {} too high for noise {}", r_err, noise);
            }
        }
    }
}