// locus_core/pose.rs

//! 3D Pose Estimation (PnP) for fiducial markers.
//!
//! This module recovers the 6-DOF transformation between the camera and the tag.
//! It supports:
//! - **Fast Mode (IPPE)**: Infinitesimal Plane-Based Pose Estimation for low latency.
//! - **Accurate Mode (Weighted LM)**: Iterative refinement weighted by sub-pixel uncertainty.

#![allow(clippy::many_single_char_names, clippy::similar_names)]
use crate::config::PoseEstimationMode;
use crate::image::ImageView;
use nalgebra::{Matrix3, Matrix6, Vector3, Vector6};
12
/// Pinhole camera intrinsic parameters (no lens-distortion terms).
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CameraIntrinsics {
    /// Horizontal focal length, in pixels.
    pub fx: f64,
    /// Vertical focal length, in pixels.
    pub fy: f64,
    /// Principal point x coordinate, in pixels.
    pub cx: f64,
    /// Principal point y coordinate, in pixels.
    pub cy: f64,
}
26
27impl CameraIntrinsics {
28    /// Create new intrinsics.
29    #[must_use]
30    pub fn new(fx: f64, fy: f64, cx: f64, cy: f64) -> Self {
31        Self { fx, fy, cx, cy }
32    }
33
34    /// Convert to a 3x3 matrix.
35    #[must_use]
36    pub fn as_matrix(&self) -> Matrix3<f64> {
37        Matrix3::new(self.fx, 0.0, self.cx, 0.0, self.fy, self.cy, 0.0, 0.0, 1.0)
38    }
39
40    /// Get inverse matrix.
41    #[must_use]
42    pub fn inv_matrix(&self) -> Matrix3<f64> {
43        Matrix3::new(
44            1.0 / self.fx,
45            0.0,
46            -self.cx / self.fx,
47            0.0,
48            1.0 / self.fy,
49            -self.cy / self.fy,
50            0.0,
51            0.0,
52            1.0,
53        )
54    }
55}
56
57// 3D Pose Estimation using PnP (Perspective-n-Point).
58/// A 3D pose representing rotation and translation.
59#[derive(Debug, Clone, Copy)]
60#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
61pub struct Pose {
62    /// 3x3 Rotation matrix.
63    pub rotation: Matrix3<f64>,
64    /// 3x1 Translation vector.
65    pub translation: Vector3<f64>,
66}
67
68impl Pose {
69    /// Create a new pose.
70    #[must_use]
71    pub fn new(rotation: Matrix3<f64>, translation: Vector3<f64>) -> Self {
72        Self {
73            rotation,
74            translation,
75        }
76    }
77
78    /// Project a 3D point into the image using this pose and intrinsics.
79    #[must_use]
80    pub fn project(&self, point: &Vector3<f64>, intrinsics: &CameraIntrinsics) -> [f64; 2] {
81        let p_cam = self.rotation * point + self.translation;
82        let x = (p_cam.x / p_cam.z) * intrinsics.fx + intrinsics.cx;
83        let y = (p_cam.y / p_cam.z) * intrinsics.fy + intrinsics.cy;
84        [x, y]
85    }
86}
87
88/// Estimate pose from tag detection using homography decomposition and refinement.
89///
90/// # Arguments
91/// * `intrinsics` - Camera intrinsics.
92/// * `corners` - Detected corners in image coordinates [[x, y]; 4].
93/// * `tag_size` - Physical size of the tag in world units (e.g., meters).
94/// * `img` - Optional image view (required for Accurate mode).
95/// * `mode` - Pose estimation mode (Fast vs Accurate).
96///
97/// # Returns
98/// A tuple containing:
99/// * `Option<Pose>`: The estimated pose (if successful).
100/// * `Option<[[f64; 6]; 6]>`: The estimated covariance matrix (if Accurate mode enabled).
101///
102/// # Panics
103/// Panics if SVD decomposition fails during orthogonalization (extremely rare).
104#[must_use]
105#[allow(clippy::missing_panics_doc)]
106pub fn estimate_tag_pose(
107    intrinsics: &CameraIntrinsics,
108    corners: &[[f64; 2]; 4],
109    tag_size: f64,
110    img: Option<&ImageView>,
111    mode: PoseEstimationMode,
112) -> (Option<Pose>, Option<[[f64; 6]; 6]>) {
113    // 1. Canonical Homography: Map canonical square [-1,1]x[-1,1] to image pixels.
114    let Some(h_poly) = crate::decoder::Homography::square_to_quad(corners) else {
115        return (None, None);
116    };
117    let h_pixel = h_poly.h;
118
119    // 2. Normalize Homography: H_norm = K_inv * H_pixel
120    let k_inv = intrinsics.inv_matrix();
121    let h_norm = k_inv * h_pixel;
122
123    // 3. Scale to Physical Model
124    let scaler = 2.0 / tag_size;
125    let mut h_metric = h_norm;
126    h_metric.column_mut(0).scale_mut(scaler);
127    h_metric.column_mut(1).scale_mut(scaler);
128
129    // 4. IPPE Core: Decompose Jacobian (first 2 image cols) into 2 potential poses.
130    let Some(candidates) = solve_ippe_square(&h_metric) else {
131        return (None, None);
132    };
133
134    // 5. Disambiguation: Choose the pose with lower reprojection error.
135    let best_pose = find_best_pose(intrinsics, corners, tag_size, &candidates);
136
137    // 6. Refinement: Levenberg-Marquardt (LM)
138    match (mode, img) {
139        (PoseEstimationMode::Accurate, Some(image)) => {
140            // Compute corner uncertainty from structure tensor
141            let uncertainty =
142                crate::pose_weighted::compute_framework_uncertainty(image, corners, &h_poly);
143            let (refined_pose, covariance) = crate::pose_weighted::refine_pose_lm_weighted(
144                intrinsics,
145                corners,
146                tag_size,
147                best_pose,
148                &uncertainty,
149            );
150            (Some(refined_pose), Some(covariance))
151        },
152        _ => {
153            // Fast mode (Identity weights)
154            (
155                Some(refine_pose_lm(intrinsics, corners, tag_size, best_pose)),
156                None,
157            )
158        },
159    }
160}
161
/// Solves the IPPE-Square problem.
/// Returns two possible poses ($R_a, t$) and ($R_b, t$) corresponding to the two minima of the PnP error function.
///
/// This uses an analytical approach derived from the homography Jacobian's SVD.
/// The second solution handles the "Necker reversal" ambiguity inherent in planar pose estimation.
///
/// `h` is the metric-normalized homography (intrinsics removed, first two
/// columns scaled to the physical tag model). Returns `None` when the
/// Jacobian is numerically rank deficient (gamma or s1 near zero).
fn solve_ippe_square(h: &Matrix3<f64>) -> Option<[Pose; 2]> {
    // IPPE-Square analytical solution (zero allocation).
    // Jacobian J = [h1, h2]
    let h1 = h.column(0);
    let h2 = h.column(1);

    // 1. Compute B = J^T J (2x2 symmetric matrix)
    //    [ a  c ]
    //    [ c  b ]
    let a = h1.dot(&h1);
    let b = h2.dot(&h2);
    let c = h1.dot(&h2);

    // 2. Eigen analysis of the 2x2 matrix B
    //    Characteristic eq: lambda^2 - Tr(B)lambda + Det(B) = 0
    let trace = a + b;
    let det = a * b - c * c;
    // max(0.0) guards against a tiny negative discriminant from rounding.
    let delta = (trace * trace - 4.0 * det).max(0.0).sqrt();

    // lambda1 >= lambda2
    let s1_sq = (trace + delta) * 0.5;
    let s2_sq = (trace - delta) * 0.5;

    // Singular values sigma = sqrt(lambda)
    let s1 = s1_sq.sqrt();
    let s2 = s2_sq.sqrt();

    // Check for Frontal View (Degeneracy: s1 ~= s2)
    // We use a safe threshold relative to the max singular value.
    if (s1 - s2).abs() < 1e-4 * s1 {
        // Degenerate Case: Frontal View (J columns orthogonal & equal length)
        // R = Gram-Schmidt orthonormalization of [h1, h2, h1xh2]

        let mut r1 = h1.clone_owned();
        let scale = 1.0 / r1.norm();
        r1 *= scale;

        // Orthogonalize r2 w.r.t r1
        let mut r2 = h2 - r1 * (h2.dot(&r1));
        r2 = r2.normalize();

        let r3 = r1.cross(&r2);
        let rot = Matrix3::from_columns(&[r1, r2, r3]);

        // Translation: t = h3 * scale.
        // Gamma (homography scale) is recovered from J.
        // gamma * R = J => gamma = ||h1|| (roughly).
        // We use average of singular values for robustness.
        let gamma = (s1 + s2) * 0.5;
        if gamma < 1e-8 {
            return None;
        } // Avoid div/0
        let tz = 1.0 / gamma;
        let t = h.column(2) * tz;

        let pose = Pose::new(rot, t);
        // A frontal view has no pose ambiguity: both slots carry the same pose.
        return Some([pose, pose]);
    }

    // 3. Recover Rotation A (Primary)
    // We basically want R such that J ~ R * S_prj.
    // Standard approach: R = U * V^T where J = U S V^T.
    //
    // Analytical 3x2 SVD reconstruction:
    // U = [u1, u2], V = [v1, v2]
    // U_i = J * v_i / s_i

    // Eigenvectors of B (columns of V)
    // For 2x2 matrix [a c; c b]:
    // If c != 0:
    //   v1 = [s1^2 - b, c], normalized  (satisfies (B - s1^2 I) v1 = 0)
    // else:
    //   v1 = [1, 0] if a > b else [0, 1]

    let v1 = if c.abs() > 1e-8 {
        let v = nalgebra::Vector2::new(s1_sq - b, c);
        v.normalize()
    } else if a >= b {
        nalgebra::Vector2::new(1.0, 0.0)
    } else {
        nalgebra::Vector2::new(0.0, 1.0)
    };

    // v2 is orthogonal to v1. For 2D, [-v1.y, v1.x]
    let v2 = nalgebra::Vector2::new(-v1.y, v1.x);

    // Compute Left Singular Vectors u1, u2 inside the 3D space
    // u1 = J * v1 / s1
    // u2 = J * v2 / s2
    let j_v1 = h1 * v1.x + h2 * v1.y;
    let j_v2 = h1 * v2.x + h2 * v2.y;

    // Safe division check: s1 is the larger singular value, so s1 ~ 0 means
    // the whole Jacobian is degenerate.
    if s1 < 1e-8 {
        return None;
    }
    let u1 = j_v1 / s1;
    let u2 = j_v2 / s2.max(1e-8); // Avoid div/0 for s2

    // Reconstruct Rotation A
    // R = U * V^T (extended to 3x3)
    // The columns of R are r1, r2, r3.
    // In SVD terms:
    // [r1 r2] = [u1 u2] * [v1 v2]^T
    // r1 = u1 * v1.x + u2 * v2.x
    // r2 = u1 * v1.y + u2 * v2.y
    let r1_a = u1 * v1.x + u2 * v2.x;
    let r2_a = u1 * v1.y + u2 * v2.y;
    let r3_a = r1_a.cross(&r2_a);
    let rot_a = Matrix3::from_columns(&[r1_a, r2_a, r3_a]);

    // Translation A (gamma > 0 is guaranteed here since s1 >= 1e-8 above).
    let gamma = (s1 + s2) * 0.5;
    let tz = 1.0 / gamma;
    let t_a = h.column(2) * tz;
    let pose_a = Pose::new(rot_a, t_a);

    // 4. Recover Rotation B (Second Solution)
    // Necker Reversal: reflect the plane normal across the line of sight,
    // n_b = [-nx, -ny, nz] (approx). We use this robust normal-reflection
    // construction rather than the full analytical IPPE dual.
    let n_a = rot_a.column(2);
    // Safe normalize for n_b
    let n_b_raw = Vector3::new(-n_a.x, -n_a.y, n_a.z);
    let n_b = if n_b_raw.norm_squared() > 1e-8 {
        n_b_raw.normalize()
    } else {
        // Fallback (should be impossible for unit vector n_a)
        Vector3::z_axis().into_inner()
    };

    // Construct R_b using Gram-Schmidt from (h1, n_b)
    // x_axis projection is h1 (roughly).
    // x_b = (h1 - (h1.n)n).normalize
    let h1_norm = h1.normalize();
    let x_b_raw = h1_norm - n_b * h1_norm.dot(&n_b);
    let x_b = if x_b_raw.norm_squared() > 1e-8 {
        x_b_raw.normalize()
    } else {
        // Fallback: if h1 is parallel to n_b (degenerate), pick any orthogonal
        let tangent = if n_b.x.abs() > 0.9 {
            Vector3::y_axis().into_inner()
        } else {
            Vector3::x_axis().into_inner()
        };
        tangent.cross(&n_b).normalize()
    };

    let y_b = n_b.cross(&x_b);
    let rot_b = Matrix3::from_columns(&[x_b, y_b, n_b]);
    // Both candidates share the same translation; only the rotation differs.
    let pose_b = Pose::new(rot_b, t_a);

    Some([pose_a, pose_b])
}
324
325/// Use Levenberg-Marquardt to refine the pose by minimizing reprojection error.
326fn refine_pose_lm(
327    intrinsics: &CameraIntrinsics,
328    corners: &[[f64; 2]; 4],
329    tag_size: f64,
330    initial_pose: Pose,
331) -> Pose {
332    let mut pose = initial_pose;
333    let s = tag_size * 0.5;
334    let obj_pts = [
335        Vector3::new(-s, -s, 0.0),
336        Vector3::new(s, -s, 0.0),
337        Vector3::new(s, s, 0.0),
338        Vector3::new(-s, s, 0.0),
339    ];
340
341    // Damping factor lambda
342    let mut lambda = 0.01;
343    let mut current_err = reprojection_error(intrinsics, corners, &obj_pts, &pose);
344
345    // 3-5 iterations is usually enough for "Polish"
346    // Reduced to 4 for lower latency while maintaining accuracy.
347    for _ in 0..4 {
348        // Build Jacobian J (8x6) and residual r (8x1)
349        // 4 points * 2 coords = 8 residuals.
350        // 6 params (3 rot (lie algebra), 3 trans).
351        // J^T J (6x6) and J^T r (6x1).
352
353        // For "Zero Overhead" we construct JtJ directly accumulated.
354        let mut jtj = Matrix6::<f64>::zeros();
355        let mut jtr = Vector6::<f64>::zeros();
356
357        for i in 0..4 {
358            let p_world = obj_pts[i];
359            let p_cam = pose.rotation * p_world + pose.translation;
360            let z_inv = 1.0 / p_cam.z;
361            let z_inv2 = z_inv * z_inv;
362
363            // Project: u = fx * x/z + cx
364            let u_est = intrinsics.fx * p_cam.x * z_inv + intrinsics.cx;
365            let v_est = intrinsics.fy * p_cam.y * z_inv + intrinsics.cy;
366
367            let res_u = corners[i][0] - u_est;
368            let res_v = corners[i][1] - v_est;
369
370            // Jacobian of projection wrt Camera Point (du/dP_cam) (2x3)
371            // [ fx/z   0     -fx*x/z^2 ]
372            // [ 0      fy/z  -fy*y/z^2 ]
373            let du_dp = Vector3::new(
374                intrinsics.fx * z_inv,
375                0.0,
376                -intrinsics.fx * p_cam.x * z_inv2,
377            );
378            let dv_dp = Vector3::new(
379                0.0,
380                intrinsics.fy * z_inv,
381                -intrinsics.fy * p_cam.y * z_inv2,
382            );
383
384            // Jacobian of Camera Point wrt Pose Update (Lie Algebra) (3x6)
385            // d(exp(w)*P)/d(xi) at xi=0
386            // [ I  |  -[P]_x ]
387            // [ 1 0 0   0   z  -y ]
388            // [ 0 1 0  -z   0   x ]
389            // [ 0 0 1   y  -x   0 ]
390            //
391            // We compose du/dP * dP/dxi -> 1x6 row.
392
393            let mut row_u = Vector6::zeros();
394            // Translation part (du/dp * I)
395            row_u[0] = du_dp[0];
396            row_u[1] = du_dp[1];
397            row_u[2] = du_dp[2];
398            // Rotation part (du/dp * Skew(P))
399            // Skew(P) = [0 -z y; z 0 -x; -y x 0]
400            // du_dp * col0(S) = du_dp.y * z - du_dp.z * y
401            row_u[3] = du_dp[1] * p_cam.z - du_dp[2] * p_cam.y;
402            row_u[4] = du_dp[2] * p_cam.x - du_dp[0] * p_cam.z;
403            row_u[5] = du_dp[0] * p_cam.y - du_dp[1] * p_cam.x;
404
405            let mut row_v = Vector6::zeros();
406            row_v[0] = dv_dp[0];
407            row_v[1] = dv_dp[1];
408            row_v[2] = dv_dp[2];
409            row_v[3] = dv_dp[1] * p_cam.z - dv_dp[2] * p_cam.y;
410            row_v[4] = dv_dp[2] * p_cam.x - dv_dp[0] * p_cam.z;
411            row_v[5] = dv_dp[0] * p_cam.y - dv_dp[1] * p_cam.x;
412
413            // Accumulate JtJ and Jtr
414            // JtJ += row^T * row
415            jtj += row_u * row_u.transpose();
416            jtj += row_v * row_v.transpose();
417
418            jtr += row_u * res_u;
419            jtr += row_v * res_v;
420        }
421
422        // Dampen: (JtJ + lambda*I) delta = Jtr
423        for k in 0..6 {
424            jtj[(k, k)] += lambda; // Levenberg
425        }
426
427        // Solve
428        let decomposition = jtj.cholesky();
429        let delta = if let Some(chol) = decomposition {
430            chol.solve(&jtr)
431        } else {
432            // Ill-conditioned, increase lambda and skip
433            lambda *= 10.0;
434            continue;
435        };
436
437        // Update Pose
438        let update_twist = Vector3::new(delta[3], delta[4], delta[5]);
439        let update_trans = Vector3::new(delta[0], delta[1], delta[2]);
440
441        // Exp map for rotation update
442        let update_rot = nalgebra::Rotation3::new(update_twist).matrix().into_owned();
443
444        let new_rot = update_rot * pose.rotation;
445        let new_trans = update_rot * pose.translation + update_trans;
446        let new_pose = Pose::new(new_rot, new_trans);
447
448        let new_err = reprojection_error(intrinsics, corners, &obj_pts, &new_pose);
449
450        if new_err < current_err {
451            pose = new_pose;
452            current_err = new_err;
453            lambda *= 0.1;
454        } else {
455            lambda *= 10.0;
456        }
457
458        if delta.norm() < 1e-6 {
459            break;
460        }
461    }
462
463    pose
464}
465
466fn reprojection_error(
467    intrinsics: &CameraIntrinsics,
468    corners: &[[f64; 2]; 4],
469    obj_pts: &[Vector3<f64>; 4],
470    pose: &Pose,
471) -> f64 {
472    let mut err_sq = 0.0;
473    for i in 0..4 {
474        let p = pose.project(&obj_pts[i], intrinsics);
475        err_sq += (p[0] - corners[i][0]).powi(2) + (p[1] - corners[i][1]).powi(2);
476    }
477    err_sq
478}
479
480fn find_best_pose(
481    intrinsics: &CameraIntrinsics,
482    corners: &[[f64; 2]; 4],
483    tag_size: f64,
484    candidates: &[Pose; 2],
485) -> Pose {
486    // Need physical points for reprojection
487    let s = tag_size * 0.5;
488    let obj_pts = [
489        Vector3::new(-s, -s, 0.0),
490        Vector3::new(s, -s, 0.0),
491        Vector3::new(s, s, 0.0),
492        Vector3::new(-s, s, 0.0),
493    ];
494
495    let err0 = reprojection_error(intrinsics, corners, &obj_pts, &candidates[0]);
496    let err1 = reprojection_error(intrinsics, corners, &obj_pts, &candidates[1]);
497
498    // Choose the candidate with lower reprojection error.
499    if err1 < err0 {
500        candidates[1]
501    } else {
502        candidates[0]
503    }
504}
505
#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::*;

    /// Identity-rotation projection checked against hand-computed pixel values.
    #[test]
    fn test_pose_projection() {
        let intrinsics = CameraIntrinsics::new(500.0, 500.0, 320.0, 240.0);
        let rotation = Matrix3::identity();
        let translation = Vector3::new(0.0, 0.0, 2.0); // 2 meters away
        let pose = Pose::new(rotation, translation);

        let p_world = Vector3::new(0.1, 0.1, 0.0);
        let p_img = pose.project(&p_world, &intrinsics);

        // x = (0.1 / 2.0) * 500 + 320 = 0.05 * 500 + 320 = 25 + 320 = 345
        // y = (0.1 / 2.0) * 500 + 240 = 265
        assert!((p_img[0] - 345.0).abs() < 1e-6);
        assert!((p_img[1] - 265.0).abs() < 1e-6);
    }

    /// Round trip: project a known pose to corners, then re-estimate it from
    /// those noiseless corners in Fast mode.
    #[test]
    fn test_perfect_pose_estimation() {
        let intrinsics = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
        let gt_rot = Matrix3::identity(); // Facing straight
        let gt_t = Vector3::new(0.1, -0.2, 1.5);
        let gt_pose = Pose::new(gt_rot, gt_t);

        let tag_size = 0.16; // 16cm
        let s = tag_size * 0.5;
        let obj_pts = [
            Vector3::new(-s, -s, 0.0),
            Vector3::new(s, -s, 0.0),
            Vector3::new(s, s, 0.0),
            Vector3::new(-s, s, 0.0),
        ];

        let mut img_pts = [[0.0, 0.0]; 4];
        for i in 0..4 {
            img_pts[i] = gt_pose.project(&obj_pts[i], &intrinsics);
        }

        let (est_pose, _) = estimate_tag_pose(
            &intrinsics,
            &img_pts,
            tag_size,
            None,
            PoseEstimationMode::Fast,
        );
        let est_pose = est_pose.expect("Pose estimation failed");

        // Check translation
        assert!((est_pose.translation.x - gt_t.x).abs() < 1e-3);
        assert!((est_pose.translation.y - gt_t.y).abs() < 1e-3);
        assert!((est_pose.translation.z - gt_t.z).abs() < 1e-3);

        // Check rotation (identity)
        let diff_rot = est_pose.rotation - gt_rot;
        assert!(diff_rot.norm() < 1e-3);
    }

    proptest! {
        /// K * K^-1 must be the identity for arbitrary valid intrinsics.
        #[test]
        fn prop_intrinsics_inversion(
            fx in 100.0..2000.0f64,
            fy in 100.0..2000.0f64,
            cx in 0.0..1000.0f64,
            cy in 0.0..1000.0f64
        ) {
            let intrinsics = CameraIntrinsics::new(fx, fy, cx, cy);
            let k = intrinsics.as_matrix();
            let k_inv = intrinsics.inv_matrix();
            let identity = k * k_inv;

            let expected = Matrix3::<f64>::identity();
            for i in 0..3 {
                for j in 0..3 {
                    prop_assert!((identity[(i, j)] - expected[(i, j)]).abs() < 1e-9);
                }
            }
        }

        /// Recovered pose stays close to ground truth over a range of poses
        /// and small per-corner noise (thresholds loosen with the noise).
        #[test]
        fn prop_pose_recovery_stability(
            tx in -0.5..0.5f64,
            ty in -0.5..0.5f64,
            tz in 0.5..5.0f64, // Tag must be in front of camera
            roll in -0.5..0.5f64,
            pitch in -0.5..0.5f64,
            yaw in -0.5..0.5f64,
            noise in 0.0..0.1f64 // pixels of noise
        ) {
            let intrinsics = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
            let translation = Vector3::new(tx, ty, tz);

            // Create rotation from Euler angles using Rotation3
            let r_obj = nalgebra::Rotation3::from_euler_angles(roll, pitch, yaw);
            let rotation = r_obj.matrix().into_owned();
            let gt_pose = Pose::new(rotation, translation);

            let tag_size = 0.16;
            let s = tag_size * 0.5;
            let obj_pts = [
                Vector3::new(-s, -s, 0.0),
                Vector3::new(s, -s, 0.0),
                Vector3::new(s, s, 0.0),
                Vector3::new(-s, s, 0.0),
            ];

            let mut img_pts = [[0.0, 0.0]; 4];
            for i in 0..4 {
                let p = gt_pose.project(&obj_pts[i], &intrinsics);
                // Add tiny bit of noise (same offset on all corners)
                img_pts[i] = [p[0] + noise, p[1] + noise];
            }

            if let (Some(est_pose), _) = estimate_tag_pose(&intrinsics, &img_pts, tag_size, None, PoseEstimationMode::Fast) {
                // Check if recovered pose is reasonably close
                // Note: noise decreases accuracy, so we use a loose threshold
                let t_err = (est_pose.translation - translation).norm();
                prop_assert!(t_err < 0.1 + noise * 0.1, "Translation error {} too high for noise {}", t_err, noise);

                let r_err = (est_pose.rotation - rotation).norm();
                prop_assert!(r_err < 0.1 + noise * 0.1, "Rotation error {} too high for noise {}", r_err, noise);
            }
        }
    }
}