// vision_calibration_optim/problems/handeye.rs
//! Hand-eye calibration for robot-mounted cameras.
//!
//! Default optimization state (fixed target):
//! - per-camera intrinsics and distortion
//! - per-camera extrinsics (camera-to-rig)
//! - hand-eye transform (mode-dependent; see `HandEyeParams::handeye`)
//! - fixed target pose (mode-dependent; see `HandEyeParams::target_poses`)
//! - optional per-view robot pose corrections delta_i in se(3), with zero-mean priors
//!
//! Transform chain (eye-in-hand):
//! `T_C_T_i = (T_B_E_i * X)^-1 * Y` where `X` is hand-eye and `Y` is target in base.
//! With robot refinement: `T_B_E_i_corr = exp(delta_i) * T_B_E_i` (left-multiply).
//!
//! Residuals:
//! - reprojection errors for observed target points using the robot pose chain
//! - optional priors on delta_i (anisotropic rotation/translation sigmas)
//! - when delta_i are enabled, delta_0 is fixed to zero to remove gauge freedom
//!
//! Legacy mode can relax per-view target poses, but that is discouraged for a
//! physically fixed target because it weakens hand-eye observability.

22use crate::Error;
23use crate::backend::{BackendKind, BackendSolveOptions, SolveReport, solve_with_backend};
24use crate::ir::{
25    FactorKind, FixedMask, HandEyeMode, ManifoldKind, ProblemIR, ResidualBlock, RobustLoss,
26};
27use crate::params::distortion::{DISTORTION_DIM, pack_distortion, unpack_distortion};
28use crate::params::intrinsics::{INTRINSICS_DIM, pack_intrinsics, unpack_intrinsics};
29use crate::params::pose_se3::iso3_to_se3_dvec;
30use anyhow::ensure;
31type AnyhowResult<T> = anyhow::Result<T>;
32use nalgebra::DVector;
33use serde::{Deserialize, Serialize};
34use std::collections::HashMap;
35use vision_calibration_core::{
36    CameraFixMask, Iso3, PinholeCamera, Real, RigDataset, RigView, make_pinhole_camera,
37};
38
/// Per-view robot metadata required by hand-eye calibration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RobotPoseMeta {
    /// Measured gripper pose expressed in the robot base frame (T_B_G).
    ///
    /// The serde alias keeps datasets serialized with the older field name
    /// `robot_pose` deserializable.
    #[serde(alias = "robot_pose")]
    pub base_se3_gripper: Iso3,
}
46
/// Dataset wrapper for hand-eye optimization.
///
/// Couples the multi-camera rig observations (with per-view robot poses in
/// [`RobotPoseMeta`]) to the hand-eye mode that selects the transform chain.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandEyeDataset {
    /// Multi-camera observations with robot metadata.
    pub data: RigDataset<RobotPoseMeta>,
    /// Hand-eye mode controlling the transform chain (eye-in-hand vs eye-to-hand).
    pub mode: HandEyeMode,
}
55
56impl HandEyeDataset {
57    /// Create dataset from views.
58    pub fn new(
59        views: Vec<RigView<RobotPoseMeta>>,
60        num_cameras: usize,
61        mode: HandEyeMode,
62    ) -> AnyhowResult<Self> {
63        ensure!(!views.is_empty(), "need at least one view");
64        for (idx, view) in views.iter().enumerate() {
65            ensure!(
66                view.obs.cameras.len() == num_cameras,
67                "view {} has {} cameras, expected {}",
68                idx,
69                view.obs.cameras.len(),
70                num_cameras
71            );
72        }
73        Ok(Self {
74            data: RigDataset { views, num_cameras },
75            mode,
76        })
77    }
78
79    /// Number of views.
80    pub fn num_views(&self) -> usize {
81        self.data.views.len()
82    }
83}
84
/// Initial values for hand-eye calibration.
///
/// - `cam_to_rig`: camera pose in the rig frame (`T_C_R`)
/// - `handeye`:
///   - `EyeInHand`: gripper pose in the rig frame (`T_G_R`)
///   - `EyeToHand`: rig pose in the base frame (`T_R_B`)
/// - `target_poses`: fixed target pose(s) depending on the selected mode
///
/// `cameras` and `cam_to_rig` must each have one entry per rig camera; this is
/// validated when the optimization problem is built.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandEyeParams {
    /// Per-camera intrinsics and distortion.
    pub cameras: Vec<PinholeCamera>,
    /// Per-camera extrinsics (camera-to-rig transforms).
    pub cam_to_rig: Vec<Iso3>,
    /// Hand-eye transform (mode-dependent).
    ///
    /// - `EyeInHand`: `handeye = gripper_from_rig` (`T_G_R`)
    /// - `EyeToHand`: `handeye = rig_from_base` (`T_R_B`)
    pub handeye: Iso3,
    /// Calibration target poses.
    ///
    /// - Default (fixed target):
    ///   - `EyeInHand`: the first pose is used as the initial `base_from_target` (`T_B_T`)
    ///   - `EyeToHand`: the first pose is used as the initial `gripper_from_target` (`T_G_T`)
    /// - Legacy (`relax_target_poses = true`): one pose per view is required.
    pub target_poses: Vec<Iso3>,
}
111
/// Solve options for hand-eye calibration.
///
/// See the [`Default`] impl for the recommended starting configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandEyeSolveOptions {
    /// Robust loss applied to reprojection residuals.
    pub robust_loss: RobustLoss,

    /// Default mask for fixing camera parameters (applied to all cameras).
    pub default_fix: CameraFixMask,
    /// Optional per-camera overrides (None = use default_fix).
    pub camera_overrides: Vec<Option<CameraFixMask>>,
    /// Per-camera extrinsics masking; entries beyond the vector's length are
    /// treated as free (not fixed), so an empty vector frees all extrinsics.
    pub fix_extrinsics: Vec<bool>,
    /// Fix hand-eye transform (for testing with known hand-eye).
    pub fix_handeye: bool,
    /// View indices to fix (legacy per-view target mode only; rejected unless
    /// `relax_target_poses` is true).
    pub fix_target_poses: Vec<usize>,
    /// Legacy mode: relax per-view target poses instead of a fixed target.
    pub relax_target_poses: bool,
    /// Refine robot poses with per-view se(3) corrections and strong priors.
    ///
    /// When enabled, delta_0 is fixed to zero for gauge consistency.
    pub refine_robot_poses: bool,
    /// Robot rotation prior sigma (radians). Must be positive when
    /// `refine_robot_poses` is enabled.
    pub robot_rot_sigma: Real,
    /// Robot translation prior sigma (meters). Must be positive when
    /// `refine_robot_poses` is enabled.
    pub robot_trans_sigma: Real,
}
139
140impl Default for HandEyeSolveOptions {
141    fn default() -> Self {
142        Self {
143            robust_loss: RobustLoss::None,
144            default_fix: CameraFixMask::default(), // k3 fixed by default
145            camera_overrides: Vec::new(),
146            fix_extrinsics: Vec::new(),
147            fix_handeye: false,
148            fix_target_poses: Vec::new(),
149            relax_target_poses: false,
150            refine_robot_poses: false,
151            robot_rot_sigma: std::f64::consts::PI / 360.0, // 0.5 deg
152            robot_trans_sigma: 1.0e-3,                     // 1 mm
153        }
154    }
155}
156
/// Result of hand-eye calibration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandEyeEstimate {
    /// Refined hand-eye parameters.
    pub params: HandEyeParams,
    /// Backend solve report.
    pub report: SolveReport,
    /// Optional per-view robot pose deltas (se(3) tangent, `[rx, ry, rz, tx, ty, tz]`).
    ///
    /// `Some` only when `HandEyeSolveOptions::refine_robot_poses` was enabled.
    pub robot_deltas: Option<Vec<[Real; 6]>>,
    /// Mean reprojection error in pixels (computed post-optimization over all
    /// cameras and views).
    pub mean_reproj_error: f64,
    /// Per-camera reprojection errors in pixels (0.0 for a camera that
    /// contributed no projected points).
    pub per_cam_reproj_errors: Vec<f64>,
}
171
172/// Optimize hand-eye calibration using specified backend.
173///
174/// # Errors
175///
176/// Returns [`Error`] if IR construction or solver backend fails.
177pub fn optimize_handeye(
178    dataset: HandEyeDataset,
179    initial: HandEyeParams,
180    opts: HandEyeSolveOptions,
181    backend_opts: BackendSolveOptions,
182) -> Result<HandEyeEstimate, Error> {
183    let (ir, initial_map) = build_handeye_ir(&dataset, &initial, &opts)?;
184    let solution = solve_with_backend(BackendKind::TinySolver, &ir, &initial_map, &backend_opts)?;
185
186    // Extract per-camera calibrated parameters
187    let cameras = (0..dataset.data.num_cameras)
188        .map(|cam_idx| {
189            let intrinsics = unpack_intrinsics(
190                solution
191                    .params
192                    .get(&format!("cam/{}", cam_idx))
193                    .unwrap()
194                    .as_view(),
195            )?;
196            let distortion = unpack_distortion(
197                solution
198                    .params
199                    .get(&format!("dist/{}", cam_idx))
200                    .unwrap()
201                    .as_view(),
202            )?;
203            Ok(make_pinhole_camera(intrinsics, distortion))
204        })
205        .collect::<Result<Vec<_>, Error>>()?;
206
207    // Extract extrinsics
208    let cam_to_rig = (0..dataset.data.num_cameras)
209        .map(|i| {
210            let key = format!("extr/{}", i);
211            crate::params::pose_se3::se3_dvec_to_iso3(solution.params.get(&key).unwrap().as_view())
212        })
213        .collect::<Result<Vec<_>, Error>>()?;
214
215    // Extract hand-eye transform
216    let handeye = crate::params::pose_se3::se3_dvec_to_iso3(
217        solution.params.get("handeye").unwrap().as_view(),
218    )?;
219
220    // Extract target poses
221    let target_poses = if opts.relax_target_poses {
222        (0..dataset.num_views())
223            .map(|i| {
224                let key = format!("target/{}", i);
225                crate::params::pose_se3::se3_dvec_to_iso3(
226                    solution.params.get(&key).unwrap().as_view(),
227                )
228            })
229            .collect::<Result<Vec<_>, Error>>()?
230    } else {
231        let target_pose = crate::params::pose_se3::se3_dvec_to_iso3(
232            solution.params.get("target").unwrap().as_view(),
233        )?;
234        vec![target_pose; dataset.num_views()]
235    };
236
237    let robot_deltas = if opts.refine_robot_poses {
238        let mut deltas = Vec::with_capacity(dataset.num_views());
239        for i in 0..dataset.num_views() {
240            let key = format!("robot_delta/{}", i);
241            let delta_vec = solution.params.get(&key).unwrap();
242            deltas.push([
243                delta_vec[0],
244                delta_vec[1],
245                delta_vec[2],
246                delta_vec[3],
247                delta_vec[4],
248                delta_vec[5],
249            ]);
250        }
251        Some(deltas)
252    } else {
253        None
254    };
255
256    // Compute reprojection error
257    let params = HandEyeParams {
258        cameras,
259        cam_to_rig,
260        handeye,
261        target_poses,
262    };
263    let (mean_reproj_error, per_cam_reproj_errors) =
264        compute_handeye_reproj_error(&dataset, &params, robot_deltas.as_ref());
265
266    Ok(HandEyeEstimate {
267        params,
268        report: solution.solve_report,
269        robot_deltas,
270        mean_reproj_error,
271        per_cam_reproj_errors,
272    })
273}
274
275/// Compute per-camera reprojection error for hand-eye calibration result.
276///
277/// Returns (mean_error, per_camera_errors).
278fn compute_handeye_reproj_error(
279    dataset: &HandEyeDataset,
280    params: &HandEyeParams,
281    robot_deltas: Option<&Vec<[Real; 6]>>,
282) -> (f64, Vec<f64>) {
283    use crate::ir::HandEyeMode;
284    use nalgebra::{UnitQuaternion, Vector3};
285
286    let num_cameras = dataset.data.num_cameras;
287    let mut per_cam_sum = vec![0.0f64; num_cameras];
288    let mut per_cam_count = vec![0usize; num_cameras];
289
290    // Precompute cam_se3_rig transforms (inverse of cam_to_rig)
291    let cam_se3_rig: Vec<Iso3> = params.cam_to_rig.iter().map(|t| t.inverse()).collect();
292
293    // handeye_inv = handeye^-1
294    let handeye_inv = params.handeye.inverse();
295
296    // target_pose is assumed to be the first (and only) target pose for fixed-target mode
297    let target_pose = params
298        .target_poses
299        .first()
300        .cloned()
301        .unwrap_or(Iso3::identity());
302
303    for (view_idx, view) in dataset.data.views.iter().enumerate() {
304        let robot_pose = view.meta.base_se3_gripper;
305
306        // Apply robot delta if available
307        let robot_pose = if let Some(deltas) = robot_deltas {
308            let delta = &deltas[view_idx];
309            let rot_vec = Vector3::new(delta[0], delta[1], delta[2]);
310            let trans_vec = Vector3::new(delta[3], delta[4], delta[5]);
311            let angle = rot_vec.norm();
312            let delta_rot = if angle > 1e-12 {
313                UnitQuaternion::from_axis_angle(&nalgebra::Unit::new_normalize(rot_vec), angle)
314            } else {
315                UnitQuaternion::identity()
316            };
317            let delta_iso = Iso3::from_parts(nalgebra::Translation3::from(trans_vec), delta_rot);
318            delta_iso * robot_pose
319        } else {
320            robot_pose
321        };
322
323        for (cam_idx, cam_obs) in view.obs.cameras.iter().enumerate() {
324            let Some(obs) = cam_obs else {
325                continue;
326            };
327            let camera = &params.cameras[cam_idx];
328
329            for (pt3, pt2) in obs.points_3d.iter().zip(obs.points_2d.iter()) {
330                // Compute point in camera frame using hand-eye transform chain
331                let p_camera = match dataset.mode {
332                    HandEyeMode::EyeInHand => {
333                        // target -> base -> gripper -> rig -> camera
334                        let p_base = target_pose.transform_point(pt3);
335                        let p_gripper = robot_pose.inverse_transform_point(&p_base);
336                        let p_rig = handeye_inv.transform_point(&p_gripper);
337                        cam_se3_rig[cam_idx].transform_point(&p_rig)
338                    }
339                    HandEyeMode::EyeToHand => {
340                        // target -> gripper -> base -> rig -> camera
341                        let p_gripper = target_pose.transform_point(pt3);
342                        let p_base = robot_pose.transform_point(&p_gripper);
343                        let p_rig = params.handeye.transform_point(&p_base);
344                        cam_se3_rig[cam_idx].transform_point(&p_rig)
345                    }
346                };
347
348                // Project point
349                if let Some(proj) = camera.project_point_c(&p_camera.coords) {
350                    let err = (proj - *pt2).norm();
351                    per_cam_sum[cam_idx] += err;
352                    per_cam_count[cam_idx] += 1;
353                }
354            }
355        }
356    }
357
358    // Compute per-camera mean errors
359    let per_cam_reproj_errors: Vec<f64> = per_cam_sum
360        .iter()
361        .zip(per_cam_count.iter())
362        .map(|(&s, &c)| if c > 0 { s / c as f64 } else { 0.0 })
363        .collect();
364
365    // Compute overall mean
366    let total_sum: f64 = per_cam_sum.iter().sum();
367    let total_count: usize = per_cam_count.iter().sum();
368    let mean_reproj_error = if total_count > 0 {
369        total_sum / total_count as f64
370    } else {
371        0.0
372    };
373
374    (mean_reproj_error, per_cam_reproj_errors)
375}
376
/// Build IR for hand-eye calibration.
///
/// State vector:
/// - intrinsics/distortion, extrinsics, hand-eye transform
/// - target pose (fixed by default, per-view in legacy mode)
/// - optional per-view robot pose deltas (se(3) tangent)
///
/// Residuals:
/// - per-point reprojection error using robot pose measurements
/// - optional zero-mean priors on robot pose deltas
///
/// Returns the problem IR together with the map of initial parameter values
/// keyed by block name (`cam/i`, `dist/i`, `extr/i`, `handeye`, `target[/i]`,
/// `robot_delta/i`).
fn build_handeye_ir(
    dataset: &HandEyeDataset,
    initial: &HandEyeParams,
    opts: &HandEyeSolveOptions,
) -> AnyhowResult<(ProblemIR, HashMap<String, DVector<f64>>)> {
    // Up-front shape/option validation so the indexing below cannot go out of bounds.
    ensure!(
        initial.cameras.len() == dataset.data.num_cameras,
        "intrinsics count {} != num_cameras {}",
        initial.cameras.len(),
        dataset.data.num_cameras
    );
    ensure!(
        initial.cam_to_rig.len() == dataset.data.num_cameras,
        "cam_to_rig count {} != num_cameras {}",
        initial.cam_to_rig.len(),
        dataset.data.num_cameras
    );
    ensure!(
        !initial.target_poses.is_empty(),
        "target_poses must contain at least one pose"
    );
    if opts.relax_target_poses {
        // Legacy mode needs one target pose seed per view.
        ensure!(
            initial.target_poses.len() == dataset.num_views(),
            "target_poses count {} != num_views {}",
            initial.target_poses.len(),
            dataset.num_views()
        );
    }
    ensure!(
        opts.relax_target_poses || opts.fix_target_poses.is_empty(),
        "fix_target_poses is only supported when relax_target_poses is true"
    );
    if opts.refine_robot_poses {
        // Sigmas are inverted below to form sqrt-information weights.
        ensure!(
            opts.robot_rot_sigma > 0.0,
            "robot_rot_sigma must be positive"
        );
        ensure!(
            opts.robot_trans_sigma > 0.0,
            "robot_trans_sigma must be positive"
        );
    }

    let mut ir = ProblemIR::new();
    let mut initial_map = HashMap::new();

    // 1. Per-camera intrinsics blocks
    let mut cam_ids = Vec::new();
    for cam_idx in 0..dataset.data.num_cameras {
        // Per-camera override wins; otherwise fall back to the default mask.
        let cam_fix = opts
            .camera_overrides
            .get(cam_idx)
            .copied()
            .flatten()
            .unwrap_or(opts.default_fix);
        let fixed = cam_fix.intrinsics.to_indices();
        let fixed_mask = FixedMask::fix_indices(&fixed);

        let key = format!("cam/{}", cam_idx);
        let cam_id = ir.add_param_block(
            &key,
            INTRINSICS_DIM,
            ManifoldKind::Euclidean,
            fixed_mask,
            None,
        );
        cam_ids.push(cam_id);
        initial_map.insert(key, pack_intrinsics(&initial.cameras[cam_idx].k)?);
    }

    // 2. Per-camera distortion blocks
    let mut dist_ids = Vec::new();
    for cam_idx in 0..dataset.data.num_cameras {
        let cam_fix = opts
            .camera_overrides
            .get(cam_idx)
            .copied()
            .flatten()
            .unwrap_or(opts.default_fix);
        let fixed = cam_fix.distortion.to_indices();
        let fixed_mask = FixedMask::fix_indices(&fixed);

        let key = format!("dist/{}", cam_idx);
        let dist_id = ir.add_param_block(
            &key,
            DISTORTION_DIM,
            ManifoldKind::Euclidean,
            fixed_mask,
            None,
        );
        dist_ids.push(dist_id);
        initial_map.insert(key, pack_distortion(&initial.cameras[cam_idx].dist));
    }

    // 3. Per-camera extrinsics (SE3 blocks; missing fix_extrinsics entries = free)
    let mut extr_ids = Vec::new();
    for cam_idx in 0..dataset.data.num_cameras {
        let fixed = if opts.fix_extrinsics.get(cam_idx).copied().unwrap_or(false) {
            FixedMask::all_fixed(7)
        } else {
            FixedMask::all_free()
        };
        let key = format!("extr/{}", cam_idx);
        let id = ir.add_param_block(&key, 7, ManifoldKind::SE3, fixed, None);
        extr_ids.push(id);
        initial_map.insert(key, iso3_to_se3_dvec(&initial.cam_to_rig[cam_idx]));
    }

    // 4. Hand-eye transform
    let handeye_fixed = if opts.fix_handeye {
        FixedMask::all_fixed(7)
    } else {
        FixedMask::all_free()
    };
    let handeye_id = ir.add_param_block("handeye", 7, ManifoldKind::SE3, handeye_fixed, None);
    initial_map.insert("handeye".to_string(), iso3_to_se3_dvec(&initial.handeye));

    // 5. Target pose (fixed by default) + residuals
    // In legacy mode the per-view target blocks are created inside the view loop.
    let target_id = if opts.relax_target_poses {
        None
    } else {
        let target_seed = initial.target_poses[0];
        let id = ir.add_param_block("target", 7, ManifoldKind::SE3, FixedMask::all_free(), None);
        initial_map.insert("target".to_string(), iso3_to_se3_dvec(&target_seed));
        Some(id)
    };

    // sqrt-information weights for the zero-mean delta priors
    // (all-zero placeholder when refinement is disabled; never used then).
    let robot_prior_sqrt_info = if opts.refine_robot_poses {
        let rot = 1.0 / opts.robot_rot_sigma;
        let trans = 1.0 / opts.robot_trans_sigma;
        [rot, rot, rot, trans, trans, trans]
    } else {
        [0.0; 6]
    };

    for (view_idx, view) in dataset.data.views.iter().enumerate() {
        let target_id = if opts.relax_target_poses {
            let fixed = if opts.fix_target_poses.contains(&view_idx) {
                FixedMask::all_fixed(7)
            } else {
                FixedMask::all_free()
            };
            let key = format!("target/{}", view_idx);
            let target_id = ir.add_param_block(&key, 7, ManifoldKind::SE3, fixed, None);
            initial_map.insert(key, iso3_to_se3_dvec(&initial.target_poses[view_idx]));
            target_id
        } else {
            target_id.expect("target id should be set for fixed-target mode")
        };

        let robot_delta_id = if opts.refine_robot_poses {
            // delta_0 is clamped to zero to remove the gauge freedom.
            let fixed = if view_idx == 0 {
                FixedMask::all_fixed(6)
            } else {
                FixedMask::all_free()
            };
            let key = format!("robot_delta/{}", view_idx);
            let id = ir.add_param_block(&key, 6, ManifoldKind::Euclidean, fixed, None);
            initial_map.insert(key, DVector::from_element(6, 0.0));
            // Zero-mean prior keeping each correction near the measured pose.
            let prior = ResidualBlock {
                params: vec![id],
                loss: RobustLoss::None,
                factor: FactorKind::Se3TangentPrior {
                    sqrt_info: robot_prior_sqrt_info,
                },
                residual_dim: 6,
            };
            ir.add_residual_block(prior);
            Some(id)
        } else {
            None
        };

        // Convert robot pose to SE3 array for factor
        let robot_se3 = iso3_to_se3_dvec(&view.meta.base_se3_gripper);
        let robot_se3_array: [f64; 7] = [
            robot_se3[0],
            robot_se3[1],
            robot_se3[2],
            robot_se3[3],
            robot_se3[4],
            robot_se3[5],
            robot_se3[6],
        ];

        // Add residuals for each camera observation (one 2-D block per point;
        // the factor variant depends on whether robot deltas participate).
        for (cam_idx, cam_obs) in view.obs.cameras.iter().enumerate() {
            if let Some(obs) = cam_obs {
                for (pt_idx, (pw, uv)) in obs.points_3d.iter().zip(&obs.points_2d).enumerate() {
                    let residual = if let Some(robot_delta_id) = robot_delta_id {
                        ResidualBlock {
                            params: vec![
                                cam_ids[cam_idx],
                                dist_ids[cam_idx],
                                extr_ids[cam_idx],
                                handeye_id,
                                target_id,
                                robot_delta_id,
                            ],
                            loss: opts.robust_loss,
                            factor: FactorKind::ReprojPointPinhole4Dist5HandEyeRobotDelta {
                                pw: [pw.x, pw.y, pw.z],
                                uv: [uv.x, uv.y],
                                w: obs.weight(pt_idx),
                                base_to_gripper_se3: robot_se3_array,
                                mode: dataset.mode,
                            },
                            residual_dim: 2,
                        }
                    } else {
                        ResidualBlock {
                            params: vec![
                                cam_ids[cam_idx],
                                dist_ids[cam_idx],
                                extr_ids[cam_idx],
                                handeye_id,
                                target_id,
                            ],
                            loss: opts.robust_loss,
                            factor: FactorKind::ReprojPointPinhole4Dist5HandEye {
                                pw: [pw.x, pw.y, pw.z],
                                uv: [uv.x, uv.y],
                                w: obs.weight(pt_idx),
                                base_to_gripper_se3: robot_se3_array,
                                mode: dataset.mode,
                            },
                            residual_dim: 2,
                        }
                    };
                    ir.add_residual_block(residual);
                }
            }
        }
    }

    ir.validate()?;
    Ok((ir, initial_map))
}
626
627#[cfg(test)]
628mod tests {
629    use super::*;
630    use nalgebra::{Translation3, UnitQuaternion};
631    use vision_calibration_core::{
632        BrownConrady5, CorrespondenceView, FxFyCxCySkew, Pt2, Pt3, RigView, RigViewObs,
633        make_pinhole_camera,
634    };
635
636    fn make_test_camera() -> PinholeCamera {
637        make_pinhole_camera(
638            FxFyCxCySkew {
639                fx: 600.0,
640                fy: 590.0,
641                cx: 320.0,
642                cy: 240.0,
643                skew: 0.0,
644            },
645            BrownConrady5::default(),
646        )
647    }
648
649    fn project_view(
650        camera: &PinholeCamera,
651        cam_se3_target: &Iso3,
652        board_pts: &[Pt3],
653    ) -> CorrespondenceView {
654        let pixels: Vec<Pt2> = board_pts
655            .iter()
656            .map(|p| {
657                let p_cam = cam_se3_target.transform_point(p);
658                camera
659                    .project_point_c(&p_cam.coords)
660                    .expect("point should be in front of camera")
661            })
662            .collect();
663
664        CorrespondenceView::new(board_pts.to_vec(), pixels).unwrap()
665    }
666
    /// Sanity check: observations synthesized with the exact eye-in-hand
    /// transform chain must reproject with ~zero error when evaluated with
    /// the ground-truth parameters.
    #[test]
    fn compute_reproj_error_matches_ground_truth_chain() {
        let camera = make_test_camera();
        let handeye = Iso3::identity(); // gripper and camera frames coincide
        // Target sits 1 m in front of the base along +Z.
        let target_in_base =
            Iso3::from_parts(Translation3::new(0.0, 0.0, 1.0), UnitQuaternion::identity());

        // Simple square target
        let board_pts = vec![
            Pt3::new(0.0, 0.0, 0.0),
            Pt3::new(0.1, 0.0, 0.0),
            Pt3::new(0.1, 0.1, 0.0),
            Pt3::new(0.0, 0.1, 0.0),
        ];

        // Two views: identity, then a 5 cm lateral translation of the gripper.
        let robot_poses = [
            Iso3::identity(),
            Iso3::from_parts(
                Translation3::new(0.05, 0.0, 0.0),
                UnitQuaternion::identity(),
            ),
        ];

        // Synthesize pixel observations by running the forward chain.
        let views: Vec<RigView<RobotPoseMeta>> = robot_poses
            .iter()
            .map(|robot_pose| {
                // Camera pose: T_C_T = (T_B_G * T_G_C)^-1 * T_B_T
                let cam_se3_target = (robot_pose * handeye).inverse() * target_in_base;
                let obs = project_view(&camera, &cam_se3_target, &board_pts);

                RigView {
                    meta: RobotPoseMeta {
                        base_se3_gripper: *robot_pose,
                    },
                    obs: RigViewObs {
                        cameras: vec![Some(obs)],
                    },
                }
            })
            .collect();

        let dataset = HandEyeDataset::new(views, 1, HandEyeMode::EyeInHand).unwrap();
        // Ground-truth parameters (single-camera rig at the rig origin).
        let params = HandEyeParams {
            cameras: vec![camera],
            cam_to_rig: vec![Iso3::identity()],
            handeye,
            target_poses: vec![target_in_base],
        };

        // With exact parameters the error should be numerically zero.
        let (mean, per_cam) = compute_handeye_reproj_error(&dataset, &params, None);
        assert!(mean < 1e-9, "mean reproj err too large: {}", mean);
        assert!(
            per_cam[0] < 1e-9,
            "per-cam reproj err too large: {}",
            per_cam[0]
        );
    }
724}