use crate::{
Error,
distortion_fit::{
DistortionFitOptions, DistortionView, MetaHomography, estimate_distortion_from_homographies,
},
homography::HomographySolver,
zhang_intrinsics::PlanarIntrinsicsLinearInit,
};
use serde::{Deserialize, Serialize};
use vision_calibration_core::{
BrownConrady5, DistortionModel, FxFyCxCySkew, Mat3, PinholeCamera, PlanarDataset, Pt2, Real,
Vec3, make_pinhole_camera,
};
/// Options controlling the alternating intrinsics / distortion refinement
/// performed by `estimate_intrinsics_iterative`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct IterativeIntrinsicsOptions {
    /// Number of outer refinement passes. With 0 passes only the linear
    /// homography-based initialization is returned (distortion stays zero).
    pub iterations: usize,
    /// Options forwarded to the distortion-estimation step; its `iters`
    /// field also seeds the undistortion iteration count of the lens model.
    pub distortion_opts: DistortionFitOptions,
    /// If true, the skew term of the intrinsics is clamped to zero after
    /// every linear solve.
    pub zero_skew: bool,
}
impl Default for IterativeIntrinsicsOptions {
fn default() -> Self {
Self {
iterations: 2, distortion_opts: DistortionFitOptions::default(),
zero_skew: true,
}
}
}
pub fn estimate_intrinsics_iterative(
dataset: &PlanarDataset,
opts: IterativeIntrinsicsOptions,
) -> Result<PinholeCamera, Error> {
if dataset.views.len() < 3 {
return Err(Error::InsufficientData {
need: 3,
got: dataset.views.len(),
});
}
let target_points_2d: Vec<Vec<Pt2>> = dataset
.views
.iter()
.map(|v| v.obs.planar_points())
.collect();
let homographies_iter0: Vec<Mat3> = dataset
.views
.iter()
.zip(&target_points_2d)
.map(|(v, target)| HomographySolver::dlt(target, &v.obs.points_2d))
.collect::<Result<Vec<_>, Error>>()?;
let mut current_intrinsics =
PlanarIntrinsicsLinearInit::from_homographies(&homographies_iter0)?;
enforce_zero_skew(&mut current_intrinsics, opts.zero_skew);
let mut current_distortion = BrownConrady5 {
k1: 0.0,
k2: 0.0,
k3: 0.0,
p1: 0.0,
p2: 0.0,
iters: opts.distortion_opts.iters,
};
for _iter in 0..opts.iterations {
let k_mtx = current_intrinsics.k_matrix();
let k_inv = k_mtx.try_inverse().ok_or(Error::Singular)?;
let homographies_for_distortion: Vec<Mat3> = dataset
.views
.iter()
.zip(&target_points_2d)
.map(|(v, target)| {
let undistorted_pixels = undistort_pixels_to_pixels(
&v.obs.points_2d,
&k_mtx,
&k_inv,
¤t_distortion,
);
HomographySolver::dlt(target, &undistorted_pixels)
})
.collect::<Result<Vec<_>, Error>>()?;
let dist_views: Vec<DistortionView> = dataset
.views
.iter()
.zip(&homographies_for_distortion)
.map(|(v, h)| DistortionView::new(v.obs.clone(), MetaHomography { homography: *h }))
.collect();
current_distortion =
estimate_distortion_from_homographies(&k_mtx, &dist_views, opts.distortion_opts)?;
let undistorted_homographies: Vec<Mat3> = dataset
.views
.iter()
.zip(&target_points_2d)
.map(|(v, target)| {
let undistorted_pixels = undistort_pixels_to_pixels(
&v.obs.points_2d,
&k_mtx,
&k_inv,
¤t_distortion,
);
HomographySolver::dlt(target, &undistorted_pixels)
})
.collect::<Result<Vec<_>, Error>>()?;
current_intrinsics =
PlanarIntrinsicsLinearInit::from_homographies(&undistorted_homographies)?;
enforce_zero_skew(&mut current_intrinsics, opts.zero_skew);
}
Ok(make_pinhole_camera(current_intrinsics, current_distortion))
}
/// Clamps the skew term of `intrinsics` to exactly zero when `zero_skew`
/// is set; leaves the intrinsics untouched otherwise.
fn enforce_zero_skew(intrinsics: &mut FxFyCxCySkew<Real>, zero_skew: bool) {
    if !zero_skew {
        return;
    }
    intrinsics.skew = 0.0;
}
/// Removes lens distortion from pixel coordinates, returning corrected
/// pixel coordinates.
///
/// Each pixel is mapped through `k_inv` into normalized image coordinates,
/// the inverse distortion model is applied there, and the result is mapped
/// back through `k_mtx`.
fn undistort_pixels_to_pixels(
    pixels: &[Pt2],
    k_mtx: &Mat3,
    k_inv: &Mat3,
    distortion: &BrownConrady5<Real>,
) -> Vec<Pt2> {
    let mut corrected = Vec::with_capacity(pixels.len());
    for pixel in pixels {
        // Pixel -> normalized coordinates (homogeneous divide).
        let norm_h = k_inv * Vec3::new(pixel.x, pixel.y, 1.0);
        let normalized = Pt2::new(norm_h.x / norm_h.z, norm_h.y / norm_h.z);
        // Invert the lens model in normalized space.
        let undistorted = distortion.undistort(&normalized);
        // Normalized -> pixel coordinates.
        let pix_h = k_mtx * Vec3::new(undistorted.x, undistorted.y, 1.0);
        corrected.push(Pt2::new(pix_h.x / pix_h.z, pix_h.y / pix_h.z));
    }
    corrected
}
#[cfg(test)]
mod tests {
    use super::*;
    use nalgebra::{Translation3, UnitQuaternion};
    use vision_calibration_core::synthetic::planar::{grid_points, project_views_all};
    use vision_calibration_core::{Iso3, View};

    /// Ground-truth intrinsics and Brown-Conrady distortion shared by all
    /// tests: a 1280x720-centered camera with mild barrel distortion.
    fn make_ground_truth() -> (FxFyCxCySkew<Real>, BrownConrady5<Real>) {
        let intr = FxFyCxCySkew {
            fx: 800.0,
            fy: 800.0,
            cx: 640.0,
            cy: 360.0,
            skew: 0.0,
        };
        let dist = BrownConrady5 {
            k1: -0.2,
            k2: 0.05,
            k3: 0.0,
            p1: 0.001,
            p2: -0.001,
            iters: 8,
        };
        (intr, dist)
    }

    /// Builds a synthetic planar dataset: a 7x7 grid (3 cm pitch) observed
    /// from up to five hand-picked poses (rx, ry, rz, tx, ty, tz).
    fn make_dataset(camera: &PinholeCamera, n_views: usize) -> PlanarDataset {
        let board = grid_points(7, 7, 0.03);
        // Varied rotations/translations so the homographies are not degenerate.
        let pose_params: &[(Real, Real, Real, Real, Real, Real)] = &[
            (0.10, 0.00, 0.05, 0.10, -0.05, 1.00),
            (-0.05, 0.15, -0.10, -0.05, 0.10, 1.20),
            (0.20, -0.10, 0.00, 0.00, 0.00, 0.90),
            (0.00, 0.20, 0.10, 0.08, 0.08, 1.10),
            (-0.10, 0.10, -0.05, -0.08, -0.08, 1.05),
        ];
        let poses: Vec<Iso3> = pose_params
            .iter()
            .take(n_views)
            .map(|&(rx, ry, rz, tx, ty, tz)| {
                Iso3::from_parts(
                    Translation3::new(tx, ty, tz),
                    UnitQuaternion::from_euler_angles(rx, ry, rz),
                )
            })
            .collect();
        let views = project_views_all(camera, &board, &poses).unwrap();
        let views = views.into_iter().map(View::without_meta).collect();
        PlanarDataset::new(views).unwrap()
    }

    /// The iterative estimate should land within loose tolerances of the
    /// ground truth and recover the sign of k1. Tolerances are generous:
    /// this checks convergence of the linear alternation, not final
    /// bundle-adjusted accuracy.
    #[test]
    fn iterative_refinement_converges() {
        let (intr_gt, dist_gt) = make_ground_truth();
        let camera_gt = make_pinhole_camera(intr_gt, dist_gt);
        let dataset = make_dataset(&camera_gt, 4);
        let opts = IterativeIntrinsicsOptions {
            iterations: 2,
            distortion_opts: DistortionFitOptions {
                fix_k3: true,
                fix_tangential: false,
                iters: 8,
            },
            zero_skew: true,
        };
        let result = estimate_intrinsics_iterative(&dataset, opts).unwrap();
        let fx_err_pct = (result.k.fx - intr_gt.fx).abs() / intr_gt.fx * 100.0;
        let fy_err_pct = (result.k.fy - intr_gt.fy).abs() / intr_gt.fy * 100.0;
        let cx_err = (result.k.cx - intr_gt.cx).abs();
        let cy_err = (result.k.cy - intr_gt.cy).abs();
        assert!(fx_err_pct < 40.0, "fx error {:.1}% too large", fx_err_pct);
        assert!(fy_err_pct < 40.0, "fy error {:.1}% too large", fy_err_pct);
        assert!(cx_err < 80.0, "cx error {:.1}px too large", cx_err);
        assert!(cy_err < 150.0, "cy error {:.1}px too large", cy_err);
        assert!(
            result.dist.k1.signum() == dist_gt.k1.signum(),
            "k1 sign mismatch"
        );
    }

    /// Running refinement passes must reduce the plane-homography residual
    /// relative to the linear initialization alone (iterations = 0).
    #[test]
    fn iteration_improves_estimates() {
        let (intr_gt, dist_gt) = make_ground_truth();
        let camera_gt = make_pinhole_camera(intr_gt, dist_gt);
        let dataset = make_dataset(&camera_gt, 4);
        let opts0 = IterativeIntrinsicsOptions {
            iterations: 0,
            distortion_opts: DistortionFitOptions {
                fix_k3: true,
                fix_tangential: false,
                iters: 8,
            },
            zero_skew: true,
        };
        let cam0 = estimate_intrinsics_iterative(&dataset, opts0).unwrap();
        let opts2 = IterativeIntrinsicsOptions {
            iterations: 2,
            ..opts0
        };
        let cam2 = estimate_intrinsics_iterative(&dataset, opts2).unwrap();
        let rms0 = plane_homography_rms(&cam0, &dataset);
        let rms2 = plane_homography_rms(&cam2, &dataset);
        assert!(
            rms2 < rms0,
            "expected lower plane residual after iterations: rms0={rms0:.4}, rms2={rms2:.4}"
        );
    }

    /// RMS reprojection residual (pixels) of per-view board->pixel DLT
    /// homographies fitted to pixels undistorted with `camera`'s model —
    /// a proxy for how planar the undistorted observations are.
    fn plane_homography_rms(camera: &PinholeCamera, dataset: &PlanarDataset) -> Real {
        let k_mtx = camera.k.k_matrix();
        let k_inv = k_mtx.try_inverse().expect("K invertible for tests");
        let mut total_sq = 0.0;
        let mut total_n = 0usize;
        for view in &dataset.views {
            let world = view.obs.planar_points();
            let undistorted_pixels =
                undistort_pixels_to_pixels(&view.obs.points_2d, &k_mtx, &k_inv, &camera.dist);
            let h = HomographySolver::dlt(&world, &undistorted_pixels)
                .expect("homography should solve on synthetic data");
            for (pw, pu) in world.iter().zip(undistorted_pixels.iter()) {
                // Project the board point through H and compare against the
                // undistorted observation.
                let proj_h = h * Vec3::new(pw.x, pw.y, 1.0);
                let proj = Pt2::new(proj_h.x / proj_h.z, proj_h.y / proj_h.z);
                let dx = proj.x - pu.x;
                let dy = proj.y - pu.y;
                total_sq += dx * dx + dy * dy;
                total_n += 1;
            }
        }
        (total_sq / total_n as Real).sqrt()
    }
}