#![allow(clippy::many_single_char_names, clippy::similar_names)]
use crate::batch::{DetectionBatch, Pose6D};
use crate::config::PoseEstimationMode;
use crate::image::ImageView;
use nalgebra::{Matrix2, Matrix3, Matrix6, Rotation3, UnitQuaternion, Vector3, Vector6};
/// Lens distortion attached to a set of [`CameraIntrinsics`].
///
/// `None` models an ideal rectified pinhole camera; the parametric models
/// are only compiled in with the `non_rectified` feature.
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DistortionCoeffs {
/// No distortion: pixel coordinates are used as-is.
None,
/// Brown-Conrady model: radial `k1`, `k2`, `k3` plus tangential `p1`, `p2`.
#[cfg(feature = "non_rectified")]
BrownConrady {
k1: f64,
k2: f64,
p1: f64,
p2: f64,
k3: f64,
},
/// Kannala-Brandt (fisheye-style) model with four coefficients.
#[cfg(feature = "non_rectified")]
KannalaBrandt {
k1: f64,
k2: f64,
k3: f64,
k4: f64,
},
}
impl DistortionCoeffs {
    /// Returns `true` when a distortion model is active, i.e. the
    /// coefficients are anything other than [`DistortionCoeffs::None`].
    #[must_use]
    #[inline]
    pub const fn is_distorted(&self) -> bool {
        !matches!(*self, Self::None)
    }
}
/// Pinhole camera intrinsics (all values in pixels) plus an optional lens
/// distortion model.
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CameraIntrinsics {
/// Focal length along x, in pixels.
pub fx: f64,
/// Focal length along y, in pixels.
pub fy: f64,
/// Principal point x coordinate, in pixels.
pub cx: f64,
/// Principal point y coordinate, in pixels.
pub cy: f64,
/// Active lens distortion model ([`DistortionCoeffs::None`] for a
/// rectified camera).
pub distortion: DistortionCoeffs,
}
impl CameraIntrinsics {
/// Creates rectified (distortion-free) pinhole intrinsics.
///
/// `fx`/`fy` are the focal lengths and `cx`/`cy` the principal point,
/// all in pixels.
#[must_use]
pub fn new(fx: f64, fy: f64, cx: f64, cy: f64) -> Self {
Self {
fx,
fy,
cx,
cy,
distortion: DistortionCoeffs::None,
}
}
/// Creates intrinsics with a Brown-Conrady distortion model
/// (radial `k1`, `k2`, `k3`; tangential `p1`, `p2`).
#[cfg(feature = "non_rectified")]
#[must_use]
#[allow(clippy::too_many_arguments)]
pub fn with_brown_conrady(
fx: f64,
fy: f64,
cx: f64,
cy: f64,
k1: f64,
k2: f64,
p1: f64,
p2: f64,
k3: f64,
) -> Self {
Self {
fx,
fy,
cx,
cy,
distortion: DistortionCoeffs::BrownConrady { k1, k2, p1, p2, k3 },
}
}
/// Creates intrinsics with a Kannala-Brandt distortion model.
#[cfg(feature = "non_rectified")]
#[must_use]
#[allow(clippy::too_many_arguments)]
pub fn with_kannala_brandt(
fx: f64,
fy: f64,
cx: f64,
cy: f64,
k1: f64,
k2: f64,
k3: f64,
k4: f64,
) -> Self {
Self {
fx,
fy,
cx,
cy,
distortion: DistortionCoeffs::KannalaBrandt { k1, k2, k3, k4 },
}
}
/// Returns the 3x3 camera matrix `K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]`.
#[must_use]
pub fn as_matrix(&self) -> Matrix3<f64> {
Matrix3::new(self.fx, 0.0, self.cx, 0.0, self.fy, self.cy, 0.0, 0.0, 1.0)
}
/// Returns the closed-form inverse `K^-1` (valid because `K` is
/// upper-triangular with a unit bottom-right entry).
#[must_use]
pub fn inv_matrix(&self) -> Matrix3<f64> {
Matrix3::new(
1.0 / self.fx,
0.0,
-self.cx / self.fx,
0.0,
1.0 / self.fy,
-self.cy / self.fy,
0.0,
0.0,
1.0,
)
}
/// Maps a distorted pixel to its ideal (undistorted) pixel position.
///
/// Identity when no distortion model is active; otherwise the pixel is
/// normalized with the pinhole parameters, undistorted by the active
/// model, and mapped back through the same pinhole parameters.
#[must_use]
pub fn undistort_pixel(&self, px: f64, py: f64) -> [f64; 2] {
match self.distortion {
DistortionCoeffs::None => [px, py],
#[cfg(feature = "non_rectified")]
DistortionCoeffs::BrownConrady { k1, k2, p1, p2, k3 } => {
let m = crate::camera::BrownConradyModel { k1, k2, p1, p2, k3 };
// Pixel -> normalized coordinates.
let xn = (px - self.cx) / self.fx;
let yn = (py - self.cy) / self.fy;
let [xu, yu] = crate::camera::CameraModel::undistort(&m, xn, yn);
// Normalized -> pixel coordinates.
[xu * self.fx + self.cx, yu * self.fy + self.cy]
},
#[cfg(feature = "non_rectified")]
DistortionCoeffs::KannalaBrandt { k1, k2, k3, k4 } => {
let m = crate::camera::KannalaBrandtModel { k1, k2, k3, k4 };
let xn = (px - self.cx) / self.fx;
let yn = (py - self.cy) / self.fy;
let [xu, yu] = crate::camera::CameraModel::undistort(&m, xn, yn);
[xu * self.fx + self.cx, yu * self.fy + self.cy]
},
}
}
/// Applies the active distortion model to normalized image coordinates
/// `(xn, yn)` and converts the result to pixel coordinates.
#[must_use]
pub fn distort_normalized(&self, xn: f64, yn: f64) -> [f64; 2] {
let [xd, yd] = match self.distortion {
DistortionCoeffs::None => [xn, yn],
#[cfg(feature = "non_rectified")]
DistortionCoeffs::BrownConrady { k1, k2, p1, p2, k3 } => {
let m = crate::camera::BrownConradyModel { k1, k2, p1, p2, k3 };
crate::camera::CameraModel::distort(&m, xn, yn)
},
#[cfg(feature = "non_rectified")]
DistortionCoeffs::KannalaBrandt { k1, k2, k3, k4 } => {
let m = crate::camera::KannalaBrandtModel { k1, k2, k3, k4 };
crate::camera::CameraModel::distort(&m, xn, yn)
},
};
[xd * self.fx + self.cx, yd * self.fy + self.cy]
}
/// 2x2 Jacobian `d(xd, yd)/d(xn, yn)` of the distortion mapping at the
/// given normalized coordinates (identity when no distortion is active).
/// Note: this is in normalized, not pixel, units — callers multiply by
/// `fx`/`fy` themselves.
#[must_use]
#[cfg_attr(not(feature = "non_rectified"), allow(unused_variables))]
pub(crate) fn distortion_jacobian(&self, xn: f64, yn: f64) -> [[f64; 2]; 2] {
match self.distortion {
DistortionCoeffs::None => [[1.0, 0.0], [0.0, 1.0]],
#[cfg(feature = "non_rectified")]
DistortionCoeffs::BrownConrady { k1, k2, p1, p2, k3 } => {
let m = crate::camera::BrownConradyModel { k1, k2, p1, p2, k3 };
crate::camera::CameraModel::distort_jacobian(&m, xn, yn)
},
#[cfg(feature = "non_rectified")]
DistortionCoeffs::KannalaBrandt { k1, k2, k3, k4 } => {
let m = crate::camera::KannalaBrandtModel { k1, k2, k3, k4 };
crate::camera::CameraModel::distort_jacobian(&m, xn, yn)
},
}
}
}
/// Rigid 6-DoF tag pose: maps tag-frame points into the camera frame via
/// `p_cam = rotation * p + translation` (see [`Pose::project`]).
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Pose {
/// Rotation matrix (tag frame -> camera frame).
pub rotation: Matrix3<f64>,
/// Translation (tag origin expressed in the camera frame), in the same
/// units as the tag size.
pub translation: Vector3<f64>,
}
impl Pose {
    /// Builds a pose from a rotation matrix and a translation vector.
    #[must_use]
    pub fn new(rotation: Matrix3<f64>, translation: Vector3<f64>) -> Self {
        Self { rotation, translation }
    }

    /// Transforms `point` into the camera frame and projects it with an
    /// ideal pinhole model (distortion is intentionally not applied here).
    #[must_use]
    pub fn project(&self, point: &Vector3<f64>, intrinsics: &CameraIntrinsics) -> [f64; 2] {
        let cam = self.rotation * point + self.translation;
        // Clamp the depth away from zero so points at or behind the camera
        // do not produce a division by zero.
        let depth = cam.z.max(1e-4);
        [
            (cam.x / depth) * intrinsics.fx + intrinsics.cx,
            (cam.y / depth) * intrinsics.fy + intrinsics.cy,
        ]
    }
}
/// Mirrors the upper triangle of `jtj` into its lower triangle so the
/// 6x6 normal matrix is exactly symmetric after accumulation rounding.
#[inline]
pub(crate) fn symmetrize_jtj6(jtj: &mut Matrix6<f64>) {
    for c in 0..6 {
        for r in (c + 1)..6 {
            jtj[(r, c)] = jtj[(c, r)];
        }
    }
}
/// Non-zero entries of the pinhole projection Jacobian with respect to a
/// 6-DoF pose perturbation (translation `t`, then rotation twist `w` with
/// `p' ≈ p + w × p`).
///
/// Inputs are the normalized coordinates `x_z = X/Z`, `y_z = Y/Z` and the
/// inverse depth `z_inv = 1/Z`. The returned tuple is, in order:
/// `(du/dtx, du/dtz, du/dwx, du/dwy, du/dwz,
///   dv/dty, dv/dtz, dv/dwx, dv/dwy, dv/dwz)`.
#[inline]
pub(crate) fn projection_jacobian(
    x_z: f64,
    y_z: f64,
    z_inv: f64,
    intrinsics: &CameraIntrinsics,
) -> (f64, f64, f64, f64, f64, f64, f64, f64, f64, f64) {
    let (fx, fy) = (intrinsics.fx, intrinsics.fy);
    let du_dtx = fx * z_inv;
    let du_dtz = -fx * x_z * z_inv;
    let du_dwx = -fx * x_z * y_z;
    let du_dwy = fx * (x_z * x_z + 1.0);
    let du_dwz = -fx * y_z;
    let dv_dty = fy * z_inv;
    let dv_dtz = -fy * y_z * z_inv;
    let dv_dwx = -fy * (y_z * y_z + 1.0);
    let dv_dwy = fy * y_z * x_z;
    let dv_dwz = fy * x_z;
    (
        du_dtx, du_dtz, du_dwx, du_dwy, du_dwz, dv_dty, dv_dtz, dv_dwx, dv_dwy, dv_dwz,
    )
}
/// Estimates a tag pose from its four pixel corners using the default
/// detector configuration and no external corner covariances.
///
/// See [`estimate_tag_pose_with_config`] for the full-control variant.
#[must_use]
#[allow(clippy::missing_panics_doc)]
#[tracing::instrument(skip_all, name = "pipeline::estimate_tag_pose")]
pub fn estimate_tag_pose(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    tag_size: f64,
    img: Option<&ImageView>,
    mode: PoseEstimationMode,
) -> (Option<Pose>, Option<[[f64; 6]; 6]>) {
    let config = crate::config::DetectorConfig::default();
    estimate_tag_pose_with_config(intrinsics, corners, tag_size, img, mode, &config, None)
}
/// Estimates the 6-DoF pose of a square tag from its four detected pixel
/// corners, with explicit configuration and optional per-corner covariances.
///
/// Pipeline: undistort the corners if a distortion model is active, fit a
/// square-to-quad homography, decompose it with the IPPE square solver into
/// two candidate poses, keep the candidate with the lower reprojection
/// error, then refine it:
/// - `Accurate` + external covariances: weighted LM using the supplied 2x2
///   corner covariances; also returns a 6x6 pose covariance.
/// - `Accurate` + image, no external covariances: per-corner uncertainty is
///   derived from the image first, then weighted LM as above.
/// - any other combination: robust Huber-weighted LM, no covariance output.
///
/// Returns `(None, None)` when the homography fit or the IPPE decomposition
/// is degenerate.
#[must_use]
#[allow(clippy::missing_panics_doc)]
#[tracing::instrument(skip_all, name = "pipeline::estimate_tag_pose")]
pub fn estimate_tag_pose_with_config(
intrinsics: &CameraIntrinsics,
corners: &[[f64; 2]; 4],
tag_size: f64,
img: Option<&ImageView>,
mode: PoseEstimationMode,
config: &crate::config::DetectorConfig,
external_covariances: Option<&[Matrix2<f64>; 4]>,
) -> (Option<Pose>, Option<[[f64; 6]; 6]>) {
// Homography/IPPE operate on ideal pinhole corners; the LM refiners below
// model distortion themselves and therefore take the raw corners.
let ideal_corners: [[f64; 2]; 4] = if intrinsics.distortion == DistortionCoeffs::None {
*corners
} else {
core::array::from_fn(|i| intrinsics.undistort_pixel(corners[i][0], corners[i][1]))
};
let Some(h_poly) = crate::decoder::Homography::square_to_quad(&ideal_corners) else {
return (None, None);
};
let h_pixel = h_poly.h;
// Strip the intrinsics (K^-1 * H) and rescale the first two columns so the
// homography maps metric tag coordinates (corners at +-tag_size/2) instead
// of the unit square.
let k_inv = intrinsics.inv_matrix();
let h_norm = k_inv * h_pixel;
let scaler = 2.0 / tag_size;
let mut h_metric = h_norm;
h_metric.column_mut(0).scale_mut(scaler);
h_metric.column_mut(1).scale_mut(scaler);
let Some(candidates) = solve_ippe_square(&h_metric) else {
return (None, None);
};
// IPPE yields two plausible poses; disambiguate by reprojection error.
let best_pose = find_best_pose(intrinsics, &ideal_corners, tag_size, &candidates);
match (mode, img, external_covariances) {
(PoseEstimationMode::Accurate, _, Some(ext_covs)) => {
let (refined_pose, covariance) = crate::pose_weighted::refine_pose_lm_weighted(
intrinsics, corners, tag_size, best_pose, ext_covs,
);
(Some(refined_pose), Some(covariance))
},
(PoseEstimationMode::Accurate, Some(image), None) => {
// No caller-provided covariances: estimate per-corner uncertainty
// from the image content around each (undistorted) corner.
let uncertainty = crate::pose_weighted::compute_framework_uncertainty(
image,
&ideal_corners,
&h_poly,
config.tikhonov_alpha_max,
config.sigma_n_sq,
config.structure_tensor_radius,
);
let (refined_pose, covariance) = crate::pose_weighted::refine_pose_lm_weighted(
intrinsics,
corners,
tag_size,
best_pose,
&uncertainty,
);
(Some(refined_pose), Some(covariance))
},
_ => {
// Fast mode, or Accurate without image/covariances: robust LM only.
(
Some(refine_pose_lm(
intrinsics,
corners,
tag_size,
best_pose,
config.huber_delta_px,
)),
None,
)
},
}
}
/// Decomposes a metric tag homography into the two pose candidates of an
/// IPPE-style planar pose solver.
///
/// `h` maps metric tag-plane coordinates (z = 0) to normalized camera rays
/// (i.e. `K^-1 * H_pixel` with its first two columns rescaled by the
/// caller). Returns `None` when the homography scale degenerates.
fn solve_ippe_square(h: &Matrix3<f64>) -> Option<[Pose; 2]> {
let h1 = h.column(0);
let h2 = h.column(1);
// Singular values of the 3x2 block [h1 h2] via its 2x2 Gram matrix
// [[a, c], [c, b]]: eigenvalues are (trace +- sqrt(trace^2 - 4 det)) / 2.
let a = h1.dot(&h1);
let b = h2.dot(&h2);
let c = h1.dot(&h2);
let trace = a + b;
let det = a * b - c * c;
let delta = (trace * trace - 4.0 * det).max(0.0).sqrt();
let s1_sq = (trace + delta) * 0.5;
let s2_sq = (trace - delta) * 0.5;
let s1 = s1_sq.sqrt();
let s2 = s2_sq.sqrt();
if (s1 - s2).abs() < 1e-4 * s1 {
// Near-equal singular values: (close to) fronto-parallel view. The two
// IPPE solutions coincide, so orthonormalize (h1, h2) by Gram-Schmidt
// and return the single pose twice.
let mut r1 = h1.clone_owned();
let scale = 1.0 / r1.norm();
r1 *= scale;
let mut r2 = h2 - r1 * (h2.dot(&r1));
r2 = r2.normalize();
let r3 = r1.cross(&r2);
let rot = Matrix3::from_columns(&[r1, r2, r3]);
// The average singular value is the homography scale; its reciprocal
// sets the depth of the translation.
let gamma = (s1 + s2) * 0.5;
if gamma < 1e-8 {
return None;
} let tz = 1.0 / gamma;
let t = h.column(2) * tz;
let pose = Pose::new(rot, t);
return Some([pose, pose]);
}
// General case: build the first rotation from the SVD of [h1 h2].
// v1 is the right singular vector belonging to the larger value s1.
let v1 = if c.abs() > 1e-8 {
let v = nalgebra::Vector2::new(s1_sq - b, c);
v.normalize()
} else if a >= b {
nalgebra::Vector2::new(1.0, 0.0)
} else {
nalgebra::Vector2::new(0.0, 1.0)
};
let v2 = nalgebra::Vector2::new(-v1.y, v1.x);
// Left singular vectors u_i = (H v_i) / s_i (s2 clamped for stability).
let j_v1 = h1 * v1.x + h2 * v1.y;
let j_v2 = h1 * v2.x + h2 * v2.y;
if s1 < 1e-8 {
return None;
}
let u1 = j_v1 / s1;
let u2 = j_v2 / s2.max(1e-8);
// Rotation columns r_i = sum_j u_j * (v_j)_i; third column completed by
// the cross product so the frame is right-handed.
let r1_a = u1 * v1.x + u2 * v2.x;
let r2_a = u1 * v1.y + u2 * v2.y;
let r3_a = r1_a.cross(&r2_a);
let rot_a = Matrix3::from_columns(&[r1_a, r2_a, r3_a]);
let gamma = (s1 + s2) * 0.5;
let tz = 1.0 / gamma;
let t_a = h.column(2) * tz;
let pose_a = Pose::new(rot_a, t_a);
// Second candidate: mirror the plane normal across the optical axis and
// rebuild an orthonormal frame around it, reusing the same translation.
// NOTE(review): this looks like a heuristic stand-in for IPPE's exact
// second solution — confirm against the reference derivation.
let n_a = rot_a.column(2);
let n_b_raw = Vector3::new(-n_a.x, -n_a.y, n_a.z);
let n_b = if n_b_raw.norm_squared() > 1e-8 {
n_b_raw.normalize()
} else {
Vector3::z_axis().into_inner()
};
let h1_norm = h1.normalize();
// x axis: projection of h1 onto the plane orthogonal to n_b, with a
// fallback tangent when h1 is (nearly) parallel to the normal.
let x_b_raw = h1_norm - n_b * h1_norm.dot(&n_b);
let x_b = if x_b_raw.norm_squared() > 1e-8 {
x_b_raw.normalize()
} else {
let tangent = if n_b.x.abs() > 0.9 {
Vector3::y_axis().into_inner()
} else {
Vector3::x_axis().into_inner()
};
tangent.cross(&n_b).normalize()
};
let y_b = n_b.cross(&x_b);
let rot_b = Matrix3::from_columns(&[x_b, y_b, n_b]);
let pose_b = Pose::new(rot_b, t_a);
Some([pose_a, pose_b])
}
/// Tag corner coordinates in the tag's own frame (z = 0 plane), centered on
/// the origin, ordered (-h,-h), (h,-h), (h,h), (-h,h) with h = tag_size/2.
pub(crate) fn centered_tag_corners(tag_size: f64) -> [Vector3<f64>; 4] {
    let half = tag_size * 0.5;
    [
        Vector3::new(-half, -half, 0.0),
        Vector3::new(half, -half, 0.0),
        Vector3::new(half, half, 0.0),
        Vector3::new(-half, half, 0.0),
    ]
}
/// Projects a camera-frame point to pixels, routing the normalized
/// coordinates through the intrinsics' distortion model.
#[inline]
fn project_with_distortion(p_cam: &Vector3<f64>, intrinsics: &CameraIntrinsics) -> [f64; 2] {
    // Clamp depth so points at or behind the camera stay finite.
    let depth = p_cam.z.max(1e-4);
    intrinsics.distort_normalized(p_cam.x / depth, p_cam.y / depth)
}
/// Refines a pose with Levenberg-Marquardt on the Huber-robustified corner
/// reprojection error (IRLS weighting, at most 20 iterations).
///
/// The increment `delta = (dt, w)` is applied left-multiplicatively:
/// `R <- exp([w]x) R`, `t <- exp([w]x) t + dt`, so camera-frame points move
/// as `p' ≈ p + w × p + dt`.
#[allow(clippy::too_many_lines)]
fn refine_pose_lm(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    tag_size: f64,
    initial_pose: Pose,
    huber_delta_px: f64,
) -> Pose {
    let huber_delta = huber_delta_px;
    let mut pose = initial_pose;
    let obj_pts = centered_tag_corners(tag_size);
    // LM damping state (Nielsen's gain-ratio update strategy).
    let mut lambda = 1e-3_f64;
    let mut nu = 2.0_f64;
    let mut current_cost = huber_cost(intrinsics, corners, &obj_pts, &pose, huber_delta);
    for _ in 0..20 {
        let mut jtj = Matrix6::<f64>::zeros();
        let mut jtr = Vector6::<f64>::zeros();
        for i in 0..4 {
            let p_world = obj_pts[i];
            let p_cam = pose.rotation * p_world + pose.translation;
            // Clamp depth so points at/behind the camera stay finite.
            let z = p_cam.z.max(1e-4);
            let z_inv = 1.0 / z;
            let xn = p_cam.x / z;
            let yn = p_cam.y / z;
            let [u_est, v_est] = intrinsics.distort_normalized(xn, yn);
            // Residual convention: measured minus predicted.
            let res_u = corners[i][0] - u_est;
            let res_v = corners[i][1] - v_est;
            let r_norm = (res_u * res_u + res_v * res_v).sqrt();
            // Huber IRLS weight: 1 inside the delta band, delta/|r| outside.
            let w = if r_norm <= huber_delta {
                1.0
            } else {
                huber_delta / r_norm
            };
            // Pixel derivatives w.r.t. the camera-frame point, chained
            // through the distortion Jacobian in normalized coordinates.
            let jd = intrinsics.distortion_jacobian(xn, yn);
            let du_dp = Vector3::new(
                intrinsics.fx * jd[0][0] * z_inv,
                intrinsics.fx * jd[0][1] * z_inv,
                -intrinsics.fx * (jd[0][0] * xn + jd[0][1] * yn) * z_inv,
            );
            let dv_dp = Vector3::new(
                intrinsics.fy * jd[1][0] * z_inv,
                intrinsics.fy * jd[1][1] * z_inv,
                -intrinsics.fy * (jd[1][0] * xn + jd[1][1] * yn) * z_inv,
            );
            // Rotation block of the Jacobian. For the left-multiplicative
            // twist update (p' ≈ p + w × p_cam), d(w × p)/dw = -[p_cam]x,
            // hence du/dw = (p_cam × du_dp)^T — the same sign convention as
            // the translation block and as `projection_jacobian`. (The
            // previous hand-expanded rows computed du_dp × p_cam, i.e. the
            // negated rotation gradient, which drove rotation steps in the
            // wrong direction.)
            let rot_u = p_cam.cross(&du_dp);
            let rot_v = p_cam.cross(&dv_dp);
            let mut row_u = Vector6::zeros();
            row_u[0] = du_dp[0];
            row_u[1] = du_dp[1];
            row_u[2] = du_dp[2];
            row_u[3] = rot_u[0];
            row_u[4] = rot_u[1];
            row_u[5] = rot_u[2];
            let mut row_v = Vector6::zeros();
            row_v[0] = dv_dp[0];
            row_v[1] = dv_dp[1];
            row_v[2] = dv_dp[2];
            row_v[3] = rot_v[0];
            row_v[4] = rot_v[1];
            row_v[5] = rot_v[2];
            jtj += w * (row_u * row_u.transpose() + row_v * row_v.transpose());
            jtr += w * (row_u * res_u + row_v * res_v);
        }
        // Gradient is (numerically) zero: converged.
        if jtr.amax() < 1e-8 {
            break;
        }
        // Marquardt scaling: damp proportionally to the diagonal of J^T J.
        let d_diag = Vector6::new(
            jtj[(0, 0)].max(1e-8),
            jtj[(1, 1)].max(1e-8),
            jtj[(2, 2)].max(1e-8),
            jtj[(3, 3)].max(1e-8),
            jtj[(4, 4)].max(1e-8),
            jtj[(5, 5)].max(1e-8),
        );
        let mut jtj_damped = jtj;
        for k in 0..6 {
            jtj_damped[(k, k)] += lambda * d_diag[k];
        }
        let delta = if let Some(chol) = jtj_damped.cholesky() {
            chol.solve(&jtr)
        } else {
            // Not positive definite: increase damping and retry.
            lambda *= 10.0;
            nu = 2.0;
            continue;
        };
        // Model-predicted cost reduction: 0.5 * delta^T (lambda D delta + g).
        let predicted_reduction = 0.5 * delta.dot(&(lambda * d_diag.component_mul(&delta) + jtr));
        let twist = Vector3::new(delta[3], delta[4], delta[5]);
        let trans_update = Vector3::new(delta[0], delta[1], delta[2]);
        let rot_update = nalgebra::Rotation3::new(twist).matrix().into_owned();
        let new_pose = Pose::new(
            rot_update * pose.rotation,
            rot_update * pose.translation + trans_update,
        );
        let new_cost = huber_cost(intrinsics, corners, &obj_pts, &new_pose, huber_delta);
        let actual_reduction = current_cost - new_cost;
        // Gain ratio: actual vs predicted reduction.
        let rho = if predicted_reduction > 1e-12 {
            actual_reduction / predicted_reduction
        } else {
            0.0
        };
        if rho > 0.0 {
            // Accept the step and relax the damping (Nielsen's formula).
            pose = new_pose;
            current_cost = new_cost;
            lambda *= (1.0 - (2.0 * rho - 1.0).powi(3)).max(1.0 / 3.0);
            nu = 2.0;
            if delta.norm() < 1e-7 {
                break;
            }
        } else {
            // Reject the step and increase the damping.
            lambda *= nu;
            nu *= 2.0;
        }
    }
    pose
}
/// Total Huber loss of the corner reprojection residuals for `pose`.
///
/// A corner with residual norm `r` contributes `0.5 * r^2` when `r <= delta`
/// and the linear tail `delta * (r - 0.5 * delta)` otherwise.
fn huber_cost(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    obj_pts: &[Vector3<f64>; 4],
    pose: &Pose,
    delta: f64,
) -> f64 {
    obj_pts
        .iter()
        .zip(corners.iter())
        .map(|(obj, img)| {
            let cam = pose.rotation * obj + pose.translation;
            let proj = project_with_distortion(&cam, intrinsics);
            let du = img[0] - proj[0];
            let dv = img[1] - proj[1];
            let r = (du * du + dv * dv).sqrt();
            if r <= delta {
                0.5 * r * r
            } else {
                delta * (r - 0.5 * delta)
            }
        })
        .sum()
}
/// Sum of squared pixel reprojection errors over the four corners
/// (no robustification); used to rank candidate poses.
fn reprojection_error(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    obj_pts: &[Vector3<f64>; 4],
    pose: &Pose,
) -> f64 {
    obj_pts
        .iter()
        .zip(corners.iter())
        .map(|(obj, img)| {
            let cam = pose.rotation * obj + pose.translation;
            let proj = project_with_distortion(&cam, intrinsics);
            (proj[0] - img[0]).powi(2) + (proj[1] - img[1]).powi(2)
        })
        .sum()
}
/// Picks whichever IPPE candidate reprojects the corners with less error.
/// Ties keep the first candidate (the comparison is strict).
fn find_best_pose(
    intrinsics: &CameraIntrinsics,
    corners: &[[f64; 2]; 4],
    tag_size: f64,
    candidates: &[Pose; 2],
) -> Pose {
    let obj_pts = centered_tag_corners(tag_size);
    let err_first = reprojection_error(intrinsics, corners, &obj_pts, &candidates[0]);
    let err_second = reprojection_error(intrinsics, corners, &obj_pts, &candidates[1]);
    if err_second < err_first {
        candidates[1]
    } else {
        candidates[0]
    }
}
/// Batch pose refinement with the default detector configuration; see
/// [`refine_poses_soa_with_config`].
#[tracing::instrument(skip_all, name = "pipeline::pose_refinement")]
pub fn refine_poses_soa(
    batch: &mut DetectionBatch,
    v: usize,
    intrinsics: &CameraIntrinsics,
    tag_size: f64,
    img: Option<&ImageView>,
    mode: PoseEstimationMode,
) {
    let config = crate::config::DetectorConfig::default();
    refine_poses_soa_with_config(batch, v, intrinsics, tag_size, img, mode, &config);
}
#[tracing::instrument(skip_all, name = "pipeline::pose_refinement")]
pub fn refine_poses_soa_with_config(
batch: &mut DetectionBatch,
v: usize,
intrinsics: &CameraIntrinsics,
tag_size: f64,
img: Option<&ImageView>,
mode: PoseEstimationMode,
config: &crate::config::DetectorConfig,
) {
use rayon::prelude::*;
let poses: Vec<_> = (0..v)
.into_par_iter()
.map(|i| {
let corners = [
[
f64::from(batch.corners[i][0].x),
f64::from(batch.corners[i][0].y),
],
[
f64::from(batch.corners[i][1].x),
f64::from(batch.corners[i][1].y),
],
[
f64::from(batch.corners[i][2].x),
f64::from(batch.corners[i][2].y),
],
[
f64::from(batch.corners[i][3].x),
f64::from(batch.corners[i][3].y),
],
];
let mut ext_covs = [Matrix2::zeros(); 4];
let mut has_ext_covs = false;
#[allow(clippy::needless_range_loop)]
for j in 0..4 {
ext_covs[j] = Matrix2::new(
f64::from(batch.corner_covariances[i][j * 4]),
f64::from(batch.corner_covariances[i][j * 4 + 1]),
f64::from(batch.corner_covariances[i][j * 4 + 2]),
f64::from(batch.corner_covariances[i][j * 4 + 3]),
);
if ext_covs[j].norm_squared() > 1e-12 {
has_ext_covs = true;
}
}
let (pose_opt, _) = estimate_tag_pose_with_config(
intrinsics,
&corners,
tag_size,
img,
mode,
config,
if has_ext_covs { Some(&ext_covs) } else { None },
);
if let Some(pose) = pose_opt {
let rot = Rotation3::from_matrix_unchecked(pose.rotation);
let q = UnitQuaternion::from_rotation_matrix(&rot);
let t = pose.translation;
let mut data = [0.0f32; 7];
data[0] = t.x as f32;
data[1] = t.y as f32;
data[2] = t.z as f32;
data[3] = q.coords.x as f32;
data[4] = q.coords.y as f32;
data[5] = q.coords.z as f32;
data[6] = q.coords.w as f32;
Some(data)
} else {
None
}
})
.collect();
for (i, pose_data) in poses.into_iter().enumerate() {
if let Some(data) = pose_data {
batch.poses[i] = Pose6D { data, padding: 0.0 };
} else {
batch.poses[i] = Pose6D {
data: [0.0; 7],
padding: 0.0,
};
}
}
}
#[cfg(test)]
#[allow(clippy::expect_used, clippy::unwrap_used)]
mod tests {
    use super::*;
    use proptest::prelude::*;

    #[test]
    fn test_pose_projection() {
        let cam = CameraIntrinsics::new(500.0, 500.0, 320.0, 240.0);
        let pose = Pose::new(Matrix3::identity(), Vector3::new(0.0, 0.0, 2.0));
        // u = 320 + 500 * 0.1 / 2 = 345; v = 240 + 500 * 0.1 / 2 = 265.
        let projected = pose.project(&Vector3::new(0.1, 0.1, 0.0), &cam);
        assert!((projected[0] - 345.0).abs() < 1e-6);
        assert!((projected[1] - 265.0).abs() < 1e-6);
    }

    #[test]
    fn test_perfect_pose_estimation() {
        let cam = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
        let gt_rot = Matrix3::identity();
        let gt_t = Vector3::new(0.1, -0.2, 1.5);
        let gt_pose = Pose::new(gt_rot, gt_t);
        let tag_size = 0.16;
        let obj_pts = centered_tag_corners(tag_size);
        // Noise-free synthetic corners from the ground-truth pose.
        let mut img_pts = [[0.0, 0.0]; 4];
        for (img_pt, obj_pt) in img_pts.iter_mut().zip(obj_pts.iter()) {
            *img_pt = gt_pose.project(obj_pt, &cam);
        }
        let (est_pose, _) =
            estimate_tag_pose(&cam, &img_pts, tag_size, None, PoseEstimationMode::Fast);
        let est_pose = est_pose.expect("Pose estimation failed");
        assert!((est_pose.translation.x - gt_t.x).abs() < 1e-3);
        assert!((est_pose.translation.y - gt_t.y).abs() < 1e-3);
        assert!((est_pose.translation.z - gt_t.z).abs() < 1e-3);
        assert!((est_pose.rotation - gt_rot).norm() < 1e-3);
    }

    proptest! {
        #[test]
        fn prop_intrinsics_inversion(
            fx in 100.0..2000.0f64,
            fy in 100.0..2000.0f64,
            cx in 0.0..1000.0f64,
            cy in 0.0..1000.0f64
        ) {
            let cam = CameraIntrinsics::new(fx, fy, cx, cy);
            // K * K^-1 must be (numerically) the identity.
            let product = cam.as_matrix() * cam.inv_matrix();
            let expected = Matrix3::<f64>::identity();
            for r in 0..3 {
                for c in 0..3 {
                    prop_assert!((product[(r, c)] - expected[(r, c)]).abs() < 1e-9);
                }
            }
        }

        #[test]
        fn prop_pose_recovery_stability(
            tx in -0.5..0.5f64,
            ty in -0.5..0.5f64,
            tz in 0.5..5.0f64,
            roll in -0.5..0.5f64,
            pitch in -0.5..0.5f64,
            yaw in -0.5..0.5f64,
            noise in 0.0..0.1f64
        ) {
            let cam = CameraIntrinsics::new(800.0, 800.0, 400.0, 300.0);
            let translation = Vector3::new(tx, ty, tz);
            let rotation = nalgebra::Rotation3::from_euler_angles(roll, pitch, yaw)
                .matrix()
                .into_owned();
            let gt_pose = Pose::new(rotation, translation);
            let tag_size = 0.16;
            let obj_pts = centered_tag_corners(tag_size);
            // Corners with a constant pixel offset as synthetic noise.
            let mut img_pts = [[0.0, 0.0]; 4];
            for (img_pt, obj_pt) in img_pts.iter_mut().zip(obj_pts.iter()) {
                let p = gt_pose.project(obj_pt, &cam);
                *img_pt = [p[0] + noise, p[1] + noise];
            }
            if let (Some(est_pose), _) =
                estimate_tag_pose(&cam, &img_pts, tag_size, None, PoseEstimationMode::Fast)
            {
                let t_err = (est_pose.translation - translation).norm();
                prop_assert!(t_err < 0.1 + noise * 0.1, "Translation error {} too high for noise {}", t_err, noise);
                let r_err = (est_pose.rotation - rotation).norm();
                prop_assert!(r_err < 0.1 + noise * 0.1, "Rotation error {} too high for noise {}", r_err, noise);
            }
        }
    }
}