braid_mvg/lib.rs
1// Copyright 2016-2025 Andrew D. Straw.
2//
3// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT
5// or http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8//! Camera geometry and multi-view geometry (MVG) types and algorithms for the
9//! [Braid](https://strawlab.org/braid) tracking system.
10//!
11//! This crate provides camera modeling, geometric transformations, and
12//! multi-camera system support for 3D computer vision applications. It's
13//! specifically designed for use in the Braid multi-camera tracking system but
14//! can be used for general computer vision tasks.
15//!
16//! ## Features
17//!
18//! - Camera modeling with intrinsic and extrinsic parameters based on
19//! [`cam-geom`](https://docs.rs/cam-geom)
20//! - Lens distortion correction using OpenCV-compatible models based on
21//! [`opencv-ros-camera`](https://docs.rs/opencv-ros-camera)
22//! - Multi-camera system management and calibration
23//! - 3D point triangulation from multiple camera views
24//! - Point alignment algorithms (Kabsch-Umeyama, robust Arun)
25//! - Coordinate frame transformations between world, camera, and pixel spaces
26//! - [rerun.io](https://rerun.io) integration for 3D visualization (optional)
27//!
28//! ## Core Types
29//!
30//! - [`Camera`]: Individual camera with intrinsics and extrinsics
31//! - [`MultiCameraSystem`]: Collection of calibrated cameras
32//! - [`DistortedPixel`], [`UndistortedPixel`]: Pixel coordinate types
33//! - [`PointWorldFrame`], [`PointCameraFrame`]: 3D point types in different
34//! coordinate systems
35//!
36//! ## Coordinate Systems
37//!
38//! The crate uses three main coordinate systems:
39//! - **World Frame**: Global 3D coordinate system
40//! - **Camera Frame**: 3D coordinates relative to individual cameras
41//! - **Pixel Coordinates**: 2D image coordinates (distorted and undistorted)
42//!
43//! ## Example
44//!
45//! This example demonstrates a complete round-trip workflow: projecting a 3D point
46//! to 2D pixel coordinates in each camera, then reconstructing the 3D point from
47//! these 2D observations and comparing it to the original.
48//!
49//! ```rust
50//! use braid_mvg::{Camera, MultiCameraSystem, PointWorldFrame, extrinsics, make_default_intrinsics};
51//! use std::collections::BTreeMap;
52//! use nalgebra::Point3;
53//!
54//! // Create a multi-camera system with two cameras for triangulation
55//!
56//! // Camera 1: use default extrinsics (positioned at (1,2,3))
57//! let extrinsics1 = extrinsics::make_default_extrinsics::<f64>();
58//! let camera1 = Camera::new(640, 480, extrinsics1, make_default_intrinsics()).unwrap();
59//!
60//! // Camera 2: positioned with sufficient baseline for triangulation
61//! let translation2 = nalgebra::Point3::new(3.0, 2.0, 3.0);
62//! let rotation2 = nalgebra::UnitQuaternion::identity();
63//! let extrinsics2 = extrinsics::from_rquat_translation(rotation2, translation2);
64//! let camera2 = Camera::new(640, 480, extrinsics2, make_default_intrinsics()).unwrap();
65//!
66//! // Build the multi-camera system
67//! let mut cameras = BTreeMap::new();
68//! cameras.insert("cam1".to_string(), camera1);
69//! cameras.insert("cam2".to_string(), camera2);
70//! let system = MultiCameraSystem::new(cameras);
71//!
72//! // Define an original 3D point in world coordinates
73//! // Place it in front of both cameras at a reasonable distance
74//! let original_point = PointWorldFrame {
75//! coords: Point3::new(2.0, 2.0, 8.0)
76//! };
77//! println!("Original 3D point: {:?}", original_point.coords);
78//!
79//! // Step 1: Project the 3D point to 2D pixels in each camera
80//! let mut observations = Vec::new();
81//! println!("\nProjecting to 2D pixels:");
82//! for (cam_name, camera) in system.cams_by_name() {
83//! let pixel = camera.project_3d_to_pixel(&original_point);
84//! println!(" {}: pixel ({:.2}, {:.2})", cam_name, pixel.coords.x, pixel.coords.y);
85//! observations.push((cam_name.clone(), pixel));
86//! }
87//!
88//! // Step 2: Reconstruct the 3D point from the 2D observations
89//! println!("\nReconstructing 3D point from 2D observations...");
90//! let reconstructed_point = system.find3d(&observations)
91//! .expect("Triangulation should succeed with good observations");
92//!
93//! // Step 3: Compare original and reconstructed points
94//! println!("Reconstructed 3D point: {:?}", reconstructed_point.coords);
95//!
96//! let error = (original_point.coords - reconstructed_point.coords).norm();
97//! println!("3D reconstruction error: {error:.2e}");
98//!
99//! // With perfect cameras and no noise, reconstruction should be very accurate
100//! assert!(error < 1e-6, "Reconstruction error too large: {error:.2e}");
101//!
102//! // Verify that reprojection works correctly
103//! println!("\nVerifying reprojection accuracy:");
104//! for (cam_name, camera) in system.cams_by_name() {
105//! let reprojected_pixel = camera.project_3d_to_pixel(&reconstructed_point);
106//! let original_pixel = camera.project_3d_to_pixel(&original_point);
107//! let pixel_error = (reprojected_pixel.coords - original_pixel.coords).norm();
108//! println!(" {cam_name}: reprojection error {pixel_error:.2e} pixels");
109//! assert!(pixel_error < 1e-6, "Reprojection error too large for {cam_name}");
110//! }
111//!
//! println!("✓ Round-trip 3D→2D→3D reconstruction successful!");
113//! ```
114#![deny(rust_2018_idioms)]
115#![warn(missing_docs)]
116#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
117use thiserror::Error;
118
119use nalgebra as na;
120use nalgebra::geometry::{Point2, Point3};
121use nalgebra::{Dim, RealField, U1, U2, U3};
122
123use cam_geom::ExtrinsicParameters;
124use opencv_ros_camera::{Distortion, RosOpenCvIntrinsics};
125
/// Error types that can occur during multi-view geometry operations.
#[derive(Error, Debug)]
pub enum MvgError {
    /// Unknown or unsupported lens distortion model encountered.
    ///
    /// This error occurs when trying to work with a camera distortion model
    /// that is not supported by the current implementation.
    #[error("unknown distortion model")]
    UnknownDistortionModel,
    /// Rectification matrix is not supported by the current implementation.
    ///
    /// Rectification matrices are used in stereo vision but are not fully
    /// supported by all operations in this crate.
    #[error("rectification matrix not supported")]
    RectificationMatrixNotSupported,
    /// Insufficient points provided for the geometric operation.
    ///
    /// Many operations like triangulation require a minimum number of observations.
    /// For example, 3D triangulation needs at least 2 camera views.
    #[error("not enough points")]
    NotEnoughPoints,
    /// Invalid matrix or array dimensions for the operation.
    ///
    /// This occurs when input data has incompatible dimensions for the
    /// requested mathematical operation.
    #[error("invalid shape")]
    InvalidShape,
    /// Camera name not found in the multi-camera system.
    ///
    /// Returned when referencing a camera by name that doesn't exist in the
    /// current [`MultiCameraSystem`].
    #[error("unknown camera")]
    UnknownCamera,
    /// Singular Value Decomposition failed during matrix operations.
    ///
    /// This can occur during camera calibration or 3D reconstruction when
    /// the input data is degenerate or ill-conditioned.
    #[error("SVD failed")]
    SvdFailed,
    /// Generic parsing error for configuration files or data formats.
    #[error("Parsing error")]
    ParseError,
    /// Invalid rotation matrix (not orthogonal or determinant ≠ 1).
    ///
    /// Rotation matrices must be orthogonal with determinant +1 to represent
    /// valid 3D rotations.
    #[error("invalid rotation matrix")]
    InvalidRotationMatrix,
    /// Unsupported file format or schema version.
    #[error("unsupported version")]
    UnsupportedVersion,
    /// Invalid rectification matrix parameters.
    #[error("invalid rect matrix")]
    InvalidRectMatrix,
    /// Unsupported camera or parameter type.
    #[error("unsupported type")]
    UnsupportedType,
    /// Rerun.io does not support this camera intrinsics model.
    ///
    /// Only available when the `rerun-io` feature is enabled.
    /// Some complex distortion models cannot be exported to rerun.io format.
    #[cfg(feature = "rerun-io")]
    #[error("rerun does not support this model of camera intrinsics")]
    RerunUnsupportedIntrinsics,
    /// Multiple valid mathematical roots found where only one was expected.
    ///
    /// This can occur in polynomial root-finding algorithms used in
    /// geometric computations.
    #[error("multiple valid roots found")]
    MultipleValidRootsFound,
    /// No valid mathematical root found for the equation.
    ///
    /// This indicates that the geometric problem has no solution with
    /// the given constraints.
    #[error("no valid root found")]
    NoValidRootFound,
    /// I/O error during file operations.
    #[error("IO error: {source}")]
    Io {
        /// The underlying I/O error.
        #[from]
        source: std::io::Error,
    },
    /// YAML serialization/deserialization error.
    #[error("serde_yaml error: {source}")]
    SerdeYaml {
        /// The underlying YAML parsing error.
        #[from]
        source: serde_yaml::Error,
    },
    /// JSON serialization/deserialization error.
    #[error("serde_json error: {source}")]
    SerdeJson {
        /// The underlying JSON parsing error.
        #[from]
        source: serde_json::Error,
    },
    /// SVG rendering or processing error.
    #[error("SvgError: {}", error)]
    SvgError {
        /// The SVG error message.
        error: &'static str,
    },
    /// Pseudo-inverse calculation error.
    #[error("PinvError: {}", error)]
    PinvError {
        /// The pseudo-inverse error message.
        error: String,
    },
    /// Error from the [`cam-geom`](https://docs.rs/cam-geom) crate.
    #[error("cam_geom::Error: {source}")]
    CamGeomError {
        /// The underlying cam-geom error.
        #[from]
        source: cam_geom::Error,
    },
    /// Error from the [`opencv-ros-camera`](https://docs.rs/opencv-ros-camera) crate.
    #[error("opencv_ros_camera::Error: {source}")]
    OpencvRosError {
        /// The underlying opencv-ros-camera error.
        #[from]
        source: opencv_ros_camera::Error,
    },
}
250
/// Convenience type alias for results in multi-view geometry operations.
///
/// The error type is always [`MvgError`].
pub type Result<M> = std::result::Result<M, MvgError>;
253
/// Support for the PyMVG camera-calibration interchange format.
// NOTE(review): module contents are not visible in this file — confirm this
// one-line description against the module's own docs.
pub mod pymvg_support;

/// Camera intrinsic parameter utilities and operations.
///
/// This module centers on convenience additions to
/// [`opencv_ros_camera::RosOpenCvIntrinsics`].
pub mod intrinsics;

/// Camera extrinsic parameter utilities and factory functions.
///
/// This module provides functions for creating camera extrinsic parameters,
/// centered on convenience additions to
/// [`cam_geom::ExtrinsicParameters`].
pub mod extrinsics;

/// Point cloud alignment algorithms and utilities.
///
/// This module implements various algorithms for aligning point clouds and
/// coordinate systems, including Kabsch-Umeyama and robust Arun methods.
/// These are commonly used in camera calibration and 3D reconstruction.
pub mod align_points;

/// Integration with [rerun.io](https://rerun.io) for 3D visualization.
///
/// This module provides conversion utilities between braid-mvg types and
/// rerun.io data structures, enabling 3D visualization of camera systems,
/// point clouds, and tracking results.
///
/// **Note**: This module is only available when the `rerun-io` feature is enabled.
#[cfg(feature = "rerun-io")]
#[cfg_attr(docsrs, doc(cfg(feature = "rerun-io")))]
pub mod rerun_io;

// Private implementation modules; their public items are re-exported below.
mod camera;
pub use crate::camera::{rq_decomposition, Camera};

mod multi_cam_system;
pub use crate::multi_cam_system::MultiCameraSystem;
292
/// A 2D pixel coordinate in the distorted image space.
///
/// This represents pixel coordinates as they appear in the raw camera image,
/// including the effects of lens distortion. See [`UndistortedPixel`] for the
/// corresponding distortion-free representation.
#[derive(Debug, Clone)]
pub struct DistortedPixel<R: RealField + Copy> {
    /// The 2D pixel coordinates (x, y) in the distorted image.
    pub coords: Point2<R>,
}
302
303impl<R, IN> From<&cam_geom::Pixels<R, U1, IN>> for DistortedPixel<R>
304where
305 R: RealField + Copy,
306 IN: nalgebra::storage::Storage<R, U1, U2>,
307{
308 fn from(orig: &cam_geom::Pixels<R, U1, IN>) -> Self {
309 DistortedPixel {
310 coords: Point2::new(orig.data[(0, 0)], orig.data[(0, 1)]),
311 }
312 }
313}
314
315impl<R, IN> From<cam_geom::Pixels<R, U1, IN>> for DistortedPixel<R>
316where
317 R: RealField + Copy,
318 IN: nalgebra::storage::Storage<R, U1, U2>,
319{
320 fn from(orig: cam_geom::Pixels<R, U1, IN>) -> Self {
321 let orig_ref = &orig;
322 orig_ref.into()
323 }
324}
325
326impl<R> From<&DistortedPixel<R>> for cam_geom::Pixels<R, U1, na::storage::Owned<R, U1, U2>>
327where
328 R: RealField + Copy,
329 na::DefaultAllocator: na::allocator::Allocator<U1, U2>,
330{
331 fn from(orig: &DistortedPixel<R>) -> Self {
332 Self {
333 data: na::OMatrix::<R, U1, U2>::from_row_slice(&[orig.coords[0], orig.coords[1]]),
334 }
335 }
336}
337
338impl<R: RealField + Copy> DistortedPixel<R> {
339 /// Extract a single distorted pixel from a collection of pixels.
340 ///
341 /// This method allows you to extract one pixel coordinate from a larger
342 /// collection of pixel coordinates, such as those returned by batch
343 /// projection operations.
344 ///
345 /// # Arguments
346 ///
347 /// * `pixels` - A collection of pixel coordinates
348 /// * `i` - The index of the pixel to extract (0-based)
349 ///
350 /// # Example
351 ///
352 /// ```rust
353 /// use braid_mvg::DistortedPixel;
354 /// use cam_geom::Pixels;
355 /// use nalgebra::{Point2, OMatrix, U2};
356 ///
357 /// // This example would work with actual cam_geom::Pixels data
358 /// // let pixels = /* some cam_geom::Pixels instance */;
359 /// // let first_pixel = DistortedPixel::from_pixels(&pixels, 0);
360 /// ```
361 pub fn from_pixels<NPTS, IN>(pixels: &cam_geom::Pixels<R, NPTS, IN>, i: usize) -> Self
362 where
363 NPTS: Dim,
364 IN: nalgebra::storage::Storage<R, NPTS, U2>,
365 {
366 DistortedPixel {
367 coords: Point2::new(pixels.data[(i, 0)], pixels.data[(i, 1)]),
368 }
369 }
370}
371
/// A 2D pixel coordinate in the undistorted (rectified) image space.
///
/// This represents pixel coordinates after lens distortion has been removed,
/// corresponding to an ideal pinhole camera model. See [`DistortedPixel`] for
/// coordinates in the raw camera image.
#[derive(Debug, Clone)]
pub struct UndistortedPixel<R: RealField + Copy> {
    /// The 2D pixel coordinates (x, y) in the undistorted image.
    pub coords: Point2<R>,
}
381
382impl<R, IN> From<&opencv_ros_camera::UndistortedPixels<R, U1, IN>> for UndistortedPixel<R>
383where
384 R: RealField + Copy,
385 IN: nalgebra::storage::Storage<R, U1, U2>,
386{
387 fn from(orig: &opencv_ros_camera::UndistortedPixels<R, U1, IN>) -> Self {
388 UndistortedPixel {
389 coords: Point2::new(orig.data[(0, 0)], orig.data[(0, 1)]),
390 }
391 }
392}
393
394impl<R, IN> From<opencv_ros_camera::UndistortedPixels<R, U1, IN>> for UndistortedPixel<R>
395where
396 R: RealField + Copy,
397 IN: nalgebra::storage::Storage<R, U1, U2>,
398{
399 fn from(orig: opencv_ros_camera::UndistortedPixels<R, U1, IN>) -> Self {
400 let orig_ref = &orig;
401 orig_ref.into()
402 }
403}
404
405impl<R> From<&UndistortedPixel<R>>
406 for opencv_ros_camera::UndistortedPixels<R, U1, na::storage::Owned<R, U1, U2>>
407where
408 R: RealField + Copy,
409 na::DefaultAllocator: na::allocator::Allocator<U1, U2>,
410{
411 fn from(orig: &UndistortedPixel<R>) -> Self {
412 Self {
413 data: na::OMatrix::<R, U1, U2>::from_row_slice(&[orig.coords[0], orig.coords[1]]),
414 }
415 }
416}
417
/// A 3D point in the camera coordinate frame.
///
/// This represents a 3D point in the coordinate system of a specific camera.
/// See [`PointWorldFrame`] for points in the global coordinate system.
#[derive(Debug, Clone)]
pub struct PointCameraFrame<R: RealField + Copy> {
    /// The 3D coordinates (x, y, z) in the camera reference frame.
    pub coords: Point3<R>,
}
426
427impl<R, IN> From<&cam_geom::Points<cam_geom::coordinate_system::CameraFrame, R, U1, IN>>
428 for PointCameraFrame<R>
429where
430 R: RealField + Copy,
431 IN: nalgebra::storage::Storage<R, U1, U3>,
432{
433 fn from(orig: &cam_geom::Points<cam_geom::coordinate_system::CameraFrame, R, U1, IN>) -> Self {
434 PointCameraFrame {
435 coords: Point3::new(orig.data[(0, 0)], orig.data[(0, 1)], orig.data[(0, 2)]),
436 }
437 }
438}
439
440impl<R, IN> From<cam_geom::Points<cam_geom::coordinate_system::CameraFrame, R, U1, IN>>
441 for PointCameraFrame<R>
442where
443 R: RealField + Copy,
444 IN: nalgebra::storage::Storage<R, U1, U3>,
445{
446 fn from(orig: cam_geom::Points<cam_geom::coordinate_system::CameraFrame, R, U1, IN>) -> Self {
447 let orig_ref = &orig;
448 orig_ref.into()
449 }
450}
451
452impl<R> From<&PointCameraFrame<R>>
453 for cam_geom::Points<
454 cam_geom::coordinate_system::CameraFrame,
455 R,
456 U1,
457 na::storage::Owned<R, U1, U3>,
458 >
459where
460 R: RealField + Copy,
461 na::DefaultAllocator: na::allocator::Allocator<U1, U2>,
462{
463 fn from(orig: &PointCameraFrame<R>) -> Self {
464 Self::new(na::OMatrix::<R, U1, U3>::new(
465 orig.coords[0],
466 orig.coords[1],
467 orig.coords[2],
468 ))
469 }
470}
471
/// A 3D point in the world coordinate frame.
///
/// This represents a 3D point in a global coordinate system that is independent
/// of any specific camera. See [`PointCameraFrame`] for points relative to a
/// single camera.
#[derive(Debug, Clone)]
pub struct PointWorldFrame<R: RealField + Copy> {
    /// The 3D coordinates (x, y, z) in the world reference frame.
    pub coords: Point3<R>,
}
481
482impl<R, IN> From<&cam_geom::Points<cam_geom::coordinate_system::WorldFrame, R, U1, IN>>
483 for PointWorldFrame<R>
484where
485 R: RealField + Copy,
486 IN: nalgebra::storage::Storage<R, U1, U3>,
487{
488 fn from(orig: &cam_geom::Points<cam_geom::coordinate_system::WorldFrame, R, U1, IN>) -> Self {
489 PointWorldFrame {
490 coords: Point3::new(orig.data[(0, 0)], orig.data[(0, 1)], orig.data[(0, 2)]),
491 }
492 }
493}
494
495impl<R, IN> From<cam_geom::Points<cam_geom::coordinate_system::WorldFrame, R, U1, IN>>
496 for PointWorldFrame<R>
497where
498 R: RealField + Copy,
499 IN: nalgebra::storage::Storage<R, U1, U3>,
500{
501 fn from(orig: cam_geom::Points<cam_geom::coordinate_system::WorldFrame, R, U1, IN>) -> Self {
502 let orig_ref = &orig;
503 orig_ref.into()
504 }
505}
506
507impl<R> From<&PointWorldFrame<R>>
508 for cam_geom::Points<
509 cam_geom::coordinate_system::WorldFrame,
510 R,
511 U1,
512 na::storage::Owned<R, U1, U3>,
513 >
514where
515 R: RealField + Copy,
516 na::DefaultAllocator: na::allocator::Allocator<U1, U2>,
517{
518 fn from(orig: &PointWorldFrame<R>) -> Self {
519 Self::new(na::OMatrix::<R, U1, U3>::new(
520 orig.coords[0],
521 orig.coords[1],
522 orig.coords[2],
523 ))
524 }
525}
526
527/// Compute the sum of elements in a vector.
528pub fn vec_sum<R: RealField + Copy>(vec: &[R]) -> R {
529 vec.iter().fold(na::convert(0.0), |acc, i| acc + *i)
530}
531
/// A 3D world point with associated reprojection error statistics.
///
/// This structure extends [`PointWorldFrame`] with additional information about
/// how well the reconstructed 3D point reprojects back to the original 2D
/// observations in each camera. This is useful for quality assessment and
/// outlier detection in 3D reconstruction.
///
/// # Reprojection Error
///
/// Reprojection error measures how far the reconstructed 3D point projects
/// from the original 2D observations when projected back into each camera.
/// Lower values indicate better reconstruction quality.
///
/// # Fields
///
/// - `point`: The reconstructed 3D point in world coordinates
/// - `cum_reproj_dist`: Sum of reprojection distances across all cameras
/// - `mean_reproj_dist`: Average reprojection distance per camera
/// - `reproj_dists`: Individual reprojection distance for each camera
///
/// # Example
///
/// ```rust
/// use braid_mvg::{PointWorldFrame, PointWorldFrameWithSumReprojError};
/// use nalgebra::Point3;
///
/// let point = PointWorldFrame { coords: Point3::new(1.0, 2.0, 3.0) };
/// let errors = vec![0.5, 0.3, 0.8]; // reprojection errors for 3 cameras
///
/// let point_with_error = PointWorldFrameWithSumReprojError::new(point, errors);
/// println!("Mean reprojection error: {:.3}", point_with_error.mean_reproj_dist);
/// ```
#[derive(Debug, Clone)]
pub struct PointWorldFrameWithSumReprojError<R: RealField + Copy> {
    /// The reconstructed 3D point in world coordinates.
    pub point: PointWorldFrame<R>,
    /// Sum of reprojection distances from all cameras.
    pub cum_reproj_dist: R,
    /// Average reprojection distance per camera
    /// (`cum_reproj_dist` divided by the number of cameras).
    pub mean_reproj_dist: R,
    /// Individual reprojection distances, one per camera.
    pub reproj_dists: Vec<R>,
}
575
576impl<R: RealField + Copy> PointWorldFrameWithSumReprojError<R> {
577 /// Create a new point with reprojection error statistics.
578 ///
579 /// This constructor automatically computes the cumulative and mean
580 /// reprojection errors from the individual camera errors.
581 ///
582 /// # Arguments
583 ///
584 /// * `point` - The 3D point in world coordinates
585 /// * `reproj_dists` - Vector of reprojection distances, one per camera
586 ///
587 /// # Returns
588 ///
589 /// A new instance with computed error statistics
590 ///
591 /// # Example
592 ///
593 /// ```rust
594 /// use braid_mvg::{PointWorldFrame, PointWorldFrameWithSumReprojError};
595 /// use nalgebra::Point3;
596 ///
597 /// let point = PointWorldFrame { coords: Point3::new(0.0, 0.0, 5.0) };
598 /// let errors = vec![0.1, 0.2, 0.15]; // errors from 3 cameras
599 ///
600 /// let result = PointWorldFrameWithSumReprojError::new(point, errors);
601 /// assert!((result.mean_reproj_dist - 0.15f64).abs() < 1e-10);
602 /// assert!((result.cum_reproj_dist - 0.45f64).abs() < 1e-10);
603 /// ```
604 pub fn new(point: PointWorldFrame<R>, reproj_dists: Vec<R>) -> Self {
605 let cum_reproj_dist = vec_sum(&reproj_dists);
606 let n_cams: R = na::convert(reproj_dists.len() as f64);
607 let mean_reproj_dist = cum_reproj_dist / n_cams;
608 Self {
609 point,
610 cum_reproj_dist,
611 mean_reproj_dist,
612 reproj_dists,
613 }
614 }
615}
616
/// A 3D world point that may or may not include reprojection error information.
///
/// This enum allows functions to return either a simple 3D point or a point
/// with additional reprojection error statistics, depending on the operation
/// performed and the level of detail requested. Use
/// [`Self::point`] to extract the bare 3D point from either variant.
#[derive(Debug, Clone)]
pub enum PointWorldFrameMaybeWithSumReprojError<R: RealField + Copy> {
    /// A simple 3D point without error information.
    Point(PointWorldFrame<R>),
    /// A 3D point with reprojection error statistics.
    WithSumReprojError(PointWorldFrameWithSumReprojError<R>),
}
629
630impl<R: RealField + Copy> PointWorldFrameMaybeWithSumReprojError<R> {
631 /// Extract the 3D point coordinates regardless of the variant type.
632 ///
633 /// This method provides a uniform way to get the 3D point coordinates
634 /// whether the enum contains a simple point or a point with error statistics.
635 pub fn point(self) -> PointWorldFrame<R> {
636 use crate::PointWorldFrameMaybeWithSumReprojError::*;
637 match self {
638 Point(pt) => pt,
639 WithSumReprojError(pto) => pto.point,
640 }
641 }
642}
643
/// A combined data structure containing a 3D world point and its 2D camera observations.
///
/// This structure packages together a 3D point (possibly with reprojection errors)
/// and the corresponding 2D undistorted pixel observations from each camera.
/// This is useful for algorithms that need to work with both the 3D structure
/// and the 2D observations simultaneously.
///
/// # Use Cases
///
/// - Bundle adjustment optimization
/// - Outlier detection and filtering
/// - Tracking and correspondence validation
/// - Quality assessment of triangulation results
///
/// # Example
///
/// ```rust
/// use braid_mvg::{WorldCoordAndUndistorted2D, PointWorldFrame,
///                 PointWorldFrameMaybeWithSumReprojError, UndistortedPixel};
/// use nalgebra::{Point2, Point3};
///
/// let point = PointWorldFrame { coords: Point3::new(1.0, 2.0, 3.0) };
/// let maybe_point = PointWorldFrameMaybeWithSumReprojError::Point(point);
///
/// let observations = vec![
///     ("cam1".to_string(), UndistortedPixel { coords: Point2::new(320.0, 240.0) }),
///     ("cam2".to_string(), UndistortedPixel { coords: Point2::new(340.0, 250.0) }),
/// ];
///
/// let combined = WorldCoordAndUndistorted2D::new(maybe_point, observations);
/// ```
#[derive(Debug, Clone)]
pub struct WorldCoordAndUndistorted2D<R: RealField + Copy> {
    /// The 3D world point (possibly with reprojection error information).
    wc: PointWorldFrameMaybeWithSumReprojError<R>,
    /// The 2D undistorted pixel observations from each camera.
    /// Each entry is a (camera_name, pixel_coordinate) pair.
    upoints: Vec<(String, UndistortedPixel<R>)>,
}
683
684impl<R: RealField + Copy> WorldCoordAndUndistorted2D<R> {
685 /// Create a new combined data structure.
686 ///
687 /// # Arguments
688 ///
689 /// * `wc` - The 3D world coordinates (possibly with error information)
690 /// * `upoints` - Vector of (camera_name, undistorted_pixel) pairs
691 ///
692 /// # Returns
693 ///
694 /// A new instance combining the 3D and 2D information
695 pub fn new(
696 wc: PointWorldFrameMaybeWithSumReprojError<R>,
697 upoints: Vec<(String, UndistortedPixel<R>)>,
698 ) -> Self {
699 Self { wc, upoints }
700 }
701
702 /// Extract just the 3D point coordinates.
703 ///
704 /// # Returns
705 ///
706 /// The [`PointWorldFrame`] containing the 3D coordinates
707 pub fn point(self) -> PointWorldFrame<R> {
708 self.wc.point()
709 }
710
711 /// Decompose into the constituent 3D and 2D components.
712 ///
713 /// # Returns
714 ///
715 /// A tuple containing:
716 /// - The 3D world coordinates (possibly with error information)
717 /// - The vector of 2D observations from each camera
718 pub fn wc_and_upoints(
719 self,
720 ) -> (
721 PointWorldFrameMaybeWithSumReprojError<R>,
722 Vec<(String, UndistortedPixel<R>)>,
723 ) {
724 (self.wc, self.upoints)
725 }
726}
727
728/// Create default camera intrinsic parameters for testing and prototyping.
729///
730/// These parameters are not suitable for real applications - always perform
731/// proper camera calibration for production use.
732pub fn make_default_intrinsics<R: RealField + Copy>() -> RosOpenCvIntrinsics<R> {
733 let cx = na::convert(320.0);
734 let cy = na::convert(240.0);
735 let fx = na::convert(1000.0);
736 let skew = na::convert(0.0);
737 let fy = fx;
738 RosOpenCvIntrinsics::from_params(fx, skew, fy, cx, cy)
739}
740
741#[cfg(test)]
742mod tests {
743 use crate::*;
744
745 fn get_test_intrinsics() -> Vec<(String, RosOpenCvIntrinsics<f64>)> {
746 use na::Vector5;
747 let mut result = Vec::new();
748
749 for (name, dist) in &[
750 (
751 "linear",
752 Distortion::from_opencv_vec(Vector5::new(0.0, 0.0, 0.0, 0.0, 0.0)),
753 ),
754 (
755 "d1",
756 Distortion::from_opencv_vec(Vector5::new(0.1001, 0.2002, 0.3003, 0.4004, 0.5005)),
757 ),
758 ] {
759 for skew in &[0, 10] {
760 let fx = 100.0;
761 let fy = 100.0;
762 let cx = 320.0;
763 let cy = 240.0;
764
765 let cam = RosOpenCvIntrinsics::from_params_with_distortion(
766 fx,
767 *skew as f64,
768 fy,
769 cx,
770 cy,
771 dist.clone(),
772 );
773 result.push((format!("dist-{name}_skew{skew}"), cam));
774 }
775 }
776
777 result.push(("default".to_string(), make_default_intrinsics()));
778
779 result
780 }
781
    /// Build named test cameras for use by sibling test modules.
    pub(crate) fn get_test_cameras() -> Vec<(String, Camera<f64>)> {
        let mut result = Vec::new();

        use na::core::dimension::U4;
        use na::core::OMatrix;

        // A camera defined directly by a 3x4 projection matrix.
        #[rustfmt::skip]
        let pmat = OMatrix::<f64,U3,U4>::new(100.0,   0.0, 0.0, 0.01,
                                               0.0, 100.0, 0.0, 0.01,
                                             320.0, 240.0, 1.0, 0.01);
        let cam = crate::Camera::from_pmat(640, 480, &pmat).expect("generate test cam from pmat");
        result.insert(0, ("from-pmat-1".to_string(), cam));

        // One camera per test-intrinsics variant, all sharing default extrinsics.
        let extrinsics = crate::extrinsics::make_default_extrinsics();
        for (int_name, intrinsics) in get_test_intrinsics().into_iter() {
            let name = format!("cam-{int_name}");
            let cam = Camera::new(640, 480, extrinsics.clone(), intrinsics).unwrap();
            result.push((name, cam));
        }
        result.push(("default-cam".to_string(), Camera::default()));

        // NOTE(review): this filter keeps only "cam-dist-linear_skew0" and
        // discards every other camera built above (including "from-pmat-1" and
        // "default-cam"). It looks like a debugging leftover — confirm whether
        // the full list should be returned instead.
        let mut result2 = vec![];
        for (name, cam) in result {
            if &name == "cam-dist-linear_skew0" {
                result2.push((name, cam));
            }
        }
        result2
    }
811}