// braid_mvg/multi_cam_system.rs
// Copyright 2016-2025 Andrew D. Straw.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT
// or http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
8#![allow(non_snake_case)]
9
10use std::collections::BTreeMap;
11use std::io::Read;
12
13use na::{Matrix3, Vector3};
14use nalgebra as na;
15
16use na::RealField;
17#[allow(unused_imports)]
18use serde::{de::DeserializeOwned, Deserialize, Serialize};
19
20use cam_geom::{coordinate_system::WorldFrame, Ray};
21
22use crate::pymvg_support::PymvgMultiCameraSystemV1;
23use crate::{
24 Camera, MvgError, PointWorldFrame, PointWorldFrameWithSumReprojError, Result, UndistortedPixel,
25};
26
/// A calibrated multi-camera system for 3D computer vision applications.
///
/// This structure manages a collection of cameras with known intrinsic and extrinsic
/// parameters, providing high-level operations for 3D reconstruction, triangulation,
/// and geometric analysis. It's the primary interface for multi-view geometry
/// operations in the Braid system.
///
/// # Core Capabilities
///
/// - **3D Triangulation**: Reconstruct 3D points from 2D observations across cameras
/// - **Reprojection Analysis**: Compute and analyze reprojection errors for quality assessment
/// - **Geometric Validation**: Verify camera calibration quality and detect issues
/// - **Format Conversion**: Import/export from various camera system formats (PyMVG, etc.)
///
/// # Mathematical Foundation
///
/// The system operates on the principle that multiple cameras observing the same
/// 3D point provide redundant information that can be used to:
///
/// 1. **Triangulate** the 3D position via geometric intersection of viewing rays
/// 2. **Validate** the reconstruction by reprojecting back to all cameras
/// 3. **Optimize** camera parameters through bundle adjustment
///
/// # Camera Naming
///
/// Cameras are identified by string names within the system. This allows for
/// flexible camera management and easy association of observations with specific cameras.
///
/// # Example
///
/// ```rust
/// use braid_mvg::{Camera, MultiCameraSystem, PointWorldFrame, extrinsics, make_default_intrinsics, UndistortedPixel};
/// use std::collections::BTreeMap;
/// use nalgebra::{Point3, Point2};
///
/// // Create cameras
/// let camera1 = Camera::new(640, 480,
///     extrinsics::make_default_extrinsics::<f64>(),
///     make_default_intrinsics::<f64>())?;
/// let camera2 = Camera::new(640, 480,
///     extrinsics::make_default_extrinsics::<f64>(),
///     make_default_intrinsics::<f64>())?;
///
/// // Build multi-camera system
/// let mut cameras = BTreeMap::new();
/// cameras.insert("cam1".to_string(), camera1);
/// cameras.insert("cam2".to_string(), camera2);
/// let system = MultiCameraSystem::new(cameras);
///
/// // Create observations for triangulation as slice of tuples
/// let observations = vec![
///     ("cam1".to_string(), UndistortedPixel { coords: Point2::new(320.0, 240.0) }),
///     ("cam2".to_string(), UndistortedPixel { coords: Point2::new(320.0, 240.0) }),
/// ];
///
/// // Use for 3D triangulation
/// if let Ok(point_3d) = system.find3d(&observations) {
///     println!("Reconstructed 3D point: {:?}", point_3d.coords);
/// }
/// # Ok::<(), braid_mvg::MvgError>(())
/// ```
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MultiCameraSystem<R: RealField + Serialize + Copy> {
    /// Calibrated cameras indexed by their unique name (name-sorted order).
    cams_by_name: BTreeMap<String, Camera<R>>,
    /// Optional free-form description of the system (e.g. calibration provenance).
    comment: Option<String>,
}
93
94impl<R> MultiCameraSystem<R>
95where
96 R: RealField + Serialize + DeserializeOwned + Default + Copy,
97{
98 /// Export the camera system to PyMVG format via a writer.
99 ///
100 /// This method serializes the multi-camera system to PyMVG JSON format,
101 /// writing the result to the provided writer.
102 ///
103 /// # Arguments
104 ///
105 /// * `writer` - Writer to output the PyMVG JSON data
106 ///
107 /// # Returns
108 ///
109 /// `Ok(())` on success, or [`MvgError`] if serialization fails.
110 pub fn to_pymvg_writer<W: std::io::Write>(&self, writer: &mut W) -> Result<()> {
111 let sys = self.to_pymvg()?;
112 serde_json::to_writer(writer, &sys)?;
113 Ok(())
114 }
115 /// Create a multi-camera system from PyMVG JSON format.
116 ///
117 /// This constructor deserializes a multi-camera system from PyMVG JSON format,
118 /// reading from the provided reader.
119 ///
120 /// # Arguments
121 ///
122 /// * `reader` - Reader containing PyMVG JSON data
123 ///
124 /// # Returns
125 ///
126 /// A new [`MultiCameraSystem`] instance, or [`MvgError`] if parsing fails.
127 pub fn from_pymvg_json<Rd: Read>(reader: Rd) -> Result<Self> {
128 let pymvg_system: PymvgMultiCameraSystemV1<R> = serde_json::from_reader(reader)?;
129 MultiCameraSystem::from_pymvg(&pymvg_system)
130 }
131}
132
133impl<R: RealField + Default + Serialize + Copy> MultiCameraSystem<R> {
134 /// Create a new multi-camera system from a collection of cameras.
135 ///
136 /// # Arguments
137 ///
138 /// * `cams_by_name` - Map of camera names to [`Camera`] instances
139 ///
140 /// # Returns
141 ///
142 /// A new [`MultiCameraSystem`] instance
143 pub fn new(cams_by_name: BTreeMap<String, Camera<R>>) -> Self {
144 Self::new_inner(cams_by_name, None)
145 }
146
    /// Get an optional comment describing this camera system.
    ///
    /// # Returns
    ///
    /// `Some(&String)` if a descriptive comment was attached at construction
    /// time (see [`MultiCameraSystem::new_with_comment`]), otherwise `None`.
    #[inline]
    pub fn comment(&self) -> Option<&String> {
        self.comment.as_ref()
    }
156
    /// Get the collection of cameras in this system.
    ///
    /// # Returns
    ///
    /// Reference to the map of camera names to [`Camera`] instances, ordered
    /// by name.
    #[inline]
    pub fn cams_by_name(&self) -> &BTreeMap<String, Camera<R>> {
        &self.cams_by_name
    }
162
163 /// Create a new multi-camera system with an optional comment.
164 ///
165 /// # Arguments
166 ///
167 /// * `cams_by_name` - Map of camera names to [`Camera`] instances
168 /// * `comment` - Descriptive comment for the camera system
169 ///
170 /// # Returns
171 ///
172 /// A new [`MultiCameraSystem`] instance
173 pub fn new_with_comment(cams_by_name: BTreeMap<String, Camera<R>>, comment: String) -> Self {
174 Self::new_inner(cams_by_name, Some(comment))
175 }
176
177 /// Internal constructor for creating a multi-camera system.
178 ///
179 /// # Arguments
180 ///
181 /// * `cams_by_name` - Map of camera names to [`Camera`] instances
182 /// * `comment` - Optional descriptive comment
183 ///
184 /// # Returns
185 ///
186 /// A new [`MultiCameraSystem`] instance
187 pub fn new_inner(cams_by_name: BTreeMap<String, Camera<R>>, comment: Option<String>) -> Self {
188 Self {
189 cams_by_name,
190 comment,
191 }
192 }
193
    /// Get a camera by name.
    ///
    /// # Arguments
    ///
    /// * `name` - Name under which the camera was registered
    ///
    /// # Returns
    ///
    /// `Some(&Camera)` when a camera with that name exists in this system,
    /// otherwise `None`
    #[inline]
    pub fn cam_by_name(&self, name: &str) -> Option<&Camera<R>> {
        self.cams_by_name.get(name)
    }
207
208 /// Create a multi-camera system from a PyMVG data structure.
209 ///
210 /// # Arguments
211 ///
212 /// * `pymvg_system` - PyMVG multi-camera system data structure
213 ///
214 /// # Returns
215 ///
216 /// A new [`MultiCameraSystem`] instance, or [`MvgError`] if conversion fails
217 pub fn from_pymvg(pymvg_system: &PymvgMultiCameraSystemV1<R>) -> Result<Self> {
218 let mut cams = BTreeMap::new();
219 if pymvg_system.__pymvg_file_version__ != "1.0" {
220 return Err(MvgError::UnsupportedVersion);
221 }
222 for pymvg_cam in pymvg_system.camera_system.iter() {
223 let (name, cam) = Camera::from_pymvg(pymvg_cam)?;
224 cams.insert(name, cam);
225 }
226 Ok(Self::new(cams))
227 }
228
229 /// Convert this multi-camera system to PyMVG format.
230 ///
231 /// This method converts the camera system to PyMVG data structure format
232 /// for interoperability with PyMVG library and JSON serialization.
233 ///
234 /// # Returns
235 ///
236 /// A [`PymvgMultiCameraSystemV1`] structure, or [`MvgError`] if conversion fails
237 ///
238 /// # Example
239 ///
240 /// ```rust
241 /// use braid_mvg::{MultiCameraSystem, Camera, extrinsics, make_default_intrinsics};
242 /// use std::collections::BTreeMap;
243 ///
244 /// let mut cameras = BTreeMap::new();
245 /// cameras.insert("cam1".to_string(), Camera::new(640, 480,
246 /// extrinsics::make_default_extrinsics::<f64>(),
247 /// make_default_intrinsics::<f64>())?);
248 /// let system = MultiCameraSystem::new(cameras);
249 /// let pymvg_system = system.to_pymvg()?;
250 /// # Ok::<(), braid_mvg::MvgError>(())
251 /// ```
252 pub fn to_pymvg(&self) -> Result<PymvgMultiCameraSystemV1<R>> {
253 Ok(PymvgMultiCameraSystemV1 {
254 __pymvg_file_version__: "1.0".to_string(),
255 camera_system: self
256 .cams_by_name
257 .iter()
258 .map(|(name, cam)| cam.to_pymvg(name))
259 .collect(),
260 })
261 }
262
263 /// Find reprojection error of 3D coordinate into pixel coordinates.
264 ///
265 /// Note that this returns the reprojection distance of the *undistorted*
266 /// pixels.
267 pub fn get_reprojection_undistorted_dists(
268 &self,
269 points: &[(String, UndistortedPixel<R>)],
270 this_3d_pt: &PointWorldFrame<R>,
271 ) -> Result<Vec<R>> {
272 let this_dists = points
273 .iter()
274 .map(|(cam_name, orig)| {
275 Ok(na::distance(
276 &self
277 .cams_by_name
278 .get(cam_name)
279 .ok_or(MvgError::UnknownCamera)?
280 .project_3d_to_pixel(this_3d_pt)
281 .coords,
282 &orig.coords,
283 ))
284 })
285 .collect::<Result<Vec<R>>>()?;
286 Ok(this_dists)
287 }
288
289 /// Find 3D coordinate and cumulative reprojection distance using pixel coordinates from cameras
290 pub fn find3d_and_cum_reproj_dist(
291 &self,
292 points: &[(String, UndistortedPixel<R>)],
293 ) -> Result<PointWorldFrameWithSumReprojError<R>> {
294 let point = self.find3d(points)?;
295 let reproj_dists = self.get_reprojection_undistorted_dists(points, &point)?;
296 Ok(PointWorldFrameWithSumReprojError::new(point, reproj_dists))
297 }
298
299 /// Find 3D coordinate using pixel coordinates from cameras
300 pub fn find3d(&self, points: &[(String, UndistortedPixel<R>)]) -> Result<PointWorldFrame<R>> {
301 if points.len() < 2 {
302 return Err(MvgError::NotEnoughPoints);
303 }
304
305 self.find3d_air(points)
306 }
307
308 fn find3d_air(&self, points: &[(String, UndistortedPixel<R>)]) -> Result<PointWorldFrame<R>> {
309 let mut rays: Vec<Ray<WorldFrame, R>> = Vec::with_capacity(points.len());
310 for (name, xy) in points.iter() {
311 // Get camera.
312 let cam = self.cams_by_name.get(name).ok_or(MvgError::UnknownCamera)?;
313 // Get ray from point `xy` in camera coords.
314 let ray_cam = cam.intrinsics().undistorted_pixel_to_camera(&xy.into());
315 // Convert to world coords.
316 let ray = cam
317 .extrinsics()
318 .ray_camera_to_world(&ray_cam)
319 .to_single_ray();
320 rays.push(ray);
321 }
322
323 let coords = cam_geom::best_intersection_of_rays(&rays)?;
324 Ok(coords.into())
325 }
326
327 /// Apply a similarity transformation to all cameras in the system.
328 ///
329 /// This method applies the same similarity transformation (scale, rotation, translation)
330 /// to all cameras in the multi-camera system. This is commonly used for:
331 /// - Coordinate system alignment between different camera systems
332 /// - Scale recovery in structure-from-motion pipelines
333 /// - Aligning reconstructed coordinates with ground truth
334 ///
335 /// # Mathematical Details
336 ///
337 /// The transformation applies: `X' = s*R*X + t` to all camera positions and orientations.
338 ///
339 /// # Arguments
340 ///
341 /// * `s` - Uniform scale factor (must be positive)
342 /// * `rot` - 3×3 rotation matrix (must be orthogonal with determinant +1)
343 /// * `t` - 3×1 translation vector
344 ///
345 /// # Returns
346 ///
347 /// A new aligned [`MultiCameraSystem`], or [`MvgError`] if transformation fails
348 ///
349 /// # Errors
350 ///
351 /// Returns an error if:
352 /// - The rotation matrix is invalid
353 /// - The scale factor is non-positive
354 /// - Any camera transformation fails
355 ///
356 /// # Example
357 ///
358 /// ```rust
359 /// use braid_mvg::{MultiCameraSystem, Camera, extrinsics, make_default_intrinsics};
360 /// use nalgebra::{Matrix3, Vector3};
361 /// use std::collections::BTreeMap;
362 ///
363 /// let mut cameras = BTreeMap::new();
364 /// cameras.insert("cam1".to_string(), Camera::new(640, 480,
365 /// extrinsics::make_default_extrinsics::<f64>(),
366 /// make_default_intrinsics::<f64>())?);
367 /// let system = MultiCameraSystem::new(cameras);
368 ///
369 /// let scale = 2.0;
370 /// let rotation = Matrix3::identity();
371 /// let translation = Vector3::zeros();
372 /// let aligned_system = system.align(scale, rotation, translation)?;
373 /// # Ok::<(), braid_mvg::MvgError>(())
374 /// ```
375 pub fn align(&self, s: R, rot: Matrix3<R>, t: Vector3<R>) -> Result<Self> {
376 let comment = self.comment.clone();
377
378 let mut aligned = BTreeMap::new();
379
380 for (name, orig_cam) in self.cams_by_name.iter() {
381 let cam = orig_cam.align(s, rot, t)?;
382 aligned.insert(name.clone(), cam);
383 }
384
385 Ok(Self {
386 cams_by_name: aligned,
387 comment,
388 })
389 }
390}