// cv_pinhole/lib.rs
//! This crate seamlessly plugs into `cv-core` and provides pinhole camera models with and without distortion correction.
//! It can be used to convert image coordinates into real 3d direction vectors (called bearings) pointing towards where
//! the light came from that hit that pixel. It can also be used to convert backwards from the 3d back to the 2d
//! using the `uncalibrate` method from the [`cv_core::CameraModel`] trait.
5
6#![no_std]
7
8#[cfg(feature = "alloc")]
9extern crate alloc;
10
11mod essential;
12
13pub use essential::*;
14
15use cv_core::nalgebra::{Matrix3, Point2, Point3, Vector2, Vector3};
16use cv_core::{
17 Bearing, CameraModel, CameraPoint, CameraToCamera, FeatureMatch, ImagePoint, KeyPoint, Pose,
18 Projective, TriangulatorRelative,
19};
20use derive_more::{AsMut, AsRef, Deref, DerefMut, From, Into};
21use num_traits::Float;
22
/// A point in normalized image coordinates. This keypoint has been corrected
/// for distortion and normalized based on the camera intrinsic matrix.
/// Please note that the intrinsic matrix accounts for the natural focal length
/// and any magnification to the image. Ultimately, the key points must be
/// represented by their position on the camera sensor and normalized to the
/// focal length of the camera.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct NormalizedKeyPoint(pub Point2<f64>);
31
32impl NormalizedKeyPoint {
33 /// Tries to convert the [`CameraPoint`] into a [`NormalizedKeyPoint`], but it may fail
34 /// in extreme conditions, in which case `None` is returned.
35 pub fn from_camera_point(point: CameraPoint) -> Option<Self> {
36 Point2::from_homogeneous(point.bearing_unnormalized()).map(Self)
37 }
38
39 /// Conceptually appends a `1.0` component to the normalized keypoint to create
40 /// a [`CameraPoint`] on the virtual image plane and then multiplies
41 /// the point by `depth`. This `z`/`depth` component must be the depth of
42 /// the keypoint in the direction the camera is pointing from the
43 /// camera's optical center.
44 ///
45 /// The `depth` is computed as the dot product of the unit camera norm
46 /// with the vector that represents the position delta of the point from
47 /// the camera.
48 pub fn with_depth(self, depth: f64) -> CameraPoint {
49 (self.coords * depth).push(depth).to_homogeneous().into()
50 }
51
52 /// Projects the keypoint out to the [`CameraPoint`] that is
53 /// `distance` away from the optical center of the camera. This
54 /// `distance` is defined as the norm of the vector that represents
55 /// the position delta of the point from the camera.
56 pub fn with_distance(self, distance: f64) -> CameraPoint {
57 (distance * *self.bearing()).to_homogeneous().into()
58 }
59
60 /// Get the virtual image point as a [`Point3`].
61 ///
62 /// The virtual image point is the point that is formed on the virtual
63 /// image plane at a depth 1.0 in front of the camera.
64 pub fn virtual_image_point(self) -> Point3<f64> {
65 self.coords.push(1.0).into()
66 }
67}
68
69impl Bearing for NormalizedKeyPoint {
70 fn bearing_unnormalized(&self) -> Vector3<f64> {
71 self.0.coords.push(1.0)
72 }
73
74 fn from_bearing_vector(bearing: Vector3<f64>) -> Self {
75 Self((bearing.xy() / bearing.z).into())
76 }
77}
78
/// This contains intrinsic camera parameters as per
/// [this Wikipedia page](https://en.wikipedia.org/wiki/Camera_resectioning#Intrinsic_parameters).
///
/// For a high quality camera, this may be sufficient to normalize image coordinates.
/// Undistortion may also be necessary to normalize image coordinates.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct CameraIntrinsics {
    // Focal lengths along the x and y axes, in pixel units.
    pub focals: Vector2<f64>,
    // Pixel position of the principal point (where the optical axis meets the image).
    pub principal_point: Point2<f64>,
    // Skew coefficient between the x and y axes (0.0 for most cameras).
    pub skew: f64,
}
90
91impl CameraIntrinsics {
92 /// Creates camera intrinsics that would create an identity intrinsic matrix.
93 /// This would imply that the pixel positions have an origin at `0,0`,
94 /// the pixel distance unit is the focal length, pixels are square,
95 /// and there is no skew.
96 pub fn identity() -> Self {
97 Self {
98 focals: Vector2::new(1.0, 1.0),
99 skew: 0.0,
100 principal_point: Point2::new(0.0, 0.0),
101 }
102 }
103
104 pub fn focals(self, focals: Vector2<f64>) -> Self {
105 Self { focals, ..self }
106 }
107
108 pub fn focal(self, focal: f64) -> Self {
109 Self {
110 focals: Vector2::new(focal, focal),
111 ..self
112 }
113 }
114
115 pub fn principal_point(self, principal_point: Point2<f64>) -> Self {
116 Self {
117 principal_point,
118 ..self
119 }
120 }
121
122 pub fn skew(self, skew: f64) -> Self {
123 Self { skew, ..self }
124 }
125
126 #[rustfmt::skip]
127 pub fn matrix(&self) -> Matrix3<f64> {
128 Matrix3::new(
129 self.focals.x, self.skew, self.principal_point.x,
130 0.0, self.focals.y, self.principal_point.y,
131 0.0, 0.0, 1.0,
132 )
133 }
134}
135
136impl CameraModel for CameraIntrinsics {
137 type Projection = NormalizedKeyPoint;
138
139 /// Takes in a point from an image in pixel coordinates and
140 /// converts it to a [`NormalizedKeyPoint`].
141 ///
142 /// ```
143 /// use cv_core::{KeyPoint, CameraModel};
144 /// use cv_pinhole::{NormalizedKeyPoint, CameraIntrinsics};
145 /// use cv_core::nalgebra::{Vector2, Vector3, Point2};
146 /// let intrinsics = CameraIntrinsics {
147 /// focals: Vector2::new(800.0, 900.0),
148 /// principal_point: Point2::new(500.0, 600.0),
149 /// skew: 1.7,
150 /// };
151 /// let kp = KeyPoint(Point2::new(471.0, 322.0));
152 /// let nkp = intrinsics.calibrate(kp);
153 /// let calibration_matrix = intrinsics.matrix();
154 /// let distance = (kp.to_homogeneous() - calibration_matrix * nkp.to_homogeneous()).norm();
155 /// assert!(distance < 0.1);
156 /// ```
157 fn calibrate<P>(&self, point: P) -> NormalizedKeyPoint
158 where
159 P: ImagePoint,
160 {
161 let centered = point.image_point() - self.principal_point;
162 let y = centered.y / self.focals.y;
163 let x = (centered.x - self.skew * y) / self.focals.x;
164 NormalizedKeyPoint(Point2::new(x, y))
165 }
166
167 /// Converts a [`NormalizedKeyPoint`] back into pixel coordinates.
168 ///
169 /// ```
170 /// use cv_core::{KeyPoint, CameraModel};
171 /// use cv_pinhole::{NormalizedKeyPoint, CameraIntrinsics};
172 /// use cv_core::nalgebra::{Vector2, Vector3, Point2};
173 /// let intrinsics = CameraIntrinsics {
174 /// focals: Vector2::new(800.0, 900.0),
175 /// principal_point: Point2::new(500.0, 600.0),
176 /// skew: 1.7,
177 /// };
178 /// let kp = KeyPoint(Point2::new(471.0, 322.0));
179 /// let nkp = intrinsics.calibrate(kp);
180 /// let ukp = intrinsics.uncalibrate(nkp);
181 /// assert!((kp.0 - ukp.0).norm() < 1e-6);
182 /// ```
183 fn uncalibrate(&self, projection: NormalizedKeyPoint) -> KeyPoint {
184 let y = projection.y * self.focals.y;
185 let x = projection.x * self.focals.x + self.skew * projection.y;
186 let centered = Point2::new(x, y);
187 KeyPoint(centered + self.principal_point.coords)
188 }
189}
190
/// This contains intrinsic camera parameters as per
/// [this Wikipedia page](https://en.wikipedia.org/wiki/Camera_resectioning#Intrinsic_parameters).
///
/// This also performs undistortion by applying one radial distortion coefficient (K1).
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct CameraIntrinsicsK1Distortion {
    // The distortion-free pinhole intrinsics (focals, principal point, skew).
    pub simple_intrinsics: CameraIntrinsics,
    // The K1 radial distortion coefficient.
    pub k1: f64,
}
200
201impl CameraIntrinsicsK1Distortion {
202 /// Creates the camera intrinsics using simple intrinsics with no distortion and a K1 distortion coefficient.
203 pub fn new(simple_intrinsics: CameraIntrinsics, k1: f64) -> Self {
204 Self {
205 simple_intrinsics,
206 k1,
207 }
208 }
209}
210
211impl CameraModel for CameraIntrinsicsK1Distortion {
212 type Projection = NormalizedKeyPoint;
213
214 /// Takes in a point from an image in pixel coordinates and
215 /// converts it to a [`NormalizedKeyPoint`].
216 ///
217 /// ```
218 /// use cv_core::{KeyPoint, CameraModel};
219 /// use cv_pinhole::{NormalizedKeyPoint, CameraIntrinsics, CameraIntrinsicsK1Distortion};
220 /// use cv_core::nalgebra::{Vector2, Vector3, Point2};
221 /// let intrinsics = CameraIntrinsics {
222 /// focals: Vector2::new(800.0, 900.0),
223 /// principal_point: Point2::new(500.0, 600.0),
224 /// skew: 1.7,
225 /// };
226 /// let k1 = -0.164624;
227 /// let intrinsics = CameraIntrinsicsK1Distortion::new(
228 /// intrinsics,
229 /// k1,
230 /// );
231 /// let kp = KeyPoint(Point2::new(471.0, 322.0));
232 /// let nkp = intrinsics.calibrate(kp);
233 /// let simple_nkp = intrinsics.simple_intrinsics.calibrate(kp);
234 /// let distance = (nkp.0.coords - (simple_nkp.0.coords / (1.0 + k1 * simple_nkp.0.coords.norm_squared()))).norm();
235 /// assert!(distance < 0.1);
236 /// ```
237 fn calibrate<P>(&self, point: P) -> NormalizedKeyPoint
238 where
239 P: ImagePoint,
240 {
241 let NormalizedKeyPoint(distorted) = self.simple_intrinsics.calibrate(point);
242 let r2 = distorted.coords.norm_squared();
243 let undistorted = (distorted.coords / (1.0 + self.k1 * r2)).into();
244
245 NormalizedKeyPoint(undistorted)
246 }
247
248 /// Converts a [`NormalizedKeyPoint`] back into pixel coordinates.
249 ///
250 /// ```
251 /// use cv_core::{KeyPoint, CameraModel};
252 /// use cv_pinhole::{NormalizedKeyPoint, CameraIntrinsics, CameraIntrinsicsK1Distortion};
253 /// use cv_core::nalgebra::{Vector2, Vector3, Point2};
254 /// let intrinsics = CameraIntrinsics {
255 /// focals: Vector2::new(800.0, 900.0),
256 /// principal_point: Point2::new(500.0, 600.0),
257 /// skew: 1.7,
258 /// };
259 /// let intrinsics = CameraIntrinsicsK1Distortion::new(
260 /// intrinsics,
261 /// -0.164624,
262 /// );
263 /// let kp = KeyPoint(Point2::new(471.0, 322.0));
264 /// let nkp = intrinsics.calibrate(kp);
265 /// let ukp = intrinsics.uncalibrate(nkp);
266 /// assert!((kp.0 - ukp.0).norm() < 1e-6, "{:?}", (kp.0 - ukp.0).norm());
267 /// ```
268 fn uncalibrate(&self, projection: NormalizedKeyPoint) -> KeyPoint {
269 let NormalizedKeyPoint(undistorted) = projection;
270 // This was not easy to compute, but you can set up a quadratic to solve
271 // for r^2 with the undistorted keypoint. This is the result.
272 let u2 = undistorted.coords.norm_squared();
273 // This is actually r^2 * k1.
274 let r2_mul_k1 = -(2.0 * self.k1 * u2 + Float::sqrt(1.0 - 4.0 * self.k1 * u2) - 1.0)
275 / (2.0 * self.k1 * u2);
276 self.simple_intrinsics.uncalibrate(NormalizedKeyPoint(
277 (undistorted.coords * (1.0 + r2_mul_k1)).into(),
278 ))
279 }
280}
281
/// This contains basic camera specifications that one could find on a
/// manufacturer's website. This only contains parameters that cannot
/// be changed about a camera. The focal length is not included since
/// that can typically be changed and images can also be magnified.
///
/// All distance units should be in meters to avoid conversion issues.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct CameraSpecification {
    // Image resolution in pixels (width, height).
    pub pixels: Vector2<usize>,
    // Physical dimensions of a single pixel in meters (width, height).
    pub pixel_dimensions: Vector2<f64>,
}
293
294impl CameraSpecification {
295 /// Creates a [`CameraSpecification`] using the sensor dimensions.
296 pub fn from_sensor(pixels: Vector2<usize>, sensor_dimensions: Vector2<f64>) -> Self {
297 Self {
298 pixels,
299 pixel_dimensions: Vector2::new(
300 sensor_dimensions.x / pixels.x as f64,
301 sensor_dimensions.y / pixels.y as f64,
302 ),
303 }
304 }
305
306 /// Creates a [`CameraSpecification`] using the sensor width assuming a square pixel.
307 pub fn from_sensor_square(pixels: Vector2<usize>, sensor_width: f64) -> Self {
308 let pixel_width = sensor_width / pixels.x as f64;
309 Self {
310 pixels,
311 pixel_dimensions: Vector2::new(pixel_width, pixel_width),
312 }
313 }
314
315 /// Combines the [`CameraSpecification`] with a focal length to create a [`CameraIntrinsics`].
316 ///
317 /// This assumes square pixels and a perfectly centered principal point.
318 pub fn intrinsics_centered(&self, focal: f64) -> CameraIntrinsics {
319 CameraIntrinsics::identity()
320 .focal(focal)
321 .principal_point(self.pixel_dimensions.map(|p| p as f64 / 2.0 - 0.5).into())
322 }
323}
324
325/// Find the reprojection error in focal lengths of a feature match and a relative pose using the given triangulator.
326///
327/// If the feature match destructures as `FeatureMatch(a, b)`, then A is the camera of `a`, and B is the camera of `b`.
328/// The pose must transform the space of camera A into camera B. The triangulator will triangulate the 3d point from the
329/// perspective of camera A, and the pose will be used to transform the point into the perspective of camera B.
330///
331/// ```
332/// use cv_core::{CameraToCamera, CameraPoint, FeatureMatch, Pose};
333/// use cv_core::nalgebra::{Point3, IsometryMatrix3, Vector3, Rotation3};
334/// use cv_pinhole::NormalizedKeyPoint;
335/// // Create an arbitrary point in the space of camera A.
336/// let point_a = CameraPoint(Point3::new(0.4, -0.25, 5.0).to_homogeneous());
337/// // Create an arbitrary relative pose between two cameras A and B.
338/// let pose = CameraToCamera::from_parts(Vector3::new(0.1, 0.2, -0.5), Rotation3::identity());
339/// // Transform the point in camera A to camera B.
340/// let point_b = pose.transform(point_a);
341///
342/// // Convert the camera points to normalized image coordinates.
343/// let nkpa = NormalizedKeyPoint::from_camera_point(point_a).unwrap();
344/// let nkpb = NormalizedKeyPoint::from_camera_point(point_b).unwrap();
345///
346/// // Create a triangulator.
347/// let triangulator = cv_geom::MinSquaresTriangulator::new();
348///
349/// // Since the normalized keypoints were computed exactly, there should be no reprojection error.
350/// let errors = cv_pinhole::pose_reprojection_error(pose, FeatureMatch(nkpa, nkpb), triangulator).unwrap();
351/// let average_error = errors.iter().map(|v| v.norm()).sum::<f64>() * 0.5;
352/// assert!(average_error < 1e-6);
353/// ```
354pub fn pose_reprojection_error(
355 pose: CameraToCamera,
356 m: FeatureMatch<NormalizedKeyPoint>,
357 triangulator: impl TriangulatorRelative,
358) -> Option<[Vector2<f64>; 2]> {
359 let FeatureMatch(a, b) = m;
360 triangulator
361 .triangulate_relative(pose, a, b)
362 .and_then(|point_a| {
363 let reproject_a = NormalizedKeyPoint::from_camera_point(point_a)?;
364 let point_b = pose.transform(point_a);
365 let reproject_b = NormalizedKeyPoint::from_camera_point(point_b)?;
366 Some([a.0 - reproject_a.0, b.0 - reproject_b.0])
367 })
368}
369
370/// See [`pose_reprojection_error`].
371///
372/// This is a convenience function that simply finds the average reprojection error rather than all components.
373///
374/// ```
375/// use cv_core::{CameraToCamera, CameraPoint, FeatureMatch, Pose};
376/// use cv_core::nalgebra::{Point3, IsometryMatrix3, Vector3, Rotation3};
377/// use cv_pinhole::NormalizedKeyPoint;
378/// // Create an arbitrary point in the space of camera A.
379/// let point_a = CameraPoint(Point3::new(0.4, -0.25, 5.0).to_homogeneous());
380/// // Create an arbitrary relative pose between two cameras A and B.
381/// let pose = CameraToCamera::from_parts(Vector3::new(0.1, 0.2, -0.5), Rotation3::identity());
382/// // Transform the point in camera A to camera B.
383/// let point_b = pose.transform(point_a);
384///
385/// // Convert the camera points to normalized image coordinates.
386/// let nkpa = NormalizedKeyPoint::from_camera_point(point_a).unwrap();
387/// let nkpb = NormalizedKeyPoint::from_camera_point(point_b).unwrap();
388///
389/// // Create a triangulator.
390/// let triangulator = cv_geom::MinSquaresTriangulator::new();
391///
392/// // Since the normalized keypoints were computed exactly, there should be no reprojection error.
393/// let average_error = cv_pinhole::average_pose_reprojection_error(pose, FeatureMatch(nkpa, nkpb), triangulator).unwrap();
394/// assert!(average_error < 1e-6);
395/// ```
396pub fn average_pose_reprojection_error(
397 pose: CameraToCamera,
398 m: FeatureMatch<NormalizedKeyPoint>,
399 triangulator: impl TriangulatorRelative,
400) -> Option<f64> {
401 pose_reprojection_error(pose, m, triangulator)
402 .map(|errors| errors.iter().map(|v| v.norm()).sum::<f64>() * 0.5)
403}