use crate::{CameraPoint, KeyPointWorldMatch, KeyPointsMatch, NormalizedKeyPoint, WorldPoint};
use core::cmp::Ordering;
use derive_more::{AsMut, AsRef, Deref, DerefMut, From, Into};
use nalgebra::{
    dimension::{U2, U3, U7},
    Isometry3, Matrix3, Matrix3x2, MatrixMN, Quaternion, Rotation3, Translation3, UnitQuaternion,
    Vector2, Vector3, Vector4, VectorN, SVD,
};
use sample_consensus::Model;

/// This contains a world pose, which is a pose of the world relative to the camera.
/// This maps [`WorldPoint`] into [`CameraPoint`], changing an absolute position into
/// a vector relative to the camera.
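///
/// A minimal sketch of that mapping (hedged: it assumes [`WorldPoint`] wraps a
/// `nalgebra` `Point3<f64>`, as it is used in this module):
///
/// ```no_build,no_run
/// use cv_core::{WorldPose, WorldPoint};
/// use cv_core::nalgebra::{Isometry3, Point3, UnitQuaternion, Vector3};
///
/// // A pose of the world relative to the camera.
/// let pose = WorldPose(Isometry3::from_parts(
///     Vector3::new(0.0, 0.0, 2.0).into(),
///     UnitQuaternion::identity(),
/// ));
/// // An absolute position in the world.
/// let world = WorldPoint(Point3::new(0.5, -0.5, 1.0));
/// // The same point relative to the camera (its `z` is the depth) ...
/// let camera = pose.transform(world);
/// // ... and its projection as a normalized keypoint.
/// let keypoint = pose.project(world);
/// ```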
#[derive(Debug, Clone, Copy, PartialEq, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct WorldPose(pub Isometry3<f64>);

impl Model<KeyPointWorldMatch> for WorldPose {
    fn residual(&self, data: &KeyPointWorldMatch) -> f32 {
        let WorldPose(iso) = *self;
        let KeyPointWorldMatch(image, world) = *data;

        // Transform the world point into camera space and compare its bearing against
        // the observed keypoint's bearing; the residual is `1 - cos(angle)` between them.
        let new_bearing = (iso * world.0).coords.normalize();
        let bearing_vector = image.to_homogeneous().normalize();
        (1.0 - bearing_vector.dot(&new_bearing)) as f32
    }
}

impl WorldPose {
    /// Computes the difference between the image keypoint and the projected keypoint.
    pub fn projection_error(&self, correspondence: KeyPointWorldMatch) -> Vector2<f64> {
        let KeyPointWorldMatch(NormalizedKeyPoint(image), world) = correspondence;
        let NormalizedKeyPoint(projection) = self.project(world);
        image - projection
    }

    /// Projects the `WorldPoint` onto the camera as a `NormalizedKeyPoint`.
    pub fn project(&self, point: WorldPoint) -> NormalizedKeyPoint {
        self.transform(point).into()
    }

    /// Projects the [`WorldPoint`] into camera space as a [`CameraPoint`].
    pub fn transform(&self, WorldPoint(point): WorldPoint) -> CameraPoint {
        let WorldPose(iso) = *self;
        CameraPoint(iso * point)
    }

    /// Computes the Jacobian of the projection with respect to the `WorldPose`.
    /// The Jacobian is in the format:
    /// ```no_build,no_run
    /// | dx/dtx dy/dtx |
    /// | dx/dty dy/dty |
    /// | dx/dtz dy/dtz |
    /// | dx/dqx dy/dqx |
    /// | dx/dqy dy/dqy |
    /// | dx/dqz dy/dqz |
    /// | dx/dqw dy/dqw |
    /// ```
    ///
    /// Where `t` refers to the translation vector and `q` refers to the unit quaternion,
    /// with rows ordered to match [`WorldPose::to_vec`].
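    ///
    /// A hedged sketch of how this Jacobian might feed a nonlinear least-squares
    /// step (not the crate's optimizer; it only illustrates the shapes and the
    /// pose-vector ordering, assuming bindings `pose: WorldPose`, `world: WorldPoint`,
    /// and `correspondence: KeyPointWorldMatch`):
    ///
    /// ```no_build,no_run
    /// // `j` is 7x2: rows are the pose parameters, columns are the x/y projection.
    /// let j = pose.projection_pose_jacobian(world);
    /// // `r` is the 2d reprojection error of the same correspondence.
    /// let r = pose.projection_error(correspondence);
    /// // Gradient of `0.5 * ||r||^2` with respect to the 7-parameter pose vector.
    /// let gradient = -(j * r);
    /// // A small damped step; `from_vec` renormalizes the quaternion part.
    /// let updated = WorldPose::from_vec(pose.to_vec() - 1e-3 * gradient);
    /// ```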
    #[rustfmt::skip]
    pub fn projection_pose_jacobian(&self, point: WorldPoint) -> MatrixMN<f64, U7, U2> {
        let q = self.0.rotation.quaternion().coords;
        // World point (input)
        let p = point.0.coords;
        // Camera point (intermediate output)
        let pc = (self.0 * point.0).coords;

        // dP/dT (Jacobian of camera point with respect to translation component)
        let dp_dt = Matrix3::identity();

        // d/dQv (Qv x (Qv x P))
        let qv_qv_p = Matrix3::new(
            q.y * p.y + q.z * p.z,          q.y * p.x - 2.0 * q.x * p.y,    q.z * p.x - 2.0 * q.x * p.z,
            q.x * p.y - 2.0 * q.y * p.x,    q.x * p.x + q.z * p.z,          q.z * p.y - 2.0 * q.y * p.z,
            q.x * p.z - 2.0 * q.z * p.x,    q.y * p.z - 2.0 * q.z * p.y,    q.x * p.x + q.y * p.y
        );
        // d/dQv (Qv x P)
        let qv_p = Matrix3::new(
            0.0,    -p.z,   p.y,
            p.z,    0.0,    -p.x,
            -p.y,   p.x,    0.0,
        );
        // dP/dQv = d/dQv (2 * Qs * Qv x P + 2 * Qv x (Qv x P))
        // Jacobian of camera point with respect to vector component of quaternion
        let dp_dqv = 2.0 * (q.w * qv_p + qv_qv_p);

        // dP/dQs = d/dQs (2 * Qs * Qv x P)
        // Jacobian of camera point with respect to real component of quaternion
        let dp_ds = 2.0 * q.xyz().cross(&p);

        // dP/dT,Q (Jacobian of 3d camera point with respect to translation and quaternion)
        let dp_dtq = MatrixMN::<f64, U7, U3>::from_rows(&[
            dp_dt.row(0).into(),
            dp_dt.row(1).into(),
            dp_dt.row(2).into(),
            dp_dqv.row(0).into(),
            dp_dqv.row(1).into(),
            dp_dqv.row(2).into(),
            dp_ds.transpose(),
        ]);

        // 1 / pz
        let pcz = pc.z.recip();
        // - 1 / pz^2
        let npcz2 = -(pcz * pcz);

        // dK/dp (Jacobian of normalized image coordinate with respect to 3d camera point)
        // The last row carries the -x/z^2 and -y/z^2 terms from differentiating
        // x/z and y/z with respect to z.
        let dk_dp = Matrix3x2::new(
            pcz,            0.0,
            0.0,            pcz,
            npcz2 * pc.x,   npcz2 * pc.y,
        );

        dp_dtq * dk_dp
    }

    /// Flattens the pose into a 7-vector ordered `[tx, ty, tz, qx, qy, qz, qw]`
    /// (translation first, then the quaternion coordinates).
    pub fn to_vec(&self) -> VectorN<f64, U7> {
        let Self(iso) = *self;
        let t = iso.translation.vector;
        let rc = iso.rotation.quaternion().coords;
        t.push(rc.x).push(rc.y).push(rc.z).push(rc.w)
    }

    /// Reconstructs a pose from the 7-vector produced by [`WorldPose::to_vec`].
    /// The quaternion part is renormalized, so a slightly perturbed vector still
    /// yields a valid pose.
    pub fn from_vec(v: VectorN<f64, U7>) -> Self {
        Self(Isometry3::from_parts(
            Translation3::from(Vector3::new(v[0], v[1], v[2])),
            UnitQuaternion::from_quaternion(Quaternion::from(Vector4::new(v[3], v[4], v[5], v[6]))),
        ))
    }
}

impl From<CameraPose> for WorldPose {
    fn from(camera: CameraPose) -> Self {
        Self(camera.inverse())
    }
}

/// This contains a camera pose, which is a pose of the camera relative to the world.
/// This transforms camera points (with depth as `z`) into world coordinates.
/// This also tells you where the camera is located and oriented in the world.
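///
/// A minimal sketch of that use (hedged: it assumes [`CameraPoint`] and [`WorldPoint`]
/// wrap `nalgebra` `Point3<f64>` values and that bindings `world_pose: WorldPose` and
/// `camera_point: CameraPoint` already exist; `CameraPose` itself only exposes the
/// inner isometry here):
///
/// ```no_build,no_run
/// use cv_core::{CameraPose, CameraPoint, WorldPoint};
///
/// // Invert a world pose to obtain the camera pose.
/// let camera_pose = CameraPose::from(world_pose);
/// // Apply the isometry directly to move a camera-space point into the world.
/// let CameraPoint(p_camera) = camera_point;
/// let p_world = WorldPoint(camera_pose.0 * p_camera);
/// ```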
#[derive(Debug, Clone, Copy, PartialEq, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct CameraPose(pub Isometry3<f64>);

impl From<WorldPose> for CameraPose {
    fn from(world: WorldPose) -> Self {
        Self(world.inverse())
    }
}

/// This contains a relative pose, which is a pose that transforms the [`CameraPoint`]
/// of one image into the corresponding [`CameraPoint`] of another image. This transforms
/// the point from the camera space of camera `A` to camera `B`.
///
/// Camera space for a given camera is defined as follows:
///
/// * Origin is the optical center
/// * Positive z axis is forwards
/// * Positive y axis is up
/// * Positive x axis is right
///
/// Note that this is a left-handed coordinate space.
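///
/// A minimal sketch of the convention (hedged; it only illustrates how
/// [`RelativeCameraPose::transform`] moves a point between the two camera spaces):
///
/// ```no_build,no_run
/// use cv_core::{RelativeCameraPose, CameraPoint};
/// use cv_core::nalgebra::{Isometry3, Point3, Translation3, UnitQuaternion};
///
/// // Camera `B` sits one unit to the right of camera `A` with no rotation, so the
/// // relative pose shifts points one unit in the negative x direction.
/// let pose = RelativeCameraPose(Isometry3::from_parts(
///     Translation3::new(-1.0, 0.0, 0.0),
///     UnitQuaternion::identity(),
/// ));
/// // A point one unit in front of camera `A` ...
/// let a = CameraPoint(Point3::new(0.0, 0.0, 1.0));
/// // ... appears one unit to the left in camera `B`'s space.
/// let b = pose.transform(a);
/// assert_eq!(b.0, Point3::new(-1.0, 0.0, 1.0));
/// ```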
#[derive(Debug, Clone, Copy, PartialEq, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct RelativeCameraPose(pub Isometry3<f64>);

impl RelativeCameraPose {
    /// The relative pose transforms the point in camera space from camera `A` to camera `B`.
    pub fn transform(&self, CameraPoint(point): CameraPoint) -> CameraPoint {
        let Self(iso) = *self;
        CameraPoint(iso * point)
    }

    /// Generates an essential matrix corresponding to this relative camera pose.
    ///
    /// If a point `a` is transformed using [`RelativeCameraPose::transform`] into
    /// a point `b`, then the essential matrix returned by this method will
    /// give a residual of approximately `0.0` when you call
    /// `essential.residual(&KeyPointsMatch(a.into(), b.into()))`.
    ///
    /// See the documentation of [`EssentialMatrix`] for more information.
    ///
    /// ```
    /// # use cv_core::{RelativeCameraPose, CameraPoint, KeyPointsMatch};
    /// # use cv_core::sample_consensus::Model;
    /// # use cv_core::nalgebra::{Vector3, Point3, Isometry3, UnitQuaternion};
    /// let pose = RelativeCameraPose(Isometry3::from_parts(
    ///     Vector3::new(0.3, 0.4, 0.5).into(),
    ///     UnitQuaternion::from_euler_angles(0.2, 0.3, 0.4),
    /// ));
    /// let a = CameraPoint(Point3::new(0.5, 0.5, 3.0));
    /// let b = pose.transform(a);
    /// assert!(pose.essential_matrix().residual(&KeyPointsMatch(a.into(), b.into())) < 1e-6);
    /// ```
    pub fn essential_matrix(&self) -> EssentialMatrix {
        EssentialMatrix(
            self.0.translation.vector.cross_matrix()
                * *self.0.rotation.to_rotation_matrix().matrix(),
        )
    }
}

/// This stores an essential matrix, which is satisfied by the following constraint:
///
/// transpose(x') * E * x = 0
///
/// Where `x'` and `x` are homogeneous normalized image coordinates. You can get a
/// homogeneous normalized image coordinate by appending `1.0` to a `NormalizedKeyPoint`.
///
/// The essential matrix embodies the epipolar constraint between two images. Given that light
/// travels in a perfectly straight line (it will not, but for short distances it mostly does)
/// and assuming a pinhole camera model, for any point on the camera sensor, the light source
/// for that point exists somewhere along a line extending out from the bearing (direction
/// of travel) of that point. For a normalized image coordinate, that bearing is `(x, y, 1.0)`.
/// That is because normalized image coordinates exist on a virtual plane (the sensor)
/// a distance `z = 1.0` from the optical center (the location of the focal point) where
/// the unit of distance is the focal length. In epipolar geometry, the point on the virtual
/// plane is called an epipole. The line through 3d space created by the bearing that travels
/// from the optical center through the epipole is called an epipolar line.
///
/// If you look at every point along an epipolar line, each one of those points would show
/// up as a different point on the camera sensor of another image (if they are in view).
/// If you traced every point along this epipolar line to where it would appear on the sensor
/// of the camera (projection of the 3d points into normalized image coordinates), then
/// the points would form a straight line. This means that you can draw epipolar lines
/// that do not pass through the optical center of an image on that image.
///
/// The essential matrix makes it possible to create a vector that is perpendicular to all
/// bearings that are formed from the epipolar line on the second image's sensor. This is
/// done by computing `E * x`, where `x` is a homogeneous normalized image coordinate
/// from the first image. The resulting vector then has a dot product with the
/// homogeneous coordinate of the corresponding point `x'` from the second image, which is equal to `0.0`.
/// This can be written as:
///
/// ```no_build,no_run
/// dot(transpose(E * x), x') = 0
/// ```
///
/// This can be re-written into the form given above:
///
/// ```no_build,no_run
/// transpose(x') * E * x = 0
/// ```
///
/// Where the first operation creates a perpendicular vector to the epipoles on the first image
/// and the second takes the dot product, which should result in `0.0`.
///
/// With an `EssentialMatrix`, you can retrieve the rotation and translation given
/// one normalized image coordinate and one bearing that is scaled to the depth
/// of the point relative to the current reconstruction. This kind of point can be computed
/// using [`WorldPose::transform`] to convert a [`WorldPoint`] to a [`CameraPoint`].
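///
/// A hedged sketch of the raw constraint (assuming `pose: RelativeCameraPose` and a
/// [`CameraPoint`] `a` with its transformed counterpart `b = pose.transform(a)`, as in
/// the [`RelativeCameraPose::essential_matrix`] example):
///
/// ```no_build,no_run
/// use cv_core::{EssentialMatrix, NormalizedKeyPoint};
///
/// let EssentialMatrix(e) = pose.essential_matrix();
/// // Project both camera points to normalized image coordinates.
/// let NormalizedKeyPoint(x) = a.into();   // first image
/// let NormalizedKeyPoint(xp) = b.into();  // second image
/// // transpose(x') * E * x should be approximately zero.
/// let constraint = (xp.to_homogeneous().transpose() * e * x.to_homogeneous())[0];
/// assert!(constraint.abs() < 1e-6);
/// ```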
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct EssentialMatrix(pub Matrix3<f64>);

impl Model<KeyPointsMatch> for EssentialMatrix {
    fn residual(&self, data: &KeyPointsMatch) -> f32 {
        let Self(mat) = *self;
        let KeyPointsMatch(NormalizedKeyPoint(a), NormalizedKeyPoint(b)) = *data;

        // The result is a 1x1 matrix which we must get element 0 from.
        (b.to_homogeneous().transpose() * mat * a.to_homogeneous())[0] as f32
    }
}

impl EssentialMatrix {
    /// Returns two possible rotations for the essential matrix along with a translation
    /// bearing of arbitrary length. The translation bearing is not yet in the correct
    /// space: the inverse rotation (transpose) must be multiplied by the translation
    /// bearing to bring it into the post-rotation frame. The translation's length
    /// and sign are unknown and must be solved for using a prior.
    ///
    /// `epsilon` is the threshold by which the singular value decomposition is considered
    /// complete. Making this smaller may improve the precision. It is recommended to
    /// set this to no higher than `1e-6`.
    ///
    /// `max_iterations` is the maximum number of iterations that singular value decomposition
    /// will run on this matrix. Use this in soft realtime systems to cap the execution time.
    /// A `max_iterations` of `0` may execute indefinitely and is not recommended.
    ///
    /// ```
    /// # use cv_core::RelativeCameraPose;
    /// # use cv_core::nalgebra::{Isometry3, UnitQuaternion, Vector3};
    /// let pose = RelativeCameraPose(Isometry3::from_parts(
    ///     Vector3::new(-0.8, 0.4, 0.5).into(),
    ///     UnitQuaternion::from_euler_angles(0.2, 0.3, 0.4),
    /// ));
    /// // Get the possible poses for the essential matrix created from `pose`.
    /// let (rot_a, rot_b, t) = pose.essential_matrix().possible_poses_unfixed_bearing(1e-6, 50).unwrap();
    /// // The translation bearing is compared by direction only below (sign and scale are arbitrary).
    /// let t_a = t;
    /// let t_b = t;
    /// // Convert rotations into quaternion form.
    /// let quat_a = UnitQuaternion::from(rot_a);
    /// let quat_b = UnitQuaternion::from(rot_b);
    /// // Compute the angular residual between each candidate rotation and the true rotation.
    /// let a_res = quat_a.rotation_to(&pose.rotation).angle();
    /// let b_res = quat_b.rotation_to(&pose.rotation).angle();
    /// let a_close = a_res < 1e-4;
    /// let b_close = b_res < 1e-4;
    /// // At least one rotation is correct.
    /// assert!(a_close || b_close);
    /// // The translation points in the same (or reverse) direction
    /// let a_res = 1.0 - t_a.normalize().dot(&pose.translation.vector.normalize()).abs();
    /// let b_res = 1.0 - t_b.normalize().dot(&pose.translation.vector.normalize()).abs();
    /// let a_close = a_res < 1e-4;
    /// let b_close = b_res < 1e-4;
    /// assert!(a_close || b_close);
    /// ```
    pub fn possible_poses_unfixed_bearing(
        &self,
        epsilon: f64,
        max_iterations: usize,
    ) -> Option<(Rotation3<f64>, Rotation3<f64>, Vector3<f64>)> {
        let Self(essential) = *self;

        // `W` from https://en.wikipedia.org/wiki/Essential_matrix#Finding_one_solution.
        let w = Matrix3::new(0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0);
        // Transpose of `W` from https://en.wikipedia.org/wiki/Essential_matrix#Finding_one_solution.
        let wt = w.transpose();

        // Perform SVD.
        let svd = SVD::try_new(essential, true, true, epsilon, max_iterations);
        // Extract only the U and V matrix from the SVD.
        let u_v_t = svd.map(|svd| {
            if let SVD {
                u: Some(u),
                v_t: Some(v_t),
                singular_values,
            } = svd
            {
                // Sort the singular vectors in U and V*.
                let mut sources: [usize; 3] = [0, 1, 2];
                sources.sort_unstable_by_key(|&ix| float_ord::FloatOrd(-singular_values[ix]));
                let mut sorted_u = Matrix3::zeros();
                let mut sorted_v_t = Matrix3::zeros();
                for (&ix, mut column) in sources.iter().zip(sorted_u.column_iter_mut()) {
                    column.copy_from(&u.column(ix));
                }
                for (&ix, mut row) in sources.iter().zip(sorted_v_t.row_iter_mut()) {
                    row.copy_from(&v_t.row(ix));
                }
                (sorted_u, sorted_v_t)
            } else {
                panic!("Didn't get U and V matrix in SVD");
            }
        });
        // Force the determinants to be positive so that `U` and `V*` are proper
        // rotations, which keeps `U * W * V*` and `U * W^T * V*` valid rotation matrices.
        // Flipping the last column of `U` (or the last row of `V*`) does not change the
        // essential matrix because the corresponding singular value is zero.
        // This is also what TheiaSfM does in essential_matrix_utils.cc.
        let u_v_t = u_v_t.map(|(mut u, mut v_t)| {
            // Last column of U is undetermined since d = (a a 0).
            if u.determinant() < 0.0 {
                for n in u.column_mut(2).iter_mut() {
                    *n *= -1.0;
                }
            }
            // Last row of Vt is undetermined since d = (a a 0).
            if v_t.determinant() < 0.0 {
                for n in v_t.row_mut(2).iter_mut() {
                    *n *= -1.0;
                }
            }
            // Return positive determinant U and V*.
            (u, v_t)
        });
        // Compute the possible rotations and the bearing with no normalization.
        u_v_t.map(|(u, v_t)| {
            (
                Rotation3::from_matrix_unchecked(u * w * v_t),
                Rotation3::from_matrix_unchecked(u * wt * v_t),
                u.column(2).into_owned(),
            )
        })
    }

    /// See [`EssentialMatrix::possible_poses_unfixed_bearing`].
    ///
    /// This returns only the two rotations that are possible.
    ///
    /// ```
    /// # use cv_core::RelativeCameraPose;
    /// # use cv_core::nalgebra::{Isometry3, UnitQuaternion, Vector3};
    /// let pose = RelativeCameraPose(Isometry3::from_parts(
    ///     Vector3::new(-0.8, 0.4, 0.5).into(),
    ///     UnitQuaternion::from_euler_angles(0.2, 0.3, 0.4),
    /// ));
    /// // Get the possible rotations for the essential matrix created from `pose`.
    /// let rbs = pose.essential_matrix().possible_rotations(1e-6, 50).unwrap();
    /// let one_correct = rbs.iter().any(|&rot| {
    ///     let angle_residual =
    ///         UnitQuaternion::from(rot).rotation_to(&pose.rotation).angle();
    ///     angle_residual < 1e-4
    /// });
    /// assert!(one_correct);
    /// ```
    pub fn possible_rotations(
        &self,
        epsilon: f64,
        max_iterations: usize,
    ) -> Option<[Rotation3<f64>; 2]> {
        self.possible_poses_unfixed_bearing(epsilon, max_iterations)
            .map(|(rot_a, rot_b, _)| [rot_a, rot_b])
    }

    /// See [`EssentialMatrix::possible_poses_unfixed_bearing`].
    ///
    /// This returns the rotations and their corresponding post-rotation translation bearing.
    ///
    /// ```
    /// # use cv_core::RelativeCameraPose;
    /// # use cv_core::nalgebra::{Isometry3, UnitQuaternion, Vector3};
    /// let pose = RelativeCameraPose(Isometry3::from_parts(
    ///     Vector3::new(-0.8, 0.4, 0.5).into(),
    ///     UnitQuaternion::from_euler_angles(0.2, 0.3, 0.4),
    /// ));
    /// // Get the possible poses for the essential matrix created from `pose`.
    /// let rbs = pose.essential_matrix().possible_rotations_and_bearings(1e-6, 50).unwrap();
    /// let one_correct = rbs.iter().any(|&(rot, bearing)| {
    ///     let angle_residual =
    ///         UnitQuaternion::from(rot).rotation_to(&pose.rotation).angle();
    ///     let translation_residual =
    ///         1.0 - bearing.normalize().dot(&pose.translation.vector.normalize()).abs();
    ///     angle_residual < 1e-4 && translation_residual < 1e-4
    /// });
    /// assert!(one_correct);
    /// ```
    pub fn possible_rotations_and_bearings(
        &self,
        epsilon: f64,
        max_iterations: usize,
    ) -> Option<[(Rotation3<f64>, Vector3<f64>); 2]> {
        self.possible_poses_unfixed_bearing(epsilon, max_iterations)
            .map(|(rot_a, rot_b, t)| [(rot_a, t), (rot_b, t)])
    }

    /// Return the [`RelativeCameraPose`] that transforms a [`CameraPoint`] of image
    /// `A` (source of `a`) to the corresponding [`CameraPoint`] of image `B` (source of `b`).
    /// This determines the average expected translation from the points themselves and
    /// whether the points agree with the rotation (points must be in front of the camera).
    /// The function takes an iterator containing tuples in the form `(a, b)`:
    ///
    /// * `a` - A [`CameraPoint`] from image `A`; its `z` component carries the depth
    /// * `b` - A [`NormalizedKeyPoint`] from image `B`
    ///
    /// `self` must satisfy the constraint:
    ///
    /// ```no_build,no_run
    /// transpose(homogeneous(b)) * E * homogeneous(a) = 0
    /// ```
    ///
    /// Also, `a` and `b` must be a correspondence.
    ///
    /// This will take the average translation over the entire iterator. This is done
    /// to smooth out noise and outliers (if present).
    ///
    /// `consensus_ratio` is the ratio of points which must be in front of the camera for the model
    /// to be accepted; if that ratio is not reached, `None` is returned.
    ///
    /// `max_iterations` is the maximum number of iterations that singular value decomposition
    /// will run on this matrix. Use this in soft realtime systems to cap the execution time.
    /// A `max_iterations` of `0` may execute indefinitely and is not recommended.
    ///
    /// `bearing_scale` is a function that is passed a translation bearing vector,
    /// an untranslated (but rotated) camera point, and a normalized key point
    /// where the actual point exists. It must return the scalar which the
    /// translation bearing vector must be multiplied by to get the actual translation.
    ///
    /// `correspondences` must provide an iterator of tuples containing a [`CameraPoint`] `a`
    /// from camera A (whose `z` component is the depth along the positive Z axis) and a
    /// [`NormalizedKeyPoint`] `b` from camera B.
    ///
    /// This does not communicate which points were outliers.
    ///
    /// ```
    /// # use cv_core::{RelativeCameraPose, CameraPoint};
    /// # use cv_core::nalgebra::{Isometry3, UnitQuaternion, Vector3, Point3};
    /// let pose = RelativeCameraPose(Isometry3::from_parts(
    ///     Vector3::new(-0.8, 0.4, 0.5).into(),
    ///     UnitQuaternion::from_euler_angles(0.2, 0.3, 0.4),
    /// ));
    /// let a_point = CameraPoint(Point3::new(-1.0, 2.0, 3.0));
    /// let b_point = pose.transform(a_point);
    /// // Get the possible poses for the essential matrix created from `pose`.
    /// let estimate_pose = pose.essential_matrix().solve_pose(0.5, 1e-6, 50,
    ///     cv_core::geom::triangulate_bearing_reproject,
    ///     std::iter::once((a_point, b_point.into())),
    /// ).unwrap();
    ///
    /// let angle_residual =
    ///     estimate_pose.rotation.rotation_to(&pose.rotation).angle();
    /// let translation_residual = (pose.translation.vector - estimate_pose.translation.vector).norm();
    /// assert!(angle_residual < 1e-4 && translation_residual < 1e-4, "{}, {}, {:?}, {:?}", angle_residual, translation_residual, pose.translation.vector, estimate_pose.translation.vector);
    /// ```
    pub fn solve_pose(
        &self,
        consensus_ratio: f64,
        epsilon: f64,
        max_iterations: usize,
        bearing_scale: impl Fn(Vector3<f64>, CameraPoint, NormalizedKeyPoint) -> f64,
        correspondences: impl Iterator<Item = (CameraPoint, NormalizedKeyPoint)>,
    ) -> Option<RelativeCameraPose> {
        // Get the possible rotations and the translation
        self.possible_rotations_and_bearings(epsilon, max_iterations)
            .and_then(|poses| {
                // Get the net translation scale of points that agree with a and b
                // in addition to the number of points that agree with a and b.
                let (ts, total) = correspondences.fold(
                    ([(0.0, 0usize); 2], 0usize),
                    |(mut ts, total), (a, b)| {
                        let trans_and_agree = |(rot, bearing)| {
                            // Triangulate the position of the CameraPoint of b.
                            // We know the precise 3d position of the a point relative
                            // to camera A, but we do not know the
                            // 3d point in relation to camera B since the translation of
                            // the point is unknown. We do know the direction of translation
                            // of the point. We know only the rotation of camera B
                            // relative to camera A and the epipolar point on camera B.
                            // What we will need to do is start by rotating the point in space.
                            // After rotating the point, we then need to solve for the translation
                            // that minimizes the reprojection error of the untranslated point as much
                            // as possible. See the documentation for reproject_along_translation
                            // to get more details on the process.
                            let untranslated = CameraPoint(rot * a.0);
                            let translation_scale = bearing_scale(bearing, untranslated, b);
                            // Now that we have the translation, we can just verify that the point
                            // is in front (z > 1.0) of the camera to see if it agrees with the model.
                            (
                                translation_scale,
                                translation_scale * bearing.z + untranslated.z > 1.0,
                            )
                        };

                        // Do it for both poses.
                        for (tn, &pose) in ts.iter_mut().zip(&poses) {
                            if let (scale, true) = trans_and_agree(pose) {
                                tn.0 += scale;
                                tn.1 += 1;
                            }
                        }

                        (ts, total + 1)
                    },
                );

                // Ensure that there is at least one point.
                if total == 0 {
                    return None;
                }

                // Ensure that the best one exceeds the consensus ratio.
                let best = core::cmp::max(ts[0].1, ts[1].1);
                if (best as f64 / total as f64) < consensus_ratio && best != 0 {
                    return None;
                }

                // TODO: Perhaps if it's closer than this we should assume the frame itself is an outlier.
                let (rot, trans) = match ts[0].1.cmp(&ts[1].1) {
                    Ordering::Equal => return None,
                    Ordering::Greater => (poses[0].0, poses[0].1 * ts[0].0 / ts[0].1 as f64),
                    Ordering::Less => (poses[1].0, poses[1].1 * ts[1].0 / ts[1].1 as f64),
                };
                Some(RelativeCameraPose(Isometry3::from_parts(
                    trans.into(),
                    UnitQuaternion::from_rotation_matrix(&rot),
                )))
            })
    }
}