1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
use crate::{Bearing, CameraPoint, CameraToCamera, Pose, WorldPoint, WorldToCamera};

/// This trait is for algorithms which allow you to triangulate a point from two or more observances.
/// Each observance is a [`WorldToCamera`] and a [`Bearing`].
pub trait TriangulatorObservances {
    /// Triangulates a [`WorldPoint`] from two or more observances.
    ///
    /// Each pair consists of the pose mapping the world frame into a camera's
    /// frame and the bearing at which that camera observed the point.
    ///
    /// Returns `None` if triangulation fails (e.g. insufficient or degenerate
    /// observances — exact failure conditions are implementation-defined).
    fn triangulate_observances<B: Bearing>(
        &self,
        pairs: impl IntoIterator<Item = (WorldToCamera, B)>,
    ) -> Option<WorldPoint>;
}

/// This trait allows you to take one relative pose from camera `A` to camera `B` and two bearings `a` and `b` from
/// their respective cameras to triangulate a point from the perspective of camera `A`.
pub trait TriangulatorRelative {
    /// Triangulates a [`CameraPoint`] in the frame of camera `A`.
    ///
    /// `relative_pose` maps camera `A`'s frame into camera `B`'s frame, while
    /// `a` and `b` are the bearings at which each respective camera observed
    /// the point.
    ///
    /// Returns `None` if triangulation fails.
    fn triangulate_relative<A: Bearing, B: Bearing>(
        &self,
        relative_pose: CameraToCamera,
        a: A,
        b: B,
    ) -> Option<CameraPoint>;
}

impl<T> TriangulatorRelative for T
where
    T: TriangulatorObservances,
{
    fn triangulate_relative<A: Bearing, B: Bearing>(
        &self,
        CameraToCamera(pose): CameraToCamera,
        a: A,
        b: B,
    ) -> Option<CameraPoint> {
        use core::iter::once;

        // We use the first camera as the "world".
        // The first pose maps the first camera to itself (the world).
        // The second pose maps the first camera (the world) to the second camera (the camera).
        // This is how we convert the `CameraToCamera` into a `WorldToCamera`.
        self.triangulate_observances(
            once((WorldToCamera::identity(), a.bearing()))
                .chain(once((WorldToCamera(pose), b.bearing()))),
        )
        .map(|p| CameraPoint(p.0))
    }
}