camera-intrinsic-model 0.8.0

Camera Intrinsic Models
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
use super::generic_model::*;

use image::{DynamicImage, ImageBuffer};
use nalgebra as na;
use rayon::prelude::*;

/// Returns xmap and ymap for remaping
///
/// # Arguments
///
/// * `camera_model` - any camera model
/// * `projection_mat` - new camera matrix for the undistorted image
/// * `new_w_h` - new image width and height for the undistorted image
/// * `rotation` - Optional rotation, normally for stereo rectify
///
/// # Examples
///
/// ```
/// use camera_intrinsic_model::*;
/// let model = model_from_json("data/eucm0.json");
/// let new_w_h = 1024;
/// let p = model.estimate_new_camera_matrix_for_undistort(0.0, Some((new_w_h, new_w_h)));
/// let (xmap, ymap) = model.init_undistort_map(&p, (new_w_h, new_w_h), None);
/// // let remaped = remap(&img, &xmap, &ymap);
/// ```
/// Returns xmap and ymap for remapping
///
/// Each map entry holds, for a target (undistorted) pixel, the source-image
/// coordinate to sample from; pixels that cannot be projected are `f32::NAN`.
///
/// # Arguments
///
/// * `camera_model` - any camera model
/// * `projection_mat` - new camera matrix for the undistorted image
/// * `new_w_h` - new image width and height for the undistorted image
/// * `rotation` - Optional rotation, normally for stereo rectify
///
/// # Examples
///
/// ```
/// use camera_intrinsic_model::*;
/// let model = model_from_json("data/eucm0.json");
/// let new_w_h = 1024;
/// let p = model.estimate_new_camera_matrix_for_undistort(0.0, Some((new_w_h, new_w_h)));
/// let (xmap, ymap) = model.init_undistort_map(&p, (new_w_h, new_w_h), None);
/// // let remaped = remap(&img, &xmap, &ymap);
/// ```
pub fn init_undistort_map(
    camera_model: &dyn CameraModel<f64>,
    projection_mat: &na::Matrix3<f64>,
    new_w_h: (u32, u32),
    rotation: Option<na::Rotation3<f64>>,
) -> (na::DMatrix<f32>, na::DMatrix<f32>) {
    // `na::Matrix3` is statically 3x3, so the former runtime shape check could
    // never fail and has been removed.
    let fx = projection_mat[(0, 0)];
    let fy = projection_mat[(1, 1)];
    let cx = projection_mat[(0, 2)];
    let cy = projection_mat[(1, 2)];
    // Inverse of the optional rectifying rotation (identity when absent).
    let rmat_inv = rotation
        .unwrap_or_else(na::Rotation3::identity)
        .inverse()
        .matrix()
        .to_owned();
    // Back-project every target pixel to a ray in the source camera frame,
    // one row at a time in parallel, in raster (row-major pixel) order.
    let p3ds: Vec<na::Vector3<f64>> = (0..new_w_h.1)
        .into_par_iter()
        .flat_map(|y| {
            (0..new_w_h.0)
                .map(|x| {
                    rmat_inv * na::Vector3::new((x as f64 - cx) / fx, (y as f64 - cy) / fy, 1.0)
                })
                .collect::<Vec<na::Vector3<f64>>>()
        })
        .collect();
    let p2ds = camera_model.project(&p3ds);
    let (xvec, yvec): (Vec<f32>, Vec<f32>) = p2ds
        .iter()
        .map(|xy| {
            if let Some(xy) = xy {
                (xy[0] as f32, xy[1] as f32)
            } else {
                // Pixels with no valid projection are marked NaN.
                (f32::NAN, f32::NAN)
            }
        })
        .unzip();
    // NOTE(review): the data is generated in raster order but `DMatrix` stores
    // column-major, so only the maps' iteration order (which matches raster
    // order, as `compute_for_fast_remap` relies on) is meaningful; (row, col)
    // indexing does NOT correspond to pixel (y, x).
    let xmap = na::DMatrix::from_vec(new_w_h.1 as usize, new_w_h.0 as usize, xvec);
    let ymap = na::DMatrix::from_vec(new_w_h.1 as usize, new_w_h.0 as usize, yvec);
    (xmap, ymap)
}

/// Returns the bilinear weights of the floor (left/top) sample for `x` and `y`,
/// scaled by 256 so downstream code can interpolate with integer math.
///
/// # Panics
///
/// Panics if `x` or `y` is outside `[0, 65535]` (NaN included).
#[inline]
fn interpolate_bilinear_weight(x: f32, y: f32) -> (u16, u16) {
    if !(0.0..=65535.0).contains(&x) {
        panic!("x not in [0-65535]");
    }
    if !(0.0..=65535.0).contains(&y) {
        panic!("y not in [0-65535]");
    }
    const UPPER: f32 = 256.0;
    // Weight of the floor pixel is 1 - fractional_part, scaled to [0, 256].
    // Using `1.0 - fract` instead of `ceil - x`: for exact integer coordinates
    // `ceil(x) - x == 0`, which gave the floor pixel zero weight and shifted
    // the sample one full pixel towards (x0+1, y0+1). `1.0 - fract` correctly
    // yields 256 (full weight on the floor pixel) in that case.
    let x_weight = (UPPER * (1.0 - x.fract())) as u16;
    let y_weight = (UPPER * (1.0 - y.fract())) as u16;
    // 0-256
    (x_weight, y_weight)
}

/// Precomputes linear source offsets and integer bilinear weights for `fast_remap`.
///
/// For every target pixel this stores `y0 * src_width + x0` (the linear index of the
/// top-left source sample) together with the x/y interpolation weights scaled by 256,
/// letting the remap hot loop skip stride math and floating-point work entirely.
///
/// NOTE(review): `NaN` entries in the maps (unprojectable pixels produced by
/// `init_undistort_map`) will panic inside `interpolate_bilinear_weight` — confirm
/// the maps contain only finite values before calling this.
///
/// # Arguments
///
/// * `xmap` - The map of X coordinates (float).
/// * `ymap` - The map of Y coordinates (float).
/// * `src_width` - The width of the source image (required for linear offset calculation).
pub fn compute_for_fast_remap(
    xmap: &na::DMatrix<f32>,
    ymap: &na::DMatrix<f32>,
    src_width: usize,
) -> Vec<(usize, u16, u16)> {
    xmap.iter()
        .zip(ymap.iter())
        .map(|(&src_x, &src_y)| {
            let (x_weight, y_weight) = interpolate_bilinear_weight(src_x, src_y);
            let col = src_x.floor() as usize;
            let row = src_y.floor() as usize;
            (row * src_width + col, x_weight, y_weight)
        })
        .collect()
}

/// Reinterprets a `Vec<[u8; 3]>` as a flat `Vec<u8>` without copying.
///
/// `[u8; 3]` has alignment 1 (same as `u8`) and the allocation's byte size and
/// capacity are unchanged (`n * 3`), so the rebuilt `Vec<u8>` frees the buffer
/// with exactly the layout it was allocated with.
fn reinterpret_vec(input: Vec<[u8; 3]>) -> Vec<u8> {
    // ManuallyDrop transfers ownership of the buffer to the new Vec exactly
    // once (clearer than `mem::forget` and keeps `input` usable below).
    let mut input = std::mem::ManuallyDrop::new(input);
    let len = input.len() * 3;
    let capacity = input.capacity() * 3;
    // `as_mut_ptr` (not `as_ptr` cast to *mut) so the pointer carries write
    // provenance for the Vec built from it.
    let ptr = input.as_mut_ptr() as *mut u8;
    // SAFETY: `ptr` points to a live allocation of `capacity * 3` bytes with
    // alignment 1; `len <= capacity` holds because it did for `input`; the
    // original Vec is wrapped in ManuallyDrop, so the buffer is neither freed
    // twice nor leaked.
    unsafe { Vec::from_raw_parts(ptr, len, capacity) }
}

/// Performs fast image remapping using pre-calculated offsets and integer weights.
///
/// This function uses raw pointer access and bit-shifts to achieve high performance.
/// It supports `ImageLuma8` (Mono8), `ImageRgb8`, and `ImageLuma16`.
///
/// # Safety
///
/// This function uses `unsafe` raw pointer access (`get_unchecked`). The `xy_pos_weight_vec` MUST be computed
/// using `compute_for_fast_remap` with the CORRECT `src_width` corresponding to `img`. If the offsets in
/// `xy_pos_weight_vec` are out of bounds for the provided `img` buffer, undefined behavior (likely segfault) will occur.
/// Note each bilinear tap also reads one pixel to the right of / below the base
/// offset, so source coordinates on the right/bottom border read past the row or
/// buffer end — callers must keep the maps inside the interior (TODO confirm with callers).
///
/// # Arguments
///
/// * `img` - The source image.
/// * `new_w_h` - The dimensions (width, height) of the target image.
/// * `xy_pos_weight_vec` - The pre-calculated offsets and weights from `compute_for_fast_remap`.
///
/// # Panics
///
/// Panics for any pixel format other than mono8, mono16, or rgb8.
pub fn fast_remap(
    img: &DynamicImage,
    new_w_h: (u32, u32),
    xy_pos_weight_vec: &[(usize, u16, u16)],
) -> DynamicImage {
    match img {
        DynamicImage::ImageLuma8(image_buffer) => {
            DynamicImage::ImageLuma8(fast_remap_gray_u8(image_buffer, new_w_h, xy_pos_weight_vec))
        }
        DynamicImage::ImageRgb8(image_buffer) => {
            let src = image_buffer.as_raw();
            // Source row stride in bytes (3 interleaved channels per pixel).
            let src_stride = image_buffer.width() as usize * 3;

            let val: Vec<[u8; 3]> = xy_pos_weight_vec
                .par_iter()
                .map(|&(offset, xw0, yw0)| {
                    // `offset` is a pixel index; convert to a byte index.
                    let off3 = offset * 3;
                    // For RGB, p00 corresponds to [r, g, b] at offset
                    // We can read each channel.
                    // Or read as chunky.

                    // Weights are scaled by 256; (xw0, yw0) weight the
                    // left/top taps, (xw1, yw1) the right/bottom taps.
                    let xw1 = 256 - xw0 as u32;
                    let yw1 = 256 - yw0 as u32;
                    let xw0 = xw0 as u32;
                    let yw0 = yw0 as u32;

                    // Combined per-corner bilinear weights; the four sum to
                    // 65536, which the final `>> 16` divides back out.
                    let w00 = xw0 * yw0;
                    let w10 = xw1 * yw0;
                    let w01 = xw0 * yw1;
                    let w11 = xw1 * yw1;

                    let mut v = [0, 0, 0]; // Initialize with 0s

                    // SAFETY: per the function contract, `offset` (plus one
                    // pixel right/below) is in bounds for `src`.
                    unsafe {
                        // Channel 0
                        let p00 = *src.get_unchecked(off3) as u32;
                        let p10 = *src.get_unchecked(off3 + 3) as u32;
                        let p01 = *src.get_unchecked(off3 + src_stride) as u32;
                        let p11 = *src.get_unchecked(off3 + src_stride + 3) as u32;
                        v[0] = ((p00 * w00 + p10 * w10 + p01 * w01 + p11 * w11) >> 16) as u8;

                        // Channel 1
                        let p00 = *src.get_unchecked(off3 + 1) as u32;
                        let p10 = *src.get_unchecked(off3 + 4) as u32;
                        let p01 = *src.get_unchecked(off3 + src_stride + 1) as u32;
                        let p11 = *src.get_unchecked(off3 + src_stride + 4) as u32;
                        v[1] = ((p00 * w00 + p10 * w10 + p01 * w01 + p11 * w11) >> 16) as u8;

                        // Channel 2
                        let p00 = *src.get_unchecked(off3 + 2) as u32;
                        let p10 = *src.get_unchecked(off3 + 5) as u32;
                        let p01 = *src.get_unchecked(off3 + src_stride + 2) as u32;
                        let p11 = *src.get_unchecked(off3 + src_stride + 5) as u32;
                        v[2] = ((p00 * w00 + p10 * w10 + p01 * w01 + p11 * w11) >> 16) as u8;
                    }
                    v
                })
                .collect();
            // Reinterpret the chunky [u8; 3] buffer as flat bytes without copying.
            let img = ImageBuffer::from_vec(new_w_h.0, new_w_h.1, reinterpret_vec(val)).unwrap();
            DynamicImage::ImageRgb8(img)
        }
        DynamicImage::ImageLuma16(image_buffer) => DynamicImage::ImageLuma16(fast_remap_gray_u16(
            image_buffer,
            new_w_h,
            xy_pos_weight_vec,
        )),
        _ => panic!("Only mono8, mono16, and rgb8 support fast remap."),
    }
}

/// Generates a grayscale fast-remap function named `$name` for sample type `$ty`.
///
/// The generated function bilinearly interpolates each target pixel from four
/// neighbouring source samples using integer weights (each scaled by 256, so
/// their product carries a 65536 factor removed by the final `>> 16`).
macro_rules! fast_remap_gray {
    ($name:ident, $ty:ty) => {
        /// Bilinearly remaps a single-channel image using precomputed offsets/weights.
        ///
        /// # Safety
        ///
        /// Uses unchecked indexing: `xy_pos_weight_vec` must come from
        /// `compute_for_fast_remap` with the correct source width; see `fast_remap`.
        pub fn $name(
            image_buffer: &ImageBuffer<image::Luma<$ty>, Vec<$ty>>,
            new_w_h: (u32, u32),
            xy_pos_weight_vec: &[(usize, u16, u16)],
        ) -> ImageBuffer<image::Luma<$ty>, Vec<$ty>> {
            let src = image_buffer.as_raw();
            // Row stride in samples (one channel per pixel).
            let src_stride = image_buffer.width() as usize;

            let val: Vec<$ty> = xy_pos_weight_vec
                .par_iter()
                // SAFETY: per the contract above, `offset` plus one pixel
                // right/below is in bounds for `src`.
                .map(|&(offset, xw0, yw0)| unsafe {
                    // Four neighbouring taps: (x0,y0), (x0+1,y0), (x0,y0+1), (x0+1,y0+1).
                    let p00 = *src.get_unchecked(offset) as u32;
                    let p10 = *src.get_unchecked(offset + 1) as u32;
                    let p01 = *src.get_unchecked(offset + src_stride) as u32;
                    let p11 = *src.get_unchecked(offset + src_stride + 1) as u32;

                    // (xw0, yw0) weight the left/top taps; complements weight the rest.
                    let xw0 = xw0 as u32;
                    let yw0 = yw0 as u32;
                    let xw1 = 256 - xw0;
                    let yw1 = 256 - yw0;

                    ((p00 * xw0 * yw0 + p10 * xw1 * yw0 + p01 * xw0 * yw1 + p11 * xw1 * yw1) >> 16)
                        as $ty
                })
                .collect();

            ImageBuffer::from_vec(new_w_h.0, new_w_h.1, val).unwrap()
        }
    };
}

fast_remap_gray!(fast_remap_gray_u8, u8);
fast_remap_gray!(fast_remap_gray_u16, u16);

/// Estimates a new pinhole camera matrix for undistorting images.
///
/// Unprojects the midpoints of the four image edges to measure the field of
/// view on the z = 1 plane, then fits a focal length and principal point that
/// map that extent onto the new image size.
///
/// # Arguments
///
/// * `camera_model` - any camera model
/// * `balance` - [0-1] zero means no black margin
/// * `new_image_w_h` - optional new image width and height, default using the original w, h
///
/// # Panics
///
/// Panics if `balance` is outside [0, 1], or if any of the four boundary
/// points cannot be unprojected by the model.
///
/// # Examples
///
/// ```
/// use camera_intrinsic_model::*;
/// let model = model_from_json("data/eucm0.json");
/// let new_w_h = 1024;
/// let p = model.estimate_new_camera_matrix_for_undistort(0.0, Some((new_w_h, new_w_h)));
/// let (xmap, ymap) = model.init_undistort_map(&p, (new_w_h, new_w_h), None);
/// ```
pub fn estimate_new_camera_matrix_for_undistort(
    camera_model: &dyn CameraModel<f64>,
    balance: f64,
    new_image_w_h: Option<(u32, u32)>,
) -> na::Matrix3<f64> {
    if !(0.0..=1.0).contains(&balance) {
        panic!("balance should be [0.0-1.0], got {}", balance);
    }
    // params[2..4] are read as the principal point (cx, cy) — generic model
    // parameter layout; TODO confirm against the model definitions.
    let params = camera_model.params();
    let cx = params[2];
    let cy = params[3];
    let w = camera_model.width();
    let h = camera_model.height();
    // Midpoints of the four image edges, aligned with the principal point.
    let p2ds = vec![
        na::Vector2::new(cx, 0.0),
        na::Vector2::new(w - 1.0, cy),
        na::Vector2::new(cx, h - 1.0),
        na::Vector2::new(0.0, cy),
    ];
    let undist_pts = camera_model.unproject(&p2ds);
    // Normalize the unprojected rays onto the z = 1 plane.
    let undist_pts = undist_pts.iter().map(|p| {
        let p = p.expect("failed to unproject an image boundary point");
        na::Vector2::new(p.x / p.z, p.y / p.z)
    });
    let mut min_x = f64::MAX;
    let mut min_y = f64::MAX;
    let mut max_x = f64::MIN;
    let mut max_y = f64::MIN;
    for p in undist_pts {
        min_x = min_x.min(p.x);
        min_y = min_y.min(p.y);
        max_x = max_x.max(p.x);
        max_y = max_y.max(p.y);
    }
    // min_x/min_y are the extents left of / above the optical axis (expected
    // negative), turned into positive distances.
    min_x = min_x.abs();
    min_y = min_y.abs();
    let (new_w, new_h) =
        new_image_w_h.unwrap_or((camera_model.width() as u32, camera_model.height() as u32));
    // Place the principal point proportionally to the asymmetric extents.
    let new_cx = new_w as f64 * min_x / (min_x + max_x);
    let new_cy = new_h as f64 * min_y / (min_y + max_y);
    // Focal lengths that exactly fit the horizontal / vertical extents.
    let fx = new_w as f64 / (min_x + max_x);
    let fy = new_h as f64 / (min_y + max_y);
    let fmin = fx.min(fy);
    let fmax = fx.max(fy);
    // Blend between the tight fit (fmax, no black margin) and the loose fit
    // (fmin, full field of view) according to `balance`.
    let new_f = balance * fmin + (1.0 - balance) * fmax;

    let mut out = na::Matrix3::identity();
    out[(0, 0)] = new_f;
    out[(1, 1)] = new_f;
    out[(0, 2)] = new_cx;
    out[(1, 2)] = new_cy;
    out
}

/// Computes rectifying rotations and a shared projection matrix for a stereo pair.
///
/// Splits the inter-camera rotation evenly between the two cameras, then applies a
/// common extra rotation that aligns the baseline with the dominant image axis, and
/// averages both cameras' estimated undistort matrices into one shared projection.
///
/// Returns `(r0, r1, p)`: the rectifying rotation for each camera and the new
/// shared projection matrix.
///
/// # Arguments
///
/// * `camera_model0` - camera model of the first camera
/// * `camera_model1` - camera model of the second camera
/// * `rvec` - relative rotation between the cameras as a scaled axis
///   (NOTE(review): presumably camera0-to-camera1 — confirm against callers)
/// * `tvec` - relative translation between the cameras
/// * `new_image_w_h` - optional new image width and height, default using the original w, h
///
/// # Examples
///
/// ```
/// use camera_intrinsic_model::*;
/// use nalgebra as na;
/// let model0 = model_from_json("data/eucm0.json");
/// let model1 = model_from_json("data/eucm1.json");
/// let tvec = na::Vector3::new(
///     -0.10098947190325333,
///     -0.0020811599784744455,
///     -0.0012888359197775197,
/// );
/// let quat = na::Quaternion::new(
///     0.9997158799903332,
///     0.02382966001551074,
///     -0.00032454324393309654,
///     0.00044863167728445325,
/// );
/// let rvec = na::UnitQuaternion::from_quaternion(quat).scaled_axis();
/// let (r0, r1, p) = stereo_rectify(&model0, &model1, &rvec, &tvec, None);
/// let image_w_h = (
///     model0.width().round() as u32,
///     model0.height().round() as u32,
/// );
/// let (xmap0, ymap0) = model0.init_undistort_map(&p, image_w_h, Some(r0));
/// let (xmap1, ymap1) = model1.init_undistort_map(&p, image_w_h, Some(r1));
/// ```
pub fn stereo_rectify(
    camera_model0: &GenericModel<f64>,
    camera_model1: &GenericModel<f64>,
    rvec: &na::Vector3<f64>,
    tvec: &na::Vector3<f64>,
    new_image_w_h: Option<(u32, u32)>,
) -> (na::Rotation3<f64>, na::Rotation3<f64>, na::Matrix3<f64>) {
    // compensate for rotation first
    // Half of the (negated) relative rotation: each camera takes half the
    // rotation, in opposite directions (see rotation_for_cam0/1 below).
    let r_half_inv = -rvec / 2.0;

    // another rotation for compensating the translation
    // use translation to determine x axis rectify or y axis rectify
    let rotated_t = na::Rotation3::from_scaled_axis(r_half_inv) * tvec;
    // Dominant baseline axis: 0 = rectify along x, 1 = rectify along y.
    let idx = if rotated_t.x.abs() > rotated_t.y.abs() {
        0
    } else {
        1
    };
    let axis_to_rectify = rotated_t[idx];
    let translation_norm = rotated_t.norm();
    // Unit vector along the chosen axis, signed to match the baseline direction.
    let mut unit_vector = na::Vector3::zeros();
    if axis_to_rectify > 0.0 {
        unit_vector[idx] = 1.0;
    } else {
        unit_vector[idx] = -1.0;
    }

    // Rotate the baseline onto the chosen axis: the rotation axis is
    // t x u, and the angle is acos(|t_axis| / |t|).
    let mut axis_for_rotation = rotated_t.cross(&unit_vector);
    let axis_norm = axis_for_rotation.norm();
    let mut rotation_for_compensating_translation = na::Rotation3::identity();
    // axis_norm == 0 means the baseline is already aligned; keep identity.
    if axis_norm > 0.0 {
        axis_for_rotation /= axis_norm;
        axis_for_rotation *= (axis_to_rectify.abs() / translation_norm).acos();
        rotation_for_compensating_translation = na::Rotation3::from_scaled_axis(axis_for_rotation);
    }

    // Each camera: half the relative rotation (opposite signs), then the
    // common baseline-alignment rotation.
    let rotation_for_cam0 =
        rotation_for_compensating_translation * na::Rotation3::from_scaled_axis(-r_half_inv);
    let rotation_for_cam1 =
        rotation_for_compensating_translation * na::Rotation3::from_scaled_axis(r_half_inv);

    // new projection matrix
    // Average focal length and principal point of both cameras' estimates so
    // the pair shares a single projection matrix.
    let new_cam_mat0 = camera_model0.estimate_new_camera_matrix_for_undistort(0.0, new_image_w_h);
    let new_cam_mat1 = camera_model1.estimate_new_camera_matrix_for_undistort(0.0, new_image_w_h);
    let mut avg_new_mat = na::Matrix3::identity();
    let avg_f = (new_cam_mat0[(0, 0)] + new_cam_mat1[(0, 0)]) / 2.0;
    avg_new_mat[(0, 0)] = avg_f;
    avg_new_mat[(1, 1)] = avg_f;
    let avg_cx = (new_cam_mat0[(0, 2)] + new_cam_mat1[(0, 2)]) / 2.0;
    avg_new_mat[(0, 2)] = avg_cx;
    let avg_cy = (new_cam_mat0[(1, 2)] + new_cam_mat1[(1, 2)]) / 2.0;
    avg_new_mat[(1, 2)] = avg_cy;
    (rotation_for_cam0, rotation_for_cam1, avg_new_mat)
}