//! Python bindings for the `scirs2-vision` crate: preprocessing, color
//! conversion, edge detection, features, segmentation, and homography.

use pyo3::prelude::*;
use pyo3::types::PyDict;
use scirs2_core::ndarray::{Array2, Array3};
use scirs2_numpy::{IntoPyArray, PyArray2, PyArray3, PyArrayMethods};
use scirs2_vision::error::VisionError;

// Import vision functions
use image::{DynamicImage, GrayImage, ImageBuffer, Luma, Rgb};
use scirs2_vision::feature::canny::{canny, PreprocessMode};
use scirs2_vision::{
    bilateral_filter, clahe, detect_and_compute, equalize_histogram, find_homography,
    gaussian_blur, harris_corners, labels_to_color_image, laplacian_edges, median_filter,
    normalize_brightness, prewitt_edges, rgb_to_grayscale, rgb_to_hsv, sobel_edges, unsharp_mask,
    watershed,
};
16
// ============================================================================
// Helper Functions: NumPy ↔ DynamicImage conversion
// ============================================================================
20
21/// Convert a NumPy array to a grayscale DynamicImage
22/// Expects array with values in [0, 255] range
23fn numpy_to_gray_image(arr: &Bound<'_, PyArray2<u8>>) -> Result<DynamicImage, VisionError> {
24    let binding = arr.readonly();
25    let array = binding.as_array();
26    let (height, width) = array.dim();
27
28    let mut img = GrayImage::new(width as u32, height as u32);
29    for y in 0..height {
30        for x in 0..width {
31            img.put_pixel(x as u32, y as u32, Luma([array[[y, x]]]));
32        }
33    }
34
35    Ok(DynamicImage::ImageLuma8(img))
36}
37
38/// Convert a NumPy array to an RGB DynamicImage
39/// Expects array with shape (height, width, 3) and values in [0, 255] range
40fn numpy_to_rgb_image(arr: &Bound<'_, PyArray3<u8>>) -> Result<DynamicImage, VisionError> {
41    let binding = arr.readonly();
42    let array = binding.as_array();
43    let shape = array.shape();
44
45    if shape.len() != 3 || shape[2] != 3 {
46        return Err(VisionError::InvalidParameter(
47            "Expected array with shape (height, width, 3)".to_string(),
48        ));
49    }
50
51    let height = shape[0];
52    let width = shape[1];
53
54    let mut img = ImageBuffer::new(width as u32, height as u32);
55    for y in 0..height {
56        for x in 0..width {
57            img.put_pixel(
58                x as u32,
59                y as u32,
60                Rgb([array[[y, x, 0]], array[[y, x, 1]], array[[y, x, 2]]]),
61            );
62        }
63    }
64
65    Ok(DynamicImage::ImageRgb8(img))
66}
67
68/// Convert a grayscale DynamicImage to a NumPy array
69fn gray_image_to_numpy(py: Python, img: &DynamicImage) -> Py<PyArray2<u8>> {
70    let gray = img.to_luma8();
71    let (width, height) = gray.dimensions();
72
73    let mut array = Array2::zeros((height as usize, width as usize));
74    for y in 0..height {
75        for x in 0..width {
76            array[[y as usize, x as usize]] = gray.get_pixel(x, y)[0];
77        }
78    }
79
80    array.into_pyarray(py).unbind()
81}
82
83/// Convert an RGB DynamicImage to a NumPy array with shape (height, width, 3)
84fn rgb_image_to_numpy(py: Python, img: &DynamicImage) -> Py<PyArray3<u8>> {
85    let rgb = img.to_rgb8();
86    let (width, height) = rgb.dimensions();
87
88    let mut array = scirs2_core::ndarray::Array3::zeros((height as usize, width as usize, 3));
89    for y in 0..height {
90        for x in 0..width {
91            let pixel = rgb.get_pixel(x, y);
92            array[[y as usize, x as usize, 0]] = pixel[0];
93            array[[y as usize, x as usize, 1]] = pixel[1];
94            array[[y as usize, x as usize, 2]] = pixel[2];
95        }
96    }
97
98    array.into_pyarray(py).unbind()
99}
100
101// ============================================================================
102// Preprocessing Functions
103// ============================================================================
104
105/// Apply bilateral filtering for edge-preserving noise reduction
106///
107/// Bilateral filtering smooths images while preserving edges by considering
108/// both spatial distance and intensity difference between pixels.
109///
110/// Args:
111///     image (np.ndarray): Input grayscale image (2D uint8 array)
112///     diameter (int): Diameter of pixel neighborhood (must be positive odd integer)
113///     sigma_space (float): Standard deviation for spatial Gaussian kernel
114///     sigma_color (float): Standard deviation for color/range Gaussian kernel
115///
116/// Returns:
117///     np.ndarray: Filtered grayscale image (2D uint8 array)
118#[pyfunction]
119#[pyo3(signature = (image, diameter, sigma_space, sigma_color))]
120fn bilateral_filter_py(
121    py: Python,
122    image: &Bound<'_, PyArray2<u8>>,
123    diameter: u32,
124    sigma_space: f32,
125    sigma_color: f32,
126) -> PyResult<Py<PyArray2<u8>>> {
127    let img = numpy_to_gray_image(image).map_err(|e| {
128        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
129    })?;
130
131    let filtered = bilateral_filter(&img, diameter, sigma_space, sigma_color).map_err(|e| {
132        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Bilateral filter error: {}", e))
133    })?;
134
135    Ok(gray_image_to_numpy(py, &filtered))
136}
137
138/// Apply Gaussian blur to reduce noise
139///
140/// Args:
141///     image (np.ndarray): Input grayscale image (2D uint8 array)
142///     sigma (float): Standard deviation of Gaussian kernel (must be positive)
143///
144/// Returns:
145///     np.ndarray: Blurred grayscale image (2D uint8 array)
146#[pyfunction]
147#[pyo3(signature = (image, sigma))]
148fn gaussian_blur_py(
149    py: Python,
150    image: &Bound<'_, PyArray2<u8>>,
151    sigma: f32,
152) -> PyResult<Py<PyArray2<u8>>> {
153    let img = numpy_to_gray_image(image).map_err(|e| {
154        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
155    })?;
156
157    let blurred = gaussian_blur(&img, sigma).map_err(|e| {
158        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Gaussian blur error: {}", e))
159    })?;
160
161    Ok(gray_image_to_numpy(py, &blurred))
162}
163
164/// Apply median filtering to remove salt-and-pepper noise
165///
166/// Args:
167///     image (np.ndarray): Input grayscale image (2D uint8 array)
168///     kernel_size (int): Size of square kernel (must be positive odd integer)
169///
170/// Returns:
171///     np.ndarray: Filtered grayscale image (2D uint8 array)
172#[pyfunction]
173#[pyo3(signature = (image, kernel_size))]
174fn median_filter_py(
175    py: Python,
176    image: &Bound<'_, PyArray2<u8>>,
177    kernel_size: u32,
178) -> PyResult<Py<PyArray2<u8>>> {
179    let img = numpy_to_gray_image(image).map_err(|e| {
180        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
181    })?;
182
183    let filtered = median_filter(&img, kernel_size).map_err(|e| {
184        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Median filter error: {}", e))
185    })?;
186
187    Ok(gray_image_to_numpy(py, &filtered))
188}
189
190/// Apply Contrast Limited Adaptive Histogram Equalization (CLAHE)
191///
192/// Args:
193///     image (np.ndarray): Input grayscale image (2D uint8 array)
194///     tile_size (int): Size of grid tiles (typically 8)
195///     clip_limit (float): Threshold for contrast limiting (1.0-4.0 typical)
196///
197/// Returns:
198///     np.ndarray: Contrast-enhanced grayscale image (2D uint8 array)
199#[pyfunction]
200#[pyo3(signature = (image, tile_size=8, clip_limit=2.0))]
201fn clahe_py(
202    py: Python,
203    image: &Bound<'_, PyArray2<u8>>,
204    tile_size: u32,
205    clip_limit: f32,
206) -> PyResult<Py<PyArray2<u8>>> {
207    let img = numpy_to_gray_image(image).map_err(|e| {
208        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
209    })?;
210
211    let enhanced = clahe(&img, tile_size, clip_limit).map_err(|e| {
212        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("CLAHE error: {}", e))
213    })?;
214
215    Ok(gray_image_to_numpy(py, &enhanced))
216}
217
218/// Apply histogram equalization to enhance contrast
219///
220/// Args:
221///     image (np.ndarray): Input grayscale image (2D uint8 array)
222///
223/// Returns:
224///     np.ndarray: Contrast-enhanced grayscale image (2D uint8 array)
225#[pyfunction]
226fn equalize_histogram_py(
227    py: Python,
228    image: &Bound<'_, PyArray2<u8>>,
229) -> PyResult<Py<PyArray2<u8>>> {
230    let img = numpy_to_gray_image(image).map_err(|e| {
231        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
232    })?;
233
234    let equalized = equalize_histogram(&img).map_err(|e| {
235        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!(
236            "Histogram equalization error: {}",
237            e
238        ))
239    })?;
240
241    Ok(gray_image_to_numpy(py, &equalized))
242}
243
244/// Normalize image brightness and contrast
245///
246/// Args:
247///     image (np.ndarray): Input grayscale image (2D uint8 array)
248///     min_out (float): Minimum output intensity (0.0 to 1.0)
249///     max_out (float): Maximum output intensity (0.0 to 1.0)
250///
251/// Returns:
252///     np.ndarray: Normalized grayscale image (2D uint8 array)
253#[pyfunction]
254#[pyo3(signature = (image, min_out=0.0, max_out=1.0))]
255fn normalize_brightness_py(
256    py: Python,
257    image: &Bound<'_, PyArray2<u8>>,
258    min_out: f32,
259    max_out: f32,
260) -> PyResult<Py<PyArray2<u8>>> {
261    let img = numpy_to_gray_image(image).map_err(|e| {
262        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
263    })?;
264
265    let normalized = normalize_brightness(&img, min_out, max_out).map_err(|e| {
266        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!(
267            "Normalize brightness error: {}",
268            e
269        ))
270    })?;
271
272    Ok(gray_image_to_numpy(py, &normalized))
273}
274
275/// Apply unsharp masking to enhance edges
276///
277/// Args:
278///     image (np.ndarray): Input grayscale image (2D uint8 array)
279///     sigma (float): Standard deviation of Gaussian blur
280///     amount (float): Strength of sharpening (typically 0.5 to 5.0)
281///
282/// Returns:
283///     np.ndarray: Sharpened grayscale image (2D uint8 array)
284#[pyfunction]
285#[pyo3(signature = (image, sigma=1.0, amount=1.0))]
286fn unsharp_mask_py(
287    py: Python,
288    image: &Bound<'_, PyArray2<u8>>,
289    sigma: f32,
290    amount: f32,
291) -> PyResult<Py<PyArray2<u8>>> {
292    let img = numpy_to_gray_image(image).map_err(|e| {
293        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
294    })?;
295
296    let sharpened = unsharp_mask(&img, sigma, amount).map_err(|e| {
297        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Unsharp mask error: {}", e))
298    })?;
299
300    Ok(gray_image_to_numpy(py, &sharpened))
301}
302
303// ============================================================================
304// Color Conversion Functions
305// ============================================================================
306
307/// Convert RGB image to grayscale
308///
309/// Args:
310///     image (np.ndarray): Input RGB image (3D uint8 array with shape (H, W, 3))
311///     weights (Optional[list]): Custom RGB weights as [r_weight, g_weight, b_weight] (default: None for standard conversion)
312///
313/// Returns:
314///     np.ndarray: Grayscale image (2D uint8 array)
315#[pyfunction]
316#[pyo3(signature = (image, weights=None))]
317fn rgb_to_grayscale_py(
318    py: Python,
319    image: &Bound<'_, PyArray3<u8>>,
320    weights: Option<[f32; 3]>,
321) -> PyResult<Py<PyArray2<u8>>> {
322    let img = numpy_to_rgb_image(image).map_err(|e| {
323        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
324    })?;
325
326    let gray = rgb_to_grayscale(&img, weights).map_err(|e| {
327        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("RGB to grayscale error: {}", e))
328    })?;
329
330    Ok(gray_image_to_numpy(py, &gray))
331}
332
333/// Convert RGB image to HSV color space
334///
335/// Args:
336///     image (np.ndarray): Input RGB image (3D uint8 array with shape (H, W, 3))
337///
338/// Returns:
339///     np.ndarray: HSV image (3D uint8 array with shape (H, W, 3))
340#[pyfunction]
341fn rgb_to_hsv_py(py: Python, image: &Bound<'_, PyArray3<u8>>) -> PyResult<Py<PyArray3<u8>>> {
342    let img = numpy_to_rgb_image(image).map_err(|e| {
343        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
344    })?;
345
346    let hsv = rgb_to_hsv(&img).map_err(|e| {
347        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("RGB to HSV error: {}", e))
348    })?;
349
350    Ok(rgb_image_to_numpy(py, &hsv))
351}
352
353// ============================================================================
354// Edge Detection Functions
355// ============================================================================
356
357/// Detect edges using Sobel operator
358///
359/// Args:
360///     image (np.ndarray): Input grayscale image (2D uint8 array)
361///     threshold (float): Edge detection threshold (0.0 to 1.0)
362///
363/// Returns:
364///     np.ndarray: Edge map (2D uint8 array)
365#[pyfunction]
366#[pyo3(signature = (image, threshold=0.1))]
367fn sobel_edges_py(
368    py: Python,
369    image: &Bound<'_, PyArray2<u8>>,
370    threshold: f32,
371) -> PyResult<Py<PyArray2<u8>>> {
372    let img = numpy_to_gray_image(image).map_err(|e| {
373        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
374    })?;
375
376    let edges = sobel_edges(&img, threshold).map_err(|e| {
377        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Sobel edges error: {}", e))
378    })?;
379
380    Ok(gray_image_to_numpy(py, &DynamicImage::ImageLuma8(edges)))
381}
382
383/// Detect edges using Canny edge detector
384///
385/// Args:
386///     image (np.ndarray): Input grayscale image (2D uint8 array)
387///     sigma (float): Gaussian blur sigma (default: 1.4)
388///     low_threshold (float): Low threshold for edge detection (0.0 to 1.0)
389///     high_threshold (float): High threshold for edge detection (0.0 to 1.0)
390///
391/// Returns:
392///     np.ndarray: Edge map (2D uint8 array)
393#[pyfunction]
394#[pyo3(signature = (image, sigma=1.4, low_threshold=0.05, high_threshold=0.15))]
395fn canny_edges_py(
396    py: Python,
397    image: &Bound<'_, PyArray2<u8>>,
398    sigma: f32,
399    low_threshold: f32,
400    high_threshold: f32,
401) -> PyResult<Py<PyArray2<u8>>> {
402    let img = numpy_to_gray_image(image).map_err(|e| {
403        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
404    })?;
405
406    let edges = canny(
407        &img,
408        sigma,
409        Some(low_threshold),
410        Some(high_threshold),
411        None,
412        false,
413        PreprocessMode::Reflect,
414    )
415    .map_err(|e| {
416        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Canny edges error: {}", e))
417    })?;
418
419    Ok(gray_image_to_numpy(py, &DynamicImage::ImageLuma8(edges)))
420}
421
422/// Detect edges using Prewitt operator
423///
424/// Args:
425///     image (np.ndarray): Input grayscale image (2D uint8 array)
426///     threshold (float): Edge detection threshold (0.0 to 1.0)
427///
428/// Returns:
429///     np.ndarray: Edge map (2D uint8 array)
430#[pyfunction]
431#[pyo3(signature = (image, threshold=0.1))]
432fn prewitt_edges_py(
433    py: Python,
434    image: &Bound<'_, PyArray2<u8>>,
435    threshold: f32,
436) -> PyResult<Py<PyArray2<u8>>> {
437    let img = numpy_to_gray_image(image).map_err(|e| {
438        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
439    })?;
440
441    let edges = prewitt_edges(&img, threshold).map_err(|e| {
442        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Prewitt edges error: {}", e))
443    })?;
444
445    Ok(gray_image_to_numpy(py, &DynamicImage::ImageLuma8(edges)))
446}
447
448/// Detect edges using Laplacian operator
449///
450/// Args:
451///     image (np.ndarray): Input grayscale image (2D uint8 array)
452///     threshold (float): Edge detection threshold (0.0 to 1.0)
453///     use_diagonal (bool): Whether to include diagonal neighbors in Laplacian kernel
454///
455/// Returns:
456///     np.ndarray: Edge map (2D uint8 array)
457#[pyfunction]
458#[pyo3(signature = (image, threshold=0.1, use_diagonal=true))]
459fn laplacian_edges_py(
460    py: Python,
461    image: &Bound<'_, PyArray2<u8>>,
462    threshold: f32,
463    use_diagonal: bool,
464) -> PyResult<Py<PyArray2<u8>>> {
465    let img = numpy_to_gray_image(image).map_err(|e| {
466        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
467    })?;
468
469    let edges = laplacian_edges(&img, threshold, use_diagonal).map_err(|e| {
470        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Laplacian edges error: {}", e))
471    })?;
472
473    Ok(gray_image_to_numpy(py, &DynamicImage::ImageLuma8(edges)))
474}
475
// ============================================================================
// Feature Detection Functions
// ============================================================================
479
480/// Detect Harris corners
481///
482/// Args:
483///     image (np.ndarray): Input grayscale image (2D uint8 array)
484///     block_size (int): Size of block for corner detection (must be odd, typically 3 or 5)
485///     k (float): Harris detector parameter (typically 0.04 to 0.06)
486///     threshold (float): Corner detection threshold
487///
488/// Returns:
489///     np.ndarray: Image with corners marked (2D uint8 array)
490#[pyfunction]
491#[pyo3(signature = (image, block_size=3, k=0.04, threshold=100.0))]
492fn harris_corners_py(
493    py: Python,
494    image: &Bound<'_, PyArray2<u8>>,
495    block_size: usize,
496    k: f32,
497    threshold: f32,
498) -> PyResult<Py<PyArray2<u8>>> {
499    let img = numpy_to_gray_image(image).map_err(|e| {
500        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
501    })?;
502
503    let corners_img = harris_corners(&img, block_size, k, threshold).map_err(|e| {
504        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Harris corners error: {}", e))
505    })?;
506
507    // Convert GrayImage to numpy array
508    let (width, height) = corners_img.dimensions();
509    let mut array = Array2::zeros((height as usize, width as usize));
510    for y in 0..height {
511        for x in 0..width {
512            array[[y as usize, x as usize]] = corners_img.get_pixel(x, y)[0];
513        }
514    }
515
516    Ok(array.into_pyarray(py).unbind())
517}
518
519/// Detect SIFT keypoints and compute descriptors
520///
521/// Args:
522///     image (np.ndarray): Input grayscale image (2D uint8 array)
523///     max_features (int): Maximum number of features to detect
524///     contrast_threshold (float): Contrast threshold for feature detection
525///
526/// Returns:
527///     list: List of dictionaries with keypoint information (x, y, scale, orientation, descriptor)
528#[pyfunction]
529#[pyo3(signature = (image, max_features=500, contrast_threshold=0.03))]
530fn detect_and_compute_sift_py(
531    py: Python,
532    image: &Bound<'_, PyArray2<u8>>,
533    max_features: usize,
534    contrast_threshold: f32,
535) -> PyResult<Vec<Py<PyDict>>> {
536    let img = numpy_to_gray_image(image).map_err(|e| {
537        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
538    })?;
539
540    let descriptors = detect_and_compute(&img, max_features, contrast_threshold).map_err(|e| {
541        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("SIFT error: {}", e))
542    })?;
543
544    // Convert descriptors to Python dictionaries
545    let mut result = Vec::new();
546    for desc in descriptors {
547        let dict = PyDict::new(py);
548        dict.set_item("x", desc.keypoint.x)?;
549        dict.set_item("y", desc.keypoint.y)?;
550        dict.set_item("scale", desc.keypoint.scale)?;
551        dict.set_item("orientation", desc.keypoint.orientation)?;
552        dict.set_item("descriptor", desc.vector.into_pyarray(py))?;
553        result.push(dict.into());
554    }
555
556    Ok(result)
557}
558
559// ============================================================================
560// Image Segmentation Functions
561// ============================================================================
562
563/// Perform watershed segmentation
564///
565/// Args:
566///     image (np.ndarray): Input grayscale image (2D uint8 array)
567///     markers (Optional[np.ndarray]): Optional marker image (2D uint32 array)
568///     connectivity (int): Pixel connectivity (4 or 8)
569///
570/// Returns:
571///     np.ndarray: Segmented labels (2D uint32 array)
572#[pyfunction]
573#[pyo3(signature = (image, markers=None, connectivity=8))]
574fn watershed_py(
575    py: Python,
576    image: &Bound<'_, PyArray2<u8>>,
577    markers: Option<&Bound<'_, PyArray2<u32>>>,
578    connectivity: u8,
579) -> PyResult<Py<PyArray2<u32>>> {
580    let img = numpy_to_gray_image(image).map_err(|e| {
581        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Image conversion error: {}", e))
582    })?;
583
584    let marker_array =
585        markers.map(|m: &Bound<'_, PyArray2<u32>>| m.readonly().as_array().to_owned());
586
587    let labels = watershed(&img, marker_array.as_ref(), connectivity).map_err(|e| {
588        PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!("Watershed error: {}", e))
589    })?;
590
591    Ok(labels.into_pyarray(py).unbind())
592}
593
594/// Convert segmentation labels to color image for visualization
595///
596/// Args:
597///     labels (np.ndarray): Segmentation labels (2D uint32 array)
598///
599/// Returns:
600///     np.ndarray: Color image (3D uint8 array with shape (H, W, 3))
601#[pyfunction]
602fn labels_to_color_image_py(
603    py: Python,
604    labels: &Bound<'_, PyArray2<u32>>,
605) -> PyResult<Py<PyArray3<u8>>> {
606    let label_array = labels.readonly().as_array().to_owned();
607
608    // labels_to_color_image expects Array2<u32> and returns RgbImage directly (not Result)
609    let color_img = labels_to_color_image(&label_array, None);
610
611    Ok(rgb_image_to_numpy(py, &DynamicImage::ImageRgb8(color_img)))
612}
613
614// ============================================================================
615// Geometric Transform Functions
616// ============================================================================
617
618/// Find homography matrix from point correspondences using RANSAC
619///
620/// Args:
621///     src_points (list): List of source points as (x, y) tuples
622///     dst_points (list): List of destination points as (x, y) tuples
623///     threshold (float): RANSAC inlier threshold
624///     confidence (float): RANSAC confidence level (0.0 to 1.0)
625///
626/// Returns:
627///     tuple: (homography_matrix, inlier_mask)
628///         - homography_matrix: 3x3 numpy array
629///         - inlier_mask: list of booleans indicating inliers
630#[pyfunction]
631#[pyo3(signature = (src_points, dst_points, threshold=3.0, confidence=0.99))]
632fn find_homography_py(
633    py: Python<'_>,
634    src_points: Vec<(f64, f64)>,
635    dst_points: Vec<(f64, f64)>,
636    threshold: f64,
637    confidence: f64,
638) -> PyResult<(Py<PyArray2<f64>>, Vec<bool>)> {
639    let (h, inliers) =
640        find_homography(&src_points, &dst_points, threshold, confidence).map_err(|e| {
641            PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(format!(
642                "Find homography error: {}",
643                e
644            ))
645        })?;
646
647    // h is a Homography struct with a matrix field
648    Ok((h.matrix.into_pyarray(py).unbind(), inliers))
649}
650
651// ============================================================================
652// Module Registration
653// ============================================================================
654
655pub fn register_module(m: &Bound<'_, PyModule>) -> PyResult<()> {
656    // Preprocessing functions
657    m.add_function(wrap_pyfunction!(bilateral_filter_py, m)?)?;
658    m.add_function(wrap_pyfunction!(gaussian_blur_py, m)?)?;
659    m.add_function(wrap_pyfunction!(median_filter_py, m)?)?;
660    m.add_function(wrap_pyfunction!(clahe_py, m)?)?;
661    m.add_function(wrap_pyfunction!(equalize_histogram_py, m)?)?;
662    m.add_function(wrap_pyfunction!(normalize_brightness_py, m)?)?;
663    m.add_function(wrap_pyfunction!(unsharp_mask_py, m)?)?;
664
665    // Color conversion functions
666    m.add_function(wrap_pyfunction!(rgb_to_grayscale_py, m)?)?;
667    m.add_function(wrap_pyfunction!(rgb_to_hsv_py, m)?)?;
668
669    // Edge detection functions
670    m.add_function(wrap_pyfunction!(sobel_edges_py, m)?)?;
671    m.add_function(wrap_pyfunction!(canny_edges_py, m)?)?;
672    m.add_function(wrap_pyfunction!(prewitt_edges_py, m)?)?;
673    m.add_function(wrap_pyfunction!(laplacian_edges_py, m)?)?;
674
675    // Feature detection functions
676    m.add_function(wrap_pyfunction!(harris_corners_py, m)?)?;
677    m.add_function(wrap_pyfunction!(detect_and_compute_sift_py, m)?)?;
678
679    // Segmentation functions
680    m.add_function(wrap_pyfunction!(watershed_py, m)?)?;
681    m.add_function(wrap_pyfunction!(labels_to_color_image_py, m)?)?;
682
683    // Geometric transform functions
684    m.add_function(wrap_pyfunction!(find_homography_py, m)?)?;
685
686    Ok(())
687}