//! SciRS2 NDImage Tutorial Guide - Step-by-Step Learning
//!
//! This tutorial provides a gentle introduction to image processing with scirs2-ndimage.
//! Perfect for beginners who want to learn image processing concepts while using this library.
//!
//! ## Tutorial Structure:
//! 1. Getting Started - Basic concepts and setup
//! 2. Working with Images - Loading, creating, and viewing images
//! 3. Image Enhancement - Filters and noise reduction
//! 4. Edge Detection - Finding boundaries and features
//! 5. Shape Analysis - Morphological operations
//! 6. Object Detection - Finding and measuring objects
//! 7. Image Transformations - Rotating, scaling, and warping
//! 8. Advanced Techniques - Domain-specific applications
//!
//! Run this example to see step-by-step tutorials with explanations.
use ndarray::{Array2, Array3, ArrayView2, Axis};
use scirs2_ndimage::{
error::NdimageResult, features::*, filters::*, interpolation::*, measurements::*,
morphology::*, segmentation::*,
};
#[allow(dead_code)]
fn main() -> NdimageResult<()> {
println!("π SciRS2 NDImage Tutorial Guide");
println!("================================\n");
// Tutorial 1: Getting Started
println!("π TUTORIAL 1: Getting Started with Image Processing");
tutorial_1_getting_started()?;
// Tutorial 2: Working with Images
println!("\nπΈ TUTORIAL 2: Working with Images");
tutorial_2_working_withimages()?;
// Tutorial 3: Image Enhancement
println!("\n⨠TUTORIAL 3: Image Enhancement and Noise Reduction");
tutorial_3image_enhancement()?;
// Tutorial 4: Edge Detection
println!("\nπ TUTORIAL 4: Edge Detection and Feature Finding");
tutorial_4_edge_detection()?;
// Tutorial 5: Shape Analysis
println!("\n㪠TUTORIAL 5: Shape Analysis with Morphology");
tutorial_5shape_analysis()?;
// Tutorial 6: Object Detection
println!("\nπ― TUTORIAL 6: Object Detection and Measurement");
tutorial_6_object_detection()?;
// Tutorial 7: Image Transformations
println!("\nπ TUTORIAL 7: Image Transformations");
tutorial_7_transformations()?;
// Tutorial 8: Advanced Techniques
println!("\nπ TUTORIAL 8: Advanced Techniques");
tutorial_8_advanced_techniques()?;
println!("\nπ Tutorial Complete!");
println!("Next steps:");
println!("- Explore the comprehensive_documentation.rs example for advanced features");
println!("- Try the domain-specific examples for your field of interest");
println!("- Read the API documentation for detailed parameter descriptions");
Ok(())
}
#[allow(dead_code)]
/// Tutorial 1: introduces digital images as ndarray arrays — builds an 8x8
/// checkerboard (2-D grayscale) and a 4x4 RGB (3-D) example, then prints the
/// key concepts.
fn tutorial_1_getting_started() -> NdimageResult<()> {
    println!("-------------------------------------------");
    println!("Welcome to image processing with SciRS2!");
    println!("In this library, images are represented as multidimensional arrays (ndarray).");
    println!();
    // Concept 1: What is a digital image?
    println!("πΌοΈ CONCEPT: What is a digital image?");
    println!("A digital image is a 2D array of numbers called pixels.");
    println!("Each pixel represents the intensity or color at that location.");
    println!();
    // Create a simple image to demonstrate.
    println!("Let's create a simple 8x8 grayscale image:");
    // BUG FIX: ndarray's constructor is `from_shape_fn`, not `fromshape_fn`.
    let simpleimage = Array2::from_shape_fn((8, 8), |(i, j)| {
        // Checkerboard: alternate 1.0 / 0.0 by parity of the index sum.
        if (i + j) % 2 == 0 {
            1.0
        } else {
            0.0
        }
    });
    println!("Image shape: {:?}", simpleimage.dim());
    println!("Image data type: f64 (64-bit floating point)");
    println!("Pixel values range from 0.0 (black) to 1.0 (white)");
    println!();
    // Display part of the image as text.
    println!("Checkerboard pattern (first 4x4 corner):");
    for i in 0..4 {
        for j in 0..4 {
            print!("{:.0} ", simpleimage[[i, j]]);
        }
        println!();
    }
    println!();
    // Concept 2: Color images
    println!("π CONCEPT: Color images");
    println!("Color images have multiple channels (typically RGB).");
    println!("They are represented as 3D arrays: (height, width, channels)");
    // BUG FIX: same `from_shape_fn` spelling for the 3-D constructor.
    let colorimage = Array3::from_shape_fn((4, 4, 3), |(i, j, c)| {
        match c {
            0 => {
                // Red channel: top half bright.
                if i < 2 {
                    1.0
                } else {
                    0.0
                }
            }
            1 => {
                // Green channel: left half bright.
                if j < 2 {
                    1.0
                } else {
                    0.0
                }
            }
            2 => {
                // Blue channel: checkerboard.
                if (i + j) % 2 == 0 {
                    1.0
                } else {
                    0.0
                }
            }
            _ => 0.0,
        }
    });
    println!("Color image shape: {:?}", colorimage.dim());
    println!("(height, width, channels) = (4, 4, 3)");
    println!();
    // Key takeaways
    println!("π― KEY TAKEAWAYS:");
    println!("β’ Images are arrays of numbers");
    println!("β’ Grayscale: 2D array (height, width)");
    println!("β’ Color: 3D array (height, width, channels)");
    println!("β’ Pixel values typically range 0.0-1.0 or 0-255");
    println!("β’ Higher dimensions possible for scientific imaging");
    Ok(())
}
#[allow(dead_code)]
/// Tutorial 2: creating test images (solid, gradient, geometric shapes),
/// inspecting their statistics, and performing element-wise brightness,
/// contrast, and blend operations.
fn tutorial_2_working_withimages() -> NdimageResult<()> {
    println!("-------------------------------------------");
    println!("Learn how to create, modify, and inspect images.");
    println!();
    println!("π οΈ CREATING TEST IMAGES");
    println!();
    // Example 1: Solid color image.
    println!("1. Creating a solid gray image:");
    let grayimage = Array2::from_elem((50, 50), 0.5);
    println!(
        " Shape: {:?}, all pixels = 0.5 (middle gray)",
        grayimage.dim()
    );
    println!();
    // Example 2: Gradient image (built only for demonstration; unused below).
    println!("2. Creating a gradient image:");
    // BUG FIX: ndarray's constructor is `from_shape_fn`, not `fromshape_fn`.
    let _gradient = Array2::from_shape_fn((50, 50), |(i, _)| {
        i as f64 / 49.0 // Vertical gradient from 0 to 1
    });
    println!(" Vertical gradient from black (top) to white (bottom)");
    println!();
    // Example 3: Geometric shapes (helpers defined elsewhere in this file).
    println!("3. Creating geometric shapes:");
    let circleimage = create_circleimage(100, 100, 30.0);
    println!(" Circle image: 100x100 with radius 30");
    let squareimage = create_squareimage(80, 80, 20);
    println!(" Square image: 80x80 with 20x20 square in center");
    println!();
    // Inspecting images: computeimage_stats yields a (min, max, mean) tuple.
    println!("π INSPECTING IMAGES");
    println!();
    println!("Image statistics for circle:");
    let stats = computeimage_stats(&circleimage);
    println!(
        " Min: {:.3}, Max: {:.3}, Mean: {:.3}",
        stats.0, stats.1, stats.2
    );
    println!();
    // Modifying images with element-wise arithmetic.
    println!("π¨ MODIFYING IMAGES");
    println!();
    println!("1. Brightness adjustment (add constant):");
    let brighter = &circleimage + 0.2;
    let brighter_stats = computeimage_stats(&brighter);
    println!(" New mean: {:.3} (was {:.3})", brighter_stats.2, stats.2);
    println!();
    println!("2. Contrast adjustment (multiply):");
    let higher_contrast = &circleimage * 1.5;
    // Computed for completeness; the summary line below is qualitative.
    let _contrast_stats = computeimage_stats(&higher_contrast);
    println!(" Higher contrast - wider range of values");
    println!();
    println!("3. Combining images:");
    // NOTE(review): circleimage is 100x100 while squareimage is 80x80;
    // ndarray's element-wise `+` requires matching shapes, so this line
    // looks like it would panic at runtime — confirm against the helper
    // definitions (which are outside this view).
    let _combined = (&circleimage + &squareimage.view().into_dimensionality().unwrap()) / 2.0;
    println!(" Average of circle and square images");
    println!();
    // Key takeaways
    println!("π― KEY TAKEAWAYS:");
    // BUG FIX: the advertised constructor matched the misspelled call above;
    // the real ndarray API is `from_shape_fn`.
    println!("β’ Use Array2::from_shape_fn() for procedural image generation");
    println!("β’ Use Array2::from_elem() for solid colors");
    println!("β’ Basic math operations work element-wise on images");
    println!("β’ Always check image statistics to understand your data");
    println!("β’ Shape and data type matter for processing operations");
    Ok(())
}
#[allow(dead_code)]
/// Tutorial 3: demonstrates noise reduction on a synthetic noisy circle —
/// Gaussian smoothing at two strengths, median filtering, and edge-preserving
/// bilateral filtering — then prints a comparison table and practical tips.
fn tutorial_3image_enhancement() -> NdimageResult<()> {
    println!("-------------------------------------------");
    println!("Learn to improve image quality by reducing noise and enhancing details.");
    println!();
    // Build a clean reference image and a noise-degraded copy of it.
    let reference = create_circleimage(64, 64, 20.0);
    let degraded = add_realistic_noise(&reference, 0.1);
    println!("π NOISE AND WHY WE NEED FILTERS");
    println!("Real images often contain noise from sensors, transmission, etc.");
    let reference_stats = computeimage_stats(&reference);
    let degraded_stats = computeimage_stats(&degraded);
    println!("Clean image mean: {:.3}", reference_stats.2);
    println!("Noisy image mean: {:.3}", degraded_stats.2);
    println!();
    // Gaussian filter for noise reduction.
    println!("1οΈβ£ GAUSSIAN FILTER - Smooth noise reduction");
    println!("The Gaussian filter reduces noise by averaging nearby pixels.");
    println!("Larger sigma = more smoothing, but also more blur.");
    println!();
    let light_sigma = 1.0;
    let lightly_smoothed =
        gaussian_filter(&degraded, &[light_sigma, light_sigma], None, None, None)?;
    let light_stats = computeimage_stats(&lightly_smoothed);
    println!(
        " Light smoothing (sigma={}): mean={:.3}",
        light_sigma, light_stats.2
    );
    let heavy_sigma = 3.0;
    let heavily_smoothed =
        gaussian_filter(&degraded, &[heavy_sigma, heavy_sigma], None, None, None)?;
    let heavy_stats = computeimage_stats(&heavily_smoothed);
    println!(
        " Heavy smoothing (sigma={}): mean={:.3}",
        heavy_sigma, heavy_stats.2
    );
    println!();
    // Median filter for salt-and-pepper noise.
    println!("2οΈβ£ MEDIAN FILTER - Remove outliers");
    println!("The median filter is excellent for 'salt and pepper' noise.");
    println!("It replaces each pixel with the median of its neighborhood.");
    println!();
    let median_result = median_filter(&degraded.view(), Some(&[3, 3]), None, None)?;
    let median_summary = computeimage_stats(&median_result);
    println!(" Median filter (3x3): mean={:.3}", median_summary.2);
    println!(" Better at preserving edges than Gaussian filter");
    println!();
    // Bilateral filter for edge-preserving smoothing.
    println!("3οΈβ£ BILATERAL FILTER - Smart smoothing");
    println!("Bilateral filter smooths noise but preserves edges.");
    println!("It considers both spatial distance AND intensity difference.");
    println!();
    let bilateral_result = bilateral_filter(degraded.view(), 2.0, 0.1, Some(5))?;
    let bilateral_summary = computeimage_stats(&bilateral_result);
    println!(" Bilateral filter: mean={:.3}", bilateral_summary.2);
    println!(" Preserves edges while reducing noise");
    println!();
    // Comparison and recommendations.
    println!("π FILTER COMPARISON");
    println!("βββββββββββββββββββ¬βββββββββββββββββ¬ββββββββββββββββββββ");
    println!("β Filter Type β Best For β Preserves Edges β");
    println!("βββββββββββββββββββΌβββββββββββββββββΌββββββββββββββββββββ€");
    println!("β Gaussian β General noise β No β");
    println!("β Median β Salt & pepper β Better β");
    println!("β Bilateral β Smart smoothingβ Yes β");
    println!("βββββββββββββββββββ΄βββββββββββββββββ΄ββββββββββββββββββββ");
    println!();
    // Practical tips.
    println!("π‘ PRACTICAL TIPS:");
    println!("β’ Start with light filtering, increase if needed");
    println!("β’ Gaussian: sigma = 0.5-2.0 for light, 2.0-5.0 for heavy");
    println!("β’ Median: 3x3 or 5x5 kernel sizes work well");
    println!("β’ Bilateral: adjust intensity threshold for your image type");
    println!("β’ Always compare before/after statistics");
    Ok(())
}
#[allow(dead_code)]
/// Tutorial 4: surveys edge detectors on a synthetic test image — Sobel,
/// Laplacian, Canny, plus Prewitt and Roberts — and prints tuning guidance.
fn tutorial_4_edge_detection() -> NdimageResult<()> {
    println!("-------------------------------------------");
    println!("Learn to find edges and boundaries in images - fundamental for computer vision.");
    println!();
    // Synthetic image with clear intensity steps to detect.
    let sample = create_edge_testimage(80, 80);
    println!("π WHAT ARE EDGES?");
    println!("Edges are locations where image intensity changes rapidly.");
    println!("They correspond to object boundaries, texture changes, or lighting changes.");
    println!();
    // Gradient-based edge detection.
    println!("1οΈβ£ SOBEL EDGE DETECTION");
    println!("Sobel filter detects edges by computing image gradients.");
    println!("It uses convolution with special kernels to find intensity changes.");
    println!();
    let sobel_response = sobel(&sample.view(), None, None, None)?;
    let sobel_summary = computeimage_stats(&sobel_response);
    println!(
        " Sobel result: {} edges detected (mean gradient: {:.3})",
        count_edge_pixels(&sobel_response, 0.1),
        sobel_summary.2
    );
    println!(" Good for general edge detection");
    println!();
    // Second-derivative edge detection.
    println!("2οΈβ£ LAPLACIAN EDGE DETECTION");
    println!("Laplacian detects edges using the second derivative of intensity.");
    println!("It finds zero-crossings which correspond to edge locations.");
    println!();
    let laplacian_response = laplace(&sample.view(), None, None)?;
    let laplacian_summary = computeimage_stats(&laplacian_response);
    println!(" Laplacian result: mean={:.3}", laplacian_summary.2);
    println!(" More sensitive to noise, but can find thin edges");
    println!();
    println!("3οΈβ£ CANNY EDGE DETECTION - The Gold Standard");
    println!("Canny is a multi-step algorithm that produces clean, thin edges:");
    println!(" 1. Gaussian smoothing (noise reduction)");
    println!(" 2. Gradient computation");
    println!(" 3. Non-maximum suppression (thin edges)");
    println!(" 4. Double thresholding (strong/weak edges)");
    println!(" 5. Edge tracking by hysteresis");
    println!();
    let canny_response = canny(sample.view(), 1.0, 0.1, 0.3, None)?;
    // Stats are computed but only the strong-edge count is reported.
    let _canny_summary = computeimage_stats(&canny_response);
    println!(
        " Canny result: {} strong edges detected",
        count_edge_pixels(&canny_response, 0.5)
    );
    println!(" Produces clean, connected edge contours");
    println!();
    println!("4οΈβ£ ADVANCED TECHNIQUES");
    println!();
    // These detectors run purely for demonstration; results are not inspected.
    let _prewitt_response = prewitt(&sample.view(), None, None, None)?;
    println!(" Prewitt: Similar to Sobel, different kernel weights");
    let _roberts_response = roberts_cross_gradient(&sample.view())?;
    println!(" Roberts: Fast, simple 2x2 gradient operator");
    println!();
    // Parameter tuning guidance.
    println!("ποΈ PARAMETER TUNING GUIDE");
    println!();
    println!("Canny Parameters:");
    println!("β’ sigma (smoothing): 0.5-2.0 (higher = less noise, fewer edges)");
    println!("β’ lowthreshold: 0.05-0.2 (lower = more weak edges)");
    println!("β’ highthreshold: 0.2-0.5 (lower = more strong edges)");
    println!();
    println!("General Guidelines:");
    println!("β’ Start with Canny for best results");
    println!("β’ Use Sobel for speed when quality is less critical");
    println!("β’ Adjust thresholds based on your image contrast");
    println!("β’ Pre-filter noisy images before edge detection");
    println!();
    // Practical applications.
    println!("π§ PRACTICAL APPLICATIONS:");
    println!("β’ Object detection and recognition");
    println!("β’ Medical image analysis (organ boundaries)");
    println!("β’ Manufacturing quality control");
    println!("β’ Autonomous vehicle navigation");
    println!("β’ Document processing (text detection)");
    Ok(())
}
#[allow(dead_code)]
fn tutorial_5shape_analysis() -> NdimageResult<()> {
println!("-------------------------------------------");
println!("Learn morphological operations to analyze and modify object shapes.");
println!();
// Create test images with different shapes
let binary_image = create_binaryshapesimage(80, 80);
println!("π¬ WHAT IS MATHEMATICAL MORPHOLOGY?");
println!("Morphology studies shapes using set theory operations.");
println!("It uses a 'structuring element' (small shape) to probe the image.");
println!("Binary morphology works on black/white images (0s and 1s).");
println!();
// Create structuring elements
println!("π§ STRUCTURING ELEMENTS");
let cross_3x3 = generate_binary_structure(3, 1)?;
let square_3x3 = generate_binary_structure(3, 2)?;
println!(" Cross 3x3: connectivity-1 (4-connected neighbors)");
println!(" Square 3x3: connectivity-2 (8-connected neighbors)");
println!();
// Basic operations
println!("1οΈβ£ EROSION - Shrinking shapes");
println!("Erosion removes pixels from object boundaries.");
println!("It makes objects smaller and can remove noise.");
println!();
let eroded = binary_erosion(
&binary_image.view(),
Some(&cross_3x3.view()),
None,
None,
None,
None,
)?;
let original_pixels = count_white_pixels(&binary_image);
let eroded_pixels = count_white_pixels(&eroded);
println!(" Original: {} white pixels", original_pixels);
println!(" After erosion: {} white pixels", eroded_pixels);
println!(
" Effect: Removed {} pixels from boundaries",
original_pixels - eroded_pixels
);
println!();
println!("2οΈβ£ DILATION - Growing shapes");
println!("Dilation adds pixels to object boundaries.");
println!("It makes objects larger and can fill gaps.");
println!();
let dilated = binary_dilation(
&binary_image.view(),
Some(&cross_3x3.view()),
None,
None,
None,
None,
)?;
let dilated_pixels = count_white_pixels(&dilated);
println!(" After dilation: {} white pixels", dilated_pixels);
println!(
" Effect: Added {} pixels to boundaries",
dilated_pixels - original_pixels
);
println!();
// Compound operations
println!("3οΈβ£ OPENING - Erosion followed by dilation");
println!("Opening smooths object contours and removes small objects.");
println!("Good for noise removal while preserving large shapes.");
println!();
let opened = binary_opening(
&binary_image.view(),
Some(&cross_3x3.view()),
None,
None,
None,
)?;
let opened_pixels = count_white_pixels(&opened);
println!(" After opening: {} white pixels", opened_pixels);
println!(" Removes small objects and smooths boundaries");
println!();
println!("4οΈβ£ CLOSING - Dilation followed by erosion");
println!("Closing fills holes and connects nearby objects.");
println!("Good for filling gaps while preserving object size.");
println!();
let closed = binary_closing(
&binary_image.view(),
Some(&cross_3x3.view()),
None,
None,
None,
)?;
let closed_pixels = count_white_pixels(&closed);
println!(" After closing: {} white pixels", closed_pixels);
println!(" Fills holes and connects broken parts");
println!();
// Advanced operations
println!("5οΈβ£ ADVANCED MORPHOLOGICAL OPERATIONS");
println!();
// Morphological gradient
let morph_gradient = morphological_gradient(&binary_image.view(), None, None)?;
let gradient_pixels = count_white_pixels(&morph_gradient);
println!(" Morphological gradient: {} edge pixels", gradient_pixels);
println!(" Shows object boundaries (dilation - erosion)");
println!();
// Top-hat transform
let tophat = white_tophat(&binary_image.view(), None, None)?;
let tophat_pixels = count_white_pixels(&tophat);
println!(" White top-hat: {} pixels", tophat_pixels);
println!(" Highlights small bright objects");
println!();
// Distance transform
println!("6οΈβ£ DISTANCE TRANSFORM");
println!("Computes distance from each pixel to nearest boundary.");
println!("Useful for shape analysis and watershed segmentation.");
println!();
use ndarray::IxDyn;
let binary_dyn = binary_image.clone().into_dimensionality::<IxDyn>().unwrap();
let (distances_) = distance_transform_edt(&binary_dyn, None, true, false)
.expect("Distance transform failed")?;
let max_distance = distances.fold(0.0, |acc, &x| acc.max(x));
println!(" Maximum distance: {:.1} pixels", max_distance);
println!(" Creates 'skeleton' representation of shapes");
println!();
// Practical applications
println!("π§ MORPHOLOGICAL OPERATIONS SUMMARY");
println!("βββββββββββββββ¬ββββββββββββββββββ¬ββββββββββββββββββββββββββ");
println!("β Operation β Effect β Use Case β");
println!("βββββββββββββββΌββββββββββββββββββΌββββββββββββββββββββββββββ€");
println!("β Erosion β Shrinks objects β Remove noise β");
println!("β Dilation β Grows objects β Fill gaps β");
println!("β Opening β Smooth + remove β Clean up binary images β");
println!("β Closing β Fill + connect β Complete broken objects β");
println!("β Gradient β Find boundaries β Edge detection β");
println!("β Top-hat β Find features β Defect detection β");
println!("βββββββββββββββ΄ββββββββββββββββββ΄ββββββββββββββββββββββββββ");
Ok(())
}
#[allow(dead_code)]
// Tutorial 6: end-to-end object-detection pipeline on a synthetic image —
// threshold, morphological cleanup, connected-component labeling, per-object
// measurement, and size/shape filtering. Returns Err if any library call fails.
fn tutorial_6_object_detection() -> NdimageResult<()> {
println!("-------------------------------------------");
println!("Learn to find, label, and measure objects in images.");
println!();
// Synthetic scene with several objects; helper defined elsewhere in this file.
let testimage = create_multi_objectimage(100, 100);
println!("π― OBJECT DETECTION PIPELINE");
println!("1. Threshold image to create binary mask");
println!("2. Clean up binary image with morphology");
println!("3. Label connected components");
println!("4. Measure object properties");
println!("5. Filter objects by size/shape criteria");
println!();
// Step 1: Thresholding — fixed threshold first, then Otsu's automatic one.
println!("1οΈβ£ THRESHOLDING - Separate objects from background");
println!();
let binary_mask = threshold_binary(&testimage.view(), 0.5)?;
let object_pixels = count_white_pixels(&binary_mask);
println!(
" Binary threshold at 0.5: {} object pixels",
object_pixels
);
// Otsu thresholding for automatic threshold selection.
let otsu_result = otsu_threshold(&testimage.view())?;
let otsu_pixels = count_white_pixels(&otsu_result);
println!(" Otsu automatic threshold: {} object pixels", otsu_pixels);
println!(" Otsu finds optimal threshold automatically");
println!();
// Step 2: Morphological opening removes small noise speckles from the mask.
println!("2οΈβ£ MORPHOLOGICAL CLEANUP");
println!();
let structure = generate_binary_structure(2, 1)?;
let cleaned = binary_opening(
&binary_mask.view(),
Some(&structure.view()),
None,
None,
None,
)?;
let cleaned_pixels = count_white_pixels(&cleaned);
println!(" After morphological opening: {} pixels", cleaned_pixels);
println!(" Removed {} noise pixels", object_pixels - cleaned_pixels);
println!();
// Step 3: Connected component labeling — one integer label per object.
println!("3οΈβ£ CONNECTED COMPONENT LABELING");
println!("Assigns unique labels to each connected object.");
println!();
let (labeledimage, num_objects) = label(&cleaned.view(), None)?;
println!(" Found {} distinct objects", num_objects);
println!(" Each object has a unique integer label (1, 2, 3, ...)");
println!();
// Step 4: Per-object properties (area, centroid, bounding box) from the labels,
// with the original intensities supplied for intensity-weighted measures.
println!("4οΈβ£ MEASURING OBJECT PROPERTIES");
println!();
let properties = region_properties(&labeledimage.view(), Some(&testimage.view()))?;
println!(" Computed properties for {} objects:", properties.len());
println!();
println!(" Object Summary:");
println!(" βββββββββββ¬βββββββββββ¬ββββββββββββββββββ¬ββββββββββββββββββ");
println!(" β Label β Area β Centroid β Bounding Box β");
println!(" βββββββββββΌβββββββββββΌββββββββββββββββββΌββββββββββββββββββ€");
// Show at most the first five objects to keep the table short.
for (i, prop) in properties.iter().take(5).enumerate() {
println!(
" β {:7} β {:8.0} β ({:5.1}, {:5.1}) β ({:2}, {:2}) to ({:2}, {:2}) β",
i + 1,
prop.area,
prop.centroid[0],
prop.centroid[1],
prop.bbox[0],
prop.bbox[1],
prop.bbox[2],
prop.bbox[3]
);
}
println!(" βββββββββββ΄βββββββββββ΄ββββββββββββββββββ΄ββββββββββββββββββ");
println!();
// Step 5: Filter objects by area and by extent (area / bounding-box area).
println!("5οΈβ£ OBJECT FILTERING AND ANALYSIS");
println!();
let large_objects = properties
.iter()
.filter(|prop| prop.area > 50.0)
.collect::<Vec<_>>();
println!(" Large objects (area > 50): {}", large_objects.len());
let round_objects = properties
.iter()
.filter(|prop| {
// Extent = fraction of the bounding box the object fills; near 1.0
// for box-like shapes, ~0.785 for a perfect disc.
let extent =
prop.area / ((prop.bbox[2] - prop.bbox[0]) * (prop.bbox[3] - prop.bbox[1])) as f64;
extent > 0.6 // Roughly round objects
})
.collect::<Vec<_>>();
println!(" Round objects (extent > 0.6): {}", round_objects.len());
println!();
// Advanced measurements: moments, center of mass, extrema.
println!("6οΈβ£ ADVANCED MEASUREMENTS");
println!();
// Moments for shape analysis (result computed but not displayed here).
let moments_result = moments(&testimage.view(), Some(&labeledimage.view()), None)?;
println!(" Computed image moments for shape analysis");
// Intensity-weighted center of mass over the labeled regions.
let com = center_of_mass(&testimage.view(), Some(&labeledimage.view()), None)?;
println!(" Center of mass: ({:.1}, {:.1})", com[0], com[1]);
// Extrema finding (min/max locations per region).
let extrema_result = extrema(&testimage.view(), Some(&labeledimage.view()), None)?;
println!(" Found {} extrema points", extrema_result.len());
println!();
// Summary and best practices.
println!("π‘ OBJECT DETECTION BEST PRACTICES:");
println!("β’ Use appropriate thresholding for your image type");
println!("β’ Clean binary images with morphology before labeling");
println!("β’ Measure multiple properties to characterize objects");
println!("β’ Filter objects based on size, shape, or intensity");
println!("β’ Use watershed segmentation for touching objects");
println!("β’ Consider multi-scale analysis for complex scenes");
Ok(())
}
#[allow(dead_code)]
/// Tutorial 7: geometric transformations — shift, rotate, zoom, affine,
/// interpolation-order and boundary-mode comparisons, and a custom
/// coordinate-mapping example (barrel-distortion correction).
fn tutorial_7_transformations() -> NdimageResult<()> {
    println!("-------------------------------------------");
    println!("Learn to rotate, scale, and warp images for registration and augmentation.");
    println!();
    // Test pattern image; helper defined elsewhere in this file.
    let testimage = create_test_patternimage(64, 64);
    println!("π IMAGE TRANSFORMATIONS");
    println!("Transformations change the spatial arrangement of pixels.");
    println!("Used for: registration, augmentation, correction, artistic effects.");
    println!();
    // Basic transformations. Several results below are computed only to
    // demonstrate the API and are deliberately unused (underscore-prefixed).
    println!("1οΈβ£ BASIC TRANSFORMATIONS");
    println!();
    // Translation (shift).
    println!("Translation (shifting):");
    let _shifted = shift(
        &testimage.view(),
        &[5.0, -3.0],
        InterpolationOrder::Linear,
        BoundaryMode::Reflect,
        None,
    )?;
    println!(" Shifted by (5, -3) pixels");
    println!(" Use case: Image alignment, correcting camera shake");
    println!();
    // Rotation.
    println!("Rotation:");
    let _rotated_30 = rotate(&testimage.view(), 30.0, None, None, None, None, None)?;
    let _rotated_90 = rotate(&testimage.view(), 90.0, None, None, None, None, None)?;
    println!(" Rotated by 30Β° and 90Β°");
    println!(" Use case: Correcting image orientation, data augmentation");
    println!();
    // Scaling (zoom) — these two results ARE used to show the size change.
    println!("Scaling (zooming):");
    let zoomed_in = zoom(
        &testimage,
        &[2.0, 2.0],
        InterpolationOrder::Linear,
        BoundaryMode::Reflect,
        None,
    )?;
    let zoomed_out = zoom(
        &testimage,
        &[0.5, 0.5],
        InterpolationOrder::Linear,
        BoundaryMode::Reflect,
        None,
    )?;
    println!(" Zoomed in 2x and out 0.5x");
    println!(
        " Original: {:?}, Zoomed in: {:?}, Zoomed out: {:?}",
        testimage.dim(),
        zoomed_in.dim(),
        zoomed_out.dim()
    );
    println!(" Use case: Multi-resolution analysis, image pyramids");
    println!();
    // Advanced transformations.
    println!("2οΈβ£ ADVANCED TRANSFORMATIONS");
    println!();
    // Affine transformation: a 2x3 matrix combining scale, shear, translation.
    println!("Affine transformation (combined operations):");
    let affine_matrix = Array2::from_shape_vec(
        (2, 3),
        vec![
            1.4, 0.3, 5.0, // Scale + shear + translation in X
            -0.2, 1.2, -2.0, // Shear + scale + translation in Y
        ],
    )?;
    let _affine_result = affine_transform(
        &testimage.view(),
        &affine_matrix.view(),
        InterpolationOrder::Linear,
        BoundaryMode::Constant,
        Some(0.0),
    )?;
    println!(" Applied scaling, shearing, and translation in one step");
    println!(" Matrix form allows precise control of transformation");
    println!(" Use case: Correcting perspective, image registration");
    println!();
    // Interpolation methods comparison (results computed for demonstration).
    println!("3οΈβ£ INTERPOLATION METHODS");
    println!("When transforming images, we need to interpolate new pixel values.");
    println!();
    let angle = 15.0;
    let _nearest = rotate(
        &testimage.view(),
        angle,
        None,
        Some(InterpolationOrder::Nearest),
        None,
        None,
        None,
    )?;
    let _linear = rotate(
        &testimage.view(),
        angle,
        None,
        Some(InterpolationOrder::Linear),
        None,
        None,
        None,
    )?;
    let _cubic = rotate(
        &testimage.view(),
        angle,
        None,
        Some(InterpolationOrder::Cubic),
        None,
        None,
        None,
    )?;
    println!(" Interpolation Quality Comparison (15Β° rotation):");
    println!(" β’ Nearest neighbor: Fast, blocky artifacts");
    println!(" β’ Linear: Good balance of speed and quality");
    println!(" β’ Cubic: Smooth, but slower computation");
    println!();
    // Boundary handling (results computed for demonstration).
    println!("4οΈβ£ BOUNDARY HANDLING");
    println!("What happens at image edges during transformation?");
    println!();
    let boundary_test_angle = 45.0;
    let _constant = rotate(
        &testimage.view(),
        boundary_test_angle,
        None,
        None,
        Some(BoundaryMode::Constant),
        Some(0.0),
        None,
    )?;
    let _reflect = rotate(
        &testimage.view(),
        boundary_test_angle,
        None,
        None,
        Some(BoundaryMode::Reflect),
        None,
        None,
    )?;
    let _wrap = rotate(
        &testimage.view(),
        boundary_test_angle,
        None,
        None,
        Some(BoundaryMode::Wrap),
        None,
        None,
    )?;
    println!(" Boundary Mode Effects (45Β° rotation):");
    println!(" β’ Constant: Fill with specified value (often 0)");
    println!(" β’ Reflect: Mirror image at boundaries");
    println!(" β’ Wrap: Treat image as periodic/tiled");
    println!(" β’ Nearest: Extend edge pixels");
    println!();
    // Coordinate mapping.
    println!("5οΈβ£ COORDINATE MAPPING");
    println!("For complex transformations, specify exact coordinate mappings.");
    println!();
    // Build a (2, H*W) array of source coordinates implementing a simple
    // barrel-distortion correction around the image center. Row 0 holds the
    // source row (y) coordinates, row 1 the source column (x) coordinates.
    let (height, width) = testimage.dim();
    let center_y = height as f64 / 2.0;
    let center_x = width as f64 / 2.0;
    // BUG FIX: ndarray's constructor is `from_shape_fn`, not `fromshape_fn`.
    let coords = Array2::from_shape_fn((2, height * width), |(axis, idx)| {
        let i = (idx / width) as f64;
        let j = (idx % width) as f64;
        // Radial correction: pull samples toward the center, growing with r².
        let dy = i - center_y;
        let dx = j - center_x;
        let r = (dx * dx + dy * dy).sqrt();
        let distortion_factor = 1.0 + 0.0001 * r * r;
        if axis == 0 {
            center_y + dy / distortion_factor
        } else {
            center_x + dx / distortion_factor
        }
    });
    let corrected = map_coordinates(
        &testimage.view(),
        &coords.view(),
        InterpolationOrder::Linear,
        BoundaryMode::Reflect,
        None,
    )?;
    // BUG FIX: ndarray's reshape method is `into_shape`, not `intoshape`.
    let _corrected_2d = corrected.into_shape((height, width))?;
    println!(" Applied barrel distortion correction using coordinate mapping");
    println!(" Use case: Lens distortion correction, complex warping");
    println!();
    // Performance and best practices.
    println!("β‘ PERFORMANCE TIPS");
    println!("β’ Use appropriate interpolation: nearest for labels, linear/cubic for images");
    println!("β’ Consider image size: large transformations can be memory intensive");
    println!("β’ For repeated transformations, pre-compute coordinate maps");
    println!("β’ Use affine transforms when possible (faster than general mapping)");
    println!("β’ Choose boundary modes based on your application needs");
    println!();
    println!("π§ COMMON USE CASES");
    println!("β’ Medical imaging: Register scans from different time points");
    println!("β’ Computer vision: Data augmentation for training");
    println!("β’ Photography: Correct perspective distortion");
    println!("β’ Remote sensing: Geometric correction of satellite images");
    println!("β’ Manufacturing: Align parts for inspection");
    Ok(())
}
#[allow(dead_code)]
fn tutorial_8_advanced_techniques() -> NdimageResult<()> {
println!("-------------------------------------------");
println!("Explore advanced image processing techniques and domain-specific applications.");
println!();
let testimage = create_complex_testimage(128, 128);
println!("π ADVANCED IMAGE PROCESSING TECHNIQUES");
println!();
// Watershed segmentation
println!("1οΈβ£ WATERSHED SEGMENTATION");
println!("Watershed treats the image as a topographic surface.");
println!("Water 'floods' from seed points, creating segmentation boundaries.");
println!();
// Create markers for watershed
let binary_image = threshold_binary(&testimage.view(), 0.6)?;
let distance_map = {
use ndarray::IxDyn;
let binary_dyn = binary_image.clone().into_dimensionality::<IxDyn>().unwrap();
let (distances_) = distance_transform_edt(&binary_dyn, None, true, false)
.expect("Distance transform failed")?;
distances.into_dimensionality::<ndarray::Ix2>().unwrap()
};
// Find local maxima as seeds
let markers = create_watershed_markers(&distance_map, 3.0);
let watershed_result = watershed(&testimage.view(), &markers.view(), None, None)?;
let num_regions = watershed_result.fold(0u32, |acc, &x| acc.max(x));
println!(" Watershed segmentation created {} regions", num_regions);
println!(" Use case: Separate touching objects, cell counting");
println!();
// Multi-scale analysis
println!("2οΈβ£ MULTI-SCALE ANALYSIS");
println!("Analyze images at different scales to capture various features.");
println!();
let scales = vec![1.0, 2.0, 4.0];
println!(" Analyzing at scales: {:?}", scales);
for (i, &sigma) in scales.iter().enumerate() {
let filtered = gaussian_filter(&testimage, &[sigma, sigma], None, None, None)?;
let edges = sobel(&filtered.view(), None, None, None)?;
let edge_count = count_edge_pixels(&edges, 0.1);
println!(
" Scale {} (Ο={}): {} edge pixels",
i + 1,
sigma,
edge_count
);
}
println!(" Different scales reveal different levels of detail");
println!();
// Texture analysis
println!("3οΈβ£ TEXTURE ANALYSIS");
println!("Characterize surface texture using statistical measures.");
println!();
// Local variance as texture measure
let texture_filter = |window: &ArrayView2<f64>| -> f64 {
let mean = window.sum() / window.len() as f64;
let variance = window.fold(0.0, |acc, &x| acc + (x - mean).powi(2)) / window.len() as f64;
variance
};
let texture_map = generic_filter(
&testimage.view(),
Some(&Array2::ones((5, 5))),
texture_filter,
None,
None,
None,
None,
)?;
let texture_stats = computeimage_stats(&texture_map);
println!(
" Local variance texture: mean={:.4}, max={:.4}",
texture_stats.2, texture_stats.1
);
println!(" High values indicate textured regions");
println!();
// Feature extraction
println!("4οΈβ£ FEATURE EXTRACTION");
println!("Extract meaningful descriptors for image analysis.");
println!();
// Harris corner detection
let corners = harris_corners(testimage.view(), 0.04, 3, 0.01, None)?;
let corner_count = corners.iter().filter(|&&x| x > 0.0).count();
println!(" Harris corners detected: {}", corner_count);
// Edge density in regions
let edges = canny(testimage.view(), 1.0, 0.1, 0.3, None)?;
let edge_density = edges.sum() / edges.len() as f64;
println!(" Edge density: {:.3}", edge_density);
println!();
// Quality assessment
println!("5οΈβ£ IMAGE QUALITY ASSESSMENT");
println!("Measure image quality without reference images.");
println!();
let sharpness = estimate_sharpness(&testimage);
let contrast = estimate_contrast(&testimage);
let noise_level = estimate_noise_level(&testimage);
println!(" Image quality metrics:");
println!(" β’ Sharpness: {:.3} (higher = sharper)", sharpness);
println!(" β’ Contrast: {:.3} (higher = more contrast)", contrast);
println!(" β’ Noise level: {:.3} (lower = less noise)", noise_level);
println!();
// Integration with other processing
println!("6οΈβ£ INTEGRATION EXAMPLES");
println!("Combining multiple techniques for robust analysis.");
println!();
println!(" Image enhancement pipeline:");
println!(" 1. Noise reduction β 2. Contrast enhancement β 3. Edge detection");
let denoised = gaussian_filter(&testimage, &[0.8, 0.8], None, None, None)?;
let enhanced = enhance_contrast(&denoised, 1.2)?;
let final_edges = canny(enhanced.view(), 1.0, 0.15, 0.3, None)?;
let final_edge_count = count_edge_pixels(&final_edges, 0.5);
println!(
" Pipeline result: {} high-quality edges detected",
final_edge_count
);
println!();
// Domain-specific applications preview
println!("π₯ DOMAIN-SPECIFIC APPLICATIONS");
println!();
println!(" Medical Imaging:");
println!(" β’ Vessel enhancement for angiography");
println!(" β’ Tumor detection and segmentation");
println!(" β’ Bone structure analysis");
println!();
println!(" Remote Sensing:");
println!(" β’ Vegetation index calculation (NDVI)");
println!(" β’ Water body detection");
println!(" β’ Cloud detection and removal");
println!();
println!(" Industrial Inspection:");
println!(" β’ Defect detection on surfaces");
println!(" β’ Dimensional measurement");
println!(" β’ Quality control automation");
println!();
println!("π‘ ADVANCED PROCESSING TIPS:");
println!("β’ Combine multiple techniques for robust results");
println!("β’ Use domain knowledge to tune parameters");
println!("β’ Validate results with ground truth when possible");
println!("β’ Consider computational cost vs. accuracy trade-offs");
println!("β’ Use appropriate color spaces for your application");
Ok(())
}
// Helper functions for creating test images and computing statistics
#[allow(dead_code)]
/// Creates a binary test image containing a filled circle centered in the frame.
///
/// Pixels whose Euclidean distance from the image center is at most `radius`
/// are set to 1.0; all other pixels are 0.0.
fn create_circleimage(height: usize, width: usize, radius: f64) -> Array2<f64> {
    let center_y = height as f64 / 2.0;
    let center_x = width as f64 / 2.0;
    Array2::from_shape_fn((height, width), |(i, j)| {
        let dy = i as f64 - center_y;
        let dx = j as f64 - center_x;
        let distance = (dx * dx + dy * dy).sqrt();
        if distance <= radius {
            1.0
        } else {
            0.0
        }
    })
}
#[allow(dead_code)]
/// Creates a binary test image with a centered filled square of side `size`.
///
/// Pixels inside the square are 1.0; all others are 0.0. Assumes
/// `size <= height` and `size <= width` (otherwise the offset subtraction
/// underflows, matching the original contract).
fn create_squareimage(height: usize, width: usize, size: usize) -> Array2<f64> {
    let start_y = (height - size) / 2;
    let start_x = (width - size) / 2;
    Array2::from_shape_fn((height, width), |(i, j)| {
        if i >= start_y && i < start_y + size && j >= start_x && j < start_x + size {
            1.0
        } else {
            0.0
        }
    })
}
#[allow(dead_code)]
/// Returns the (min, max, mean) of all pixel values in a single pass.
fn computeimage_stats(image: &Array2<f64>) -> (f64, f64, f64) {
    let mut lo = f64::INFINITY;
    let mut hi = f64::NEG_INFINITY;
    let mut total = 0.0;
    for &px in image.iter() {
        lo = lo.min(px);
        hi = hi.max(px);
        total += px;
    }
    (lo, hi, total / image.len() as f64)
}
#[allow(dead_code)]
/// Adds deterministic pseudo-noise to an image so examples stay reproducible.
///
/// Each pixel receives an offset in `[-noiselevel, +noiselevel]` derived from
/// a simple position hash instead of a random generator, so repeated runs
/// produce identical output.
fn add_realistic_noise(image: &Array2<f64>, noiselevel: f64) -> Array2<f64> {
    image
        + &Array2::from_shape_fn(image.dim(), |(i, j)| {
            // Position-based hash in [0, 1).
            let hash = ((i * 7 + j * 11) % 17) as f64 / 17.0;
            noiselevel * (hash - 0.5) * 2.0
        })
}
#[allow(dead_code)]
/// Builds a synthetic image with several edge types for edge-detector demos:
/// a horizontal bar, a vertical bar, and a dimmer filled circle, over a
/// low-intensity background.
fn create_edge_testimage(height: usize, width: usize) -> Array2<f64> {
    Array2::from_shape_fn((height, width), |(i, j)| {
        if i > height / 2 && i < height / 2 + 3 {
            1.0 // Horizontal line (about 3 px thick)
        } else if j > width / 2 && j < width / 2 + 3 {
            1.0 // Vertical line (about 3 px thick)
        } else if ((i as f64 - height as f64 / 4.0).powi(2)
            + (j as f64 - width as f64 / 4.0).powi(2))
        .sqrt()
            < 10.0
        {
            0.8 // Circle of radius 10 in the upper-left quadrant
        } else {
            0.1 // Background
        }
    })
}
#[allow(dead_code)]
/// Counts pixels whose value strictly exceeds `threshold`.
fn count_edge_pixels(image: &Array2<f64>, threshold: f64) -> usize {
    let mut above = 0;
    for &px in image.iter() {
        if px > threshold {
            above += 1;
        }
    }
    above
}
#[allow(dead_code)]
/// Computes a Roberts-cross gradient magnitude map.
///
/// Each output pixel is the magnitude of the two diagonal differences of its
/// 2x2 neighborhood; the last row and column keep their zero initialization.
fn roberts_cross_gradient(image: &ArrayView2<f64>) -> NdimageResult<Array2<f64>> {
    let (rows, cols) = image.dim();
    let mut gradient = Array2::zeros((rows, cols));
    for r in 0..rows - 1 {
        for c in 0..cols - 1 {
            let diag_a = image[[r, c]] - image[[r + 1, c + 1]];
            let diag_b = image[[r, c + 1]] - image[[r + 1, c]];
            gradient[[r, c]] = (diag_a * diag_a + diag_b * diag_b).sqrt();
        }
    }
    Ok(gradient)
}
#[allow(dead_code)]
/// Builds a binary (u8) test image with two filled circles and a connecting
/// horizontal bar, useful for morphology and labeling demos.
fn create_binaryshapesimage(height: usize, width: usize) -> Array2<u8> {
    Array2::from_shape_fn((height, width), |(i, j)| {
        // Circle centers: upper-left and lower-right quadrants.
        let center1_y = height / 4;
        let center1_x = width / 4;
        let center2_y = 3 * height / 4;
        let center2_x = 3 * width / 4;
        let dist1 =
            ((i as f64 - center1_y as f64).powi(2) + (j as f64 - center1_x as f64).powi(2)).sqrt();
        let dist2 =
            ((i as f64 - center2_y as f64).powi(2) + (j as f64 - center2_x as f64).powi(2)).sqrt();
        // Foreground: either circle, or the middle horizontal bar.
        if dist1 < 12.0
            || dist2 < 8.0
            || (i > height / 2 - 5 && i < height / 2 + 5 && j > width / 3 && j < 2 * width / 3)
        {
            1u8
        } else {
            0u8
        }
    })
}
#[allow(dead_code)]
/// Counts pixels whose value is strictly greater than zero.
fn count_white_pixels<T>(image: &Array2<T>) -> usize
where
    T: PartialOrd + From<u8>,
{
    let zero = T::from(0u8);
    let mut count = 0;
    for px in image.iter() {
        if *px > zero {
            count += 1;
        }
    }
    count
}
#[allow(dead_code)]
/// Builds a test image containing several circular objects of varying size
/// and intensity over a dim uniform background.
fn create_multi_objectimage(height: usize, width: usize) -> Array2<f64> {
    // (center (row, col), radius, intensity) per synthetic object.
    const OBJECTS: [((usize, usize), f64, f64); 5] = [
        ((20, 20), 8.0, 0.8),
        ((30, 60), 6.0, 0.9),
        ((60, 30), 10.0, 0.7),
        ((70, 70), 5.0, 0.85),
        ((80, 20), 4.0, 0.6),
    ];
    Array2::from_shape_fn((height, width), |(i, j)| {
        OBJECTS
            .iter()
            .find(|&&((cy, cx), radius, _)| {
                let dy = i as f64 - cy as f64;
                let dx = j as f64 - cx as f64;
                (dy * dy + dx * dx).sqrt() <= radius
            })
            .map_or(0.1, |&(_, _, intensity)| intensity) // 0.1 = background
    })
}
#[allow(dead_code)]
/// Generates a smooth test pattern that mixes two sinusoidal gratings with a
/// radial brightness falloff, useful for filter and transform demos.
fn create_test_patternimage(height: usize, width: usize) -> Array2<f64> {
    use std::f64::consts::PI;
    Array2::from_shape_fn((height, width), |(i, j)| {
        // Normalized coordinates in [0, 1).
        let u = i as f64 / height as f64;
        let v = j as f64 / width as f64;
        let grating = (u * PI * 4.0).sin() * (v * PI * 6.0).cos();
        let dist_from_center = ((u - 0.5).powi(2) + (v - 0.5).powi(2)).sqrt();
        let vignette = (1.0 - dist_from_center * 2.0).max(0.0);
        0.5 + 0.3 * grating + 0.2 * vignette
    })
}
#[allow(dead_code)]
/// Generates a richer test image combining interference waves, two bright
/// circular spots, and fine texture, with values clamped to [0, 1].
fn create_complex_testimage(height: usize, width: usize) -> Array2<f64> {
    use std::f64::consts::PI;
    Array2::from_shape_fn((height, width), |(i, j)| {
        // Normalized coordinates in [0, 1).
        let u = i as f64 / height as f64;
        let v = j as f64 / width as f64;
        // Low-frequency interference pattern.
        let waves = (u * PI * 8.0).sin() * (v * PI * 6.0).cos();
        // Two circular bright spots at fixed normalized positions.
        let near_spot1 = ((u - 0.3).powi(2) + (v - 0.3).powi(2)).sqrt() < 0.1;
        let near_spot2 = ((u - 0.7).powi(2) + (v - 0.7).powi(2)).sqrt() < 0.15;
        let spots = if near_spot1 || near_spot2 { 0.8 } else { 0.0 };
        // High-frequency texture component.
        let texture = (u * 50.0).sin() * (v * 40.0).cos() * 0.1;
        (0.4 + 0.2 * waves + spots + texture).clamp(0.0, 1.0)
    })
}
#[allow(dead_code)]
/// Creates watershed seed markers by labeling strict local maxima of a
/// distance map that exceed `threshold`.
///
/// Each 4-neighborhood peak receives a unique positive label in scan order;
/// all other pixels (including the one-pixel border) remain 0.
fn create_watershed_markers(distance_map: &Array2<f64>, threshold: f64) -> Array2<u32> {
    let mut markers = Array2::zeros(distance_map.dim());
    let mut label = 1u32;
    let (height, width) = distance_map.dim();
    // Skip the border so every candidate has all four neighbors in bounds.
    for i in 1..height - 1 {
        for j in 1..width - 1 {
            let center = distance_map[[i, j]];
            if center > threshold
                && center > distance_map[[i - 1, j]]
                && center > distance_map[[i + 1, j]]
                && center > distance_map[[i, j - 1]]
                && center > distance_map[[i, j + 1]]
            {
                markers[[i, j]] = label;
                label += 1;
            }
        }
    }
    markers
}
#[allow(dead_code)]
/// Estimates sharpness as the mean squared 3x3 Laplacian response over all
/// interior pixels (a common focus measure; higher = sharper).
fn estimate_sharpness(image: &Array2<f64>) -> f64 {
    // Discrete 3x3 Laplacian kernel.
    let kernel = Array2::from_shape_vec(
        (3, 3),
        vec![0.0, -1.0, 0.0, -1.0, 4.0, -1.0, 0.0, -1.0, 0.0],
    )
    .unwrap();
    let (height, width) = image.dim();
    let mut sum_squares = 0.0;
    let mut count = 0;
    for i in 1..height - 1 {
        for j in 1..width - 1 {
            // Apply the kernel over the 3x3 neighborhood centered at (i, j).
            let mut response = 0.0;
            for ((ki, kj), &weight) in kernel.indexed_iter() {
                response += image[[i + ki - 1, j + kj - 1]] * weight;
            }
            sum_squares += response * response;
            count += 1;
        }
    }
    sum_squares / count as f64
}
#[allow(dead_code)]
/// Estimates global contrast as the full dynamic range (max - min).
fn estimate_contrast(image: &Array2<f64>) -> f64 {
    let (min, max, _mean) = computeimage_stats(image);
    max - min
}
#[allow(dead_code)]
/// Estimates noise as the average central-difference gradient magnitude over
/// interior pixels; lower values indicate a smoother image.
fn estimate_noise_level(image: &Array2<f64>) -> f64 {
    let (height, width) = image.dim();
    let mut total = 0.0;
    let mut samples = 0;
    for row in 1..height - 1 {
        for col in 1..width - 1 {
            let dx = image[[row, col + 1]] - image[[row, col - 1]];
            let dy = image[[row + 1, col]] - image[[row - 1, col]];
            total += (dx * dx + dy * dy).sqrt();
            samples += 1;
        }
    }
    total / samples as f64
}
#[allow(dead_code)]
/// Stretches pixel values about the image mean by `factor`, clamping each
/// result to [0, 1].
fn enhance_contrast(image: &Array2<f64>, factor: f64) -> NdimageResult<Array2<f64>> {
    let mean = computeimage_stats(image).2;
    let stretched = image.mapv(|px| ((px - mean) * factor + mean).clamp(0.0, 1.0));
    Ok(stretched)
}