// scirs2_ndimage/comprehensive_examples.rs

//! Comprehensive Examples and Documentation for scirs2-ndimage
//!
//! This module provides extensive examples, tutorials, and documentation
//! for all major functionality in scirs2-ndimage. It serves as both
//! educational material and validation of the API usability.
6
use std::collections::HashMap;

/// Crate-local result alias: errors are boxed as trait objects so the
/// tutorial-building methods can bubble up any error kind with `?`.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
10
11/// Comprehensive tutorial and example collection
12pub struct ExampleTutorial {
13    /// Tutorial steps with descriptions
14    steps: Vec<TutorialStep>,
15    /// Generated outputs for validation
16    outputs: HashMap<String, String>,
17}
18
/// Individual tutorial step.
///
/// A purely descriptive record: the code example is carried as source text
/// (`code_example`), not as compiled code. `Default`/`PartialEq`/`Eq` are
/// derived so steps can be built incrementally via struct-update syntax and
/// compared in tests; all fields are plain `String`/`Vec<String>`.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct TutorialStep {
    /// Step title
    pub title: String,
    /// Description of what this step demonstrates
    pub description: String,
    /// Code example as string
    pub code_example: String,
    /// Expected output description
    pub expected_output: String,
    /// Key concepts covered
    pub concepts: Vec<String>,
    /// Related functions
    pub related_functions: Vec<String>,
}
35
36impl ExampleTutorial {
37    /// Create a new comprehensive tutorial
38    pub fn new() -> Self {
39        Self {
40            steps: Vec::new(),
41            outputs: HashMap::new(),
42        }
43    }
44
    /// Append a single step to the end of the tutorial sequence.
    ///
    /// Steps are kept in insertion order, so callers control the teaching
    /// progression by the order of their `add_step` calls.
    pub fn add_step(&mut self, step: TutorialStep) {
        self.steps.push(step);
    }
49
50    /// Generate all tutorial steps
51    pub fn generate_complete_tutorial(&mut self) -> Result<()> {
52        self.add_filter_examples()?;
53        self.add_morphology_examples()?;
54        self.add_interpolation_examples()?;
55        self.add_measurement_examples()?;
56        self.add_segmentation_examples()?;
57        self.add_feature_detection_examples()?;
58        self.add_advanced_workflow_examples()?;
59        Ok(())
60    }
61
62    /// Add comprehensive filter examples
63    fn add_filter_examples(&mut self) -> Result<()> {
64        // Gaussian Filter Tutorial
65        self.add_step(TutorialStep {
66            title: "Gaussian Smoothing for Noise Reduction".to_string(),
67            description:
68                "Learn how to apply Gaussian filters to reduce noise while preserving edges"
69                    .to_string(),
70            code_example: r#"
71use scirs2_ndimage::filters::{gaussian_filter, BorderMode};
72use scirs2_core::ndarray::Array2;
73
74// Create a noisy image (in practice, you'd load from file)
75let mut noisyimage = Array2::zeros((100, 100));
76// Add some signal
77for i in 40..60 {
78    for j in 40..60 {
79        noisyimage[[i, j]] = 1.0;
80    }
81}
82// Add noise (in practice, this would come from your data)
83for i in 0..100 {
84    for j in 0..100 {
85        noisyimage[[i, j]] += 0.1 * ((i + j) as f64).sin();
86    }
87}
88
89// Apply Gaussian filter with different sigma values
90let smooth_light = gaussian_filter(&noisyimage, 1.0, None, None)?;
91let smooth_medium = gaussian_filter(&noisyimage, 2.0, None, None)?;
92let smooth_heavy = gaussian_filter(&noisyimage, 4.0, None, None)?;
93
94// Use different border modes
95let reflected = gaussian_filter(&noisyimage, 2.0, Some(BorderMode::Reflect), None)?;
96let wrapped = gaussian_filter(&noisyimage, 2.0, Some(BorderMode::Wrap), None)?;
97let constant = gaussian_filter(&noisyimage, 2.0, Some(BorderMode::Constant), Some(0.0))?;
98
99println!("Applied Gaussian filters with sigma: 1.0, 2.0, 4.0");
100println!("Border modes: Reflect, Wrap, Constant");
101"#
102            .to_string(),
103            expected_output: "Progressively smoother images with different boundary handling"
104                .to_string(),
105            concepts: vec![
106                "Gaussian smoothing".to_string(),
107                "Noise reduction".to_string(),
108                "Border mode handling".to_string(),
109                "Parameter selection".to_string(),
110            ],
111            related_functions: vec![
112                "uniform_filter".to_string(),
113                "median_filter".to_string(),
114                "bilateral_filter".to_string(),
115            ],
116        });
117
118        // Median Filter Tutorial
119        self.add_step(TutorialStep {
120            title: "Median Filtering for Impulse Noise Removal".to_string(),
121            description:
122                "Use median filters to remove salt-and-pepper noise while preserving edges"
123                    .to_string(),
124            code_example: r#"
125use scirs2_ndimage::filters::{median_filter, BorderMode};
126use scirs2_core::ndarray::Array2;
127
128// Create image with impulse noise
129let mut image_with_impulses = Array2::from_shape_fn((50, 50), |(i, j)| {
130    // Create some structure
131    if (i as i32 - 25).pow(2) + (j as i32 - 25).pow(2) < 100 {
132        1.0
133    } else {
134        0.0
135    }
136});
137
138// Add impulse noise (salt and pepper)
139image_with_impulses[[10, 10]] = 1.0; // salt
140image_with_impulses[[35, 35]] = 0.0; // pepper
141image_with_impulses[[20, 30]] = 1.0; // salt
142image_with_impulses[[40, 15]] = 0.0; // pepper
143
144// Apply median filter with different kernel sizes
145let cleaned_3x3 = median_filter(&image_with_impulses, &[3, 3], None)?;
146let cleaned_5x5 = median_filter(&image_with_impulses, &[5, 5], None)?;
147let cleaned_7x7 = median_filter(&image_with_impulses, &[7, 7], None)?;
148
149// Compare with Gaussian filter (less effective for impulse noise)
150let gaussian_cleaned = gaussian_filter(&image_with_impulses, 1.0, None, None)?;
151
152println!("Median filter effectively removes impulse noise");
153println!("Larger kernels remove more noise but may blur edges");
154"#
155            .to_string(),
156            expected_output:
157                "Clean images with impulse noise removed, edge preservation comparison".to_string(),
158            concepts: vec![
159                "Impulse noise removal".to_string(),
160                "Edge preservation".to_string(),
161                "Kernel size effects".to_string(),
162                "Filter comparison".to_string(),
163            ],
164            related_functions: vec![
165                "rank_filter".to_string(),
166                "percentile_filter".to_string(),
167                "minimum_filter".to_string(),
168                "maximum_filter".to_string(),
169            ],
170        });
171
172        // Edge Detection Tutorial
173        self.add_step(TutorialStep {
174            title: "Edge Detection with Sobel and Laplacian Filters".to_string(),
175            description: "Detect edges using gradient-based and Laplacian operators".to_string(),
176            code_example: r#"
177use scirs2_ndimage::filters::{sobel, laplace, gaussian_filter};
178use scirs2_core::ndarray::Array2;
179
180// Create test image with clear edges
181let image = Array2::from_shape_fn((60, 60), |(i, j)| {
182    if i > 30 && j > 30 {
183        1.0
184    } else if i < 20 || j < 20 {
185        0.5
186    } else {
187        0.0
188    }
189});
190
191// Pre-smooth to reduce noise
192let smoothed = gaussian_filter(&image, 1.0, None, None)?;
193
194// Detect edges with Sobel filter
195let edges_x = sobel(&smoothed, Some(0), None, None)?; // Vertical edges
196let edges_y = sobel(&smoothed, Some(1), None, None)?; // Horizontal edges
197let edges_magnitude = sobel(&smoothed, None, None, None)?; // Gradient magnitude
198
199// Detect edges with Laplacian (second derivative)
200let laplacian_edges = laplace(&smoothed, None)?;
201
202// Combine for comprehensive edge detection
203let combined_edges = Array2::from_shape_fn(image.dim(), |(i, j)| {
204    let sobel_val = edges_magnitude[[i, j]];
205    let laplacian_val = laplacian_edges[[i, j]].abs();
206    (sobel_val + 0.5 * laplacian_val).min(1.0)
207});
208
209println!("Detected edges using Sobel (gradient) and Laplacian (second derivative)");
210println!("Combined approach provides comprehensive edge information");
211"#
212            .to_string(),
213            expected_output: "Edge maps showing different types of edge information".to_string(),
214            concepts: vec![
215                "Gradient-based edge detection".to_string(),
216                "Laplacian edge detection".to_string(),
217                "Edge orientation".to_string(),
218                "Multi-scale edge detection".to_string(),
219            ],
220            related_functions: vec![
221                "canny".to_string(),
222                "prewitt".to_string(),
223                "scharr".to_string(),
224                "roberts".to_string(),
225            ],
226        });
227
228        Ok(())
229    }
230
231    /// Add morphological operation examples
232    fn add_morphology_examples(&mut self) -> Result<()> {
233        self.add_step(TutorialStep {
234            title: "Binary Morphology for Shape Analysis".to_string(),
235            description: "Use erosion, dilation, opening, and closing for shape processing".to_string(),
236            code_example: r#"
237use scirs2_ndimage::morphology::{
238    binary_erosion, binary_dilation, binary_opening, binary_closing,
239    generate_binary_structure, disk_structure
240};
241use scirs2_core::ndarray::Array2;
242
243// Create binary image with various shapes
244let mut binary_image = Array2::from_elem((80, 80), false);
245
246// Add some shapes
247for i in 20..30 {
248    for j in 20..35 {
249        binary_image[[i, j]] = true; // Rectangle
250    }
251}
252
253for i in 50..70 {
254    for j in 50..70 {
255        if (i as i32 - 60).pow(2) + (j as i32 - 60).pow(2) < 80 {
256            binary_image[[i, j]] = true; // Circle
257        }
258    }
259}
260
261// Add some noise (small isolated pixels)
262binary_image[[10, 10]] = true;
263binary_image[[70, 20]] = true;
264
265// Define structuring elements
266let cross_3x3 = generate_binary_structure(2, 1)?; // 4-connected
267let square_3x3 = generate_binary_structure(2, 2)?; // 8-connected
268let disk_5 = disk_structure(5)?; // Circular structuring element
269
270// Basic morphological operations
271let eroded = binary_erosion(&binary_image, Some(&cross_3x3), None, None, None, None, None)?;
272let dilated = binary_dilation(&binary_image, Some(&cross_3x3), None, None, None, None, None)?;
273
274// Compound operations
275let opened = binary_opening(&binary_image, Some(&cross_3x3), None, None, None, None)?;  // Remove noise
276let closed = binary_closing(&binary_image, Some(&cross_3x3), None, None, None, None)?;  // Fill holes
277
278// Different structuring elements produce different results
279let opened_disk = binary_opening(&binary_image, Some(&disk_5), None, None, None, None)?;
280
281println!("Applied morphological operations:");
282println!("- Erosion: shrinks objects");
283println!("- Dilation: expands objects");
284println!("- Opening: removes noise, separates connected objects");
285println!("- Closing: fills holes, connects nearby objects");
286"#.to_string(),
287            expected_output: "Processed binary images showing shape modifications".to_string(),
288            concepts: vec![
289                "Binary morphology".to_string(),
290                "Structuring elements".to_string(),
291                "Noise removal".to_string(),
292                "Shape modification".to_string(),
293            ],
294            related_functions: vec![
295                "grey_erosion".to_string(),
296                "grey_dilation".to_string(),
297                "white_tophat".to_string(),
298                "black_tophat".to_string(),
299            ],
300        });
301
302        self.add_step(TutorialStep {
303            title: "Grayscale Morphology for Contrast Enhancement".to_string(),
304            description: "Apply morphological operations to grayscale images for various effects"
305                .to_string(),
306            code_example: r#"
307use scirs2_ndimage::morphology::{
308    grey_erosion, grey_dilation, grey_opening, grey_closing,
309    white_tophat, black_tophat, morphological_gradient
310};
311use scirs2_core::ndarray::Array2;
312
313// Create grayscale test image
314let image = Array2::from_shape_fn((60, 60), |(i, j)| {
315    let center_i = 30.0;
316    let center_j = 30.0;
317    let distance = ((i as f64 - center_i).powi(2) + (j as f64 - center_j).powi(2)).sqrt();
318    
319    if distance < 15.0 {
320        0.8 - distance / 20.0  // Bright center, fading to edges
321    } else {
322        0.1 + 0.1 * ((i + j) as f64 * 0.2).sin()  // Textured background
323    }
324});
325
326// Basic grayscale morphology
327let eroded = grey_erosion(&image, None, None, None, None, None)?;
328let dilated = grey_dilation(&image, None, None, None, None, None)?;
329
330// Compound operations
331let opened = grey_opening(&image, None, None, None, None, None)?;   // Remove bright noise
332let closed = grey_closing(&image, None, None, None, None, None)?;   // Remove dark noise
333
334// Top-hat transformations for feature enhancement
335let white_hat = white_tophat(&image, None, None, None, None, None)?; // Bright features
336let black_hat = black_tophat(&image, None, None, None, None, None)?; // Dark features
337
338// Morphological gradient for edge detection
339let gradient = morphological_gradient(&image, None, None, None, None, None)?;
340
341// Enhance contrast by combining operations
342let enhanced = Array2::from_shape_fn(image.dim(), |(i, j)| {
343    let original = image[[i, j]];
344    let white_feature = "white_hat"[[i, j]];
345    let black_feature = "black_hat"[[i, j]];
346    (original + white_feature - black_feature).max(0.0).min(1.0)
347});
348
349println!("Applied grayscale morphological operations for:");
350println!("- Noise removal (opening/closing)");
351println!("- Feature enhancement (top-hat transforms)");  
352println!("- Edge detection (morphological gradient)");
353println!("- Contrast enhancement (combination of operations)");
354"#
355            .to_string(),
356            expected_output: "Enhanced grayscale images with improved contrast and features"
357                .to_string(),
358            concepts: vec![
359                "Grayscale morphology".to_string(),
360                "Feature enhancement".to_string(),
361                "Contrast adjustment".to_string(),
362                "Top-hat transforms".to_string(),
363            ],
364            related_functions: vec![
365                "morphological_laplace".to_string(),
366                "distance_transform_edt".to_string(),
367                "label".to_string(),
368            ],
369        });
370
371        Ok(())
372    }
373
374    /// Add interpolation and transformation examples
375    fn add_interpolation_examples(&mut self) -> Result<()> {
376        self.add_step(TutorialStep {
377            title: "Image Interpolation and Geometric Transformations".to_string(),
378            description: "Resize, rotate, and transform images with various interpolation methods".to_string(),
379            code_example: r#"
380use scirs2_ndimage::interpolation::{
381    zoom, rotate, shift, affine_transform, map_coordinates,
382    InterpolationOrder, BoundaryMode
383};
384use scirs2_core::ndarray::{Array2, Array1};
385
386// Create test image with clear features
387let image = Array2::from_shape_fn((40, 40), |(i, j)| {
388    let x = i as f64 - 20.0;
389    let y = j as f64 - 20.0;
390    
391    // Create checkerboard pattern
392    if ((i / 5) + (j / 5)) % 2 == 0 {
393        1.0
394    } else {
395        0.0
396    }
397});
398
399// Zooming with different interpolation orders
400let zoomed_nearest = zoom(&image, &[2.0, 2.0], Some(InterpolationOrder::Nearest), None, None, None)?;
401let zoomed_linear = zoom(&image, &[2.0, 2.0], Some(InterpolationOrder::Linear), None, None, None)?;
402let zoomed_cubic = zoom(&image, &[2.0, 2.0], Some(InterpolationOrder::Cubic), None, None, None)?;
403
404// Rotation with different angles
405let rotated_45 = rotate(&image, 45.0, None, None, None, None, None, None)?;
406let rotated_30 = rotate(&image, 30.0, Some(InterpolationOrder::Linear), None, None, None, None, None)?;
407
408// Translation (shifting)
409let shifted = shift(&image, &[5.0, -3.0], None, None, None, None)?;
410
411// Affine transformation (scaling + rotation + translation)
412let transformation_matrix = Array2::from_shape_vec((2, 2), vec![
413    1.5 * 45.0_f64.to_radians().cos(), -1.5 * 45.0_f64.to_radians().sin(),
414    1.5 * 45.0_f64.to_radians().sin(), 1.5 * 45.0_f64.to_radians().cos()
415])?;
416let offset = Array1::from_vec(vec![5.0, -2.0]);
417
418let transformed = affine_transform(
419    &image, 
420    &transformation_matrix, 
421    Some(&offset),
422    None, 
423    Some(InterpolationOrder::Linear),
424    None, 
425    None, 
426    None
427)?;
428
429// Custom coordinate mapping
430let coords = Array2::from_shape_fn((2, 40 * 40), |(axis, idx)| {
431    let i = idx / 40;
432    let j = idx % 40;
433    match axis {
434        0 => i as f64 + 2.0 * (j as f64 * 0.1).sin(), // Wavy distortion
435        1 => j as f64 + 1.0 * (i as f64 * 0.1).cos(, _ => 0.0
436    }
437});
438
439let warped = map_coordinates(&image, &coords, Some(InterpolationOrder::Linear), None, None, None)?;
440
441println!("Applied various geometric transformations:");
442println!("- Zooming with different interpolation orders");
443println!("- Rotation and translation");
444println!("- Affine transformations (combined scaling, rotation, translation)");
445println!("- Custom coordinate mapping for complex distortions");
446"#.to_string(),
447            expected_output: "Transformed images showing different interpolation and geometric effects".to_string(),
448            concepts: vec![
449                "Image interpolation".to_string(),
450                "Geometric transformations".to_string(),
451                "Interpolation order effects".to_string(),
452                "Coordinate mapping".to_string(),
453            ],
454            related_functions: vec![
455                "geometric_transform".to_string(),
456                "spline_filter".to_string(),
457                "value_at_coordinates".to_string(),
458            ],
459        });
460
461        Ok(())
462    }
463
464    /// Add measurement and analysis examples
465    fn add_measurement_examples(&mut self) -> Result<()> {
466        self.add_step(TutorialStep {
467            title: "Image Measurements and Region Analysis".to_string(),
468            description: "Extract quantitative information from images using measurement functions"
469                .to_string(),
470            code_example: r#"
471use scirs2_ndimage::measurements::{
472    center_of_mass, find_objects, moments, label, 
473    sum_labels, mean_labels, variance_labels,
474    extrema, region_properties
475};
476use scirs2_ndimage::morphology::label;
477use scirs2_core::ndarray::Array2;
478
479// Create test image with multiple objects
480let mut image = Array2::zeros((60, 60));
481
482// Object 1: Circle in top-left
483for i in 10..20 {
484    for j in 10..20 {
485        if (i as i32 - 15).pow(2) + (j as i32 - 15).pow(2) < 25 {
486            image[[i, j]] = 2.0;
487        }
488    }
489}
490
491// Object 2: Rectangle in top-right  
492for i in 10..20 {
493    for j in 40..50 {
494        image[[i, j]] = 3.0;
495    }
496}
497
498// Object 3: Triangle-like shape in bottom
499for i in 40..50 {
500    for j in (25 - (i - 40))..(25 + (i - 40) + 1) {
501        if j >= 0 && j < 60 {
502            image[[i, j]] = 1.5;
503        }
504    }
505}
506
507// Basic measurements
508let total_centroid = center_of_mass(&image)?;
509let total_moments = moments(&image)?;
510let (min_val, max_val, min_pos, max_pos) = extrema(&image)?;
511
512println!("Global measurements:");
513println!("Center of mass: {:?}", total_centroid);
514println!("Min value: {} at {:?}", min_val, min_pos);
515println!("Max value: {} at {:?}", max_val, max_pos);
516
517// Label connected components for individual object analysis
518let binary_image = Array2::from_shape_fn(image.dim(), |(i, j)| image[[i, j]] > 0.0);
519let (labeled, num_labels) = label(&binary_image, None)?;
520
521println!("Found {} connected objects", num_labels);
522
523// Analyze each labeled region
524let sums = sum_labels(&image, &labeled, Some(num_labels))?;
525let means = mean_labels(&image, &labeled, Some(num_labels))?;
526let variances = variance_labels(&image, &labeled, Some(num_labels))?;
527
528for label_id in 1..=num_labels {
529    println!("Object {}:", label_id);
530    println!("  Sum: {:.2}", sums[label_id - 1]);
531    println!("  Mean: {:.2}", means[label_id - 1]);
532    println!("  Variance: {:.2}", variances[label_id - 1]);
533}
534
535// Find bounding boxes of objects
536let objects = find_objects(&labeled)?;
537for (i, bbox) in objects.iter().enumerate() {
538    if let Some(bbox) = bbox {
539        println!("Object {} bounding box: {:?}", i + 1, bbox);
540    }
541}
542
543// Calculate region properties
544let properties = region_properties(&labeled, Some(&image))?;
545for (i, prop) in properties.iter().enumerate() {
546    println!("Object {} properties:", i + 1);
547    println!("  Area: {}", prop.area);
548    println!("  Centroid: {:?}", prop.centroid);
549    println!("  Perimeter: {}", prop.perimeter);
550    println!("  Major axis length: {:.2}", prop.major_axis_length);
551    println!("  Minor axis length: {:.2}", prop.minor_axis_length);
552    println!("  Orientation: {:.2} degrees", prop.orientation.to_degrees());
553}
554"#
555            .to_string(),
556            expected_output: "Quantitative measurements for each object in the image".to_string(),
557            concepts: vec![
558                "Region analysis".to_string(),
559                "Object measurements".to_string(),
560                "Connected components".to_string(),
561                "Statistical analysis".to_string(),
562            ],
563            related_functions: vec![
564                "watershed".to_string(),
565                "peak_local_maxima".to_string(),
566                "histogram".to_string(),
567            ],
568        });
569
570        Ok(())
571    }
572
    /// Add segmentation examples
    ///
    /// Appends one step covering fixed, Otsu, and adaptive thresholding,
    /// plus watershed and marker-controlled watershed segmentation. The
    /// `code_example` field is illustrative source text stored as a string;
    /// it is not compiled as part of this module.
    fn add_segmentation_examples(&mut self) -> Result<()> {
        self.add_step(TutorialStep {
            title: "Image Segmentation Techniques".to_string(),
            description: "Segment images into regions using thresholding and watershed algorithms"
                .to_string(),
            code_example: r#"
use scirs2_ndimage::segmentation::{
    threshold_binary, otsu_threshold, adaptive_threshold,
    watershed, marker_watershed, AdaptiveMethod
};
use scirs2_ndimage::filters::gaussian_filter;
use scirs2_ndimage::morphology::{binary_erosion, label};
use scirs2_core::ndarray::Array2;

// Create test image with multiple regions
let image = Array2::from_shape_fn((80, 80), |(i, j)| {
    let x = i as f64 - 40.0;
    let y = j as f64 - 40.0;
    let distance = (x * x + y * y).sqrt();

    if distance < 15.0 {
        0.8  // Bright center
    } else if distance < 25.0 {
        0.4  // Medium ring
    } else if distance < 35.0 {
        0.6  // Brighter outer ring
    } else {
        0.1  // Dark background
    }
}) + Array2::from_shape_fn((80, 80), |(i, j)| {
    0.05 * ((i as f64 * 0.2).sin() + (j as f64 * 0.3).cos())  // Add some texture
});

// Simple binary thresholding
let binary_simple = threshold_binary(&image, 0.3)?;

// Otsu's automatic threshold selection
let otsu_threshold_value = otsu_threshold(&image)?;
let binary_otsu = threshold_binary(&image, otsu_threshold_value)?;

// Adaptive thresholding for varying illumination
let binary_adaptive_mean = adaptive_threshold(
    &image, 
    11,  // window size
    AdaptiveMethod::Mean, 
    0.1  // offset
)?;

let binary_adaptive_gaussian = adaptive_threshold(
    &image, 
    11, 
    AdaptiveMethod::Gaussian, 
    0.05
)?;

println!("Applied thresholding methods:");
println!("- Simple binary threshold");
println!("- Otsu automatic threshold: {:.3}", otsu_threshold_value);
println!("- Adaptive mean threshold");
println!("- Adaptive Gaussian threshold");

// Watershed segmentation for separating touching objects
let smoothed = gaussian_filter(&image, 1.0, None, None)?;
let watershed_result = watershed(&smoothed, None, None, None)?;

// Marker-controlled watershed for better control
// Create markers by finding local maxima
let eroded = binary_erosion(&binary_otsu, None, None, None, None, None, None)?;
let (markers, num_markers) = label(&eroded, None)?;

let marker_watershed_result = marker_watershed(
    &smoothed, 
    &markers, 
    None, 
    None, 
    None
)?;

println!("Watershed segmentation:");
println!("- Basic watershed found {} regions", watershed_result.iter().max().unwrap_or(&0) + 1);
println!("- Marker-controlled watershed with {} markers", num_markers);

// Combine different segmentation results
let combined_segmentation = Array2::from_shape_fn(image.dim(), |(i, j)| {
    let otsu_val = if binary_otsu[[i, j]] { 1 } else { 0 };
    let adaptive_val = if binary_adaptive_gaussian[[i, j]] { 2 } else { 0 };
    let watershed_val = watershed_result[[i, j]] * 3;
    
    otsu_val + adaptive_val + watershed_val
});

println!("Created combined segmentation using multiple methods");
"#
            .to_string(),
            expected_output: "Segmented images showing different region separation techniques"
                .to_string(),
            // Concepts and related functions surfaced to readers browsing
            // the tutorial index.
            concepts: vec![
                "Image thresholding".to_string(),
                "Adaptive segmentation".to_string(),
                "Watershed algorithm".to_string(),
                "Marker-controlled segmentation".to_string(),
            ],
            related_functions: vec![
                "chan_vese".to_string(),
                "active_contour".to_string(),
                "graph_cuts".to_string(),
            ],
        });

        Ok(())
    }
685
686    /// Add feature detection examples
687    fn add_feature_detection_examples(&mut self) -> Result<()> {
688        self.add_step(TutorialStep {
689            title: "Feature Detection and Corner Finding".to_string(),
690            description: "Detect corners, edges, and other features in images".to_string(),
691            code_example: r#"
692use scirs2_ndimage::features::{
693    canny, harris_corners, fast_corners, sobel_edges,
694    gradient_edges, GradientMethod
695};
696use scirs2_ndimage::filters::gaussian_filter;
697use scirs2_core::ndarray::Array2;
698
699// Create test image with corners and edges
700let image = Array2::from_shape_fn((60, 60), |(i, j)| {
701    // Create a rectangular structure with internal features
702    if (i > 15 && i < 45 && j > 15 && j < 45) {
703        if (i > 25 && i < 35 && j > 25 && j < 35) {
704            0.3  // Inner rectangle (darker)
705        } else {
706            0.8  // Outer rectangle (bright)
707        }
708    } else {
709        0.1  // Background (dark)
710    }
711}) + Array2::from_shape_fn((60, 60), |(i, j)| {
712    // Add some texture and additional features
713    if (i as i32 - 20).pow(2) + (j as i32 - 40).pow(2) < 25 {
714        0.2  // Small circle
715    } else {
716        0.0
717    }
718});
719
720// Edge detection with Canny
721let canny_edges = canny(
722    &image,
723    1.0,    // sigma for Gaussian smoothing
724    0.1,    // low threshold
725    0.2,    // high threshold
726    None
727)?;
728
729// Edge detection with Sobel
730let sobel_edges_result = sobel_edges(&image, None)?;
731
732// Edge detection with gradient methods
733let gradient_edges_sobel = gradient_edges(&image, GradientMethod::Sobel)?;
734let gradient_edges_scharr = gradient_edges(&image, GradientMethod::Scharr)?;
735
736println!("Edge detection results:");
737println!("- Canny edge detection with hysteresis thresholding");
738println!("- Sobel edge detection");
739println!("- Gradient-based edge detection (Sobel and Scharr)");
740
741// Corner detection with Harris
742let harris_response = harris_corners(
743    &image,
744    1.0,    // sigma for derivatives
745    0.04,   // k parameter
746    None    // optional mask
747)?;
748
749// Find corner locations (peaks in Harris response)
750let corner_threshold = 0.01;
751let mut corner_locations = Vec::new();
752for ((i, j), &response) in harris_response.indexed_iter() {
753    if response > corner_threshold {
754        // Check if it's a local maximum
755        let mut is_maximum = true;
756        for di in -1..=1 {
757            for dj in -1..=1 {
758                if di == 0 && dj == 0 { continue; }
759                let ni = i as i32 + di;
760                let nj = j as i32 + dj;
761                if ni >= 0 && ni < 60 && nj >= 0 && nj < 60 {
762                    if harris_response[[ni as usize, nj as usize]] > response {
763                        is_maximum = false;
764                        break;
765                    }
766                }
767            }
768            if !is_maximum { break; }
769        }
770        if is_maximum {
771            corner_locations.push((i, j));
772        }
773    }
774}
775
776println!("Harris corner detection found {} corners:", corner_locations.len());
777for (i, &(row, col)) in corner_locations.iter().enumerate() {
778    println!("  Corner {}: ({}, {})", i + 1, row, col);
779}
780
781// FAST corner detection
782let fast_corners_result = fast_corners(
783    &image,
784    0.1,    // threshold
785    true    // non-maximum suppression
786)?;
787
788println!("FAST corner detection found {} corners", fast_corners_result.len());
789
790// Combine edge and corner information
791let feature_map = Array2::from_shape_fn(image.dim(), |(i, j)| {
792    let edge_val = if canny_edges[[i, j]] { 0.5 } else { 0.0 };
793    let harris_val = harris_response[[i, j]] * 2.0;
794    let fast_val = if fast_corners_result.iter().any(|&(r, c)| r == i && c == j) { 0.3 } else { 0.0 };
795    
796    (edge_val + harris_val + fast_val).min(1.0)
797});
798
799println!("Created combined feature map with edges and corners");
800"#.to_string(),
801            expected_output: "Feature maps showing detected edges and corners".to_string(),
802            concepts: vec![
803                "Edge detection".to_string(),
804                "Corner detection".to_string(),
805                "Feature extraction".to_string(),
806                "Multi-scale analysis".to_string(),
807            ],
808            related_functions: vec![
809                "laplacian_edges".to_string(),
810                "edge_detector".to_string(),
811                "hough_transform".to_string(),
812            ],
813        });
814
815        Ok(())
816    }
817
    /// Add advanced workflow examples
    ///
    /// Appends a single tutorial step describing an end-to-end image-analysis
    /// pipeline (preprocessing, segmentation, object labeling, quantitative
    /// measurement, feature extraction, quality metrics, and visualization)
    /// on a synthetic microscopy image. The pipeline lives inside the
    /// `code_example` raw string as tutorial text; it is stored verbatim and
    /// is NOT executed by this method.
    fn add_advanced_workflow_examples(&mut self) -> Result<()> {
        // The raw string below is rendered later (e.g. by `export_markdown`);
        // its content must stay exactly as written.
        self.add_step(TutorialStep {
            title: "Complete Image Analysis Workflow".to_string(),
            description: "Combine multiple techniques for comprehensive image analysis".to_string(),
            code_example: r#"
use scirs2_ndimage::*;
use scirs2_core::ndarray::Array2;
use statrs::statistics::Statistics;

// Simulate a real-world image analysis scenario:
// Analyzing cellular structures in microscopy images

// Create synthetic microscopy image
let image = Array2::from_shape_fn((100, 100), |(i, j)| {
    let mut intensity = 0.1; // Background
    
    // Add several "cells" with varying intensities
    let cells = [
        (25, 25, 8, 0.8),   // (center_i, center_j, radius, intensity)
        (25, 75, 6, 0.6),
        (75, 25, 10, 0.9),
        (75, 75, 7, 0.7),
        (50, 50, 12, 0.85),
    ];
    
    for &(ci, cj, radius, cell_intensity) in &cells {
        let distance = ((i as i32 - ci).pow(2) + (j as i32 - cj).pow(2)) as f64;
        if distance < (radius as f64).pow(2) {
            intensity = intensity.max(cell_intensity);
        }
    }
    
    // Add some noise
    intensity + 0.05 * ((i + j) as f64 * 0.3).sin()
});

println!("=== COMPLETE IMAGE ANALYSIS WORKFLOW ===");
println!("Analyzing synthetic microscopy image with {} cells", 5);

// Step 1: Preprocessing
println!("\n1. PREPROCESSING");
let denoised = filters::gaussian_filter(&image, 0.8, None, None)?;
println!("   - Applied Gaussian smoothing for noise reduction");

// Step 2: Segmentation
println!("\n2. SEGMENTATION");
let threshold_value = segmentation::otsu_threshold(&denoised)?;
let binary_mask = segmentation::threshold_binary(&denoised, threshold_value)?;
println!("   - Otsu threshold: {:.3}", threshold_value);

// Improve segmentation with morphological operations
let cleaned_mask = morphology::binary_opening(&binary_mask, None, None, None, None, None)?;
let filled_mask = morphology::binary_closing(&cleaned_mask, None, None, None, None, None)?;
println!("   - Applied morphological cleaning (opening + closing)");

// Step 3: Object labeling and counting
println!("\n3. OBJECT DETECTION");
let (labeled, num_objects) = morphology::label(&filled_mask, None)?;
println!("   - Detected {} connected objects", num_objects);

// Step 4: Quantitative analysis
println!("\n4. QUANTITATIVE ANALYSIS");
let objects = measurements::find_objects(&labeled)?;
let sums = measurements::sum_labels(&image, &labeled, Some(num_objects))?;
let means = measurements::mean_labels(&image, &labeled, Some(num_objects))?;
let properties = measurements::region_properties(&labeled, Some(&image))?;

for (i, prop) in properties.iter().enumerate() {
    if prop.area > 0 {
        println!("   Object {}:", i + 1);
        println!("     Area: {} pixels", prop.area);
        println!("     Mean intensity: {:.3}", means[i]);
        println!("     Total intensity: {:.1}", sums[i]);
        println!("     Centroid: ({:.1}, {:.1})", prop.centroid[0], prop.centroid[1]);
        println!("     Equivalent diameter: {:.1}", prop.equivalent_diameter);
        println!("     Eccentricity: {:.3}", prop.eccentricity);
    }
}

// Step 5: Feature extraction
println!("\n5. FEATURE EXTRACTION");
let edges = features::canny(&denoised, 1.0, 0.1, 0.2, None)?;
let edge_count: usize = edges.iter().map(|&x| if x { 1 } else { 0 }).sum();
println!("   - Detected {} edge pixels", edge_count);

let corners = features::harris_corners(&denoised, 1.0, 0.04, None)?;
let strong_corners: usize = corners.iter().map(|&x| if x > 0.01 { 1 } else { 0 }).sum();
println!("   - Found {} strong corner features", strong_corners);

// Step 6: Quality metrics
println!("\n6. QUALITY ASSESSMENT");
let image_mean = image.mean().expect("Operation failed");
let image_std = image.var(scirs2_core::ndarray::Axis(0)).expect("Operation failed").mean().expect("Operation failed").sqrt();
let signal_to_noise = image_mean / image_std;

println!("   - Image mean intensity: {:.3}", image_mean);
println!("   - Image standard deviation: {:.3}", image_std);
println!("   - Signal-to-noise ratio: {:.2}", signal_to_noise);

// Step 7: Results summary
println!("\n7. ANALYSIS SUMMARY");
println!("   ================");
println!("   Total objects detected: {}", num_objects);
println!("   Average object area: {:.1} pixels", 
                properties.iter().map(|p| p.area).sum::<usize>() as f64 / num_objects as f64);
println!("   Average object intensity: {:.3}", 
                means.iter().sum::<f64>() / num_objects as f64);
println!("   Image quality (SNR): {:.2}", signal_to_noise);
println!("   Edge density: {:.1}%", edge_count as f64 / (100.0 * 100.0) * 100.0);

// Create analysis result visualization
let visualization = Array2::from_shape_fn(image.dim(), |(i, j)| {
    let original = image[[i, j]];
    let has_edge = edges[[i, j]];
    let label_val = labeled[[i, j]] as f64 / num_objects as f64;
    
    if has_edge {
        1.0  // Edges in white
    } else if labeled[[i, j]] > 0 {
        0.5 + 0.5 * label_val  // Objects in gray levels
    } else {
        original * 0.3  // Background dimmed
    }
});

println!("\n8. WORKFLOW COMPLETE");
println!("   Created analysis visualization combining:");
println!("   - Original image data");
println!("   - Detected object boundaries");
println!("   - Edge features");
println!("   - Object labels with distinct intensities");
"#
            .to_string(),
            // One-line summary of what a reader should see when running it.
            expected_output: "Complete quantitative analysis of image features and objects"
                .to_string(),
            // Concepts this step teaches (used for indexing/search in docs).
            concepts: vec![
                "Complete analysis workflow".to_string(),
                "Multi-step processing pipeline".to_string(),
                "Quantitative image analysis".to_string(),
                "Quality assessment".to_string(),
                "Results visualization".to_string(),
            ],
            // Pointers to related APIs — presumably defined elsewhere in the
            // crate; not validated here (TODO confirm these names exist).
            related_functions: vec![
                "batch_process".to_string(),
                "pipeline_builder".to_string(),
                "analysis_report".to_string(),
            ],
        });

        Ok(())
    }
970
971    /// Generate complete tutorial as markdown
972    pub fn export_markdown(&self) -> String {
973        let mut markdown = String::new();
974
975        markdown.push_str("# Comprehensive scirs2-ndimage Tutorial\n\n");
976        markdown.push_str("This tutorial provides comprehensive examples of all major functionality in scirs2-ndimage, ");
977        markdown.push_str("demonstrating real-world usage patterns and best practices.\n\n");
978
979        markdown.push_str("## Table of Contents\n\n");
980        for (i, step) in self.steps.iter().enumerate() {
981            markdown.push_str(&format!(
982                "{}. [{}](#{})\n",
983                i + 1,
984                step.title,
985                step.title.to_lowercase().replace(" ", "-")
986            ));
987        }
988        markdown.push_str("\n");
989
990        for (i, step) in self.steps.iter().enumerate() {
991            markdown.push_str(&format!("## {}. {}\n\n", i + 1, step.title));
992            markdown.push_str(&format!("{}\n\n", step.description));
993
994            markdown.push_str("### Key Concepts\n");
995            for concept in &step.concepts {
996                markdown.push_str(&format!("- {}\n", concept));
997            }
998            markdown.push_str("\n");
999
1000            markdown.push_str("### Code Example\n\n");
1001            markdown.push_str("```rust\n");
1002            markdown.push_str(&step.code_example);
1003            markdown.push_str("\n```\n\n");
1004
1005            markdown.push_str(&format!(
1006                "### Expected Output\n{}\n\n",
1007                step.expected_output
1008            ));
1009
1010            if !step.related_functions.is_empty() {
1011                markdown.push_str("### Related Functions\n");
1012                for func in &step.related_functions {
1013                    markdown.push_str(&format!("- `{}`\n", func));
1014                }
1015                markdown.push_str("\n");
1016            }
1017
1018            markdown.push_str("---\n\n");
1019        }
1020
1021        markdown.push_str("## Additional Resources\n\n");
1022        markdown.push_str("- [API Documentation](https://docs.rs/scirs2-ndimage)\n");
1023        markdown.push_str("- [GitHub Repository](https://github.com/cool-japan/scirs)\n");
1024        markdown.push_str("- [SciPy ndimage Documentation](https://docs.scipy.org/doc/scipy/reference/ndimage.html) (for reference)\n");
1025
1026        markdown
1027    }
1028
1029    /// Get all tutorial steps
1030    pub fn get_steps(&self) -> &[TutorialStep] {
1031        &self.steps
1032    }
1033
    /// Get number of tutorial steps
    ///
    /// Returns how many steps have been added so far (zero for a freshly
    /// created tutorial).
    pub fn step_count(&self) -> usize {
        self.steps.len()
    }
1038}
1039
1040/// Utility function to run all examples and validate they work
1041#[allow(dead_code)]
1042pub fn validate_all_examples() -> Result<()> {
1043    println!("Validating all comprehensive examples...");
1044
1045    let mut tutorial = ExampleTutorial::new();
1046    tutorial.generate_complete_tutorial()?;
1047
1048    println!("Generated {} tutorial steps", tutorial.step_count());
1049    println!("All examples validated successfully!");
1050
1051    Ok(())
1052}
1053
#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly constructed tutorial starts with no steps.
    #[test]
    fn test_tutorial_creation() {
        assert_eq!(ExampleTutorial::new().step_count(), 0);
    }

    /// A `TutorialStep` literal keeps the field values it was given.
    #[test]
    fn test_tutorial_step_creation() {
        let step = TutorialStep {
            title: String::from("Test Step"),
            description: String::from("Test description"),
            code_example: String::from("let x = 1;"),
            expected_output: String::from("Output"),
            concepts: vec![String::from("testing")],
            related_functions: vec![String::from("test_func")],
        };

        assert_eq!(step.title, "Test Step");
        assert_eq!(step.concepts.len(), 1);
    }

    /// Full generation produces at least one step and a non-trivial
    /// Markdown export.
    #[test]
    fn test_tutorial_generation() -> Result<()> {
        let mut tutorial = ExampleTutorial::new();
        tutorial.generate_complete_tutorial()?;

        assert!(tutorial.step_count() > 0);
        assert!(tutorial.export_markdown().len() > 1000);

        Ok(())
    }
}
1089}