Function load_iris

pub fn load_iris() -> Result<Dataset>

Generates the classic Iris dataset (150 samples, 4 numeric features, 3 target classes).

Examples found in repository
examples/toy_datasets.rs (line 4)
3 fn main() -> Result<(), Box<dyn std::error::Error>> {
4    let iris = load_iris()?;
5    println!("Iris dataset loaded:");
6    println!("  Samples: {}", iris.n_samples());
7    println!("  Features: {}", iris.n_features());
8    println!(
9        "  Target classes: {}",
10        iris.target_names.as_ref().map_or(0, |v| v.len())
11    );
12
13    let boston = load_boston()?;
14    println!("\nBoston Housing dataset loaded:");
15    println!("  Samples: {}", boston.n_samples());
16    println!("  Features: {}", boston.n_features());
17
18    Ok(())
19}
More examples
examples/noise_models_demo.rs (line 195)
191 fn demonstrate_comprehensive_corruption() {
192    println!("Testing comprehensive dataset corruption:");
193
194    // Load a real dataset
195    let iris = load_iris().unwrap();
196    println!(
197        "Original Iris dataset: {} samples, {} features",
198        iris.n_samples(),
199        iris.n_features()
200    );
201
202    let original_stats = calculate_basic_stats(&iris.data);
203    println!(
204        "Original stats - Mean: {:.3}, Std: {:.3}",
205        original_stats.0, original_stats.1
206    );
207
208    // Create different levels of corruption
209    let corruption_levels = [
210        (0.05, 0.02, "Light corruption"),
211        (0.1, 0.05, "Moderate corruption"),
212        (0.2, 0.1, "Heavy corruption"),
213        (0.3, 0.15, "Severe corruption"),
214    ];
215
216    for (missing_rate, outlier_rate, description) in corruption_levels {
217        let corrupted = make_corrupted_dataset(
218            &iris,
219            missing_rate,
220            MissingPattern::MAR, // More realistic than MCAR
221            outlier_rate,
222            OutlierType::Point,
223            2.5,
224            Some(42),
225        )
226        .unwrap();
227
228        // Calculate how much data is usable
229        let total_elements = corrupted.data.len();
230        let missing_elements = corrupted.data.iter().filter(|&&x| x.is_nan()).count();
231        let usable_percentage =
232            ((total_elements - missing_elements) as f64 / total_elements as f64) * 100.0;
233
234        println!("{}:", description);
235        println!("  Missing data: {:.1}%", missing_rate * 100.0);
236        println!("  Outliers: {:.1}%", outlier_rate * 100.0);
237        println!("  Usable data: {:.1}%", usable_percentage);
238
239        // Show metadata
240        if let Some(missing_count) = corrupted.metadata.get("missing_count") {
241            println!("  Actual missing: {} elements", missing_count);
242        }
243        if let Some(outlier_count) = corrupted.metadata.get("outlier_count") {
244            println!("  Actual outliers: {} samples", outlier_count);
245        }
246    }
247}
248
249 fn demonstrate_real_world_applications() {
250    println!("Real-world application scenarios:");
251
252    println!("\n1. **Medical Data Simulation**:");
253    let medical_data = load_iris().unwrap(); // Stand-in for medical measurements
254    let _corrupted_medical = make_corrupted_dataset(
255        &medical_data,
256        0.15,                 // 15% missing - common in medical data
257        MissingPattern::MNAR, // High values often missing (privacy, measurement issues)
258        0.05,                 // 5% outliers - measurement errors
259        OutlierType::Point,
260        2.0,
261        Some(42),
262    )
263    .unwrap();
264
265    println!("  Medical dataset simulation:");
266    println!("    Missing data pattern: MNAR (high values more likely missing)");
267    println!("    Outliers: Point outliers (measurement errors)");
268    println!("    Use case: Testing imputation algorithms for clinical data");
269
270    println!("\n2. **Sensor Network Simulation**:");
271    let sensor_data = make_time_series(200, 4, true, true, 0.1, Some(42)).unwrap();
272    let mut sensor_ts = sensor_data.data.clone();
273
274    // Add realistic sensor noise
275    add_time_series_noise(
276        &mut sensor_ts,
277        &[
278            ("gaussian", 0.05),        // Background noise
279            ("spikes", 0.02),          // Electrical interference
280            ("drift", 0.1),            // Sensor calibration drift
281            ("heteroscedastic", 0.03), // Temperature-dependent noise
282        ],
283        Some(42),
284    )
285    .unwrap();
286
287    // Add missing data (sensor failures)
288    inject_missing_data(&mut sensor_ts, 0.08, MissingPattern::Block, Some(42)).unwrap();
289
290    println!("  Sensor network simulation:");
291    println!("    Multiple noise types: gaussian + spikes + drift + heteroscedastic");
292    println!("    Missing data: Block pattern (sensor failures)");
293    println!("    Use case: Testing robust time series algorithms");
294
295    println!("\n3. **Survey Data Simulation**:");
296    let survey_data = load_iris().unwrap(); // Stand-in for survey responses
297    let _corrupted_survey = make_corrupted_dataset(
298        &survey_data,
299        0.25,                // 25% missing - typical for surveys
300        MissingPattern::MAR, // Missing depends on other responses
301        0.08,                // 8% outliers - data entry errors, extreme responses
302        OutlierType::Contextual,
303        1.5,
304        Some(42),
305    )
306    .unwrap();
307
308    println!("  Survey data simulation:");
309    println!("    Missing data pattern: MAR (depends on other responses)");
310    println!("    Outliers: Contextual (unusual response patterns)");
311    println!("    Use case: Testing survey analysis robustness");
312
313    println!("\n4. **Financial Data Simulation**:");
314    let mut financial_ts = make_time_series(500, 3, false, false, 0.02, Some(42))
315        .unwrap()
316        .data;
317
318    // Add financial market-specific noise
319    add_time_series_noise(
320        &mut financial_ts,
321        &[
322            ("gaussian", 0.1),        // Market volatility
323            ("spikes", 0.05),         // Market shocks
324            ("autocorrelated", 0.15), // Momentum effects
325            ("heteroscedastic", 0.2), // Volatility clustering
326        ],
327        Some(42),
328    )
329    .unwrap();
330
331    println!("  Financial data simulation:");
332    println!("    Noise types: volatility + shocks + momentum + clustering");
333    println!("    Use case: Testing financial models under realistic conditions");
334}
examples/sampling_demo.rs (line 13)
9 fn main() {
10    println!("=== Sampling and Bootstrapping Demonstration ===\n");
11
12    // Load the Iris dataset for demonstration
13    let iris = load_iris().unwrap();
14    let n_samples = iris.n_samples();
15
16    println!("Original Iris dataset:");
17    println!("- Samples: {}", n_samples);
18    println!("- Features: {}", iris.n_features());
19
20    if let Some(target) = &iris.target {
21        let class_counts = count_classes(target);
22        println!("- Class distribution: {:?}\n", class_counts);
23    }
24
25    // Demonstrate random sampling without replacement
26    println!("=== Random Sampling (without replacement) ===");
27    let sample_size = 30;
28    let random_indices = random_sample(n_samples, sample_size, false, Some(42)).unwrap();
29
30    println!(
31        "Sampled {} indices from {} total samples",
32        sample_size, n_samples
33    );
34    println!(
35        "Sample indices: {:?}",
36        &random_indices[..10.min(random_indices.len())]
37    );
38
39    // Create a subset dataset
40    let sample_data = iris.data.select(ndarray::Axis(0), &random_indices);
41    let sample_target = iris
42        .target
43        .as_ref()
44        .map(|t| t.select(ndarray::Axis(0), &random_indices));
45    let sample_dataset = Dataset::new(sample_data, sample_target)
46        .with_description("Random sample from Iris dataset".to_string());
47
48    println!(
49        "Random sample dataset: {} samples, {} features",
50        sample_dataset.n_samples(),
51        sample_dataset.n_features()
52    );
53
54    if let Some(target) = &sample_dataset.target {
55        let sample_class_counts = count_classes(target);
56        println!("Sample class distribution: {:?}\n", sample_class_counts);
57    }
58
59    // Demonstrate bootstrap sampling (with replacement)
60    println!("=== Bootstrap Sampling (with replacement) ===");
61    let bootstrap_size = 200; // More than original dataset size
62    let bootstrap_indices = random_sample(n_samples, bootstrap_size, true, Some(42)).unwrap();
63
64    println!(
65        "Bootstrap sampled {} indices from {} total samples",
66        bootstrap_size, n_samples
67    );
68    println!(
69        "Bootstrap may have duplicates - first 10 indices: {:?}",
70        &bootstrap_indices[..10]
71    );
72
73    // Count frequency of each index in bootstrap sample
74    let mut index_counts = vec![0; n_samples];
75    for &idx in &bootstrap_indices {
76        index_counts[idx] += 1;
77    }
78    let max_count = *index_counts.iter().max().unwrap();
79    let zero_count = index_counts.iter().filter(|&&count| count == 0).count();
80
81    println!("Bootstrap statistics:");
82    println!("- Maximum frequency of any sample: {}", max_count);
83    println!(
84        "- Number of original samples not selected: {}\n",
85        zero_count
86    );
87
88    // Demonstrate stratified sampling
89    println!("=== Stratified Sampling ===");
90    if let Some(target) = &iris.target {
91        let stratified_size = 30;
92        let stratified_indices = stratified_sample(target, stratified_size, Some(42)).unwrap();
93
94        println!(
95            "Stratified sampled {} indices maintaining class proportions",
96            stratified_size
97        );
98
99        // Create stratified subset
100        let stratified_data = iris.data.select(ndarray::Axis(0), &stratified_indices);
101        let stratified_target = target.select(ndarray::Axis(0), &stratified_indices);
102        let stratified_dataset = Dataset::new(stratified_data, Some(stratified_target))
103            .with_description("Stratified sample from Iris dataset".to_string());
104
105        println!(
106            "Stratified sample dataset: {} samples, {} features",
107            stratified_dataset.n_samples(),
108            stratified_dataset.n_features()
109        );
110
111        let stratified_class_counts = count_classes(&stratified_dataset.target.unwrap());
112        println!(
113            "Stratified sample class distribution: {:?}",
114            stratified_class_counts
115        );
116
117        // Verify proportions are maintained
118        let original_proportions = calculate_proportions(&count_classes(target));
119        let stratified_proportions = calculate_proportions(&stratified_class_counts);
120
121        println!("Class proportion comparison:");
122        for (&class, &original_prop) in &original_proportions {
123            let stratified_prop = stratified_proportions.get(&class).unwrap_or(&0.0);
124            println!(
125                "  Class {}: Original {:.2}%, Stratified {:.2}%",
126                class,
127                original_prop * 100.0,
128                stratified_prop * 100.0
129            );
130        }
131    }
132
133    // Demonstrate practical use case: creating training/validation splits
134    println!("\n=== Practical Example: Multiple Train/Validation Splits ===");
135    for i in 1..=3 {
136        let split_indices = random_sample(n_samples, 100, false, Some(42 + i)).unwrap();
137        let (train_indices, val_indices) = split_indices.split_at(80);
138
139        println!(
140            "Split {}: {} training samples, {} validation samples",
141            i,
142            train_indices.len(),
143            val_indices.len()
144        );
145    }
146
147    println!("\n=== Sampling Demo Complete ===");
148}
examples/feature_extraction_demo.rs (line 105)
12 fn main() {
13    println!("=== Feature Extraction Utilities Demonstration ===\n");
14
15    // Create a sample dataset for demonstration
16    let data = Array2::from_shape_vec(
17        (6, 2),
18        vec![
19            1.0, 10.0, // Normal data
20            2.0, 20.0, 3.0, 30.0, 4.0, 40.0, 5.0, 50.0, 100.0, 500.0, // Outlier
21        ],
22    )
23    .unwrap();
24
25    println!("Original dataset:");
26    print_data_summary(&data, "Original");
27    println!();
28
29    // Demonstrate Min-Max Scaling
30    println!("=== Min-Max Scaling ============================");
31    let mut data_minmax = data.clone();
32    min_max_scale(&mut data_minmax, (0.0, 1.0));
33    print_data_summary(&data_minmax, "Min-Max Scaled [0, 1]");
34
35    let mut data_custom_range = data.clone();
36    min_max_scale(&mut data_custom_range, (-1.0, 1.0));
37    print_data_summary(&data_custom_range, "Min-Max Scaled [-1, 1]");
38    println!();
39
40    // Demonstrate Robust Scaling
41    println!("=== Robust Scaling ==============================");
42    let mut data_robust = data.clone();
43    robust_scale(&mut data_robust);
44    print_data_summary(&data_robust, "Robust Scaled (Median/IQR)");
45    println!();
46
47    // Demonstrate Polynomial Features
48    println!("=== Polynomial Feature Generation ==============");
49    let small_data = Array2::from_shape_vec((3, 2), vec![1.0, 2.0, 2.0, 3.0, 3.0, 4.0]).unwrap();
50
51    println!("Small dataset for polynomial demonstration:");
52    print_data_matrix(&small_data, &["x1", "x2"]);
53
54    let poly_with_bias = polynomial_features(&small_data, 2, true).unwrap();
55    println!("Polynomial features (degree=2, with bias):");
56    print_data_matrix(&poly_with_bias, &["1", "x1", "x2", "x1²", "x1*x2", "x2²"]);
57
58    let poly_no_bias = polynomial_features(&small_data, 2, false).unwrap();
59    println!("Polynomial features (degree=2, no bias):");
60    print_data_matrix(&poly_no_bias, &["x1", "x2", "x1²", "x1*x2", "x2²"]);
61    println!();
62
63    // Demonstrate Statistical Feature Extraction
64    println!("=== Statistical Feature Extraction =============");
65    let stats_data = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
66
67    let stats_features = statistical_features(&stats_data).unwrap();
68    println!("Statistical features for data [1, 2, 3, 4, 5]:");
69    println!("(Each sample gets the same global statistics)");
70    print_statistical_features(stats_features.row(0).to_owned());
71    println!();
72
73    // Demonstrate Binning/Discretization
74    println!("=== Feature Binning/Discretization =============");
75    let binning_data =
76        Array2::from_shape_vec((8, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]).unwrap();
77
78    println!("Original data for binning: [1, 2, 3, 4, 5, 6, 7, 8]");
79
80    let uniform_binned =
81        create_binned_features(&binning_data, 3, BinningStrategy::Uniform).unwrap();
82    println!(
83        "Uniform binning (3 bins): {:?}",
84        uniform_binned
85            .column(0)
86            .iter()
87            .map(|&x| x as usize)
88            .collect::<Vec<_>>()
89    );
90
91    let quantile_binned =
92        create_binned_features(&binning_data, 4, BinningStrategy::Quantile).unwrap();
93    println!(
94        "Quantile binning (4 bins): {:?}",
95        quantile_binned
96            .column(0)
97            .iter()
98            .map(|&x| x as usize)
99            .collect::<Vec<_>>()
100    );
101    println!();
102
103    // Demonstrate Feature Extraction Pipeline
104    println!("=== Complete Feature Extraction Pipeline =======");
105    let iris = load_iris().unwrap();
106    println!(
107        "Using Iris dataset ({} samples, {} features)",
108        iris.n_samples(),
109        iris.n_features()
110    );
111
112    // Step 1: Robust scaling (handles outliers better)
113    let mut scaled_iris = iris.data.clone();
114    robust_scale(&mut scaled_iris);
115    println!("Step 1: Applied robust scaling");
116
117    // Step 2: Generate polynomial features (degree 2)
118    let poly_iris = polynomial_features(&scaled_iris, 2, false).unwrap();
119    println!("Step 2: Generated polynomial features");
120    println!("  Original features: {}", scaled_iris.ncols());
121    println!("  Polynomial features: {}", poly_iris.ncols());
122
123    // Step 3: Create binned features for non-linearity
124    let binned_iris = create_binned_features(&scaled_iris, 5, BinningStrategy::Quantile).unwrap();
125    println!("Step 3: Created binned features");
126    println!("  Binned features: {}", binned_iris.ncols());
127
128    // Step 4: Extract statistical features
129    let stats_iris =
130        statistical_features(&iris.data.slice(ndarray::s![0..20, ..]).to_owned()).unwrap();
131    println!("Step 4: Extracted statistical features (from first 20 samples)");
132    println!("  Statistical features: {}", stats_iris.ncols());
133    println!();
134
135    // Comparison of scaling methods with outliers
136    println!("=== Scaling Methods Comparison (with outliers) =");
137    let outlier_data = Array2::from_shape_vec(
138        (5, 1),
139        vec![1.0, 2.0, 3.0, 4.0, 100.0], // 100.0 is a severe outlier
140    )
141    .unwrap();
142
143    println!("Original data with outlier: [1, 2, 3, 4, 100]");
144
145    let mut minmax_outlier = outlier_data.clone();
146    min_max_scale(&mut minmax_outlier, (0.0, 1.0));
147    println!(
148        "Min-Max scaled: {:?}",
149        minmax_outlier
150            .column(0)
151            .iter()
152            .map(|&x| format!("{:.3}", x))
153            .collect::<Vec<_>>()
154    );
155
156    let mut robust_outlier = outlier_data.clone();
157    robust_scale(&mut robust_outlier);
158    println!(
159        "Robust scaled: {:?}",
160        robust_outlier
161            .column(0)
162            .iter()
163            .map(|&x| format!("{:.3}", x))
164            .collect::<Vec<_>>()
165    );
166
167    println!("\nNotice how robust scaling is less affected by the outlier!");
168    println!();
169
170    // Feature engineering recommendations
171    println!("=== Feature Engineering Recommendations ========");
172    println!("1. **Scaling**: Use robust scaling when outliers are present");
173    println!("2. **Polynomial**: Use degree 2-3 for non-linear relationships");
174    println!("3. **Binning**: Use quantile binning for better distribution");
175    println!("4. **Statistical**: Extract global statistics for context");
176    println!("5. **Pipeline**: Always scale → transform → engineer → validate");
177    println!();
178
179    println!("=== Feature Extraction Demo Complete ===========");
180}
examples/balancing_demo.rs (line 120)
12 fn main() {
13    println!("=== Data Balancing Utilities Demonstration ===\n");
14
15    // Create an artificially imbalanced dataset for demonstration
16    let data = Array2::from_shape_vec(
17        (10, 2),
18        vec![
19            // Class 0 (minority): 2 samples
20            1.0, 1.0, 1.2, 1.1, // Class 1 (majority): 6 samples
21            5.0, 5.0, 5.1, 5.2, 4.9, 4.8, 5.3, 5.1, 4.8, 5.3, 5.0, 4.9,
22            // Class 2 (moderate): 2 samples
23            10.0, 10.0, 10.1, 9.9,
24        ],
25    )
26    .unwrap();
27
28    let targets = Array1::from(vec![0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0]);
29
30    println!("Original imbalanced dataset:");
31    print_class_distribution(&targets);
32    println!("Total samples: {}\n", data.nrows());
33
34    // Demonstrate random oversampling
35    println!("=== Random Oversampling =======================");
36    let (oversampled_data, oversampled_targets) =
37        random_oversample(&data, &targets, Some(42)).unwrap();
38
39    println!("After random oversampling:");
40    print_class_distribution(&oversampled_targets);
41    println!("Total samples: {}\n", oversampled_data.nrows());
42
43    // Demonstrate random undersampling
44    println!("=== Random Undersampling ======================");
45    let (undersampled_data, undersampled_targets) =
46        random_undersample(&data, &targets, Some(42)).unwrap();
47
48    println!("After random undersampling:");
49    print_class_distribution(&undersampled_targets);
50    println!("Total samples: {}\n", undersampled_data.nrows());
51
52    // Demonstrate SMOTE-like synthetic sample generation
53    println!("=== Synthetic Sample Generation (SMOTE-like) ==");
54
55    // Generate 4 synthetic samples for class 0 (minority class)
56    let (synthetic_data, synthetic_targets) =
57        generate_synthetic_samples(&data, &targets, 0.0, 4, 1, Some(42)).unwrap();
58
59    println!(
60        "Generated {} synthetic samples for class 0",
61        synthetic_data.nrows()
62    );
63    println!("Synthetic samples (first 3 features of each):");
64    for i in 0..synthetic_data.nrows() {
65        println!(
66            "  Sample {}: [{:.3}, {:.3}] -> class {}",
67            i,
68            synthetic_data[[i, 0]],
69            synthetic_data[[i, 1]],
70            synthetic_targets[i]
71        );
72    }
73    println!();
74
75    // Demonstrate unified balancing strategies
76    println!("=== Unified Balancing Strategies ==============");
77
78    // Strategy 1: Random Oversampling
79    let (balanced_over, targets_over) = create_balanced_dataset(
80        &data,
81        &targets,
82        BalancingStrategy::RandomOversample,
83        Some(42),
84    )
85    .unwrap();
86
87    println!("Strategy: Random Oversampling");
88    print_class_distribution(&targets_over);
89    println!("Total samples: {}", balanced_over.nrows());
90
91    // Strategy 2: Random Undersampling
92    let (balanced_under, targets_under) = create_balanced_dataset(
93        &data,
94        &targets,
95        BalancingStrategy::RandomUndersample,
96        Some(42),
97    )
98    .unwrap();
99
100    println!("\nStrategy: Random Undersampling");
101    print_class_distribution(&targets_under);
102    println!("Total samples: {}", balanced_under.nrows());
103
104    // Strategy 3: SMOTE with k=1 neighbors
105    let (balanced_smote, targets_smote) = create_balanced_dataset(
106        &data,
107        &targets,
108        BalancingStrategy::SMOTE { k_neighbors: 1 },
109        Some(42),
110    )
111    .unwrap();
112
113    println!("\nStrategy: SMOTE (k_neighbors=1)");
114    print_class_distribution(&targets_smote);
115    println!("Total samples: {}", balanced_smote.nrows());
116
117    // Demonstrate with real-world dataset
118    println!("\n=== Real-world Example: Iris Dataset ==========");
119
120    let iris = load_iris().unwrap();
121    if let Some(iris_targets) = &iris.target {
122        println!("Original Iris dataset:");
123        print_class_distribution(iris_targets);
124
125        // Apply oversampling to iris (it's already balanced, but for demonstration)
126        let (iris_balanced, iris_balanced_targets) =
127            random_oversample(&iris.data, iris_targets, Some(42)).unwrap();
128
129        println!("\nIris after oversampling (should remain the same):");
130        print_class_distribution(&iris_balanced_targets);
131        println!("Total samples: {}", iris_balanced.nrows());
132
133        // Create artificial imbalance by removing some samples
134        let indices_to_keep: Vec<usize> = (0..150)
135            .filter(|&i| {
136                let class = iris_targets[i].round() as i64;
137                // Keep all of class 0, 30 of class 1, 10 of class 2
138                match class {
139                    0 => true,    // Keep all 50
140                    1 => i < 80,  // Keep first 30 (indices 50-79)
141                    2 => i < 110, // Keep first 10 (indices 100-109)
142                    _ => false,
143                }
144            })
145            .collect();
146
147        let imbalanced_data = iris.data.select(ndarray::Axis(0), &indices_to_keep);
148        let imbalanced_targets = iris_targets.select(ndarray::Axis(0), &indices_to_keep);
149
150        println!("\nArtificially imbalanced Iris:");
151        print_class_distribution(&imbalanced_targets);
152
153        // Balance it using SMOTE
154        let (rebalanced_data, rebalanced_targets) = create_balanced_dataset(
155            &imbalanced_data,
156            &imbalanced_targets,
157            BalancingStrategy::SMOTE { k_neighbors: 3 },
158            Some(42),
159        )
160        .unwrap();
161
162        println!("\nAfter SMOTE rebalancing:");
163        print_class_distribution(&rebalanced_targets);
164        println!("Total samples: {}", rebalanced_data.nrows());
165    }
166
167    println!("\n=== Performance Comparison ====================");
168
169    // Show the tradeoffs between different strategies
170    println!("Strategy Comparison Summary:");
171    println!("┌─────────────────────┬──────────────┬─────────────────────────────────┐");
172    println!("│ Strategy            │ Final Size   │ Characteristics                 │");
173    println!("├─────────────────────┼──────────────┼─────────────────────────────────┤");
174    println!(
175        "│ Random Oversample   │ {} samples   │ Increases data size, duplicates │",
176        balanced_over.nrows()
177    );
178    println!(
179        "│ Random Undersample  │ {} samples    │ Reduces data size, loses info   │",
180        balanced_under.nrows()
181    );
182    println!(
183        "│ SMOTE               │ {} samples   │ Increases size, synthetic data  │",
184        balanced_smote.nrows()
185    );
186    println!("└─────────────────────┴──────────────┴─────────────────────────────────┘");
187
188    println!("\n=== Balancing Demo Complete ====================");
189}