Function make_corrupted_dataset

pub fn make_corrupted_dataset(
    base_dataset: &Dataset,
    missing_rate: f64,
    missing_pattern: MissingPattern,
    outlier_rate: f64,
    outlier_type: OutlierType,
    outlier_strength: f64,
    random_seed: Option<u64>,
) -> Result<Dataset>

Generate a dataset with controlled corruption patterns: missing values injected according to a chosen pattern and rate, plus outliers of a chosen type, rate, and strength.
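A minimal sketch of a call, assuming `load_iris`, `make_corrupted_dataset`, `MissingPattern`, and `OutlierType` are imported from this crate; the variant names and metadata keys follow the repository example shown below, and the specific rates and seed here are illustrative:

// Load a clean reference dataset and inject controlled corruption:
// 10% missing values (MAR pattern), 5% point outliers at 3x strength,
// with a fixed seed so the corruption is reproducible.
let iris = load_iris().unwrap();
let corrupted = make_corrupted_dataset(
    &iris,
    0.1,
    MissingPattern::MAR,
    0.05,
    OutlierType::Point,
    3.0,
    Some(0),
)
.unwrap();

// Missing entries appear as NaN in the data; the actual counts are also
// recorded under the "missing_count" and "outlier_count" metadata keys.
let missing = corrupted.data.iter().filter(|&&x| x.is_nan()).count();
println!("injected {missing} missing values");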

Examples found in repository
examples/noise_models_demo.rs (lines 217-225)
191fn demonstrate_comprehensive_corruption() {
192    println!("Testing comprehensive dataset corruption:");
193
194    // Load a real dataset
195    let iris = load_iris().unwrap();
196    println!(
197        "Original Iris dataset: {} samples, {} features",
198        iris.n_samples(),
199        iris.n_features()
200    );
201
202    let original_stats = calculate_basic_stats(&iris.data);
203    println!(
204        "Original stats - Mean: {:.3}, Std: {:.3}",
205        original_stats.0, original_stats.1
206    );
207
208    // Create different levels of corruption
209    let corruption_levels = [
210        (0.05, 0.02, "Light corruption"),
211        (0.1, 0.05, "Moderate corruption"),
212        (0.2, 0.1, "Heavy corruption"),
213        (0.3, 0.15, "Severe corruption"),
214    ];
215
216    for (missing_rate, outlier_rate, description) in corruption_levels {
217        let corrupted = make_corrupted_dataset(
218            &iris,
219            missing_rate,
220            MissingPattern::MAR, // More realistic than MCAR
221            outlier_rate,
222            OutlierType::Point,
223            2.5,
224            Some(42),
225        )
226        .unwrap();
227
228        // Calculate how much data is usable
229        let total_elements = corrupted.data.len();
230        let missing_elements = corrupted.data.iter().filter(|&&x| x.is_nan()).count();
231        let usable_percentage =
232            ((total_elements - missing_elements) as f64 / total_elements as f64) * 100.0;
233
234        println!("{}:", description);
235        println!("  Missing data: {:.1}%", missing_rate * 100.0);
236        println!("  Outliers: {:.1}%", outlier_rate * 100.0);
237        println!("  Usable data: {:.1}%", usable_percentage);
238
239        // Show metadata
240        if let Some(missing_count) = corrupted.metadata.get("missing_count") {
241            println!("  Actual missing: {} elements", missing_count);
242        }
243        if let Some(outlier_count) = corrupted.metadata.get("outlier_count") {
244            println!("  Actual outliers: {} samples", outlier_count);
245        }
246    }
247}
248
249fn demonstrate_real_world_applications() {
250    println!("Real-world application scenarios:");
251
252    println!("\n1. **Medical Data Simulation**:");
253    let medical_data = load_iris().unwrap(); // Stand-in for medical measurements
254    let _corrupted_medical = make_corrupted_dataset(
255        &medical_data,
256        0.15,                 // 15% missing - common in medical data
257        MissingPattern::MNAR, // High values often missing (privacy, measurement issues)
258        0.05,                 // 5% outliers - measurement errors
259        OutlierType::Point,
260        2.0,
261        Some(42),
262    )
263    .unwrap();
264
265    println!("  Medical dataset simulation:");
266    println!("    Missing data pattern: MNAR (high values more likely missing)");
267    println!("    Outliers: Point outliers (measurement errors)");
268    println!("    Use case: Testing imputation algorithms for clinical data");
269
270    println!("\n2. **Sensor Network Simulation**:");
271    let sensor_data = make_time_series(200, 4, true, true, 0.1, Some(42)).unwrap();
272    let mut sensor_ts = sensor_data.data.clone();
273
274    // Add realistic sensor noise
275    add_time_series_noise(
276        &mut sensor_ts,
277        &[
278            ("gaussian", 0.05),        // Background noise
279            ("spikes", 0.02),          // Electrical interference
280            ("drift", 0.1),            // Sensor calibration drift
281            ("heteroscedastic", 0.03), // Temperature-dependent noise
282        ],
283        Some(42),
284    )
285    .unwrap();
286
287    // Add missing data (sensor failures)
288    inject_missing_data(&mut sensor_ts, 0.08, MissingPattern::Block, Some(42)).unwrap();
289
290    println!("  Sensor network simulation:");
291    println!("    Multiple noise types: gaussian + spikes + drift + heteroscedastic");
292    println!("    Missing data: Block pattern (sensor failures)");
293    println!("    Use case: Testing robust time series algorithms");
294
295    println!("\n3. **Survey Data Simulation**:");
296    let survey_data = load_iris().unwrap(); // Stand-in for survey responses
297    let _corrupted_survey = make_corrupted_dataset(
298        &survey_data,
299        0.25,                // 25% missing - typical for surveys
300        MissingPattern::MAR, // Missing depends on other responses
301        0.08,                // 8% outliers - data entry errors, extreme responses
302        OutlierType::Contextual,
303        1.5,
304        Some(42),
305    )
306    .unwrap();
307
308    println!("  Survey data simulation:");
309    println!("    Missing data pattern: MAR (depends on other responses)");
310    println!("    Outliers: Contextual (unusual response patterns)");
311    println!("    Use case: Testing survey analysis robustness");
312
313    println!("\n4. **Financial Data Simulation**:");
314    let mut financial_ts = make_time_series(500, 3, false, false, 0.02, Some(42))
315        .unwrap()
316        .data;
317
318    // Add financial market-specific noise
319    add_time_series_noise(
320        &mut financial_ts,
321        &[
322            ("gaussian", 0.1),        // Market volatility
323            ("spikes", 0.05),         // Market shocks
324            ("autocorrelated", 0.15), // Momentum effects
325            ("heteroscedastic", 0.2), // Volatility clustering
326        ],
327        Some(42),
328    )
329    .unwrap();
330
331    println!("  Financial data simulation:");
332    println!("    Noise types: volatility + shocks + momentum + clustering");
333    println!("    Use case: Testing financial models under realistic conditions");
334}