// scirs2_datasets/lib.rs
1//! # SciRS2 Datasets - Dataset Loading and Generation
2//!
3//! **scirs2-datasets** provides dataset utilities modeled after scikit-learn's `datasets` module,
4//! offering toy datasets (Iris, Boston, MNIST), synthetic data generators, cross-validation splitters,
5//! and data preprocessing utilities for machine learning workflows.
6//!
7//! ## 🎯 Key Features
8//!
9//! - **Toy Datasets**: Classic datasets (Iris, Boston Housing, Breast Cancer, Digits)
10//! - **Data Generators**: Synthetic data for classification, regression, clustering
11//! - **Cross-Validation**: K-fold, stratified, time series CV splitters
12//! - **Preprocessing**: Train/test split, normalization, feature scaling
13//! - **Caching**: Efficient disk caching for downloaded datasets
14//!
15//! ## 📦 Module Overview
16//!
17//! | SciRS2 Function | scikit-learn Equivalent | Description |
18//! |-----------------|-------------------------|-------------|
19//! | `load_iris` | `sklearn.datasets.load_iris` | Classic Iris classification dataset |
20//! | `load_boston` | `sklearn.datasets.load_boston` | Boston housing regression dataset |
21//! | `make_classification` | `sklearn.datasets.make_classification` | Synthetic classification data |
22//! | `make_regression` | `sklearn.datasets.make_regression` | Synthetic regression data |
23//! | `make_blobs` | `sklearn.datasets.make_blobs` | Synthetic clustering data |
24//! | `k_fold_split` | `sklearn.model_selection.KFold` | K-fold cross-validation |
25//!
26//! ## 🚀 Quick Start
27//!
28//! ```toml
29//! [dependencies]
//! scirs2-datasets = "0.2.0"
31//! ```
32//!
33//! ```rust
34//! use scirs2_datasets::{load_iris, make_classification};
35//!
36//! // Load classic Iris dataset
37//! let iris = load_iris().expect("Operation failed");
38//! println!("{} samples, {} features", iris.n_samples(), iris.n_features());
39//!
40//! // Generate synthetic classification data
41//! let data = make_classification(100, 5, 3, 2, 4, Some(42)).expect("Operation failed");
42//! ```
43//!
44//! ## 🔒 Version: 0.2.0 (February 8, 2026)
45//!
46//! ### v0.2.0 New Features
47//!
48//! - **Lazy Loading**: Memory-mapped datasets with zero-copy views
49//! - **Data Augmentation**: GPU-accelerated augmentation pipeline
50//! - **Parallel Preprocessing**: Multi-threaded preprocessing with work-stealing
51//! - **Distributed Loading**: Shard-aware loading for distributed training
52//! - **Format Support**: Parquet, Arrow, HDF5 integration via scirs2-io
53//! - **Benchmarks**: Comprehensive comparison with PyTorch DataLoader
54//!
55//! # Examples
56//!
57//! ## Loading toy datasets
58//!
59//! ```rust
60//! use scirs2_datasets::{load_iris, load_boston};
61//!
62//! // Load the classic Iris dataset
63//! let iris = load_iris().expect("Operation failed");
64//! println!("Iris dataset: {} samples, {} features", iris.n_samples(), iris.n_features());
65//!
66//! // Load the Boston housing dataset
67//! let boston = load_boston().expect("Operation failed");
68//! println!("Boston dataset: {} samples, {} features", boston.n_samples(), boston.n_features());
69//! ```
70//!
71//! ## Generating synthetic datasets
72//!
73//! ```rust
74//! use scirs2_datasets::{make_classification, make_regression, make_blobs, make_spirals, make_moons};
75//!
76//! // Generate a classification dataset
77//! let classification = make_classification(100, 5, 3, 2, 4, Some(42)).expect("Operation failed");
78//! println!("Classification dataset: {} samples, {} features, {} classes",
79//!          classification.n_samples(), classification.n_features(), 3);
80//!
81//! // Generate a regression dataset
82//! let regression = make_regression(50, 4, 3, 0.1, Some(42)).expect("Operation failed");
83//! println!("Regression dataset: {} samples, {} features",
84//!          regression.n_samples(), regression.n_features());
85//!
86//! // Generate a clustering dataset
87//! let blobs = make_blobs(80, 3, 4, 1.0, Some(42)).expect("Operation failed");
88//! println!("Blobs dataset: {} samples, {} features, {} clusters",
89//!          blobs.n_samples(), blobs.n_features(), 4);
90//!
91//! // Generate non-linear patterns
92//! let spirals = make_spirals(200, 2, 0.1, Some(42)).expect("Operation failed");
93//! let moons = make_moons(150, 0.05, Some(42)).expect("Operation failed");
94//! ```
95//!
96//! ## Cross-validation
97//!
98//! ```rust
99//! use scirs2_datasets::{load_iris, k_fold_split, stratified_k_fold_split};
100//!
101//! let iris = load_iris().expect("Operation failed");
102//!
103//! // K-fold cross-validation
104//! let k_folds = k_fold_split(iris.n_samples(), 5, true, Some(42)).expect("Operation failed");
105//! println!("Created {} folds for K-fold CV", k_folds.len());
106//!
107//! // Stratified K-fold cross-validation
108//! if let Some(target) = &iris.target {
109//!     let stratified_folds = stratified_k_fold_split(target, 5, true, Some(42)).expect("Operation failed");
110//!     println!("Created {} stratified folds", stratified_folds.len());
111//! }
112//! ```
113//!
114//! ## Dataset manipulation
115//!
116//! ```rust
117//! use scirs2_datasets::{load_iris, Dataset};
118//!
119//! let iris = load_iris().expect("Operation failed");
120//!
121//! // Access dataset properties
122//! println!("Dataset: {} samples, {} features", iris.n_samples(), iris.n_features());
123//! if let Some(featurenames) = iris.featurenames() {
124//!     println!("Features: {:?}", featurenames);
125//! }
126//! ```
127
#![warn(missing_docs)]

// NOTE(review): `missing_docs` is warned on above, but many `pub mod`
// declarations below had no `///` docs at the declaration site — presumably
// each module file carries its own `//!` docs; the one-liners here summarize
// each module based on the re-exports further down in this file.

/// Advanced synthetic dataset generators (adversarial, anomaly, few-shot,
/// multi-task, domain adaptation, continual learning).
pub mod advanced_generators;
/// Benchmarking utilities (`BenchmarkRunner`, `BenchmarkSuite`).
pub mod benchmarks;
/// Disk caching for datasets (`DatasetCache`, `CacheManager`).
pub mod cache;
/// Cloud storage access (S3, GCS, Azure clients and public open datasets).
pub mod cloud;
/// Distributed dataset processing (`DistributedProcessor`, scaling config).
pub mod distributed;
/// Domain-specific datasets (astronomy, climate, genomics).
pub mod domain_specific;
/// Crate error types.
pub mod error;
/// Dataset exploration and summaries (`DatasetExplorer`, quality assessment).
pub mod explore;
/// External dataset repositories (UCI, GitHub, Kaggle) and URL loading.
pub mod external;
/// Synthetic data generators (`make_blobs`, `make_classification`, manifolds, ...).
pub mod generators;
/// GPU device utilities and GPU-accelerated generators.
pub mod gpu;
/// Advanced GPU kernel optimization utilities.
pub mod gpu_optimization;
/// File loaders (CSV, JSON, raw) with streaming and parallel variants.
pub mod loaders;
/// ML pipeline integration (train/test split, cross-validation, experiments).
pub mod ml_integration;
/// Real-world datasets (Titanic, Adult, California Housing, ...).
pub mod real_world;
/// Dataset registry (lookup by name).
pub mod registry;
/// Sample dataset utilities (glob re-exported at the crate root).
pub mod sample;
/// Streaming dataset processing (`StreamProcessor`, chunked iteration).
pub mod streaming;
/// Time series dataset utilities.
pub mod time_series;
/// Classic toy datasets (glob re-exported at the crate root).
pub mod toy;
/// Core utilities for working with datasets
///
/// This module provides the Dataset struct and helper functions for
/// manipulating and transforming datasets.
pub mod utils;

/// Standard benchmark datasets (fully embedded, no download required)
///
/// Provides well-known ML datasets: Iris, Wine, Breast Cancer, Digits, Boston.
/// Each returns a `DatasetResult` with data, target, feature names, and description.
pub mod standard;

/// API stability guarantees and compatibility documentation
///
/// This module defines the API stability levels and compatibility guarantees
/// for the scirs2-datasets crate.
pub mod stability;

/// Pure Rust platform directory detection (replaces `dirs` crate for COOLJAPAN Pure Rust policy)
pub mod platform_dirs;

// Temporary module to test method resolution conflict
mod method_resolution_test;

/// Adaptive streaming engine with quality monitoring and memory strategies.
pub mod adaptive_streaming_engine;
/// Neuromorphic data processing (`NeuromorphicProcessor`, synaptic plasticity).
pub mod neuromorphic_data_processor;
/// Quantum-enhanced synthetic data generators.
pub mod quantum_enhanced_generators;
/// Quantum-neuromorphic fusion data processing.
pub mod quantum_neuromorphic_fusion;

// v0.2.0 modules
/// Lazy loading and memory-mapped datasets
///
/// Provides zero-copy dataset access with adaptive chunking for memory-efficient
/// processing of datasets larger than available RAM.
#[cfg(feature = "lazy-loading")]
pub mod lazy_loading;

/// Data augmentation pipeline with GPU support
///
/// Composable augmentation transforms for images, audio, and tabular data
/// with optional GPU acceleration for improved performance.
#[cfg(feature = "augmentation")]
pub mod augmentation;

/// Parallel data preprocessing
///
/// Multi-threaded preprocessing pipeline with work-stealing scheduler and
/// backpressure handling for optimal throughput.
pub mod parallel_preprocessing;

/// Distributed dataset loading
///
/// Shard-aware loading for distributed training with multi-node coordination
/// and distributed caching.
#[cfg(feature = "distributed")]
pub mod distributed_loading;

/// Format support (Parquet, Arrow, HDF5)
///
/// Integration with scirs2-io for reading and writing datasets in modern
/// columnar and scientific formats.
pub mod formats;
212
// Re-export commonly used functionality at the crate root
pub use adaptive_streaming_engine::{
    create_adaptive_engine, create_adaptive_engine_with_config, AdaptiveStreamConfig,
    AdaptiveStreamingEngine, AlertSeverity, AlertType, ChunkMetadata, DataCharacteristics,
    MemoryStrategy, PatternType, PerformanceMetrics, QualityAlert, QualityMetrics,
    StatisticalMoments, StreamChunk, TrendDirection, TrendIndicators,
};
pub use advanced_generators::{
    make_adversarial_examples, make_anomaly_dataset, make_continual_learning_dataset,
    make_domain_adaptation_dataset, make_few_shot_dataset, make_multitask_dataset,
    AdversarialConfig, AnomalyConfig, AnomalyType, AttackMethod, ContinualLearningDataset,
    DomainAdaptationConfig, DomainAdaptationDataset, FewShotDataset, MultiTaskConfig,
    MultiTaskDataset, TaskType,
};
pub use benchmarks::{BenchmarkResult, BenchmarkRunner, BenchmarkSuite, PerformanceComparison};
pub use cloud::{
    presets::{azure_client, gcs_client, public_s3_client, s3_client, s3_compatible_client},
    public_datasets::{AWSOpenData, AzureOpenData, GCPPublicData},
    CloudClient, CloudConfig, CloudCredentials, CloudProvider,
};
pub use distributed::{DistributedConfig, DistributedProcessor, ScalingMethod, ScalingParameters};
pub use domain_specific::{
    astronomy::StellarDatasets,
    climate::ClimateDatasets,
    convenience::{
        list_domain_datasets, load_atmospheric_chemistry, load_climate_data, load_exoplanets,
        load_gene_expression, load_stellar_classification,
    },
    genomics::GenomicsDatasets,
    DomainConfig, QualityFilters,
};
pub use explore::{
    convenience::{explore, export_summary, info, quick_summary},
    DatasetExplorer, DatasetSummary, ExploreConfig, FeatureStatistics, InferredDataType,
    OutputFormat, QualityAssessment,
};
// The `*_sync` convenience loaders are only re-exported when the `download`
// feature is DISABLED; with `download` enabled, the `load_from_url` /
// `load_github_dataset` / `load_uci_dataset` variants below are exported instead.
#[cfg(not(feature = "download"))]
pub use external::convenience::{load_github_dataset_sync, load_uci_dataset_sync};
pub use external::{
    convenience::{list_uci_datasets, load_from_url_sync},
    repositories::{GitHubRepository, KaggleRepository, UCIRepository},
    ExternalClient, ExternalConfig, ProgressCallback,
};
pub use ml_integration::{
    convenience::{create_experiment, cv_split, prepare_for_ml, train_test_split},
    CrossValidationResults, DataSplit, MLExperiment, MLPipeline, MLPipelineConfig,
    // Renamed: `distributed::ScalingMethod` is already exported above.
    ScalingMethod as MLScalingMethod,
};

pub use cache::{
    get_cachedir, BatchOperations, BatchResult, CacheFileInfo, CacheManager, CacheStats,
    DatasetCache, DetailedCacheStats,
};
#[cfg(feature = "download")]
pub use external::convenience::{load_from_url, load_github_dataset, load_uci_dataset};
pub use generators::{
    add_time_series_noise, benchmark_gpu_vs_cpu, get_gpu_info, gpu_is_available,
    inject_missing_data, inject_outliers, make_anisotropic_blobs, make_blobs, make_blobs_gpu,
    make_circles, make_classification, make_classification_gpu, make_corrupted_dataset, make_helix,
    make_hierarchical_clusters, make_intersecting_manifolds, make_manifold, make_moons,
    make_regression, make_regression_gpu, make_s_curve, make_severed_sphere, make_spirals,
    make_swiss_roll, make_swiss_roll_advanced, make_time_series, make_torus, make_twin_peaks,
    ManifoldConfig, ManifoldType, MissingPattern, OutlierType,
};
// Time series generators
pub use generators::time_series::{
    make_ar_process, make_random_walk, make_seasonal, make_sine_wave,
};
// Graph generators
pub use generators::graph::{
    make_barabasi_albert, make_karate_club, make_random_graph, make_watts_strogatz,
};
// Sparse matrix generators
pub use generators::sparse::{make_sparse_banded, make_sparse_laplacian, make_sparse_spd};
// Classification generators
pub use generators::classification::{
    make_classification_enhanced, make_hastie_10_2, make_multilabel_classification,
    ClassificationConfig, MultilabelConfig, MultilabelDataset,
};
// Regression generators
pub use generators::regression::{
    make_friedman1, make_friedman2, make_friedman3, make_low_rank_matrix, make_sparse_uncorrelated,
};
// Structured generators
pub use generators::structured::{
    make_biclusters, make_checkerboard, make_sparse_coded_signal, make_sparse_spd_matrix,
    make_spd_matrix,
};
// GPU device utilities and auto-dispatching GPU generators
// (was mislabeled "Standard datasets"; those are re-exported further below)
pub use gpu::{
    get_optimal_gpu_config, is_cuda_available, is_opencl_available, list_gpu_devices,
    make_blobs_auto_gpu, make_classification_auto_gpu, make_regression_auto_gpu, GpuBackend,
    GpuBenchmark, GpuBenchmarkResults, GpuConfig, GpuContext, GpuDeviceInfo, GpuMemoryConfig,
};
pub use gpu_optimization::{
    benchmark_advanced_performance, generate_advanced_matrix, AdvancedGpuOptimizer,
    // `BenchmarkResult` renamed: `benchmarks::BenchmarkResult` is exported above.
    AdvancedKernelConfig, BenchmarkResult as AdvancedBenchmarkResult, DataLayout,
    LoadBalancingMethod, MemoryAccessPattern, PerformanceBenchmarkResults, SpecializationLevel,
    VectorizationStrategy,
};
pub use loaders::{
    load_csv, load_csv_legacy, load_csv_parallel, load_csv_streaming, load_json, load_raw,
    save_json, CsvConfig, DatasetChunkIterator, StreamingConfig,
};
pub use neuromorphic_data_processor::{
    create_neuromorphic_processor, create_neuromorphic_processor_with_topology, NetworkTopology,
    NeuromorphicProcessor, NeuromorphicTransform, SynapticPlasticity,
};
pub use quantum_enhanced_generators::{
    make_quantum_blobs, make_quantum_classification, make_quantum_regression,
    QuantumDatasetGenerator,
};
pub use quantum_neuromorphic_fusion::{
    create_fusion_with_params, create_quantum_neuromorphic_fusion, QuantumBioFusionResult,
    QuantumInterference, QuantumNeuromorphicFusion,
};
pub use real_world::{
    list_real_world_datasets, load_adult, load_california_housing, load_heart_disease,
    load_red_wine_quality, load_titanic, RealWorldConfig, RealWorldDatasets,
};
pub use registry::{get_registry, load_dataset_byname, DatasetMetadata, DatasetRegistry};
pub use sample::*;
// Standard datasets: `_full` renames avoid clashing with the short loader
// names pulled in by `toy::*` below.
pub use standard::{
    load_boston as load_boston_full, load_breast_cancer as load_breast_cancer_full,
    load_digits as load_digits_full, load_iris as load_iris_full, load_wine, DatasetResult,
};
pub use streaming::{
    stream_classification, stream_csv, stream_regression, DataChunk, StreamConfig, StreamProcessor,
    StreamStats, StreamTransformer, StreamingIterator,
};
pub use toy::*;
pub use utils::{
    analyze_dataset_advanced, create_balanced_dataset, create_binned_features,
    generate_synthetic_samples, importance_sample, k_fold_split, min_max_scale,
    polynomial_features, quick_quality_assessment, random_oversample, random_sample,
    random_undersample, robust_scale, statistical_features, stratified_k_fold_split,
    stratified_sample, time_series_split, AdvancedDatasetAnalyzer, AdvancedQualityMetrics,
    BalancingStrategy, BinningStrategy, CorrelationInsights, CrossValidationFolds, Dataset,
    NormalityAssessment,
};
353
// v0.2.0 re-exports
#[cfg(feature = "lazy-loading")]
pub use lazy_loading::{
    from_binary as lazy_from_binary, from_binary_with_config as lazy_from_binary_with_config,
    LazyChunkIterator, LazyDataset, LazyLoadConfig, MmapDataset,
};

#[cfg(feature = "augmentation")]
pub use augmentation::{
    standard_image_augmentation, standard_tabular_augmentation, AugmentationPipeline, Brightness,
    Contrast, GaussianNoise, HorizontalFlip, Mixup, RandomFeatureScale, RandomRotation90,
    Transform, VerticalFlip,
};

pub use parallel_preprocessing::{
    create_pipeline, create_pipeline_with_config, ParallelConfig, ParallelPipeline, PreprocessFn,
};

#[cfg(feature = "distributed")]
pub use distributed_loading::{
    create_loader, create_loader_with_config, DistributedCache,
    // Renamed: `distributed::DistributedConfig` is already exported above.
    DistributedConfig as DistributedLoadingConfig, DistributedLoader, Shard,
};

// Core format types are always available; concrete readers/writers below
// additionally require the `formats` feature.
pub use formats::{CompressionCodec, FormatConfig, FormatType};

#[cfg(feature = "formats")]
pub use formats::{
    read_auto, read_hdf5, read_parquet, write_hdf5, write_parquet, FormatConverter, Hdf5Reader,
    Hdf5Writer, ParquetReader, ParquetWriter,
};