use scirs2_datasets::{
    create_adaptive_engine_with_config,
    make_classification,
    quick_quality_assessment,
    AdaptiveStreamConfig,
    AdvancedDatasetAnalyzer,
    AdvancedGpuOptimizer,
    ChunkMetadata,
    DataCharacteristics,
    Dataset,
    GpuBackend,
    GpuConfig,
    GpuContext,
    StatisticalMoments,
    StreamChunk,
    TrendDirection,
    TrendIndicators,
};
use statrs::statistics::Statistics;
use std::time::Instant;

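// This showcase runs three demonstrations back to back:
//   1. advanced analytics: quick and comprehensive dataset quality analysis,
//   2. GPU-style optimization: matrix generation and benchmarking via `AdvancedGpuOptimizer`,
//   3. adaptive streaming: chunked processing of the sample dataset with live metrics.
// Each `demonstrate_*` function below prints the results of its step.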
#[allow(dead_code)]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("SciRS2-Datasets Advanced Mode Showcase");
    println!("===========================================\n");

    let dataset = create_sampledataset()?;
    println!(
        "Created sample dataset: {} samples, {} features",
        dataset.n_samples(),
        dataset.n_features()
    );

    demonstrate_advanced_analytics(&dataset)?;

    demonstrate_advanced_gpu_optimization()?;

    demonstrate_adaptive_streaming(&dataset)?;

    println!("\nAdvanced mode demonstration completed successfully!");
    Ok(())
}

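/// Builds the synthetic classification dataset used by every demonstration:
/// 1000 samples with 10 features (the integration tests below assert exactly
/// these dimensions). The remaining positional arguments and the `Some(42)`
/// value are passed straight to `make_classification`; the `42` is assumed to
/// be a fixed RNG seed so repeated runs produce the same data.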
#[allow(dead_code)]
fn create_sampledataset() -> Result<Dataset, Box<dyn std::error::Error>> {
    println!("Generating sample classification dataset...");

    let dataset = make_classification(1000, 10, 3, 2, 5, Some(42))?;

    Ok(dataset)
}

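/// Runs a fast `quick_quality_assessment` first, then a full pass with
/// `AdvancedDatasetAnalyzer` (GPU enabled, advanced precision, significance
/// threshold 0.01), and prints the complexity, entropy, outlier, ML-quality,
/// normality, and correlation metrics it returns.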
#[allow(dead_code)]
fn demonstrate_advanced_analytics(dataset: &Dataset) -> Result<(), Box<dyn std::error::Error>> {
    println!("\nAdvanced Analytics Demonstration");
    println!("==========================================");

    println!("Running quick quality assessment...");
    let quick_quality = quick_quality_assessment(dataset)?;
    println!("  Quality Score: {quick_quality:.3}");

    println!("Running comprehensive advanced analysis...");
    let start_time = Instant::now();

    let analyzer = AdvancedDatasetAnalyzer::new()
        .with_gpu(true)
        .with_advanced_precision(true)
        .with_significance_threshold(0.01);

    let metrics = analyzer.analyze_dataset_quality(dataset)?;
    let analysis_time = start_time.elapsed();

    println!("  Analysis completed in: {analysis_time:?}");
    println!("  Complexity Score: {:.3}", metrics.complexity_score);
    println!("  Entropy: {:.3}", metrics.entropy);
    println!("  Outlier Score: {:.3}", metrics.outlier_score);
    println!("  ML Quality Score: {:.3}", metrics.ml_quality_score);

    println!("  Normality Assessment:");
    println!(
        "    Overall Normality: {:.3}",
        metrics.normality_assessment.overall_normality
    );
    println!(
        "    Shapiro-Wilk (avg): {:.3}",
        metrics.normality_assessment.shapiro_wilk_scores.mean()
    );

    println!("  Correlation Insights:");
    println!(
        "    Feature Importance (top 3): {:?}",
        metrics
            .correlation_insights
            .feature_importance
            .iter()
            .take(3)
            .map(|&x| format!("{x:.3}"))
            .collect::<Vec<_>>()
    );

    Ok(())
}

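/// Sets up a `GpuContext` on the `GpuBackend::Cpu` backend (presumably the
/// portable fallback, so the example also runs on machines without a GPU),
/// configures an `AdvancedGpuOptimizer`, generates a 500x200 "normal" matrix,
/// and benchmarks matrix generation across three data shapes.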
128#[allow(dead_code)]
130fn demonstrate_advanced_gpu_optimization() -> Result<(), Box<dyn std::error::Error>> {
131 println!("\nā” Advanced-GPU Optimization Demonstration");
132 println!("=====================================");
133
134 println!("š§ Initializing GPU context...");
136 let gpu_config = GpuConfig {
137 backend: GpuBackend::Cpu,
138 threads_per_block: 1, ..Default::default()
140 };
141 let gpu_context = GpuContext::new(gpu_config)?; println!(" Backend: {:?}", gpu_context.backend());
143
144 let optimizer = AdvancedGpuOptimizer::new()
146 .with_adaptive_kernels(true)
147 .with_memory_prefetch(true)
148 .with_multi_gpu(false) .with_auto_tuning(true);
150
151 println!("š„ Generating advanced-optimized matrix...");
153 let start_time = Instant::now();
154 let matrix = optimizer.generate_advanced_optimized_matrix(
155 &gpu_context,
156 500, 200, "normal", )?;
160 let generation_time = start_time.elapsed();
161
162 println!(
163 " Generated {}x{} matrix in: {:?}",
164 matrix.nrows(),
165 matrix.ncols(),
166 generation_time
167 );
168 let matrix_mean = matrix.clone().mean();
169 let matrix_std = matrix.var(1.0).sqrt();
170 println!(
171 " Matrix stats: mean={:.3}, std={:.3}",
172 matrix_mean, matrix_std
173 );
174
175 println!("š Running performance benchmarks...");
177 let datashapes = vec![(100, 50), (500, 200), (1000, 500)];
178 let benchmark_results =
179 optimizer.benchmark_performance(&gpu_context, "matrix_generation", &datashapes)?;
180
181 println!(" Benchmark Results:");
182 println!(
183 " Best Speedup: {:.2}x",
184 benchmark_results.best_speedup()
185 );
186 println!(
187 " Average Speedup: {:.2}x",
188 benchmark_results.average_speedup()
189 );
190 println!(
191 " Total Memory Usage: {:.1} MB",
192 benchmark_results.total_memory_usage()
193 );
194
195 Ok(())
196}
197
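/// Feeds the sample dataset through the adaptive streaming engine in 20-row
/// chunks (at most 10 of them). Each chunk is wrapped in a `StreamChunk` whose
/// `DataCharacteristics` hold placeholder values rather than statistics
/// computed from the chunk itself; afterwards the engine's performance,
/// quality, and buffer metrics are printed.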
198#[allow(dead_code)]
200fn demonstrate_adaptive_streaming(dataset: &Dataset) -> Result<(), Box<dyn std::error::Error>> {
201 println!("\nš Adaptive Streaming Demonstration");
202 println!("===================================");
203
204 let config = AdaptiveStreamConfig::default();
206
207 println!("š§ Initializing adaptive streaming engine...");
208 let mut engine = create_adaptive_engine_with_config(config);
209
210 println!("š” Simulating data stream...");
212 let data = &dataset.data;
213 let chunksize = 20;
214 let num_chunks = (data.nrows() / chunksize).min(10); let mut total_processed = 0;
217 let start_time = Instant::now();
218
219 for i in 0..num_chunks {
220 let start_row = i * chunksize;
221 let end_row = (start_row + chunksize).min(data.nrows());
222
223 let chunkdata = data
225 .slice(scirs2_core::ndarray::s![start_row..end_row, ..])
226 .to_owned();
227
228 let chunk = StreamChunk {
229 data: chunkdata,
230 timestamp: Instant::now(),
231 metadata: ChunkMetadata {
232 source_id: format!("demo_source_{i}"),
233 sequence_number: i as u64,
234 characteristics: DataCharacteristics {
235 moments: StatisticalMoments {
236 mean: 0.0,
237 variance: 1.0,
238 skewness: 0.0,
239 kurtosis: 0.0,
240 },
241 entropy: 1.0,
242 trend: TrendIndicators {
243 linear_slope: 0.1,
244 trend_strength: 0.5,
245 direction: TrendDirection::Increasing,
246 seasonality: 0.2,
247 },
248 anomaly_score: 0.1,
249 },
250 },
251 quality_score: 0.9,
252 };
253
254 let results = engine.process_stream(chunk)?;
256 total_processed += results.len();
257
258 if !results.is_empty() {
259 println!(
260 " Processed batch {}: {} datasets generated",
261 i + 1,
262 results.len()
263 );
264 }
265 }
266
267 let streaming_time = start_time.elapsed();
268
269 println!(" Streaming completed in: {streaming_time:?}");
270 println!(" Total datasets processed: {total_processed}");
271
272 println!("š Getting performance metrics...");
274 let perf_metrics = engine.get_performance_metrics()?;
275 println!(" Processing Latency: {:?}", perf_metrics.latency);
276 println!(" Throughput: {:.1} chunks/sec", perf_metrics.throughput);
277 println!(
278 " Memory Efficiency: {:.1}%",
279 perf_metrics.memory_efficiency * 100.0
280 );
281
282 let quality_metrics = engine.get_quality_metrics()?;
284 println!(" Quality Metrics:");
285 println!(
286 " Integrity: {:.1}%",
287 quality_metrics.integrity_score * 100.0
288 );
289 println!(
290 " Completeness: {:.1}%",
291 quality_metrics.completeness_score * 100.0
292 );
293 println!(
294 " Overall Quality: {:.1}%",
295 quality_metrics.overall_score * 100.0
296 );
297
298 let buffer_stats = engine.get_buffer_statistics()?;
300 println!(" Buffer Statistics:");
301 println!(" Utilization: {:.1}%", buffer_stats.utilization * 100.0);
302 println!(" Memory Usage: {} bytes", buffer_stats.memory_usage);
303
304 Ok(())
305}
306
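// Integration tests: each test checks that the corresponding demonstration
// runs end-to-end on the generated sample dataset without returning an error.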
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sample_dataset_creation() {
        let result = create_sampledataset();
        assert!(result.is_ok());
        let dataset = result.unwrap();
        assert_eq!(dataset.n_samples(), 1000);
        assert_eq!(dataset.n_features(), 10);
    }

    #[test]
    fn test_advanced_analytics_integration() {
        let dataset = create_sampledataset().unwrap();
        let result = demonstrate_advanced_analytics(&dataset);
        assert!(result.is_ok());
    }

    #[test]
    fn test_gpu_optimization_integration() {
        let result = demonstrate_advanced_gpu_optimization();
        match &result {
            Ok(_) => println!("GPU optimization test passed"),
            Err(e) => println!("GPU optimization test failed with error: {}", e),
        }
        assert!(result.is_ok());
    }

    #[test]
    fn test_adaptive_streaming_integration() {
        let dataset = create_sampledataset().unwrap();
        let result = demonstrate_adaptive_streaming(&dataset);
        assert!(result.is_ok());
    }
}