use crate::{
    backend_detection::BackendFeatureDetector,
    device::CpuDevice,
    memory_monitor::{MemoryPressure, SystemMemoryMonitor},
    ConversionUtils, DType, Device, InteropDocs, NumpyArrayInfo, Result, Shape,
};

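/// Examples of basic device management: creation, availability checks, and synchronization.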
pub struct DeviceExamples;

impl DeviceExamples {
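    /// Create a CPU device, check that it is available, and synchronize it.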
    pub fn basic_device_usage() -> Result<()> {
        let cpu_device = CpuDevice::new();
        println!("Created CPU device: {:?}", cpu_device.device_type());

        if cpu_device.is_available().unwrap_or(false) {
            println!("Device is available for computation");
        }

        cpu_device.synchronize()?;
        println!("Device synchronized successfully");

        Ok(())
    }

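    /// Detect runtime CPU features and enumerate the available devices.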
    pub fn device_capabilities() -> Result<()> {
        let detector = BackendFeatureDetector::new()?;

        let runtime_features = &detector.runtime_features;
        println!("Runtime features: {runtime_features:#?}");

        let available_devices = &detector.available_devices;
        println!("Available devices: {available_devices:#?}");

        if runtime_features.cpu_features.simd.avx2 {
            println!("AVX2 is available for accelerated operations");
        }

        if runtime_features.cpu_features.simd.neon {
            println!("ARM NEON is available for vectorized operations");
        }

        Ok(())
    }

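    /// Demonstrate that `synchronize` can be called repeatedly on a CPU device.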
    pub fn synchronization_patterns() -> Result<()> {
        let device = CpuDevice::new();

        // Single synchronization point.
        device.synchronize()?;

        // Synchronize can also be called back to back without error.
        device.synchronize()?;
        device.synchronize()?;
        println!("All synchronization patterns completed successfully");
        Ok(())
    }
}

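/// Examples of shape construction, inspection, broadcasting, and reshape checks.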
pub struct ShapeExamples;

impl ShapeExamples {
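    /// Build two shapes and query their dimensions, element counts, contiguity, and scalarity.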
    pub fn basic_shape_operations() -> Result<()> {
        let shape1 = Shape::new(vec![2, 3, 4]);
        let shape2 = Shape::new(vec![24]);

        println!("Shape 1: {:?}, elements: {}", shape1.dims(), shape1.numel());
        println!("Shape 2: {:?}, elements: {}", shape2.dims(), shape2.numel());

        if shape1.is_contiguous() {
            println!("Shape 1 is contiguous");
        }

        if shape2.is_scalar() {
            println!("Shape 2 is scalar");
        } else {
            println!("Shape 2 is not scalar");
        }

        Ok(())
    }

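    /// Broadcast two shapes together and report the resulting shape or the failure.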
    pub fn broadcasting_examples() -> Result<()> {
        let shape1 = Shape::new(vec![3, 1, 4]);
        let shape2 = Shape::new(vec![1, 2, 1]);

        match shape1.broadcast_with(&shape2) {
            Ok(result_shape) => {
                println!(
                    "Broadcasting {:?} with {:?} gives {:?}",
                    shape1.dims(),
                    shape2.dims(),
                    result_shape.dims()
                );
            }
            Err(e) => {
                println!("Broadcasting failed: {e}");
            }
        }

        Ok(())
    }

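    /// Inspect the strides of a 4-D shape and check whether a reshape preserves the element count.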
    pub fn advanced_shape_operations() -> Result<()> {
        let shape = Shape::new(vec![2, 3, 4, 5]);

        println!("Shape: {:?}", shape.dims());
        println!("Number of dimensions: {}", shape.ndim());
        println!("Total elements: {}", shape.numel());
        println!("Is contiguous: {}", shape.is_contiguous());

        let strides = shape.strides();
        println!("Strides: {strides:?}");

        let reshaped = Shape::new(vec![6, 20]);
        if shape.numel() == reshaped.numel() {
            println!("Reshaped to: {:?}", reshaped.dims());
        } else {
            println!(
                "Cannot reshape {} to {} - incompatible element count",
                shape.numel(),
                reshaped.numel()
            );
        }

        Ok(())
    }
}

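/// Examples covering data-type properties, type promotion, and quantized integer types.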
pub struct DTypeExamples;

impl DTypeExamples {
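    /// Print the name, byte size, and category (float/int/complex) of several dtypes.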
    pub fn basic_dtype_operations() {
        let dtypes = vec![
            DType::F32,
            DType::F64,
            DType::I32,
            DType::I64,
            DType::U8,
            DType::Bool,
            DType::C64,
            DType::C128,
        ];

        for dtype in dtypes {
            let name = dtype.name();
            let size = dtype.size_bytes();
            let is_float = dtype.is_float();
            let is_int = dtype.is_int();
            let is_complex = dtype.is_complex();
            println!(
                "Type: {name}, Size: {size} bytes, Float: {is_float}, Int: {is_int}, Complex: {is_complex}"
            );
        }
    }

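    /// Show how `TypePromotion::common_type` resolves mixed integer, float, and complex inputs.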
    pub fn type_promotion_examples() {
        use crate::dtype::TypePromotion;

        let common_type = <DType as TypePromotion>::common_type(&[DType::I32, DType::F32]);
        println!("Common type of I32 and F32: {common_type:?}");

        let common_type = <DType as TypePromotion>::common_type(&[DType::F32, DType::F64]);
        println!("Common type of F32 and F64: {common_type:?}");

        let common_type = <DType as TypePromotion>::common_type(&[DType::F32, DType::C64]);
        println!("Common type of F32 and C64: {common_type:?}");
    }

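    /// Construct quantized `QInt8`/`QUInt8` values and round-trip one of them through f32.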
    pub fn quantized_types() {
        use crate::dtype::{QInt8, QUInt8};

        let qint8 = QInt8 {
            value: -100,
            scale: 0.5,
            zero_point: 0,
        };
        let quint8 = QUInt8 {
            value: 50,
            scale: 0.25,
            zero_point: 128,
        };

        println!("QInt8: value={}, scale={}", qint8.value, qint8.scale);
        println!("QUInt8: value={}, scale={}", quint8.value, quint8.scale);

        // With a zero point of 0, dequantization is simply value * scale.
        let float_val = qint8.value as f32 * qint8.scale;
        let back_to_qint8 = QInt8 {
            value: (float_val / qint8.scale) as i8,
            scale: qint8.scale,
            zero_point: 0,
        };

        println!("QInt8 -> f32: {float_val}");
        println!("f32 -> QInt8: value={}", back_to_qint8.value);
    }
}

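/// Examples of pooled allocation and system memory monitoring.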
pub struct MemoryExamples;

impl MemoryExamples {
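    /// Allocate a buffer from the memory pool and return it when finished.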
    pub fn memory_pool_usage() -> Result<()> {
        use crate::storage::{allocate_pooled, deallocate_pooled};

        let size = 1024;
        let _alignment = 64;
        let buffer: Vec<f32> = allocate_pooled(size);

        println!("Allocated {size} f32 elements from memory pool");

        println!("Pool allocation successful");

        deallocate_pooled(buffer);
        println!("Memory deallocated");

        Ok(())
    }

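    /// Query system memory statistics and report the current memory pressure level.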
    pub fn memory_monitoring() -> Result<()> {
        let monitor = SystemMemoryMonitor::new()?;

        let stats = monitor.current_stats();
        println!(
            "System memory: {} MB total, {} MB available",
            stats.total_physical / 1024 / 1024,
            stats.available_physical / 1024 / 1024
        );

        let pressure = stats.pressure;
        match pressure {
            MemoryPressure::Normal => println!("Memory pressure: Normal"),
            MemoryPressure::Moderate => println!("Memory pressure: Moderate"),
            MemoryPressure::High => println!("Memory pressure: High"),
            MemoryPressure::Critical => println!("Memory pressure: Critical"),
        }

        Ok(())
    }
}

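/// Examples of interoperability with NumPy, ONNX, and Apache Arrow type systems.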
pub struct InteropExamples;

impl InteropExamples {
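    /// Describe an array in NumPy terms (shape, strides, contiguity) and score its layout.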
    pub fn numpy_compatibility() {
        let numpy_info = NumpyArrayInfo::new(vec![10, 20, 30], DType::F32);

        println!("NumPy array info:");
        println!(" Shape: {:?}", numpy_info.shape);
        println!(" Strides: {:?}", numpy_info.strides);
        println!(" C-contiguous: {}", numpy_info.c_contiguous);
        println!(" F-contiguous: {}", numpy_info.f_contiguous);
        println!(" Size in bytes: {}", numpy_info.nbytes);

        let efficiency = ConversionUtils::layout_efficiency_score(
            &numpy_info.shape,
            &numpy_info.strides,
            numpy_info.dtype.size(),
        );
        println!(" Layout efficiency: {efficiency:.2}");
    }

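    /// Map ToRSh dtypes to ONNX data types and back, then build an `OnnxTensorInfo`.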
    pub fn onnx_conversion() {
        use crate::interop::{OnnxDataType, OnnxTensorInfo};

        let torsh_types = vec![DType::F32, DType::I64, DType::Bool, DType::C64];

        for dtype in torsh_types {
            let onnx_type = OnnxDataType::from(dtype);
            println!("ToRSh {dtype:?} -> ONNX {onnx_type:?}");

            let back_to_torsh = DType::try_from(onnx_type).unwrap();
            assert_eq!(dtype, back_to_torsh);
        }

        let tensor_info = OnnxTensorInfo {
            elem_type: OnnxDataType::Float,
            // `None` marks a dynamic (unknown) dimension.
            shape: vec![Some(10), None, Some(20)],
            name: Some("example_tensor".to_string()),
        };

        println!("ONNX tensor info: {tensor_info:#?}");
    }

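    /// Map ToRSh dtypes to Arrow data types and attach metadata via `ArrowTypeInfo`.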
    pub fn arrow_integration() {
        use crate::interop::{ArrowDataType, ArrowTypeInfo};
        use std::collections::HashMap;

        let dtype = DType::F64;
        let arrow_type = ArrowDataType::from(dtype);
        println!("ToRSh {dtype:?} -> Arrow {arrow_type:?}");

        let complex_dtype = DType::C128;
        let arrow_complex = ArrowDataType::from(complex_dtype);
        println!("ToRSh {complex_dtype:?} -> Arrow {arrow_complex:?}");

        let mut metadata = HashMap::new();
        metadata.insert("origin".to_string(), "torsh".to_string());
        metadata.insert("version".to_string(), "0.1.0".to_string());

        let arrow_info = ArrowTypeInfo {
            data_type: arrow_type,
            metadata,
        };

        println!("Arrow type info: {arrow_info:#?}");
    }
}

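/// End-to-end workflows combining devices, shapes, dtypes, memory checks, and interop.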
pub struct WorkflowExamples;

impl WorkflowExamples {
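    /// Walk through a minimal tensor setup: device, shape, dtype, memory estimate, interop info.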
    pub fn basic_tensor_workflow() -> Result<()> {
        println!("=== Basic Tensor Workflow ===");

        let device = CpuDevice::new();
        println!("1. Created device: {:?}", device.device_type());

        let shape = Shape::new(vec![3, 4, 5]);
        let dtype = DType::F32;
        println!("2. Defined shape: {:?}, dtype: {:?}", shape.dims(), dtype);

        let bytes_needed = shape.numel() * dtype.size_bytes();
        println!("3. Memory needed: {bytes_needed} bytes");

        let numpy_info = NumpyArrayInfo::new(shape.dims().to_vec(), dtype);
        println!(
            "4. NumPy compatible: C-order={}, size={} bytes",
            numpy_info.c_contiguous, numpy_info.nbytes
        );

        device.synchronize()?;
        println!("5. Device synchronized");

        Ok(())
    }

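    /// Choose a tensor size based on the current system memory pressure before allocating.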
    pub fn memory_aware_processing() -> Result<()> {
        println!("=== Memory-Aware Processing ===");

        let monitor = SystemMemoryMonitor::new()?;
        let stats = monitor.current_stats();

        println!(
            "1. System memory: {:.1} GB available",
            stats.available_physical as f64 / 1024.0 / 1024.0 / 1024.0
        );
        println!(" Memory pressure: {:?}", stats.pressure);

        let max_elements = match stats.pressure {
            MemoryPressure::Normal => 1_000_000,
            MemoryPressure::Moderate => 500_000,
            MemoryPressure::High => 100_000,
            MemoryPressure::Critical => 10_000,
        };

        let shape = if max_elements >= 1_000_000 {
            Shape::new(vec![100, 100, 100])
        } else if max_elements >= 100_000 {
            Shape::new(vec![50, 50, 40])
        } else {
            Shape::new(vec![20, 20, 25])
        };

        println!(
            "2. Selected shape: {:?} ({} elements)",
            shape.dims(),
            shape.numel()
        );

        let size = shape.numel();
        let data: Vec<f32> = vec![0.0; size];
        println!("3. Allocated {size} elements");

        println!("4. Processing tensor...");

        drop(data);
        println!("5. Memory deallocated");

        Ok(())
    }

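    /// Detect the platform's SIMD features and pick a dtype and interop mapping accordingly.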
    pub fn cross_platform_workflow() -> Result<()> {
        println!("=== Cross-Platform Compatibility ===");

        let detector = BackendFeatureDetector::new()?;
        let features = &detector.runtime_features;

        println!("1. Platform detection:");
        println!(" Architecture: {:?}", features.cpu_features.architecture);
        println!(
            " SIMD: AVX2={}, NEON={}",
            features.cpu_features.simd.avx2, features.cpu_features.simd.neon
        );

        let dtype = if features.cpu_features.simd.avx512f {
            println!("2. Using F32 (AVX-512 available)");
            DType::F32
        } else if features.cpu_features.simd.avx2 {
            println!("2. Using F32 (AVX2 available)");
            DType::F32
        } else {
            println!("2. Using F64 (fallback for precision)");
            DType::F64
        };

        let shape = Shape::new(vec![32, 32, 32]);
        let numpy_info = NumpyArrayInfo::new(shape.dims().to_vec(), dtype);

        println!("3. Created tensor:");
        println!(" Shape: {:?}", numpy_info.shape);
        println!(
            " Layout efficiency: {:.2}",
            ConversionUtils::layout_efficiency_score(
                &numpy_info.shape,
                &numpy_info.strides,
                dtype.size()
            )
        );

        println!("4. Interoperability:");
        let onnx_type = crate::interop::OnnxDataType::from(dtype);
        let arrow_type = crate::interop::ArrowDataType::from(dtype);
        println!(" ONNX type: {onnx_type:?}");
        println!(" Arrow type: {arrow_type:?}");

        Ok(())
    }
}

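/// Examples illustrating memory-layout efficiency and SIMD-aware sizing guidance.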
pub struct PerformanceExamples;

impl PerformanceExamples {
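    /// Compare layout efficiency scores for C-contiguous, F-contiguous, and strided layouts.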
    pub fn memory_layout_optimization() {
        println!("=== Memory Layout Optimization ===");

        let shapes_and_layouts = vec![
            ("C-contiguous", vec![1000, 1000], vec![4000, 4]),
            ("F-contiguous", vec![1000, 1000], vec![4, 4000]),
            ("Strided", vec![1000, 1000], vec![8000, 8]),
        ];

        for (name, shape, strides) in shapes_and_layouts {
            let efficiency = ConversionUtils::layout_efficiency_score(&shape, &strides, 4);
            println!("{name}: efficiency = {efficiency:.3}");

            if efficiency > 0.9 {
                println!(" ✓ Excellent layout for performance");
            } else if efficiency > 0.7 {
                println!(" ⚠ Good layout, some optimization possible");
            } else {
                println!(" ⚡ Consider layout optimization");
            }
        }
    }

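    /// Report the detected SIMD vector width and suggest tensor sizes that are multiples of it.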
    pub fn simd_optimization_guidance() -> Result<()> {
        println!("=== SIMD Optimization Guidance ===");

        let detector = BackendFeatureDetector::new()?;
        let features = &detector.runtime_features;

        let vector_width = if features.cpu_features.simd.avx512f {
            println!("AVX-512 detected: 16 floats per vector");
            16
        } else if features.cpu_features.simd.avx2 {
            println!("AVX2 detected: 8 floats per vector");
            8
        } else if features.cpu_features.simd.neon {
            println!("NEON detected: 4 floats per vector");
            4
        } else {
            println!("No SIMD detected: scalar operations");
            1
        };

        let recommended_sizes = vec![vector_width * 32, vector_width * 64, vector_width * 128];

        println!("Recommended tensor sizes for optimal SIMD usage:");
        for size in recommended_sizes {
            println!(
                " {} elements ({}x vector width)",
                size,
                size / vector_width
            );
        }

        Ok(())
    }
}

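/// Helpers that print built-in interop documentation and an API overview.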
pub struct DocumentationExamples;

impl DocumentationExamples {
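    /// Print the supported interop conversions and conversion examples from `InteropDocs`.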
    pub fn print_help() {
        println!("{}", InteropDocs::supported_conversions());
        println!("{}", InteropDocs::conversion_examples());
    }

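    /// Print a short overview of the core modules, key types, and getting-started steps.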
    pub fn api_overview() {
        println!(
            r#"
ToRSh Core API Overview
=======================

Core Modules:
• device - Device abstraction and management
• dtype - Data type definitions and operations
• shape - Shape and stride utilities
• storage - Memory management and allocation
• interop - Interoperability with other libraries
• error - Error handling and reporting

Key Types:
• Device - Hardware device abstraction
• DType - Tensor data types (F32, I64, C64, etc.)
• Shape - Tensor dimensions and layout
• TorshError - Comprehensive error handling

Getting Started:
1. Create a device: let device = CpuDevice::new();
2. Define shape: let shape = Shape::new(vec![2, 3, 4]);
3. Choose dtype: let dtype = DType::F32;
4. Check interop: let numpy_info = NumpyArrayInfo::new(...);

For detailed examples, see the examples module.
"#
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_device_examples() {
        assert!(DeviceExamples::basic_device_usage().is_ok());
        assert!(DeviceExamples::device_capabilities().is_ok());
        assert!(DeviceExamples::synchronization_patterns().is_ok());
    }

    #[test]
    fn test_shape_examples() {
        assert!(ShapeExamples::basic_shape_operations().is_ok());
        assert!(ShapeExamples::broadcasting_examples().is_ok());
        assert!(ShapeExamples::advanced_shape_operations().is_ok());
    }

    #[test]
    fn test_dtype_examples() {
        DTypeExamples::basic_dtype_operations();
        DTypeExamples::type_promotion_examples();
        DTypeExamples::quantized_types();
    }

    #[test]
    fn test_memory_examples() {
        assert!(MemoryExamples::memory_pool_usage().is_ok());
        assert!(MemoryExamples::memory_monitoring().is_ok());
    }

    #[test]
    fn test_interop_examples() {
        InteropExamples::numpy_compatibility();
        InteropExamples::onnx_conversion();
        InteropExamples::arrow_integration();
    }

    #[test]
    fn test_workflow_examples() {
        assert!(WorkflowExamples::basic_tensor_workflow().is_ok());
        assert!(WorkflowExamples::memory_aware_processing().is_ok());
        assert!(WorkflowExamples::cross_platform_workflow().is_ok());
    }

    #[test]
    fn test_performance_examples() {
        PerformanceExamples::memory_layout_optimization();
        assert!(PerformanceExamples::simd_optimization_guidance().is_ok());
    }

    #[test]
    fn test_documentation_examples() {
        DocumentationExamples::print_help();
        DocumentationExamples::api_overview();
    }
}