//! accelerated_neural_ops_example.rs
use ndarray::{array, Array2};
use scirs2_neural::error::Result;
use scirs2_neural::gpu::{create_neural_ops, create_neural_ops_with_backend};

11fn main() -> Result<()> {
12 println!("=== Accelerated Neural Operations Demo ===\n");
13
14 let ops = create_neural_ops()?;
16 println!("{}\n", ops.backend_info());
17
18 println!("1. Matrix Multiplication:");
20 let a = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];
21 let b = array![[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]];
22
23 println!("Matrix A (2x3):\n{:?}", a);
24 println!("Matrix B (3x2):\n{:?}", b);
25
26 let result = ops.matrix_multiply(&a, &b)?;
27 println!("Result A * B (2x2):\n{:?}\n", result);
28
29 println!("2. Batch Matrix Multiplication:");
31 let batch_a = array![[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]].into_dyn();
32 let batch_b = array![[[2.0, 0.0], [1.0, 1.0]], [[1.0, 2.0], [0.0, 1.0]]].into_dyn();
33
34 println!("Batch A shape: {:?}", batch_a.shape());
35 println!("Batch B shape: {:?}", batch_b.shape());
36
37 let batch_result = ops.batch_matrix_multiply(&batch_a, &batch_b)?;
38 println!("Batch result shape: {:?}", batch_result.shape());
39 println!("Batch result:\n{:?}\n", batch_result);
40
41 println!("3. Activation Functions:");
43 let input = array![[-2.0, -1.0, 0.0, 1.0, 2.0]].into_dyn();
44 println!("Input: {:?}", input);
45
46 let relu_output = ops.relu_forward(&input)?;
47 println!("ReLU output: {:?}", relu_output);
48
49 let sigmoid_output = ops.sigmoid_forward(&input)?;
50 println!("Sigmoid output: {:?}", sigmoid_output);
51
52 let grad_output = array![[1.0, 1.0, 1.0, 1.0, 1.0]].into_dyn();
54 let relu_grad = ops.relu_backward(&input, &grad_output)?;
55 println!("ReLU gradient: {:?}\n", relu_grad);
56
57 println!("4. Softmax Activation:");
59 let logits = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]].into_dyn();
60 println!("Logits: {:?}", logits);
61
62 let softmax_output = ops.softmax_forward(&logits)?;
63 println!("Softmax output: {:?}", softmax_output);
64
65 for (i, row) in softmax_output.axis_iter(ndarray::Axis(0)).enumerate() {
67 let sum: f32 = row.sum();
68 println!("Row {} sum: {:.6}", i, sum);
69 }
70 println!();
71
72 println!("5. Batch Normalization:");
74 let batch_input = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]].into_dyn();
75 let mean = array![3.0, 4.0]; let variance = array![2.0, 2.0]; let gamma = array![1.0, 1.0]; let beta = array![0.0, 0.0]; println!("Input: {:?}", batch_input);
81 println!("Mean: {:?}", mean);
82 println!("Variance: {:?}", variance);
83
84 let normalized = ops.batch_normalize(&batch_input, &mean, &variance, &gamma, &beta, 1e-5)?;
85 println!("Normalized output: {:?}\n", normalized);
86
87 println!("6. 2D Convolution:");
89 let conv_input = array![[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]].into_dyn(); let kernel = array![[[[1.0, 0.0], [-1.0, 0.0]]]].into_dyn(); println!("Input shape: {:?}", conv_input.shape());
94 println!("Kernel shape: {:?}", kernel.shape());
95
96 let conv_output = ops.conv2d_forward(&conv_input, &kernel, (1, 1), (0, 0))?;
97 println!("Convolution output shape: {:?}", conv_output.shape());
98 println!("Convolution output: {:?}\n", conv_output);
99
100 println!("7. Backend Selection:");
102 let cpu_ops = create_neural_ops_with_backend("CPU")?;
103 println!("{}", cpu_ops.backend_info());
104
105 let gpu_ops = create_neural_ops_with_backend("GPU")?;
106 println!("{}", gpu_ops.backend_info());
107
108 let custom_ops = create_neural_ops_with_backend("Custom-Accelerator")?;
109 println!("{}\n", custom_ops.backend_info());
110
111 println!("8. Performance Demonstration:");
113 demonstrate_performance_scaling()?;
114
115 println!("=== Demo Complete ===");
116 Ok(())
117}
118
119fn demonstrate_performance_scaling() -> Result<()> {
120 let ops = create_neural_ops()?;
121
122 let sizes = vec![10, 50, 100];
124
125 for size in sizes {
126 let a = Array2::ones((size, size));
127 let b = Array2::ones((size, size));
128
129 let start = std::time::Instant::now();
130 let _result = ops.matrix_multiply(&a, &b)?;
131 let duration = start.elapsed();
132
133 println!(
134 "Matrix {}x{} multiplication took: {:?}",
135 size, size, duration
136 );
137 }
138
139 println!();
140 Ok(())
141}
142
#[cfg(test)]
mod tests {
    use super::*;

    /// The full demo should run to completion without error.
    #[test]
    fn test_example_runs() {
        assert!(main().is_ok());
    }

    /// The timing demo on its own should also succeed.
    #[test]
    fn test_performance_demo() {
        assert!(demonstrate_performance_scaling().is_ok());
    }
}