pub struct NeuralOps { /* private fields */ }
Enhanced neural operations accelerator with full GPU support
Implementations§
impl NeuralOps
pub fn with_backend(backend: &str) -> Result<Self>
Create with specified backend preference
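A minimal usage sketch: the backend strings shown in the repository example below ("CPU", "GPU", "Custom-Accelerator") are assumed to be accepted here, and falling back to CPU when construction fails is this sketch's choice, not documented behavior.
// Prefer the GPU backend; fall back to CPU on error (fallback is an assumption).
let ops = NeuralOps::with_backend("GPU")
    .or_else(|_| NeuralOps::with_backend("CPU"))?;
println!("{}", ops.backend_info());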
pub fn enable_mixed_precision(
    &mut self,
    config: MixedPrecisionConfig,
) -> Result<()>
Enable mixed precision training
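A sketch of enabling mixed precision. MixedPrecisionConfig's fields are not documented on this page, so the Default construction used here is an assumption; consult that type's own docs for the tunable settings.
let mut ops = NeuralOps::with_backend("GPU")?;
// Assumes MixedPrecisionConfig implements Default (not confirmed on this page).
ops.enable_mixed_precision(MixedPrecisionConfig::default())?;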
pub fn is_gpu_available(&self) -> bool
Check if GPU is available
pub fn gpu_context(&self) -> Option<&Arc<GpuContext>>
Get GPU context (if available)
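A sketch combining the two GPU queries above, using the create_neural_ops constructor from the repository example:
use std::sync::Arc;

let ops = create_neural_ops()?;
if ops.is_gpu_available() {
    // gpu_context returns Some only when a GPU context was initialized.
    let ctx: &Arc<GpuContext> = ops.gpu_context().expect("GPU reported available");
    let _shared = Arc::clone(ctx); // the Arc can be cloned and shared across threads
} else {
    println!("no GPU; CPU fallback in use");
}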
pub fn matrix_multiply(
    &self,
    a: &Array2<f32>,
    b: &Array2<f32>,
) -> Result<Array2<f32>>
Optimized matrix multiplication
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 26)
11 fn main() -> Result<()> {
12 println!("=== Accelerated Neural Operations Demo ===\n");
13
14 // Create neural operations context
15 let ops = create_neural_ops()?;
16 println!("{}\n", ops.backend_info());
17
18 // Demonstrate matrix multiplication
19 println!("1. Matrix Multiplication:");
20 let a = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];
21 let b = array![[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]];
22
23 println!("Matrix A (2x3):\n{:?}", a);
24 println!("Matrix B (3x2):\n{:?}", b);
25
26 let result = ops.matrix_multiply(&a, &b)?;
27 println!("Result A * B (2x2):\n{:?}\n", result);
28
29 // Demonstrate batch matrix multiplication
30 println!("2. Batch Matrix Multiplication:");
31 let batch_a = array![[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]].into_dyn();
32 let batch_b = array![[[2.0, 0.0], [1.0, 1.0]], [[1.0, 2.0], [0.0, 1.0]]].into_dyn();
33
34 println!("Batch A shape: {:?}", batch_a.shape());
35 println!("Batch B shape: {:?}", batch_b.shape());
36
37 let batch_result = ops.batch_matrix_multiply(&batch_a, &batch_b)?;
38 println!("Batch result shape: {:?}", batch_result.shape());
39 println!("Batch result:\n{:?}\n", batch_result);
40
41 // Demonstrate activation functions
42 println!("3. Activation Functions:");
43 let input = array![[-2.0, -1.0, 0.0, 1.0, 2.0]].into_dyn();
44 println!("Input: {:?}", input);
45
46 let relu_output = ops.relu_forward(&input)?;
47 println!("ReLU output: {:?}", relu_output);
48
49 let sigmoid_output = ops.sigmoid_forward(&input)?;
50 println!("Sigmoid output: {:?}", sigmoid_output);
51
52 // Demonstrate ReLU backward pass
53 let grad_output = array![[1.0, 1.0, 1.0, 1.0, 1.0]].into_dyn();
54 let relu_grad = ops.relu_backward(&input, &grad_output)?;
55 println!("ReLU gradient: {:?}\n", relu_grad);
56
57 // Demonstrate softmax
58 println!("4. Softmax Activation:");
59 let logits = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]].into_dyn();
60 println!("Logits: {:?}", logits);
61
62 let softmax_output = ops.softmax_forward(&logits)?;
63 println!("Softmax output: {:?}", softmax_output);
64
65 // Verify softmax properties (each row sums to 1)
66 for (i, row) in softmax_output.axis_iter(ndarray::Axis(0)).enumerate() {
67 let sum: f32 = row.sum();
68 println!("Row {} sum: {:.6}", i, sum);
69 }
70 println!();
71
72 // Demonstrate batch normalization
73 println!("5. Batch Normalization:");
74 let batch_input = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]].into_dyn();
75 let mean = array![3.0, 4.0]; // Per-channel mean
76 let variance = array![2.0, 2.0]; // Per-channel variance
77 let gamma = array![1.0, 1.0]; // Scale parameter
78 let beta = array![0.0, 0.0]; // Shift parameter
79
80 println!("Input: {:?}", batch_input);
81 println!("Mean: {:?}", mean);
82 println!("Variance: {:?}", variance);
83
84 let normalized = ops.batch_normalize(&batch_input, &mean, &variance, &gamma, &beta, 1e-5)?;
85 println!("Normalized output: {:?}\n", normalized);
86
87 // Demonstrate convolution
88 println!("6. 2D Convolution:");
89 let conv_input = array![[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]].into_dyn(); // Shape: (1, 1, 3, 3) - (batch, channels, height, width)
90
91 let kernel = array![[[[1.0, 0.0], [-1.0, 0.0]]]].into_dyn(); // Shape: (1, 1, 2, 2) - (out_channels, in_channels, kernel_h, kernel_w)
92
93 println!("Input shape: {:?}", conv_input.shape());
94 println!("Kernel shape: {:?}", kernel.shape());
95
96 let conv_output = ops.conv2d_forward(&conv_input, &kernel, (1, 1), (0, 0))?;
97 println!("Convolution output shape: {:?}", conv_output.shape());
98 println!("Convolution output: {:?}\n", conv_output);
99
100 // Demonstrate different backend preferences
101 println!("7. Backend Selection:");
102 let cpu_ops = create_neural_ops_with_backend("CPU")?;
103 println!("{}", cpu_ops.backend_info());
104
105 let gpu_ops = create_neural_ops_with_backend("GPU")?;
106 println!("{}", gpu_ops.backend_info());
107
108 let custom_ops = create_neural_ops_with_backend("Custom-Accelerator")?;
109 println!("{}\n", custom_ops.backend_info());
110
111 // Performance comparison example
112 println!("8. Performance Demonstration:");
113 demonstrate_performance_scaling()?;
114
115 println!("=== Demo Complete ===");
116 Ok(())
117 }
118
119 fn demonstrate_performance_scaling() -> Result<()> {
120 let ops = create_neural_ops()?;
121
122 // Create progressively larger matrices to show scaling
123 let sizes = vec![10, 50, 100];
124
125 for size in sizes {
126 let a = Array2::ones((size, size));
127 let b = Array2::ones((size, size));
128
129 let start = std::time::Instant::now();
130 let _result = ops.matrix_multiply(&a, &b)?;
131 let duration = start.elapsed();
132
133 println!(
134 "Matrix {}x{} multiplication took: {:?}",
135 size, size, duration
136 );
137 }
138
139 println!();
140 Ok(())
141 }
pub fn batch_matrix_multiply(
    &self,
    a: &ArrayD<f32>,
    b: &ArrayD<f32>,
) -> Result<ArrayD<f32>>
Batch matrix multiplication for neural network layers
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 37); full listing shown under matrix_multiply above.
pub fn relu_forward(&self, input: &ArrayD<f32>) -> Result<ArrayD<f32>>
ReLU activation function
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 46); full listing shown under matrix_multiply above.
pub fn relu_backward(
    &self,
    input: &ArrayD<f32>,
    grad_output: &ArrayD<f32>,
) -> Result<ArrayD<f32>>
ReLU derivative for backpropagation
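Elementwise, the standard ReLU gradient this pass computes is (how values at exactly 0 are treated is not specified on this page):

\frac{\partial L}{\partial x_i} = \frac{\partial L}{\partial y_i} \cdot \mathbf{1}[x_i > 0]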
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 54); full listing shown under matrix_multiply above.
pub fn sigmoid_forward(&self, input: &ArrayD<f32>) -> Result<ArrayD<f32>>
Sigmoid activation function
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 49); full listing shown under matrix_multiply above.
pub fn sigmoid_backward(
    &self,
    output: &ArrayD<f32>,
    grad_output: &ArrayD<f32>,
) -> Result<ArrayD<f32>>
Sigmoid derivative
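Note that the first argument is named output: the standard convention, assumed here from that name, is to pass the forward-pass result and exploit the identity σ'(x) = σ(x)(1 − σ(x)). A minimal sketch of the pairing:
// Pair the forward output with the incoming gradient.
let output = ops.sigmoid_forward(&input)?;
let grad_input = ops.sigmoid_backward(&output, &grad_output)?;
// grad_input[i] = grad_output[i] * output[i] * (1 - output[i])  (standard identity)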
pub fn batch_normalize(
    &self,
    input: &ArrayD<f32>,
    mean: &Array1<f32>,
    var: &Array1<f32>,
    gamma: &Array1<f32>,
    beta: &Array1<f32>,
    epsilon: f32,
) -> Result<ArrayD<f32>>
Batch normalization forward pass
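Per channel, with mean μ, variance σ², and the scale/shift parameters gamma and beta, the standard transform these arguments describe is

y = \gamma \cdot \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta

where epsilon guards against division by zero (1e-5 in the repository example).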
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 84); full listing shown under matrix_multiply above.
pub fn softmax_forward(&self, input: &ArrayD<f32>) -> Result<ArrayD<f32>>
Softmax activation function
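The standard definition, applied row-wise in the repository example (whether this implementation subtracts the row maximum for numerical stability, as is customary, is not stated on this page):

\mathrm{softmax}(x)_i = \frac{e^{x_i - \max_j x_j}}{\sum_k e^{x_k - \max_j x_j}}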
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 62); full listing shown under matrix_multiply above.
pub fn conv2d_forward(
    &self,
    input: &ArrayD<f32>,
    kernel: &ArrayD<f32>,
    stride: (usize, usize),
    padding: (usize, usize),
) -> Result<ArrayD<f32>>
Convolution forward pass (simplified 2D implementation)
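Assuming the usual convolution shape arithmetic for NCHW input and (out_channels, in_channels, K_h, K_w) kernels:

H_{out} = \lfloor (H + 2 p_h - K_h) / s_h \rfloor + 1, \qquad W_{out} = \lfloor (W + 2 p_w - K_w) / s_w \rfloor + 1

which matches the repository example: a 3×3 input with a 2×2 kernel, stride (1, 1), and padding (0, 0) yields a 2×2 output.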
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 96); full listing shown under matrix_multiply above.
pub fn gpu_matrix_multiply<T>(
    &self,
    a: &CudaTensor<T>,
    b: &CudaTensor<T>,
) -> Result<CudaTensor<T>>
Optimized GPU matrix multiplication
pub fn gpu_relu<T>(&self, input: &CudaTensor<T>) -> Result<CudaTensor<T>>
GPU-accelerated ReLU activation
pub fn gpu_softmax<T>(&self, input: &CudaTensor<T>) -> Result<CudaTensor<T>>
GPU-accelerated softmax
Sourcepub fn gpu_conv2d<T>(
&self,
input: &CudaTensor<T>,
kernel: &CudaTensor<T>,
stride: (usize, usize),
padding: (usize, usize),
) -> Result<CudaTensor<T>>
pub fn gpu_conv2d<T>( &self, input: &CudaTensor<T>, kernel: &CudaTensor<T>, stride: (usize, usize), padding: (usize, usize), ) -> Result<CudaTensor<T>>
GPU-accelerated convolution
pub fn synchronize(&self) -> Result<()>
Synchronize GPU operations
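A sketch of chaining the gpu_* methods above into a small pipeline, given host arrays input and weights. How a CudaTensor is created from host data is not documented on this page; CudaTensor::from_array is a hypothetical name used purely for illustration.
if ops.is_gpu_available() {
    let x = CudaTensor::from_array(&input)?;   // hypothetical host-to-device upload
    let w = CudaTensor::from_array(&weights)?; // hypothetical
    let h = ops.gpu_matrix_multiply(&w, &x)?;
    let a = ops.gpu_relu(&h)?;
    let y = ops.gpu_softmax(&a)?;
    ops.synchronize()?; // block until the queued GPU work completes
    // ... download or reuse y on-device ...
}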
pub fn backend_info(&self) -> String
Get backend information
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 16); full listing shown under matrix_multiply above.
Trait Implementations§
Auto Trait Implementations§
impl Freeze for NeuralOps
impl RefUnwindSafe for NeuralOps
impl Send for NeuralOps
impl Sync for NeuralOps
impl Unpin for NeuralOps
impl UnwindSafe for NeuralOps
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more