pub fn create_neural_ops_with_backend(backend: &str) -> Result<NeuralOps>
Helper function to create neural operations with a preferred backend.
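A minimal usage sketch, not taken from the crate's own docs: it assumes create_neural_ops_with_backend and the companion create_neural_ops are re-exported at the crate root, and that NeuralOps::backend_info() returns a printable summary, as in the repository example below. Imports are elided the same way the scraped example elides them.

// Sketch only; assumes the crate root re-exports create_neural_ops,
// create_neural_ops_with_backend, and its Result alias (imports elided).
fn main() -> Result<()> {
    // Request a specific backend by name.
    let ops = create_neural_ops_with_backend("GPU")?;
    println!("{}", ops.backend_info());

    // For comparison, create_neural_ops() chooses a backend automatically.
    let default_ops = create_neural_ops()?;
    println!("{}", default_ops.backend_info());
    Ok(())
}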
Examples found in repository
examples/accelerated_neural_ops_example.rs (line 102)
 11 fn main() -> Result<()> {
 12     println!("=== Accelerated Neural Operations Demo ===\n");
 13
 14     // Create neural operations context
 15     let ops = create_neural_ops()?;
 16     println!("{}\n", ops.backend_info());
 17
 18     // Demonstrate matrix multiplication
 19     println!("1. Matrix Multiplication:");
 20     let a = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];
 21     let b = array![[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]];
 22
 23     println!("Matrix A (2x3):\n{:?}", a);
 24     println!("Matrix B (3x2):\n{:?}", b);
 25
 26     let result = ops.matrix_multiply(&a, &b)?;
 27     println!("Result A * B (2x2):\n{:?}\n", result);
 28
 29     // Demonstrate batch matrix multiplication
 30     println!("2. Batch Matrix Multiplication:");
 31     let batch_a = array![[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]].into_dyn();
 32     let batch_b = array![[[2.0, 0.0], [1.0, 1.0]], [[1.0, 2.0], [0.0, 1.0]]].into_dyn();
 33
 34     println!("Batch A shape: {:?}", batch_a.shape());
 35     println!("Batch B shape: {:?}", batch_b.shape());
 36
 37     let batch_result = ops.batch_matrix_multiply(&batch_a, &batch_b)?;
 38     println!("Batch result shape: {:?}", batch_result.shape());
 39     println!("Batch result:\n{:?}\n", batch_result);
 40
 41     // Demonstrate activation functions
 42     println!("3. Activation Functions:");
 43     let input = array![[-2.0, -1.0, 0.0, 1.0, 2.0]].into_dyn();
 44     println!("Input: {:?}", input);
 45
 46     let relu_output = ops.relu_forward(&input)?;
 47     println!("ReLU output: {:?}", relu_output);
 48
 49     let sigmoid_output = ops.sigmoid_forward(&input)?;
 50     println!("Sigmoid output: {:?}", sigmoid_output);
 51
 52     // Demonstrate ReLU backward pass
 53     let grad_output = array![[1.0, 1.0, 1.0, 1.0, 1.0]].into_dyn();
 54     let relu_grad = ops.relu_backward(&input, &grad_output)?;
 55     println!("ReLU gradient: {:?}\n", relu_grad);
 56
 57     // Demonstrate softmax
 58     println!("4. Softmax Activation:");
 59     let logits = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]].into_dyn();
 60     println!("Logits: {:?}", logits);
 61
 62     let softmax_output = ops.softmax_forward(&logits)?;
 63     println!("Softmax output: {:?}", softmax_output);
 64
 65     // Verify softmax properties (each row sums to 1)
 66     for (i, row) in softmax_output.axis_iter(ndarray::Axis(0)).enumerate() {
 67         let sum: f32 = row.sum();
 68         println!("Row {} sum: {:.6}", i, sum);
 69     }
 70     println!();
 71
 72     // Demonstrate batch normalization
 73     println!("5. Batch Normalization:");
 74     let batch_input = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]].into_dyn();
 75     let mean = array![3.0, 4.0]; // Per-channel mean
 76     let variance = array![2.0, 2.0]; // Per-channel variance
 77     let gamma = array![1.0, 1.0]; // Scale parameter
 78     let beta = array![0.0, 0.0]; // Shift parameter
 79
 80     println!("Input: {:?}", batch_input);
 81     println!("Mean: {:?}", mean);
 82     println!("Variance: {:?}", variance);
 83
 84     let normalized = ops.batch_normalize(&batch_input, &mean, &variance, &gamma, &beta, 1e-5)?;
 85     println!("Normalized output: {:?}\n", normalized);
 86
 87     // Demonstrate convolution
 88     println!("6. 2D Convolution:");
 89     let conv_input = array![[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]].into_dyn(); // Shape: (1, 1, 3, 3) - (batch, channels, height, width)
 90
 91     let kernel = array![[[[1.0, 0.0], [-1.0, 0.0]]]].into_dyn(); // Shape: (1, 1, 2, 2) - (out_channels, in_channels, kernel_h, kernel_w)
 92
 93     println!("Input shape: {:?}", conv_input.shape());
 94     println!("Kernel shape: {:?}", kernel.shape());
 95
 96     let conv_output = ops.conv2d_forward(&conv_input, &kernel, (1, 1), (0, 0))?;
 97     println!("Convolution output shape: {:?}", conv_output.shape());
 98     println!("Convolution output: {:?}\n", conv_output);
 99
100     // Demonstrate different backend preferences
101     println!("7. Backend Selection:");
102     let cpu_ops = create_neural_ops_with_backend("CPU")?;
103     println!("{}", cpu_ops.backend_info());
104
105     let gpu_ops = create_neural_ops_with_backend("GPU")?;
106     println!("{}", gpu_ops.backend_info());
107
108     let custom_ops = create_neural_ops_with_backend("Custom-Accelerator")?;
109     println!("{}\n", custom_ops.backend_info());
110
111     // Performance comparison example
112     println!("8. Performance Demonstration:");
113     demonstrate_performance_scaling()?;
114
115     println!("=== Demo Complete ===");
116     Ok(())
117 }
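When the preferred backend might not be available on a given machine, falling back to the automatically selected backend is one possible pattern. The sketch below rests on an assumption, not on behavior documented here: it presumes that create_neural_ops_with_backend returns an Err for an unavailable backend rather than silently falling back.

// Hypothetical fallback helper; assumes an unavailable backend surfaces as Err,
// which is an assumption rather than documented behavior.
fn ops_with_fallback(preferred: &str) -> Result<NeuralOps> {
    match create_neural_ops_with_backend(preferred) {
        Ok(ops) => Ok(ops),
        Err(_) => create_neural_ops(), // fall back to the default backend choice
    }
}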