feedforward_neural_net/feedforward_neural_net.rs
//! Construct a small feedforward-style network by building layers in a loop.
//!
//! This is not a full neural-network framework, but it shows how larger graphs can be
//! assembled programmatically rather than node-by-node.
//!
//! Layout:
//! - input layer:  3 nodes
//! - hidden layer: 2 nodes
//! - hidden layer: 2 nodes
//! - output layer: 2 nodes
//!
//! Each node in the next layer is computed as:
//!   relu(add(add(prev[0], prev[1]), prev[2], ...))
//!
//! All nodes use the same tensor shape so that the example can be expressed using
//! only `Add` and `ReLU`.
//!
//! For kernel authors, this example is the most useful one for understanding scale:
//! - kernels remain small and local,
//! - graph construction can be arbitrarily rich,
//! - executor behavior is unchanged regardless of graph size.
//!
//! This example also shows an important architectural boundary:
//!
//! Graph construction:
//! - decides how many nodes exist,
//! - decides which ops connect which dependencies,
//! - enforces shape / structural constraints,
//! - decides which nodes are final outputs.
//!
//! Executor:
//! - validates runtime input bindings,
//! - computes nodes in dependency order,
//! - looks up the registered kernel for each op kind,
//! - stores intermediate results for later nodes.
//!
//! Kernel:
//! - computes exactly one node,
//! - reads already-computed input tensors,
//! - returns the output tensor or a kernel-level error.
//!
//! That separation is what allows users to design custom kernels without needing
//! to reimplement graph traversal or scheduling.
44use tensor_forge::{Executor, Graph, KernelRegistry, NodeId, Tensor};
45
46fn main() {
47    let mut graph = Graph::new();
48
49    let input_width = 3;
50    let hidden_widths = [2, 2];
51    let output_width = 2;
52    let shape = vec![1, 4];
53
54    // Create the input layer.
55    //
56    // Each input node declares that execution must provide a [1, 4] tensor for it.
57    let mut current_layer: Vec<NodeId> = (0..input_width)
58        .map(|_| graph.input_node(shape.clone()))
59        .collect();
60
61    let input_ids = current_layer.clone();
62
63    // Build hidden layers iteratively.
64    //
65    // This loop is only in graph construction. The resulting graph is still a
66    // feedforward DAG with no cycles.
67    for &layer_width in &hidden_widths {
68        let mut next_layer = Vec::with_capacity(layer_width);
69
70        for _ in 0..layer_width {
71            // Start accumulation with the first node of the current layer.
72            let mut acc = current_layer[0];
73
74            // Repeatedly add the remaining nodes from the current layer.
75            //
76            // Each `graph.add(...)` creates a new intermediate node.
77            for &node in &current_layer[1..] {
78                acc = graph
79                    .add(acc, node)
80                    .expect("Adding nodes in a layer should succeed");
81            }
82
83            // Apply a nonlinearity at the end of the layer computation.
84            let out = graph
85                .relu(acc)
86                .expect("Applying ReLU after accumulation should succeed");
87
88            next_layer.push(out);
89        }
90
91        current_layer = next_layer;
92    }
93
94    // Build the output layer the same way.
95    let mut output_ids = Vec::with_capacity(output_width);
96
97    for _ in 0..output_width {
98        let mut acc = current_layer[0];
99
100        for &node in &current_layer[1..] {
101            acc = graph
102                .add(acc, node)
103                .expect("Adding nodes in the output layer should succeed");
104        }
105
106        let out = graph
107            .relu(acc)
108            .expect("Applying ReLU in the output layer should succeed");
109
110        graph
111            .set_output_node(out)
112            .expect("Setting output node should succeed");
113
114        output_ids.push(out);
115    }
116
117    // Bind concrete input tensors.
118    //
119    // As in the smaller examples, bindings are keyed by input-node `NodeId`.
120    // The graph can be re-used with different runtime values.
121    let bindings = vec![
122        (
123            input_ids[0],
124            Tensor::from_vec(vec![1, 4], vec![1.0, -2.0, 3.0, -4.0])
125                .expect("Tensor construction should succeed"),
126        ),
127        (
128            input_ids[1],
129            Tensor::from_vec(vec![1, 4], vec![0.5, 1.5, -2.5, 3.5])
130                .expect("Tensor construction should succeed"),
131        ),
132        (
133            input_ids[2],
134            Tensor::from_vec(vec![1, 4], vec![10.0, -20.0, 30.0, -40.0])
135                .expect("Tensor construction should succeed"),
136        ),
137    ];
138
139    let exec = Executor::new(KernelRegistry::default());
140    let outputs = exec
141        .execute(&graph, bindings)
142        .expect("Execution should succeed");
143
144    for out in output_ids {
145        let tensor = outputs
146            .get(&out)
147            .expect("Declared output should be present in executor results");
148
149        println!("Computed output for node {:?}: {:?}", out, tensor);
150    }
151}