eryon_actors/engine/neural/
features.rs

/*
    Appellation: features <module>
    Contrib: @FL03
*/
5
/// A collection of hyperparameters describing the dimensions of each
/// component of the neural engine. All fields are plain `usize` feature
/// counts; the `dim_*` methods on the impl combine them into layer shapes.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[cfg_attr(
    feature = "serde",
    derive(serde_derive::Deserialize, serde_derive::Serialize)
)]
pub struct NeuralFeatures {
    // the number of features of the attention layer
    pub(crate) attention: usize,
    // the number of features of the input layer
    pub(crate) inputs: usize,
    // the number of features of the hidden layer
    pub(crate) hidden: usize,
    // the number of hidden layers
    pub(crate) layers: usize,
    // the number of features of the read/write heads
    pub(crate) memory: usize,
    // the number of features of the output layer
    pub(crate) outputs: usize,
}
24
25impl NeuralFeatures {
26    pub fn new(
27        attention: usize,
28        memory: usize,
29        inputs: usize,
30        outputs: usize,
31        hidden: usize,
32        layers: usize,
33    ) -> Self {
34        Self {
35            attention,
36            memory,
37            inputs,
38            hidden,
39            layers,
40            outputs,
41        }
42    }
43    /// returns a copy of the attention size
44    pub const fn attention(&self) -> usize {
45        self.attention
46    }
47    /// returns a copy of the number of features within the read/write heads
48    pub const fn codex(&self) -> usize {
49        self.memory
50    }
51    /// returns a copy of the hidden size
52    pub const fn hidden(&self) -> usize {
53        self.hidden
54    }
55    /// returns a copy of the input size
56    pub const fn inputs(&self) -> usize {
57        self.inputs
58    }
59    /// returns a copy of the number of hidden layers
60    pub const fn layers(&self) -> usize {
61        self.layers
62    }
63    /// returns a copy of the output size
64    pub const fn outputs(&self) -> usize {
65        self.outputs
66    }
67    /// the shape of the attention layer (n_outputs, n_attention)
68    pub const fn dim_attention(&self) -> (usize, usize) {
69        // TODO: Consider if this should be (self.attention, self.hidden)
70        (self.outputs, self.attention)
71    }
72    /// the shape of the input layer (inputs, n_hidden)
73    pub const fn dim_input(&self) -> (usize, usize) {
74        (self.inputs, self.hidden)
75    }
76    /// the shape of the hidden layer (hidden, hidden)
77    pub const fn dim_hidden(&self) -> (usize, usize) {
78        (self.hidden, self.hidden)
79    }
80    /// the shape of the memory matrix (n_memory, n_attention)
81    pub const fn dim_memory(&self) -> (usize, usize) {
82        (self.memory, self.attention)
83    }
84    /// the shape of the output layer (n_hidden, n_out)
85    pub const fn dim_output(&self) -> (usize, usize) {
86        (self.hidden, self.outputs)
87    }
88    /// set the number of features for attention
89    pub fn set_attention(&mut self, attention: usize) {
90        self.attention = attention;
91    }
92    /// set the number of features for the read/write heads
93    pub fn set_codex(&mut self, codex: usize) {
94        self.memory = codex;
95    }
96    /// set the number of features for then input layer
97    pub fn set_inputs(&mut self, inputs: usize) {
98        self.inputs = inputs;
99    }
100    /// set the number of features for the hidden layer
101    pub fn set_hidden(&mut self, hidden: usize) {
102        self.hidden = hidden;
103    }
104    /// set the number of features for the output layer
105    pub fn set_outputs(&mut self, outputs: usize) {
106        self.outputs = outputs;
107    }
108    /// consumes the current instance; returns another with the given number of features for
109    /// attention
110    pub fn with_attention(self, attention: usize) -> Self {
111        Self { attention, ..self }
112    }
113    /// consumes the current instance; returns another with the given number of features for the
114    /// read/write heads
115    pub fn with_codex(self, codex: usize) -> Self {
116        Self {
117            memory: codex,
118            ..self
119        }
120    }
121    /// consumes the current instance; returns another with the given number of input features
122    pub fn with_inputs(self, inputs: usize) -> Self {
123        Self { inputs, ..self }
124    }
125    /// consumes the current instance; returns another with the given number of hidden features
126    pub fn with_hidden(self, hidden: usize) -> Self {
127        Self { hidden, ..self }
128    }
129    /// consumes the current instance; returns another with the given number of output features
130    pub fn with_outputs(self, outputs: usize) -> Self {
131        Self { outputs, ..self }
132    }
133}
134
135impl Default for NeuralFeatures {
136    fn default() -> Self {
137        Self {
138            attention: 20,
139            memory: 128,
140            inputs: 5, // (alphabet (3) + states (2))
141            hidden: 50,
142            layers: 1,
143            outputs: 10,
144        }
145    }
146}