pub struct Model {
pub model: NonNull<ANeuralNetworksModel>,
}
Fields
model: NonNull<ANeuralNetworksModel>
Implementations
impl Model
pub fn new() -> Result<Self>
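A model created with new() starts without operands; they are appended one at a time with add_operand (below), which from_operands batches for convenience. The following is a minimal sketch of the manual path, building the same graph as the scraped example under from_operands. The import paths are an assumption (the items appear unqualified in examples/add_arrays.rs); adjust them to wherever the crate exports these types.

// Manual construction sketch; equivalent to the from_operands example below.
use nnapi::{Model, Operand, OperandCode, OperationCode};

fn build_add_model() -> nnapi::Result<Model> {
    let mut model = Model::new()?;

    // Operand indices are assigned in insertion order: 0, 1, 2, 3.
    let tensor9x = Operand::tensor(OperandCode::ANEURALNETWORKS_TENSOR_FLOAT32, vec![9], 0., 0);
    model.add_operand(&tensor9x)?;              // 0: first input
    model.add_operand(&tensor9x)?;              // 1: second input
    model.add_operand(&Operand::activation())?; // 2: fused-activation scalar
    model.add_operand(&tensor9x)?;              // 3: output

    model.set_activation_operand_value(2)?;
    model.add_operation(OperationCode::ANEURALNETWORKS_ADD, &[0, 1, 2], &[3])?;
    model.identify_inputs_and_outputs(&[0, 1], &[3])?;
    model.finish()?;
    Ok(model)
}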
pub fn from_operands(
    operands: impl IntoIterator<Item = Operand>,
) -> Result<Self>
Examples found in repository
examples/add_arrays.rs (lines 7-12)
fn main() -> nnapi::Result<()> {
    let tensor9x_type = Operand::tensor(OperandCode::ANEURALNETWORKS_TENSOR_FLOAT32, vec![9], 0., 0);

    let mut model = Model::from_operands([
        tensor9x_type.clone(),
        tensor9x_type.clone(),
        Operand::activation(),
        tensor9x_type,
    ])?;

    model.set_activation_operand_value(2)?;
    model.add_operation(OperationCode::ANEURALNETWORKS_ADD, &[0, 1, 2], &[3])?;
    model.identify_inputs_and_outputs(&[0, 1], &[3])?;

    model.finish()?;

    let mut compilation = model.compile()?;
    compilation.finish()?;
    let mut execution = compilation.create_execution()?;

    // Mind the element type: untyped float literals default to f64, but these tensors are f32.
    execution.set_input(0, &[1f32; 9])?;
    execution.set_input(1, &[2f32; 9])?;

    let mut output = [0f32; 9];
    execution.set_output(0, &mut output)?;

    let mut end_event = execution.compute()?;
    end_event.wait()?;

    assert_eq!(output, [3f32; 9]);

    Ok(())
}
pub fn add_operand(&mut self, operand: &Operand) -> Result<()>
pub fn set_activation_operand_value(
    &mut self,
    activation_idx: i32,
) -> Result<()>
Examples found in repository
examples/add_arrays.rs (line 14); see the full listing under from_operands above.
pub fn add_operation(
    &mut self,
    operation: OperationCode,
    inputs: &[u32],
    outputs: &[u32],
) -> Result<()>
Examples found in repository
examples/add_arrays.rs (line 15); see the full listing under from_operands above.
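The inputs and outputs slices hold operand indices, so a multi-operation graph is wired by letting one operation's output operand appear among another operation's inputs. A sketch chaining two ADDs through an intermediate operand, under the same import assumptions as the sketch near new():

use nnapi::{Model, Operand, OperandCode, OperationCode};

fn build_double_add() -> nnapi::Result<Model> {
    let t = Operand::tensor(OperandCode::ANEURALNETWORKS_TENSOR_FLOAT32, vec![9], 0., 0);
    let mut model = Model::from_operands([
        t.clone(),             // 0: input a
        t.clone(),             // 1: input b
        Operand::activation(), // 2: fused-activation scalar, shared by both ADDs
        t.clone(),             // 3: intermediate result a + b
        t,                     // 4: final output (a + b) + b
    ])?;
    model.set_activation_operand_value(2)?;

    // Operand 3 is an output of the first operation and an input of the second.
    model.add_operation(OperationCode::ANEURALNETWORKS_ADD, &[0, 1, 2], &[3])?;
    model.add_operation(OperationCode::ANEURALNETWORKS_ADD, &[3, 1, 2], &[4])?;

    // Only operands 0 and 1 are model inputs; intermediate operand 3 stays internal.
    model.identify_inputs_and_outputs(&[0, 1], &[4])?;
    model.finish()?;
    Ok(model)
}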
pub fn identify_inputs_and_outputs(
    &mut self,
    inputs: &[u32],
    outputs: &[u32],
) -> Result<()>
Examples found in repository
examples/add_arrays.rs (line 16); see the full listing under from_operands above.
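Two index spaces are involved here: the slices passed to this method hold model operand indices, while set_input and set_output on the later Execution take the position of an argument within these slices, following NNAPI's ANeuralNetworksExecution_setInput convention. That the wrapper forwards these indices unchanged is an assumption, though it matches the set_input(0, ..)/set_input(1, ..) calls in the repository example. A compact sketch annotating both index spaces, under the same import assumptions as above:

use nnapi::{Model, Operand, OperandCode, OperationCode};

fn run_add() -> nnapi::Result<()> {
    let t = Operand::tensor(OperandCode::ANEURALNETWORKS_TENSOR_FLOAT32, vec![2], 0., 0);
    let mut model = Model::from_operands([t.clone(), t.clone(), Operand::activation(), t])?;
    model.set_activation_operand_value(2)?;
    model.add_operation(OperationCode::ANEURALNETWORKS_ADD, &[0, 1, 2], &[3])?;

    // Model side: operand indices 0 and 1 are inputs, operand 3 is the output.
    model.identify_inputs_and_outputs(&[0, 1], &[3])?;
    model.finish()?;

    let mut compilation = model.compile()?;
    compilation.finish()?;
    let mut execution = compilation.create_execution()?;

    // Execution side: indices are positions in the slices above, not operand indices.
    execution.set_input(0, &[10f32; 2])?;  // inputs[0]  -> operand 0
    execution.set_input(1, &[1f32; 2])?;   // inputs[1]  -> operand 1
    let mut out = [0f32; 2];
    execution.set_output(0, &mut out)?;    // outputs[0] -> operand 3

    let mut end_event = execution.compute()?;
    end_event.wait()?;
    assert_eq!(out, [11f32; 2]);
    Ok(())
}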
pub fn finish(&mut self) -> Result<()>
Examples found in repository
examples/add_arrays.rs (line 18); see the full listing under from_operands above.
pub fn compile(&mut self) -> Result<Compilation>
Examples found in repository
examples/add_arrays.rs (line 20); see the full listing under from_operands above.
Trait Implementations
Auto Trait Implementations
impl Freeze for Model
impl RefUnwindSafe for Model
impl !Send for Model
impl !Sync for Model
impl Unpin for Model
impl UnwindSafe for Model
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.