executorch/lib.rs
1#![cfg_attr(deny_warnings, deny(warnings))]
2// some new clippy::lint annotations are supported in latest Rust but not recognized by older versions
3#![cfg_attr(deny_warnings, allow(unknown_lints))]
4#![cfg_attr(deny_warnings, deny(missing_docs))]
5#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
6
7//! Bindings for ExecuTorch - On-device AI across mobile, embedded and edge for PyTorch.
8//!
9//! Provides a high-level Rust API for executing PyTorch models on mobile, embedded and edge devices using the
10//! [ExecuTorch library](https://pytorch.org/executorch-overview), specifically the
11//! [C++ API](https://github.com/pytorch/executorch).
12//! PyTorch models are created and exported in Python, and then loaded and executed on-device using the
13//! ExecuTorch library.
14//!
//! The following example creates a simple model in Python, exports it, and then executes it in Rust:
16//!
17//! Create a model in Python and export it:
//! ```ignore
//! import torch
//! from executorch.exir import to_edge_transform_and_lower
//! from torch.export import export
//!
//! class Add(torch.nn.Module):
//!     def __init__(self):
//!         super(Add, self).__init__()
//!
//!     def forward(self, x: torch.Tensor, y: torch.Tensor):
//!         return x + y
//!
//!
//! model = Add()
//! exported_program = export(model, (torch.ones(1), torch.ones(1)))
//! executorch_program = to_edge_transform_and_lower(exported_program).to_executorch()
//! with open("model.pte", "wb") as file:
//!     file.write(executorch_program.buffer)
//! ```
37//!
38//! Execute the model in Rust:
39//! ```rust,ignore
40//! use executorch::evalue::{EValue, IntoEValue};
41//! use executorch::module::Module;
42//! use executorch::tensor_ptr;
43//! use ndarray::array;
44//!
45//! let mut module = Module::from_file_path("model.pte");
46//!
47//! let (tensor1, tensor2) = (tensor_ptr![1.0_f32], tensor_ptr![1.0_f32]);
48//! let inputs = [tensor1.into_evalue(), tensor2.into_evalue()];
49//!
50//! let outputs = module.forward(&inputs).unwrap();
51//! let [output]: [EValue; 1] = outputs.try_into().expect("not a single tensor");
52//! let output = output.as_tensor().into_typed::<f32>();
53//!
54//! println!("Output tensor computed: {:?}", output);
55//! assert_eq!(array![2.0], output.as_array());
56//! ```
57//!
58//! ## Cargo Features
59//! - `data-loader`:
60//! Includes additional structs in the [`data_loader`] module for loading data. Without this feature the only
//! available data loader is `BufferDataLoader`. The `libextension_data_loader.a` static library is
62//! required, compile C++ `executorch` with `EXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON`.
63//! - `module`:
64//! Includes the [`module`] API, a high-level API for loading and executing PyTorch models. It is an alternative to
65//! the lower-level [`Program`](crate::program::Program) API, which is more suitable for embedded systems.
66//! The `libextension_module_static.a` static library is required, compile C++ `executorch` with
67//! `EXECUTORCH_BUILD_EXTENSION_MODULE=ON`. Also includes the `std` feature.
68//! - `tensor-ptr`:
69//! Includes the [`tensor::TensorPtr`] struct, a smart pointer for tensors that manage the lifetime of the tensor
70//! object alongside the lifetimes of the data buffer and additional metadata. The `extension_tensor.a`
71//! static library is required, compile C++ `executorch` with `EXECUTORCH_BUILD_EXTENSION_TENSOR=ON`.
72//! Also includes the `std` feature.
//! - `etdump`:
74//! Includes the `ETDumpGen` struct, an implementation of an `EventTracer`, used for debugging and profiling.
75//! The `libetdump.a` static library is required, compile C++ `executorch` with `EXECUTORCH_BUILD_DEVTOOLS=ON` and
76//! `EXECUTORCH_ENABLE_EVENT_TRACER=ON`.
77//! In addition, the `flatcc` (or `flatcc_d`) library is required, available at `{CPP_EXECUTORCH_DIR}/third-party/flatcc/lib/`,
78//! and should be linked by the user.
79//! - `ndarray`:
80//! Conversions between `executorch` tensors and `ndarray` arrays.
81//! Adds a dependency to the `ndarray` crate.
82//! This feature is enabled by default.
83//! - `f16`:
//! Adds a dependency to the `half` crate, which provides fully capable `f16` and `bf16` types.
//! Without this feature enabled, both of these types are available with simple conversions to/from `u16` only.
//! Note that this only affects input/output tensors; the internal computations always have the capability to operate on such scalars.
87//! - `num-complex`:
88//! Adds a dependency to the `num-complex` crate, which provides a fully capable complex number type.
89//! Without this feature enabled, complex numbers are available as a simple struct with two public fields without any operations.
//! Note that this only affects input/output tensors; the internal computations always have the capability to operate on such scalars.
91//! - `std`:
92//! Enable the standard library. This feature is enabled by default, but can be disabled to build [`executorch`](crate)
93//! in a `no_std` environment.
94//! See the `examples/no_std` example.
95//! Also includes the `alloc` feature.
96//! NOTE: no_std is still WIP, see <https://github.com/pytorch/executorch/issues/4561>
97//! - `alloc`:
98//! Enable allocations.
99//! When this feature is disabled, all methods that require allocations will not be compiled.
100//! This feature is enabled by the `std` feature, which is enabled by default.
//! It's possible to enable this feature without the `std` feature, and the allocations will be done using the
102//! [`alloc`](https://doc.rust-lang.org/alloc/) crate, that requires a global allocator to be set.
103//!
104//! By default the `std` and `ndarray` features are enabled.
105//!
106//! ## Build
107//! To use the library you must compile the C++ executorch library yourself, as there are many configurations that
//! determine which modules, backends, and operations are supported. See the `executorch-sys` crate for more info.
109//!
110//! ## Embedded Systems
111//! The library is designed to be used both in `std` and `no_std` environments. The `no_std` environment is useful for
112//! embedded systems, where the standard library is not available. The `alloc` feature can be used to provide an
113//! alternative to the standard library's allocator, but it is possible to use the library without allocations at all.
//! Due to some differences between C++ and Rust, it is not trivial to provide such an API, and the interface may feel
//! more verbose. See the `memory::Storage` struct for stack allocations of C++ objects, and the `examples/no_std`
116//! example.
117//!
118//! ## API Stability
119//! The C++ API is still in Beta, and this Rust lib will continue to change with it. Currently the supported
120//! executorch version is `0.6.0`.
121
122#![cfg_attr(not(feature = "std"), no_std)]
123
124#[cfg(not(feature = "std"))]
125extern crate core as std;
126
#[doc(hidden)]
pub mod __private {
    //! Internal re-exports used by this crate's macros.
    //!
    //! Not part of the public API; the module exposes `Box` and `Vec` under a
    //! single `alloc` path regardless of whether the `std` feature is enabled.

    /// Allocation types sourced from the standard library (`std` builds).
    #[cfg(feature = "std")]
    pub mod alloc {
        pub use std::{boxed::Box, vec::Vec};
    }

    /// Allocation types sourced from the `alloc` crate (`no_std` builds).
    #[cfg(not(feature = "std"))]
    pub mod alloc {
        extern crate alloc;
        pub use alloc::{boxed::Box, vec::Vec};
    }
}
141
142#[allow(unused_imports)]
143use crate::__private::alloc;
144
/// The version of this crate, taken from `Cargo.toml` (`CARGO_PKG_VERSION`) at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// The version of the ExecuTorch C++ library that this crate is compatible and linked with.
///
/// Re-exported from the `executorch-sys` crate.
pub const EXECUTORCH_CPP_VERSION: &str = executorch_sys::EXECUTORCH_CPP_VERSION;
150
151#[macro_use]
152mod private;
153pub mod data_loader;
154mod error;
155pub mod evalue;
156pub mod event_tracer;
157pub mod memory;
158#[cfg(feature = "module")]
159pub mod module;
160pub mod platform;
161pub mod program;
162pub mod scalar;
163pub mod tensor;
164pub mod util;
165
166pub(crate) use error::Result;
167pub use error::{CError, Error};
168
169#[cfg(feature = "ndarray")]
170pub use ndarray;
171
172#[cfg(feature = "half")]
173pub use half;
174
175#[cfg(feature = "num-complex")]
176pub use num_complex;
177
178#[cfg(test)]
179mod tests;