executorch/
lib.rs

1#![cfg_attr(deny_warnings, deny(warnings))]
2// some new clippy::lint annotations are supported in latest Rust but not recognized by older versions
3#![cfg_attr(deny_warnings, allow(unknown_lints))]
4#![cfg_attr(deny_warnings, deny(missing_docs))]
5#![cfg_attr(docsrs, feature(doc_cfg))]
6
7//! Bindings for ExecuTorch - On-device AI across mobile, embedded and edge for PyTorch.
8//!
9//! Provides a high-level Rust API for executing PyTorch models on mobile, embedded and edge devices using the
10//! [ExecuTorch library](https://pytorch.org/executorch-overview), specifically the
11//! [C++ API](https://github.com/pytorch/executorch).
12//! PyTorch models are created and exported in Python, and then loaded and executed on-device using the
13//! ExecuTorch library.
14//!
//! The following example creates a simple model in Python, exports it, and then executes it in Rust:
16//!
17//! Create a model in Python and export it:
18//! ```ignore
19//! import torch
20//! from torch.export import export
21//! from executorch.exir import to_edge_transform_and_lower
22//!
23//! class Add(torch.nn.Module):
24//!     def __init__(self):
25//!         super(Add, self).__init__()
26//!
27//!     def forward(self, x: torch.Tensor, y: torch.Tensor):
28//!         return x + y
29//!
30//!
31//! model = Add()
32//! exported_program = export(model, (torch.ones(1), torch.ones(1)))
33//! executorch_program = to_edge_transform_and_lower(exported_program).to_executorch()
34//! with open("model.pte", "wb") as file:
35//!     file.write(executorch_program.buffer)
36//! ```
37//!
38//! Execute the model in Rust:
39//! ```rust,ignore
40//! use executorch::evalue::{EValue, IntoEValue};
41//! use executorch::module::Module;
42//! use executorch::tensor_ptr;
43//! use ndarray::array;
44//!
45//! let mut module = Module::from_file_path("model.pte");
46//!
47//! let (tensor1, tensor2) = (tensor_ptr![1.0_f32], tensor_ptr![1.0_f32]);
48//! let inputs = [tensor1.into_evalue(), tensor2.into_evalue()];
49//!
50//! let outputs = module.forward(&inputs).unwrap();
51//! let [output]: [EValue; 1] = outputs.try_into().expect("not a single output");
52//! let output = output.as_tensor().into_typed::<f32>();
53//!
54//! println!("Output tensor computed: {:?}", output);
55//! assert_eq!(array![2.0], output.as_array());
56//! ```
57//!
58//! ## Cargo Features
59//! - `data-loader`:
60//!   Includes additional structs in the [`data_loader`] module for loading data. Without this feature the only
61//!   available data loader is [`BufferDataLoader`](data_loader::BufferDataLoader). The `libextension_data_loader.a` static library is
62//!   required, compile C++ `executorch` with `EXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON`.
63//! - `module`:
64//!   Includes the [`module`] API, a high-level API for loading and executing PyTorch models. It is an alternative to
65//!   the lower-level [`Program`](crate::program::Program) API, which is more suitable for embedded systems.
66//!   The `libextension_module_static.a` static library is required, compile C++ `executorch` with
67//!   `EXECUTORCH_BUILD_EXTENSION_MODULE=ON`.
68//!   Also includes the `std`, `data-loader` and `flat-tensor` features.
69//! - `tensor-ptr`:
70//!   Includes the [`tensor::TensorPtr`] struct, a smart pointer for tensors that manage the lifetime of the tensor
//!   object alongside the lifetimes of the data buffer and additional metadata. The `libextension_tensor.a`
72//!   static library is required, compile C++ `executorch` with `EXECUTORCH_BUILD_EXTENSION_TENSOR=ON`.
73//!   Also includes the `std` feature.
74//! - `flat-tensor`:
75//!   Includes the [`data_map::FlatTensorDataMap`] struct that can read `.ptd` files with external tensors for models.
76//!   The `libextension_flat_tensor.a` static library is required,
77//!   compile C++ `executorch` with `EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON`.
78//! - `etdump`:
79//!   Includes the [`event_tracer::ETDumpGen`] struct, an implementation of an `EventTracer`, used for debugging and profiling.
80//!   The `libetdump.a` static library is required, compile C++ `executorch` with `EXECUTORCH_BUILD_DEVTOOLS=ON` and
81//!   `EXECUTORCH_ENABLE_EVENT_TRACER=ON`.
82//!   In addition, the `flatcc` (or `flatcc_d`) library is required, available at `{CMAKE_DIR}/third-party/flatcc_ep/lib/`,
83//!   and should be linked by the user.
84//! - `ndarray`:
85//!   Conversions between `executorch` tensors and `ndarray` arrays.
86//!   Adds a dependency to the `ndarray` crate.
87//!   This feature is enabled by default.
88//! - `f16`:
89//!   Adds a dependency to the `half` crate, which provides a fully capable `f16` and `bf16` types.
//!   Without this feature enabled, both of these types are available with simple conversions to/from `u16` only.
//!   Note that this only affects input/output tensors; the internal computations always have the capability to operate on such scalars.
92//! - `num-complex`:
93//!   Adds a dependency to the `num-complex` crate, which provides a fully capable complex number type.
94//!   Without this feature enabled, complex numbers are available as a simple struct with two public fields without any operations.
//!   Note that this only affects input/output tensors; the internal computations always have the capability to operate on such scalars.
96//! - `std`:
97//!   Enable the standard library. This feature is enabled by default, but can be disabled to build [`executorch`](crate)
98//!   in a `no_std` environment.
99//!   See the `examples/no_std` example.
100//!   Also includes the `alloc` feature.
101//!   NOTE: no_std is still WIP, see <https://github.com/pytorch/executorch/issues/4561>
102//! - `alloc`:
103//!   Enable allocations.
104//!   When this feature is disabled, all methods that require allocations will not be compiled.
105//!   This feature is enabled by the `std` feature, which is enabled by default.
//!   It is possible to enable this feature without the `std` feature, and the allocations will be done using the
107//!   [`alloc`](https://doc.rust-lang.org/alloc/) crate, that requires a global allocator to be set.
108//!
109//! By default the `std` and `ndarray` features are enabled.
110//!
111//! ## Build
112//! To use the library you must compile the C++ executorch library yourself, as there are many configurations that
//! determine which modules, backends, and operations are supported. See the `executorch-sys` crate for more info.
//! Currently the supported Cpp executorch version is `1.0.1` (or `1.0.0`).
115//!
116//! ## Embedded Systems
117//! The library is designed to be used both in `std` and `no_std` environments. The `no_std` environment is useful for
118//! embedded systems, where the standard library is not available. The `alloc` feature can be used to provide an
119//! alternative to the standard library's allocator, but it is possible to use the library without allocations at all.
//! Due to some differences between Cpp and Rust, it is not trivial to provide such an API, and the interface may feel
121//! more verbose. See the `memory::Storage` struct for stack allocations of Cpp objects, and the `examples/no_std`
122//! example.
123
#![cfg_attr(not(feature = "std"), no_std)]

// In `no_std` builds, alias the `core` crate as `std` so the rest of the crate
// can use `std::`-prefixed paths unconditionally (only the subset of `std`
// that also exists in `core` is actually available in that configuration).
#[cfg(not(feature = "std"))]
extern crate core as std;
128
/// Hidden module re-exporting allocation types for crate-internal use,
/// resolved correctly whether or not the `std` feature is enabled.
#[doc(hidden)]
pub mod __private {
    // `Box`/`Vec` taken from the `alloc` crate when `std` is disabled.
    #[cfg(not(feature = "std"))]
    pub mod alloc {
        extern crate alloc;
        pub use alloc::{boxed::Box, vec::Vec};
    }

    // `Box`/`Vec` taken from the standard library when `std` is enabled.
    #[cfg(feature = "std")]
    pub mod alloc {
        pub use std::{boxed::Box, vec::Vec};
    }
}
143
// NOTE(review): presumably referenced by macro-generated / feature-gated code,
// so the import may be unused under some feature combinations — hence the
// `allow`. Verify against the macros in the `private` module.
#[allow(unused_imports)]
use crate::__private::alloc;

/// The version of the crate.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// The version of the ExecuTorch C++ library that this crate is compatible and linked with.
///
/// Forwarded from the `executorch-sys` crate, which is the crate actually
/// linked against the C++ libraries.
pub const EXECUTORCH_CPP_VERSION: &str = executorch_sys::EXECUTORCH_CPP_VERSION;
152
// Declared first so that macros defined in `private` are visible (via
// `#[macro_use]`) to the modules declared below it.
#[macro_use]
mod private;
pub mod data_loader;
pub mod data_map;
mod error;
pub mod evalue;
pub mod event_tracer;
pub(crate) mod log;
pub mod memory;
#[cfg(feature = "module")]
pub mod module;
pub mod platform;
pub mod program;
pub mod scalar;
pub mod tensor;
pub mod util;

// The error type is public API; the `Result` alias stays crate-internal.
pub use error::Error;
pub(crate) use error::Result;

// Re-export optional dependencies so downstream users can name the exact
// crate versions this library was built against.
#[cfg(feature = "ndarray")]
pub use ndarray;

#[cfg(feature = "half")]
pub use half;

#[cfg(feature = "num-complex")]
pub use num_complex;

#[cfg(test)]
mod tests;