ultralytics_inference/lib.rs
1// Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
3#![allow(clippy::multiple_crate_versions)]
4#![cfg_attr(docsrs, feature(doc_cfg))]
5
6//! # Ultralytics YOLO Inference Library
7//!
//! [crates.io](https://crates.io/crates/ultralytics-inference)
//! [docs.rs](https://docs.rs/ultralytics-inference)
//! [AGPL-3.0 License](https://github.com/ultralytics/inference/blob/main/LICENSE)
11//!
12//! High-performance YOLO model inference library written in Rust, providing a safe
13//! and efficient interface for running [Ultralytics](https://ultralytics.com) YOLO
14//! models on images, videos, and streams.
15//!
16//! ## Features
17//!
18//! - **High Performance** - Pure Rust with zero-cost abstractions and SIMD-optimized preprocessing
19//! - **ONNX Runtime** - Leverages ONNX Runtime for cross-platform hardware acceleration
20//! - **Supported YOLO Versions** - `YOLO11` and `YOLO26` (including YOLO26 end-to-end NMS-free exports)
21//! - **All Tasks** - Detection, segmentation, pose estimation, classification, and OBB
22//! - **Ultralytics API** - Results API matches the Python package for easy migration
23//! - **Multiple Backends** - CPU, CUDA, `TensorRT`, `CoreML`, `OpenVINO`, and more
24//! - **Multiple Sources** - Images, directories, glob patterns, video, webcam, streams
25//!
26//! ## Installation
27//!
28//! Add to your `Cargo.toml`:
29//!
30#![doc = concat!("```toml\n[dependencies]\nultralytics-inference = \"", env!("CARGO_PKG_VERSION"), "\"\n```")]
31//!
32//! Or install the CLI tool:
33//!
34//! ```bash
35//! cargo install ultralytics-inference
36//! ```
37//!
38//! ## Quick Start (Library)
39//!
40//! ```no_run
41//! use ultralytics_inference::{YOLOModel, InferenceConfig};
42//!
43//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Load model - metadata (classes, task, imgsz) is read automatically
//!     let mut model = YOLOModel::load("yolo26n.onnx")?;
//!
//!     // Run inference
//!     let results = model.predict("image.jpg")?;
//!
//!     // Process results
//!     for result in &results {
//!         if let Some(ref boxes) = result.boxes {
//!             println!("Found {} detections", boxes.len());
//!             for i in 0..boxes.len() {
//!                 let cls = boxes.cls()[i] as usize;
//!                 let conf = boxes.conf()[i];
//!                 let name = result.names.get(&cls).map(|s| s.as_str()).unwrap_or("unknown");
//!                 println!(" {} {:.2}", name, conf);
//!             }
//!         }
//!     }
//!
//!     Ok(())
64//! }
65//! ```
66//!
67//! ## CLI Usage
68//!
69//! The `ultralytics-inference` CLI provides a command-line interface for running YOLO inference:
70//!
71//! ```bash
72//! # Install the CLI
73//! cargo install ultralytics-inference
74//!
75//! # Run with defaults (auto-downloads model and sample images)
76//! ultralytics-inference predict
77//!
78//! # Select task — auto-downloads the matching nano model
79//! ultralytics-inference predict --task segment
80//! ultralytics-inference predict --task pose
81//! ultralytics-inference predict --task obb
82//! ultralytics-inference predict --task classify
83//!
84//! # Run on a specific image
85//! ultralytics-inference predict --model yolo26n.onnx --source image.jpg
86//!
87//! # Run on a directory of images
88//! ultralytics-inference predict --model yolo26n.onnx --source images/
89//!
90//! # With custom thresholds
91//! ultralytics-inference predict -m yolo26n.onnx -s image.jpg --conf 0.5 --iou 0.7
92//!
93//! # Filter by class IDs
94//! ultralytics-inference predict --source image.jpg --classes "0,1,2"
95//!
96//! # With visualization window
97//! ultralytics-inference predict --model yolo26n.onnx --source video.mp4 --show
98//!
99//! # Save annotated results
100//! ultralytics-inference predict --model yolo26n.onnx --source image.jpg --save
101//!
102//! # Save individual frames for video input
103//! ultralytics-inference predict --source video.mp4 --save-frames
104//!
105//! # Show help
106//! ultralytics-inference help
107//!
108//! # Show version
109//! ultralytics-inference version
110//! ```
111//!
112//! **CLI Options:**
113//!
114//! | Option | Short | Description | Default |
115//! |--------|-------|-------------|---------|
116//! | `--model` | `-m` | Path to ONNX model file; auto-downloaded if a known YOLO11/YOLO26 name | `yolo26n.onnx` |
117//! | `--task` | | Task type (`detect`, `segment`, `pose`, `obb`, `classify`); selects nano model when `--model` is omitted | `detect` |
118//! | `--source` | `-s` | Input source (image, directory, glob, video, webcam index, or URL) | Task-dependent sample assets |
119//! | `--conf` | | Confidence threshold | `0.25` |
120//! | `--iou` | | `IoU` threshold for NMS | `0.7` |
121//! | `--max-det` | | Maximum number of detections | `300` |
122//! | `--imgsz` | | Inference image size | Model metadata |
123//! | `--rect` | | Enable rectangular inference (minimal padding) | `true` |
124//! | `--batch` | | Batch size for inference | `1` |
125//! | `--half` | | Use FP16 half-precision inference | `false` |
126//! | `--save` | | Save annotated results to runs/\<task\>/predict | `true` |
127//! | `--save-frames` | | Save individual frames for video input | `false` |
128//! | `--show` | | Display results in a window | `false` |
129//! | `--device` | | Device (cpu, cuda:0, mps, coreml, directml:0, openvino, tensorrt:0, xnnpack) | `cpu` |
130//! | `--verbose` | | Show verbose output | `true` |
131//! | `--classes` | | Filter by class IDs, e.g. `0` or `"0,1,2"` or `"[0, 1, 2]"` | all classes |
132//!
133//! ## Task-Specific Examples
134//!
135//! The library supports all YOLO tasks. Export models from Python:
136//!
137//! ```bash
138//! # Detection (default)
139//! yolo export model=yolo26n.pt format=onnx
140//!
141//! # Segmentation
142//! yolo export model=yolo26n-seg.pt format=onnx
143//!
144//! # Pose Estimation
145//! yolo export model=yolo26n-pose.pt format=onnx
146//!
147//! # Classification
148//! yolo export model=yolo26n-cls.pt format=onnx
149//!
150//! # Oriented Bounding Boxes
151//! yolo export model=yolo26n-obb.pt format=onnx
152//! ```
153//!
154//! The task is auto-detected from ONNX metadata:
155//!
156//! ```no_run
157//! use ultralytics_inference::YOLOModel;
158//!
159//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
160//! // Segmentation model - returns masks
161//! let mut model = YOLOModel::load("yolo26n-seg.onnx")?;
162//! let results = model.predict("image.jpg")?;
//! if let Some(ref masks) = results[0].masks {
//!     println!("Found {} instance masks", masks.len());
//! }
//!
//! // Pose model - returns keypoints
//! let mut model = YOLOModel::load("yolo26n-pose.onnx")?;
//! let results = model.predict("image.jpg")?;
//! if let Some(ref keypoints) = results[0].keypoints {
//!     println!("Found {} poses", keypoints.len());
//! }
//!
//! // Classification model - returns probabilities
//! let mut model = YOLOModel::load("yolo26n-cls.onnx")?;
//! let results = model.predict("image.jpg")?;
//! if let Some(ref probs) = results[0].probs {
//!     println!("Top-1: class {} ({:.1}%)", probs.top1(), probs.top1conf() * 100.0);
//! }
180//! # Ok(())
181//! # }
182//! ```
183//!
184//! ## Custom Configuration
185//!
186//! Use the builder pattern to customize inference settings:
187//!
188//! ```rust
189//! use ultralytics_inference::InferenceConfig;
190//!
//! let config = InferenceConfig::new()
//!     .with_confidence(0.5)  // Confidence threshold
//!     .with_iou(0.45)        // NMS IoU threshold
//!     .with_max_det(300)     // Max detections per image
//!     .with_imgsz(640, 640); // Input image size
196//! ```
197//!
198//! ## Hardware Acceleration
199//!
200//! Enable hardware acceleration with Cargo features:
201//!
202//! ```bash
203//! # NVIDIA CUDA
204//! cargo build --release --features cuda
205//!
206//! # NVIDIA TensorRT
207//! cargo build --release --features tensorrt
208//!
209//! # Apple CoreML
210//! cargo build --release --features coreml
211//!
212//! # Intel OpenVINO
213//! cargo build --release --features openvino
214//! ```
215//!
216//! ## Results API
217//!
218//! The [`Results`] struct provides access to inference outputs:
219//!
220//! - [`Boxes`] - Bounding boxes with `xyxy()`, `xywh()`, `xyxyn()`, `xywhn()`, `conf()`, `cls()` methods
221//! - [`Masks`] - Segmentation masks with `data`, `orig_shape` fields
222//! - [`Keypoints`] - Pose keypoints with `xy()`, `xyn()`, `conf()` methods
223//! - [`Probs`] - Classification probabilities with `top1()`, `top5()`, `top1conf()`, `top5conf()` methods
224//! - [`Obb`] - Oriented bounding boxes with `xyxyxyxy()`, `xywhr()`, `conf()`, `cls()` methods
225//!
226//! ## Module Overview
227//!
228//! | Module | Description |
229//! |--------|-------------|
230//! | [`model`] | Core [`YOLOModel`] for loading models and running inference |
231//! | [`results`] | Output types ([`Results`], [`Boxes`], [`Masks`], etc.) |
232//! | [`inference`] | [`InferenceConfig`] for customizing inference settings |
233//! | [`source`] | Input source handling ([`Source`], [`SourceIterator`]) |
234//! | [`task`] | YOLO task types ([`Task`]: Detect, Segment, Pose, etc.) |
235//! | [`mod@error`] | Error types ([`InferenceError`], [`Result`]) |
236//! | [`preprocessing`] | Image preprocessing utilities |
237//! | [`postprocessing`] | Detection post-processing (NMS, decode) |
238//! | [`metadata`] | ONNX model metadata parsing |
239//!
240//! ## Feature Flags
241//!
242//! | Feature | Description |
243//! |---------|-------------|
244//! | `annotate` | Image annotation support (default) |
245//! | `visualize` | Real-time window display (default) |
246//! | `video` | Video file support (`FFmpeg`) |
247//! | `cuda` | NVIDIA CUDA acceleration |
248//! | `tensorrt` | NVIDIA `TensorRT` optimization |
249//! | `coreml` | Apple `CoreML` (macOS/iOS) |
250//! | `openvino` | Intel `OpenVINO` |
251//!
252//! ## License
253//!
254//! This project is dual-licensed under [AGPL-3.0](https://github.com/ultralytics/inference/blob/main/LICENSE)
255//! for open-source use or [Ultralytics Enterprise License](https://ultralytics.com/license)
256//! for commercial applications.
257
258// Modules
259#[cfg(feature = "annotate")]
260pub mod annotate;
261pub mod batch;
262pub mod cli;
263pub mod device;
264pub mod download;
265pub mod error;
266pub mod inference;
267pub mod io;
268
269pub mod logging;
270pub mod metadata;
271pub mod model;
272pub mod postprocessing;
273pub mod preprocessing;
274pub mod results;
275pub mod source;
276pub mod task;
277pub mod utils;
278pub mod visualizer;
279
280// Re-export main types for convenience
281pub use device::Device;
282pub use error::{InferenceError, Result};
283pub use inference::InferenceConfig;
284pub use model::YOLOModel;
285pub use results::{Boxes, Keypoints, Masks, Obb, Probs, Results, Speed};
286pub use source::{Source, SourceIterator, SourceMeta};
287pub use task::Task;
288
289// Re-export metadata for advanced use
290pub use metadata::ModelMetadata;
291
292// Re-export preprocessing utilities
293pub use preprocessing::{PreprocessResult, preprocess_image, preprocess_image_with_precision};
294
/// Library version, captured from `Cargo.toml` at compile time via
/// `env!("CARGO_PKG_VERSION")` (semver, e.g. `"0.0.5"`).
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Library (crate) name, captured from `Cargo.toml` at compile time via
/// `env!("CARGO_PKG_NAME")` (i.e. `"ultralytics-inference"`).
pub const NAME: &str = env!("CARGO_PKG_NAME");
300
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version() {
        // `CARGO_PKG_VERSION` is guaranteed by Cargo to be semver:
        // `major.minor.patch`, optionally followed by a pre-release/build
        // suffix on the patch component (e.g. "0.1.0-alpha").
        //
        // The previous assertion (`VERSION.contains('.')`) accepted any
        // dotted string; validate the actual shape instead.
        let parts: Vec<&str> = VERSION.splitn(3, '.').collect();
        assert_eq!(parts.len(), 3, "expected major.minor.patch, got {VERSION}");
        // Major and minor must be plain non-negative integers.
        assert!(parts[0].parse::<u64>().is_ok(), "bad major in {VERSION}");
        assert!(parts[1].parse::<u64>().is_ok(), "bad minor in {VERSION}");
        // Patch must at least start with a digit (suffixes like "-rc1" allowed).
        assert!(
            parts[2].chars().next().is_some_and(|c| c.is_ascii_digit()),
            "bad patch in {VERSION}"
        );
    }

    #[test]
    fn test_name() {
        // Must match the `name` field in Cargo.toml.
        assert_eq!(NAME, "ultralytics-inference");
    }
}
315}