1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
//! # nv-runtime
//!
//! Pipeline orchestration and runtime for the NextVision video perception library.
//!
//! ## Conceptual model
//!
//! The runtime manages **feeds** — independent video streams, each running on a
//! dedicated OS thread. For every frame, a linear pipeline of user-defined
//! **[stages](nv_perception::Stage)** produces structured perception output.
//!
//! ```text
//! Media source → FrameQueue → [Stage 1 → Stage 2 → …] → OutputSink
//!                    │                                       │
//!              TemporalStore                         Broadcast channel
//!                ViewState                         (optional subscribers)
//! ```
//!
//! The media backend (GStreamer, via `nv-media`) is an implementation
//! detail. Users interact with backend-agnostic types: [`SourceSpec`](nv_core::SourceSpec),
//! [`FeedConfig`], and [`OutputEnvelope`]. A custom backend can be injected via
//! [`RuntimeBuilder::ingress_factory`].
//!
//! ## Key types
//!
//! - **[`Runtime`]** — manages cross-feed concerns (thread pools, metrics, shutdown).
//! - **[`RuntimeBuilder`]** — builder for configuring and constructing a `Runtime`.
//! - **[`RuntimeHandle`]** — cloneable control handle (add/remove feeds, subscribe, shutdown).
//! - **[`FeedConfig`]** / **[`FeedConfigBuilder`]** — per-feed configuration.
//! - **[`FeedHandle`]** — handle to a running feed (metrics, diagnostics, queue telemetry, pause/resume).
//! - **[`QueueTelemetry`]** — source/sink queue depth and capacity snapshot.
//! - **[`OutputEnvelope`]** — structured, provenanced output for each processed frame.
//! - **[`OutputSink`]** — user-implementable trait for receiving outputs.
//! - **[`Provenance`]** — full audit trail of stage and view-system decisions.
//! - **[`BackpressurePolicy`]** — queue behavior configuration.
//! - **[`FeedDiagnostics`]** / **[`RuntimeDiagnostics`]** — consolidated diagnostic snapshots.
//!
//! ## PTZ / view-state handling
//!
//! Moving-camera feeds use `CameraMode::Observed` with a user-supplied
//! [`ViewStateProvider`](nv_view::ViewStateProvider). The runtime polls it
//! each frame, runs an [`EpochPolicy`](nv_view::EpochPolicy), and manages
//! view epochs, continuity degradation, and trajectory segmentation
//! automatically. Fixed cameras use `CameraMode::Fixed` and skip the
//! view-state machinery entirely.
//!
//! ## Out of scope
//!
//! The runtime does **not** include domain-specific event taxonomies,
//! alerting workflows, calibration semantics, or UI concerns. Those
//! belong in layers built on top of this library.
//!
//! ## Minimal usage
//!
//! ```rust,no_run
//! use nv_runtime::*;
//! use nv_core::*;
//!
//! # struct MyStage;
//! # impl nv_perception::Stage for MyStage {
//! #     fn id(&self) -> StageId { StageId("my_stage") }
//! #     fn process(&mut self, _ctx: &nv_perception::StageContext<'_>) -> Result<nv_perception::StageOutput, StageError> {
//! #         Ok(nv_perception::StageOutput::empty())
//! #     }
//! # }
//! struct MySink;
//! impl OutputSink for MySink {
//!     fn emit(&self, _output: SharedOutput) {}
//! }
//!
//! # fn example() -> Result<(), NvError> {
//! let runtime = Runtime::builder().build()?;
//! let _feed = runtime.add_feed(
//!     FeedConfig::builder()
//!         .source(SourceSpec::rtsp("rtsp://cam/stream"))
//!         .camera_mode(CameraMode::Fixed)
//!         .stages(vec![Box::new(MyStage)])
//!         .output_sink(Box::new(MySink))
//!         .build()?
//! )?;
//! // runtime.shutdown();
//! # Ok(())
//! # }
//! ```
//!
//! ## Batch inference across feeds
//!
//! Multiple feeds can share a single GPU-accelerated batch processor via
//! [`BatchHandle`]. Create a batch handle once, then reference it from
//! each feed's pipeline.
//!
//! ```rust,no_run
//! use nv_runtime::*;
//! use nv_core::*;
//! use nv_perception::batch::{BatchProcessor, BatchEntry};
//! use std::time::Duration;
//!
//! # struct MyDetector;
//! # impl BatchProcessor for MyDetector {
//! #     fn id(&self) -> StageId { StageId("detector") }
//! #     fn process(&mut self, items: &mut [BatchEntry]) -> Result<(), StageError> {
//! #         for item in items.iter_mut() { item.output = Some(nv_perception::StageOutput::empty()); }
//! #         Ok(())
//! #     }
//! # }
//! # struct MySink;
//! # impl OutputSink for MySink { fn emit(&self, _: SharedOutput) {} }
//!
//! # fn example() -> Result<(), NvError> {
//! let runtime = Runtime::builder().build()?;
//!
//! // Create a shared batch coordinator.
//! let batch = runtime.create_batch(
//!     Box::new(MyDetector),
//!     BatchConfig {
//!         max_batch_size: 8,
//!         max_latency: Duration::from_millis(50),
//!         queue_capacity: None,
//!         response_timeout: None,
//!         max_in_flight_per_feed: 1,
//!         startup_timeout: None,
//!     },
//! )?;
//!
//! // Build per-feed pipelines referencing the shared batch.
//! let pipeline = FeedPipeline::builder()
//!     .batch(batch.clone()).expect("single batch point")
//!     .build();
//!
//! let _feed = runtime.add_feed(
//!     FeedConfig::builder()
//!         .source(SourceSpec::rtsp("rtsp://cam/stream"))
//!         .camera_mode(CameraMode::Fixed)
//!         .feed_pipeline(pipeline)
//!         .output_sink(Box::new(MySink))
//!         .build()?
//! )?;
//! # Ok(())
//! # }
//! ```
pub
pub
pub
// Re-export key types at crate root.
pub use BackpressurePolicy;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
// Re-export validation types from nv-perception for convenience.
pub use ;
// Re-export decode types from nv-media for convenience.
pub use ;
// Re-export post-decode hook types from nv-media for convenience.
pub use ;
// Re-export device residency type from nv-media for convenience.
//
// To enable the built-in CUDA-resident frame pipeline, activate the `cuda`
// cargo feature on nv-runtime (which forwards to `nv-media/cuda`):
//
// nv-runtime = { version = "0.1", features = ["cuda"] }
//
// Then set `FeedConfig::device_residency(DeviceResidency::Cuda)`. Without
// the feature, requesting `DeviceResidency::Cuda` returns
// `MediaError::Unsupported` at pipeline build time.
//
// For platform-specific providers (e.g., Jetson NVMM), use
// `DeviceResidency::Provider(provider)` — this path does **not** require
// the `cuda` feature.
pub use DeviceResidency;
// Re-export GPU pipeline provider types for application-level wiring.
pub use ;
// Re-export health types from nv-core for convenience.
pub use ;
// Re-export security policy types from nv-core for convenience.
pub use ;