async_tensorrt/runtime.rs

use async_cuda::runtime::Future;

use crate::engine::Engine;
use crate::ffi::memory::HostBuffer;
use crate::ffi::sync::runtime::Runtime as InnerRuntime;

type Result<T> = std::result::Result<T, crate::error::Error>;

/// Allows a serialized engine to be deserialized.
///
/// [TensorRT documentation](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_runtime.html)
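///
/// # Example
///
/// A minimal sketch of the typical flow. The `engine.plan` path is
/// hypothetical, and [`Runtime`] is assumed to be re-exported at the crate
/// root:
///
/// ```no_run
/// # use async_tensorrt::Runtime;
/// # async fn example() {
/// let plan = std::fs::read("engine.plan").unwrap();
/// let runtime = Runtime::new().await;
/// let engine = runtime.deserialize_engine(&plan).await.unwrap();
/// # }
/// ```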
pub struct Runtime {
    inner: InnerRuntime,
}

impl Runtime {
    /// Create a new [`Runtime`].
    pub async fn new() -> Self {
        let inner = Future::new(InnerRuntime::new).await;
        Self { inner }
    }

    /// Deserialize engine from a plan (a [`HostBuffer`]).
    ///
    /// [TensorRT documentation](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_runtime.html#ad0dc765e77cab99bfad901e47216a767)
    ///
    /// # Arguments
    ///
    /// * `plan` - Plan to deserialize from.
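    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `plan` is a [`HostBuffer`] produced
    /// earlier (for example, by serializing a built network), and that both
    /// types are re-exported at the crate root:
    ///
    /// ```no_run
    /// # use async_tensorrt::{HostBuffer, Runtime};
    /// # async fn example(plan: &HostBuffer) {
    /// let engine = Runtime::new()
    ///     .await
    ///     .deserialize_engine_from_plan(plan)
    ///     .await
    ///     .unwrap();
    /// # }
    /// ```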
    pub async fn deserialize_engine_from_plan(self, plan: &HostBuffer) -> Result<Engine> {
        // Offload the blocking TensorRT call so it does not stall the async
        // executor.
        Future::new(move || {
            self.inner
                .deserialize_engine_from_plan(plan)
                .map(Engine::from_inner)
        })
        .await
    }

    /// Deserialize engine from a byte slice.
    ///
    /// [TensorRT documentation](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_runtime.html#ad0dc765e77cab99bfad901e47216a767)
    ///
    /// # Arguments
    ///
    /// * `buffer` - Byte slice to deserialize from.
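    ///
    /// # Example
    ///
    /// A minimal sketch that reads a serialized engine from disk; the
    /// `engine.plan` path is hypothetical, and [`Runtime`] is assumed to be
    /// re-exported at the crate root:
    ///
    /// ```no_run
    /// # use async_tensorrt::Runtime;
    /// # async fn example() {
    /// let buffer = std::fs::read("engine.plan").unwrap();
    /// let engine = Runtime::new().await.deserialize_engine(&buffer).await.unwrap();
    /// # }
    /// ```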
    pub async fn deserialize_engine(self, buffer: &[u8]) -> Result<Engine> {
        Future::new(move || {
            self.inner
                .deserialize_engine(buffer)
                .map(Engine::from_inner)
        })
        .await
    }
}