use cpp::cpp;

/// Holds properties for configuring a builder to produce an engine.
///
/// [TensorRT documentation](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_builder_config.html)
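///
/// # Example
///
/// A minimal sketch of the fluent configuration pattern. How the initial `BuilderConfig` is
/// obtained (for example, from the crate's builder type) is assumed here and not shown:
///
/// ```ignore
/// let config = config
///     .with_max_workspace_size(1 << 30) // allow up to 1 GiB of temporary GPU memory
///     .with_fp16();
/// ```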
pub struct BuilderConfig(*mut std::ffi::c_void);

/// Implements [`Send`] for [`BuilderConfig`].
///
/// # Safety
///
/// The TensorRT API is thread-safe with respect to all operations on [`BuilderConfig`].
unsafe impl Send for BuilderConfig {}

/// Implements [`Sync`] for [`BuilderConfig`].
///
/// # Safety
///
/// The TensorRT API is thread-safe with respect to all operations on [`BuilderConfig`].
unsafe impl Sync for BuilderConfig {}

impl BuilderConfig {
    /// Wrap internal pointer as [`BuilderConfig`].
    ///
    /// # Safety
    ///
    /// The pointer must point to a valid `IBuilderConfig` object.
    pub(crate) fn wrap(internal: *mut std::ffi::c_void) -> Self {
        Self(internal)
    }

    /// Set the maximum workspace size.
    ///
    /// Internally this sets the `kWORKSPACE` memory pool limit on the underlying
    /// `IBuilderConfig`.
    ///
    /// [TensorRT documentation](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_builder_config.html#a8209999988ab480c60c8a905dfd2654d)
    ///
    /// # Arguments
    ///
    /// * `size` - Maximum amount of temporary GPU memory, in bytes, that the engine may use at
    ///   execution time.
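    ///
    /// # Example
    ///
    /// A sketch only; `config` is assumed to be a [`BuilderConfig`] obtained elsewhere:
    ///
    /// ```ignore
    /// // Allow the builder to use up to 1 GiB of temporary GPU memory.
    /// let config = config.with_max_workspace_size(1 << 30);
    /// ```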
    pub fn with_max_workspace_size(mut self, size: usize) -> Self {
        let internal = self.as_mut_ptr();
        cpp!(unsafe [
            internal as "void*",
            size as "std::size_t"
        ] {
            ((IBuilderConfig*) internal)->setMemoryPoolLimit(MemoryPoolType::kWORKSPACE, size);
        });
        self
    }

    /// Set the `kSTRICT_TYPES` flag.
    ///
    /// [TensorRT documentation for `setFlag`](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_builder_config.html#ac9821504ae7a11769e48b0e62761837e)
    /// [TensorRT documentation for `kSTRICT_TYPES`](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/namespacenvinfer1.html#abdc74c40fe7a0c3d05d2caeccfbc29c1ad3ff8ff39475957d8676c2cda337add7)
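    ///
    /// # Example
    ///
    /// A sketch only; `config` is assumed to be a [`BuilderConfig`] obtained elsewhere. With
    /// strict types enabled, the builder must honor the precisions set on the network instead of
    /// choosing faster alternatives:
    ///
    /// ```ignore
    /// let config = config.with_strict_types();
    /// ```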
    pub fn with_strict_types(mut self) -> Self {
        let internal = self.as_mut_ptr();
        cpp!(unsafe [
            internal as "void*"
        ] {
            ((IBuilderConfig*) internal)->setFlag(BuilderFlag::kSTRICT_TYPES);
        });
        self
    }

    /// Set the `kFP16` flag.
    ///
    /// [TensorRT documentation for `setFlag`](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/classnvinfer1_1_1_i_builder_config.html#ac9821504ae7a11769e48b0e62761837e)
    /// [TensorRT documentation for `kFP16`](https://docs.nvidia.com/deeplearning/tensorrt/api/c_api/namespacenvinfer1.html#abdc74c40fe7a0c3d05d2caeccfbc29c1a56e4ef5e47a48568bd24c4e0aaabcead)
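    ///
    /// # Example
    ///
    /// A sketch only; `config` is assumed to be a [`BuilderConfig`] obtained elsewhere. Enabling
    /// FP16 lets the builder select half-precision kernels where they are faster:
    ///
    /// ```ignore
    /// let config = config.with_fp16();
    /// ```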
    pub fn with_fp16(mut self) -> Self {
        let internal = self.as_mut_ptr();
        cpp!(unsafe [
            internal as "void*"
        ] {
            ((IBuilderConfig*) internal)->setFlag(BuilderFlag::kFP16);
        });
        self
    }

    /// Get internal readonly pointer.
    #[inline(always)]
    pub fn as_ptr(&self) -> *const std::ffi::c_void {
        let BuilderConfig(internal) = *self;
        internal
    }

    /// Get internal mutable pointer.
    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut std::ffi::c_void {
        let BuilderConfig(internal) = *self;
        internal
    }
}

impl Drop for BuilderConfig {
    fn drop(&mut self) {
        let internal = self.as_mut_ptr();
        cpp!(unsafe [
            internal as "void*"
        ] {
            destroy((IBuilderConfig*) internal);
        });
    }
}