deepviewrt_sys/
ffi.rs

1/* automatically generated by rust-bindgen 0.64.0 */
2
3pub type __off_t = ::std::os::raw::c_long;
4pub type __off64_t = ::std::os::raw::c_long;
5pub type FILE = _IO_FILE;
6#[repr(C)]
7#[derive(Debug, Copy, Clone)]
8pub struct _IO_marker {
9    _unused: [u8; 0],
10}
11#[repr(C)]
12#[derive(Debug, Copy, Clone)]
13pub struct _IO_codecvt {
14    _unused: [u8; 0],
15}
16#[repr(C)]
17#[derive(Debug, Copy, Clone)]
18pub struct _IO_wide_data {
19    _unused: [u8; 0],
20}
21pub type _IO_lock_t = ::std::os::raw::c_void;
22#[repr(C)]
23#[derive(Debug, Copy, Clone)]
24pub struct _IO_FILE {
25    pub _flags: ::std::os::raw::c_int,
26    pub _IO_read_ptr: *mut ::std::os::raw::c_char,
27    pub _IO_read_end: *mut ::std::os::raw::c_char,
28    pub _IO_read_base: *mut ::std::os::raw::c_char,
29    pub _IO_write_base: *mut ::std::os::raw::c_char,
30    pub _IO_write_ptr: *mut ::std::os::raw::c_char,
31    pub _IO_write_end: *mut ::std::os::raw::c_char,
32    pub _IO_buf_base: *mut ::std::os::raw::c_char,
33    pub _IO_buf_end: *mut ::std::os::raw::c_char,
34    pub _IO_save_base: *mut ::std::os::raw::c_char,
35    pub _IO_backup_base: *mut ::std::os::raw::c_char,
36    pub _IO_save_end: *mut ::std::os::raw::c_char,
37    pub _markers: *mut _IO_marker,
38    pub _chain: *mut _IO_FILE,
39    pub _fileno: ::std::os::raw::c_int,
40    pub _flags2: ::std::os::raw::c_int,
41    pub _old_offset: __off_t,
42    pub _cur_column: ::std::os::raw::c_ushort,
43    pub _vtable_offset: ::std::os::raw::c_schar,
44    pub _shortbuf: [::std::os::raw::c_char; 1usize],
45    pub _lock: *mut _IO_lock_t,
46    pub _offset: __off64_t,
47    pub _codecvt: *mut _IO_codecvt,
48    pub _wide_data: *mut _IO_wide_data,
49    pub _freeres_list: *mut _IO_FILE,
50    pub _freeres_buf: *mut ::std::os::raw::c_void,
51    pub __pad5: usize,
52    pub _mode: ::std::os::raw::c_int,
53    pub _unused2: [::std::os::raw::c_char; 20usize],
54}
55#[test]
56fn bindgen_test_layout__IO_FILE() {
57    const UNINIT: ::std::mem::MaybeUninit<_IO_FILE> = ::std::mem::MaybeUninit::uninit();
58    let ptr = UNINIT.as_ptr();
59    assert_eq!(
60        ::std::mem::size_of::<_IO_FILE>(),
61        216usize,
62        concat!("Size of: ", stringify!(_IO_FILE))
63    );
64    assert_eq!(
65        ::std::mem::align_of::<_IO_FILE>(),
66        8usize,
67        concat!("Alignment of ", stringify!(_IO_FILE))
68    );
69    assert_eq!(
70        unsafe { ::std::ptr::addr_of!((*ptr)._flags) as usize - ptr as usize },
71        0usize,
72        concat!(
73            "Offset of field: ",
74            stringify!(_IO_FILE),
75            "::",
76            stringify!(_flags)
77        )
78    );
79    assert_eq!(
80        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_ptr) as usize - ptr as usize },
81        8usize,
82        concat!(
83            "Offset of field: ",
84            stringify!(_IO_FILE),
85            "::",
86            stringify!(_IO_read_ptr)
87        )
88    );
89    assert_eq!(
90        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_end) as usize - ptr as usize },
91        16usize,
92        concat!(
93            "Offset of field: ",
94            stringify!(_IO_FILE),
95            "::",
96            stringify!(_IO_read_end)
97        )
98    );
99    assert_eq!(
100        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_base) as usize - ptr as usize },
101        24usize,
102        concat!(
103            "Offset of field: ",
104            stringify!(_IO_FILE),
105            "::",
106            stringify!(_IO_read_base)
107        )
108    );
109    assert_eq!(
110        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_base) as usize - ptr as usize },
111        32usize,
112        concat!(
113            "Offset of field: ",
114            stringify!(_IO_FILE),
115            "::",
116            stringify!(_IO_write_base)
117        )
118    );
119    assert_eq!(
120        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_ptr) as usize - ptr as usize },
121        40usize,
122        concat!(
123            "Offset of field: ",
124            stringify!(_IO_FILE),
125            "::",
126            stringify!(_IO_write_ptr)
127        )
128    );
129    assert_eq!(
130        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_end) as usize - ptr as usize },
131        48usize,
132        concat!(
133            "Offset of field: ",
134            stringify!(_IO_FILE),
135            "::",
136            stringify!(_IO_write_end)
137        )
138    );
139    assert_eq!(
140        unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_base) as usize - ptr as usize },
141        56usize,
142        concat!(
143            "Offset of field: ",
144            stringify!(_IO_FILE),
145            "::",
146            stringify!(_IO_buf_base)
147        )
148    );
149    assert_eq!(
150        unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_end) as usize - ptr as usize },
151        64usize,
152        concat!(
153            "Offset of field: ",
154            stringify!(_IO_FILE),
155            "::",
156            stringify!(_IO_buf_end)
157        )
158    );
159    assert_eq!(
160        unsafe { ::std::ptr::addr_of!((*ptr)._IO_save_base) as usize - ptr as usize },
161        72usize,
162        concat!(
163            "Offset of field: ",
164            stringify!(_IO_FILE),
165            "::",
166            stringify!(_IO_save_base)
167        )
168    );
169    assert_eq!(
170        unsafe { ::std::ptr::addr_of!((*ptr)._IO_backup_base) as usize - ptr as usize },
171        80usize,
172        concat!(
173            "Offset of field: ",
174            stringify!(_IO_FILE),
175            "::",
176            stringify!(_IO_backup_base)
177        )
178    );
179    assert_eq!(
180        unsafe { ::std::ptr::addr_of!((*ptr)._IO_save_end) as usize - ptr as usize },
181        88usize,
182        concat!(
183            "Offset of field: ",
184            stringify!(_IO_FILE),
185            "::",
186            stringify!(_IO_save_end)
187        )
188    );
189    assert_eq!(
190        unsafe { ::std::ptr::addr_of!((*ptr)._markers) as usize - ptr as usize },
191        96usize,
192        concat!(
193            "Offset of field: ",
194            stringify!(_IO_FILE),
195            "::",
196            stringify!(_markers)
197        )
198    );
199    assert_eq!(
200        unsafe { ::std::ptr::addr_of!((*ptr)._chain) as usize - ptr as usize },
201        104usize,
202        concat!(
203            "Offset of field: ",
204            stringify!(_IO_FILE),
205            "::",
206            stringify!(_chain)
207        )
208    );
209    assert_eq!(
210        unsafe { ::std::ptr::addr_of!((*ptr)._fileno) as usize - ptr as usize },
211        112usize,
212        concat!(
213            "Offset of field: ",
214            stringify!(_IO_FILE),
215            "::",
216            stringify!(_fileno)
217        )
218    );
219    assert_eq!(
220        unsafe { ::std::ptr::addr_of!((*ptr)._flags2) as usize - ptr as usize },
221        116usize,
222        concat!(
223            "Offset of field: ",
224            stringify!(_IO_FILE),
225            "::",
226            stringify!(_flags2)
227        )
228    );
229    assert_eq!(
230        unsafe { ::std::ptr::addr_of!((*ptr)._old_offset) as usize - ptr as usize },
231        120usize,
232        concat!(
233            "Offset of field: ",
234            stringify!(_IO_FILE),
235            "::",
236            stringify!(_old_offset)
237        )
238    );
239    assert_eq!(
240        unsafe { ::std::ptr::addr_of!((*ptr)._cur_column) as usize - ptr as usize },
241        128usize,
242        concat!(
243            "Offset of field: ",
244            stringify!(_IO_FILE),
245            "::",
246            stringify!(_cur_column)
247        )
248    );
249    assert_eq!(
250        unsafe { ::std::ptr::addr_of!((*ptr)._vtable_offset) as usize - ptr as usize },
251        130usize,
252        concat!(
253            "Offset of field: ",
254            stringify!(_IO_FILE),
255            "::",
256            stringify!(_vtable_offset)
257        )
258    );
259    assert_eq!(
260        unsafe { ::std::ptr::addr_of!((*ptr)._shortbuf) as usize - ptr as usize },
261        131usize,
262        concat!(
263            "Offset of field: ",
264            stringify!(_IO_FILE),
265            "::",
266            stringify!(_shortbuf)
267        )
268    );
269    assert_eq!(
270        unsafe { ::std::ptr::addr_of!((*ptr)._lock) as usize - ptr as usize },
271        136usize,
272        concat!(
273            "Offset of field: ",
274            stringify!(_IO_FILE),
275            "::",
276            stringify!(_lock)
277        )
278    );
279    assert_eq!(
280        unsafe { ::std::ptr::addr_of!((*ptr)._offset) as usize - ptr as usize },
281        144usize,
282        concat!(
283            "Offset of field: ",
284            stringify!(_IO_FILE),
285            "::",
286            stringify!(_offset)
287        )
288    );
289    assert_eq!(
290        unsafe { ::std::ptr::addr_of!((*ptr)._codecvt) as usize - ptr as usize },
291        152usize,
292        concat!(
293            "Offset of field: ",
294            stringify!(_IO_FILE),
295            "::",
296            stringify!(_codecvt)
297        )
298    );
299    assert_eq!(
300        unsafe { ::std::ptr::addr_of!((*ptr)._wide_data) as usize - ptr as usize },
301        160usize,
302        concat!(
303            "Offset of field: ",
304            stringify!(_IO_FILE),
305            "::",
306            stringify!(_wide_data)
307        )
308    );
309    assert_eq!(
310        unsafe { ::std::ptr::addr_of!((*ptr)._freeres_list) as usize - ptr as usize },
311        168usize,
312        concat!(
313            "Offset of field: ",
314            stringify!(_IO_FILE),
315            "::",
316            stringify!(_freeres_list)
317        )
318    );
319    assert_eq!(
320        unsafe { ::std::ptr::addr_of!((*ptr)._freeres_buf) as usize - ptr as usize },
321        176usize,
322        concat!(
323            "Offset of field: ",
324            stringify!(_IO_FILE),
325            "::",
326            stringify!(_freeres_buf)
327        )
328    );
329    assert_eq!(
330        unsafe { ::std::ptr::addr_of!((*ptr).__pad5) as usize - ptr as usize },
331        184usize,
332        concat!(
333            "Offset of field: ",
334            stringify!(_IO_FILE),
335            "::",
336            stringify!(__pad5)
337        )
338    );
339    assert_eq!(
340        unsafe { ::std::ptr::addr_of!((*ptr)._mode) as usize - ptr as usize },
341        192usize,
342        concat!(
343            "Offset of field: ",
344            stringify!(_IO_FILE),
345            "::",
346            stringify!(_mode)
347        )
348    );
349    assert_eq!(
350        unsafe { ::std::ptr::addr_of!((*ptr)._unused2) as usize - ptr as usize },
351        196usize,
352        concat!(
353            "Offset of field: ",
354            stringify!(_IO_FILE),
355            "::",
356            stringify!(_unused2)
357        )
358    );
359}
360#[doc = " Successfull operation, no error."]
361pub const NNError_NN_SUCCESS: NNError = 0;
362#[doc = " Internal error without a specific error code, catch-all error."]
363pub const NNError_NN_ERROR_INTERNAL: NNError = 1;
364#[doc = " The provided handle is invalid.  This error is typically used by NNEngine\n when interfacing with another API such as OpenCL or OpenVX which require\n native handles for their internal API."]
365pub const NNError_NN_ERROR_INVALID_HANDLE: NNError = 2;
366#[doc = " Out of memory error, returned if a call to malloc returns NULL or similar\n error from an underlying engine plugin."]
367pub const NNError_NN_ERROR_OUT_OF_MEMORY: NNError = 3;
368#[doc = " Out of resources errors are similar to out of memory though sometimes\n treated separately by underlying engine plugins."]
369pub const NNError_NN_ERROR_OUT_OF_RESOURCES: NNError = 4;
370#[doc = " Signals an API has not been implemented.  Can be caught by the core\n DeepViewRT library when interfacing with engine plugins to gracefully\n fallback to the native implementation."]
371pub const NNError_NN_ERROR_NOT_IMPLEMENTED: NNError = 5;
372#[doc = " A required parameter was missing or NULL or simply invalid."]
373pub const NNError_NN_ERROR_INVALID_PARAMETER: NNError = 6;
374#[doc = " When attempting to run an operation where the input/output tensors are\n of different types and the operation does not support automatic type\n conversions."]
375pub const NNError_NN_ERROR_TYPE_MISMATCH: NNError = 7;
376#[doc = " When attempting to run an operation and the input/output tensors have\n invalid or unsupported shape combinations.  Some operations require the\n shapes to be the same while others, such as arithmetic broadcasting\n operations, will support various shape combinations but if the provided\n pairs are invalid then the shape mismatch is returned."]
377pub const NNError_NN_ERROR_SHAPE_MISMATCH: NNError = 8;
378#[doc = " The tensor's shape is invalid for the given operation.  It differs from\n the shape mismatch in that the shape is invalid on its own and not\n relative to another related tensor.  An example would be a shape with\n more than one -1 dimension."]
379pub const NNError_NN_ERROR_INVALID_SHAPE: NNError = 9;
380#[doc = " The requested ordering was invalid."]
381pub const NNError_NN_ERROR_INVALID_ORDER: NNError = 10;
382#[doc = " The requested axis for an operation was invalid or unsupported."]
383pub const NNError_NN_ERROR_INVALID_AXIS: NNError = 11;
384#[doc = " A required resource was missing or the reference invalid."]
385pub const NNError_NN_ERROR_MISSING_RESOURCE: NNError = 12;
386#[doc = " The requested engine is invalid."]
387pub const NNError_NN_ERROR_INVALID_ENGINE: NNError = 13;
388#[doc = " The tensor has no data or the data is not currently accessible.  An\n example of the latter would be attempting to call @ref nn_tensor_maprw\n while the tensor was already mapped read-only or write-only."]
389pub const NNError_NN_ERROR_TENSOR_NO_DATA: NNError = 14;
390#[doc = " The internal kernel or subroutine required to complete an operation using\n the engine plugin was missing.  An example would be OpenCL or OpenVX\n operation where the kernel implementation cannot be located."]
391pub const NNError_NN_ERROR_KERNEL_MISSING: NNError = 15;
392#[doc = " The operation does not support the tensor's type."]
393pub const NNError_NN_ERROR_TENSOR_TYPE_UNSUPPORTED: NNError = 16;
394#[doc = " For operations which can operate on an array of inputs, the provided list\n of inputs was too large."]
395pub const NNError_NN_ERROR_TOO_MANY_INPUTS: NNError = 17;
396#[doc = " A system error occured when interfacing with an operating system\n function.  On some systems errno might be updated with the underlying\n error code."]
397pub const NNError_NN_ERROR_SYSTEM_ERROR: NNError = 18;
398#[doc = " When working with a model a reference was made to a layer which did not\n exist."]
399pub const NNError_NN_ERROR_INVALID_LAYER: NNError = 19;
400#[doc = " The model is invalid or corrupted."]
401pub const NNError_NN_ERROR_MODEL_INVALID: NNError = 20;
402#[doc = " An operation referenced a model but the model was not provided."]
403pub const NNError_NN_ERROR_MODEL_MISSING: NNError = 21;
404#[doc = " The string was too large."]
405pub const NNError_NN_ERROR_STRING_TOO_LARGE: NNError = 22;
406#[doc = " The quantization parameters are invalid."]
407pub const NNError_NN_ERROR_INVALID_QUANT: NNError = 23;
408#[doc = " Failed to generate graph representation of model."]
409pub const NNError_NN_ERROR_MODEL_GRAPH_FAILED: NNError = 24;
410#[doc = " Failed to verify graph generateed from model."]
411pub const NNError_NN_ERROR_GRAPH_VERIFY_FAILED: NNError = 25;
412#[doc = " Enumeration of all errors provided by DeepViewRT.  Most functions will\n return an NNError with NN_SUCCESS being zero. A common usage pattern for\n client code is to check for err using `if (err) ...` as any error condition\n will return non-zero."]
413pub type NNError = ::std::os::raw::c_uint;
414#[doc = " Raw byte-stream tensor, useful for encoded tensors such as PNG images.\n The size of this tensor would be in bytes."]
415pub const NNTensorType_NNTensorType_RAW: NNTensorType = 0;
416#[doc = " String tensor data, a single dimension would hold one null-terminated\n string of variable length.  A standard C char* array."]
417pub const NNTensorType_NNTensorType_STR: NNTensorType = 1;
418#[doc = " Signed 8-bit integer tensor data internally @ref int8_t"]
419pub const NNTensorType_NNTensorType_I8: NNTensorType = 2;
420#[doc = " Unsigned 8-bit integer tensor data internally @ref uint8_t"]
421pub const NNTensorType_NNTensorType_U8: NNTensorType = 3;
422#[doc = " Signed 16-bit integer tensor data internally @ref int16_t"]
423pub const NNTensorType_NNTensorType_I16: NNTensorType = 4;
424#[doc = " Unsigned 16-bit integer tensor data internally @ref uint16_t"]
425pub const NNTensorType_NNTensorType_U16: NNTensorType = 5;
426#[doc = " Signed 16-bit integer tensor data internally @ref int32_t"]
427pub const NNTensorType_NNTensorType_I32: NNTensorType = 6;
428#[doc = " Unsigned 16-bit integer tensor data internally @ref uint32_t"]
429pub const NNTensorType_NNTensorType_U32: NNTensorType = 7;
430#[doc = " Signed 16-bit integer tensor data internally @ref int64_t"]
431pub const NNTensorType_NNTensorType_I64: NNTensorType = 8;
432#[doc = " Unsigned 16-bit integer tensor data internally @ref uint64_t"]
433pub const NNTensorType_NNTensorType_U64: NNTensorType = 9;
434#[doc = " Half precision (16-bit) floating point tensor data."]
435pub const NNTensorType_NNTensorType_F16: NNTensorType = 10;
436#[doc = " Single precision (32-bit) floating point tensor data."]
437pub const NNTensorType_NNTensorType_F32: NNTensorType = 11;
438#[doc = " Double precision (64-bit) floating point tensor data."]
439pub const NNTensorType_NNTensorType_F64: NNTensorType = 12;
440#[doc = " @enum NNTensorType\n Enumeration of the data types supported by NNTensors in DeepViewRT."]
441pub type NNTensorType = ::std::os::raw::c_uint;
442#[doc = " No quantization for tensor."]
443pub const NNQuantizationType_NNQuantizationType_None: NNQuantizationType = 0;
444#[doc = " Affine quantization with parameters applied globally across the tensor.\n\n The scale term is queried from @ref nn_tensor_scales() while the zero\n term is queried from @ref nn_tensor_zeros().\n\n Quantization: \\f$ f(x) = \\frac{x}{scale} + zero \\f$\n\n Dequantization: \\f$ f(x) = (x - zero) * scale \\f$"]
445pub const NNQuantizationType_NNQuantizationType_Affine_PerTensor: NNQuantizationType = 1;
446#[doc = " Affine quantization with separate parameters applied to each channel.\n Also known as per-axis where the axis is always the channel \"C\" axis in\n a NCHW, NHWC, and so-on shaped tensor.\n\n Same equation as @ref NNQuantization_Affine_PerTensor but applied\n per-channel.  The scale and zero_point are vectors of channel length."]
447pub const NNQuantizationType_NNQuantizationType_Affine_PerChannel: NNQuantizationType = 2;
448#[doc = " Quantized using Dynamic Fixed Point."]
449pub const NNQuantizationType_NNQuantizationType_DFP: NNQuantizationType = 3;
450#[doc = " Enumeration of all quantization type provided by DeepViewRT."]
451pub type NNQuantizationType = ::std::os::raw::c_uint;
452#[doc = " DeepViewRT library initialization options."]
453pub type NNOptions = isize;
454#[repr(C)]
455#[derive(Debug, Copy, Clone)]
456pub struct nn_engine {
457    _unused: [u8; 0],
458}
459#[doc = " @struct NNEngine\n\n Engine structure provides the means to implement custom tensor and kernel\n implementations which implement the DeepViewRT inference backend. As an\n example the OpenCL backend is provided as a plugin which exposes an NNEngine\n which maps NNTensors to cl_mem objects and kernels as OpenCL kernels."]
460pub type NNEngine = nn_engine;
461#[repr(C)]
462#[derive(Debug, Copy, Clone)]
463pub struct nn_tensor {
464    _unused: [u8; 0],
465}
466#[doc = " @struct NNTensor\n\n Tensors are represented by the @ref NNTensor class.  The dimensions are\n variable and can be from 1 to 4 dimensions.  Internally the shape of a\n 1-dimensional tensor would be [N 1 1 1] and a scalar [1 1 1 1].\n\n Tensors can exist locally on the CPU or when initialized using an\n @ref NNEngine object the tensors can be mapped to a buffer on a compute\n device such as a GPU or NPU using the DeepViewRT OpenCL or OpenVX engine\n plugins."]
467pub type NNTensor = nn_tensor;
468#[repr(C)]
469#[derive(Debug, Copy, Clone)]
470pub struct nn_quant_param {
471    _unused: [u8; 0],
472}
473#[doc = " @struct NNQuantParam\n\n Tensor quantization parameter structure."]
474pub type NNQuantParam = nn_quant_param;
475#[doc = " @struct NNModel\n\n DeepViewRT Models \"RTM\" are reprensted in memory through the NNModel type\n which is meant to point to a static model blob.  This can point directly to\n the memory of the RTM either loaded into memory, accessed through a memmap\n or pointed directly to the flash location.  In other words if the RTM is\n saved into flash which is connected to the memory space then the model does\n not need to be copied into RAM before being loaded.\n\n Models are loaded into an @ref NNContext which handles the dynamic data\n structures required for operation of the model."]
476pub type NNModel = ::std::os::raw::c_void;
477#[doc = " @struct NNModelResource\n\n DeepViewRT Models may have resources embedded into them and this datatype is\n their handle."]
478pub type NNModelResource = ::std::os::raw::c_void;
479#[doc = " @struct NNModelParameter\n\n DeepViewRT Models use parameters to store various configuration information\n such as layer parameters."]
480pub type NNModelParameter = ::std::os::raw::c_void;
481#[repr(C)]
482#[derive(Debug, Copy, Clone)]
483pub struct nn_context {
484    _unused: [u8; 0],
485}
486#[doc = " @struct NNContext\n\n DeepViewRT models can be loaded with an NNContext and numerous contexts can\n be loaded at once.  The context manages the runtime portion of the model\n including the tensors required to hold intermediate buffers.\n\n A context itself requires @ref NN_CONTEXT_SIZEOF bytes though it will also\n allocate on the heap additional tensor handles required to support models on\n @ref nn_context_model_load() and these will then be released on a call to\n @ref nn_context_model_unload().\n\n When a context is created an @ref NNEngine plugin may optionally be provided\n which will take over the management of tensors through the engine plugin and\n attempting to run models and operators on the compute device enabled by this\n engine plugin.  If an engine is not provided DeepViewRT will use the default\n implementation which is optimized for CPU and MCU devices."]
487pub type NNContext = nn_context;
488extern "C" {
489    #[doc = " DeepViewRT library version as \"MAJOR.MINOR.PATCH\".\n\n @return library version string\n\n @since 2.0"]
490    pub fn nn_version() -> *const ::std::os::raw::c_char;
491}
492extern "C" {
493    #[doc = " Returns the string associated with a given error.\n\n @see NNError\n\n @param error The NNError to be represented as a string.\n\n @return The string representation when the error is valid.\n @return NULL when the error is not valid.\n\n @since 2.0"]
494    pub fn nn_strerror(error: NNError) -> *const ::std::os::raw::c_char;
495}
496extern "C" {
497    #[doc = " Initializes the library with optional parameters.  This function _MUST_ be\n called before any others (though nn_version and nn_strerror are safe) and\n _MUST_ not be called again unless care is taken to protect this call.\n\n @note As of DeepViewRT 2.4.32 this function does not do anything except on\n RaspberryPi platforms.  This could change in the future so it is safer to\n call the function for future compatibility.\n\n @return NN_SUCCESS after successfully initializing the library.\n @return NN_ERROR_INTERNAL if the library fails to initialize.\n\n @since 2.4"]
498    pub fn nn_init(options: *const NNOptions) -> NNError;
499}
500extern "C" {
501    #[doc = " The actual size of the NNEngine structure.  This will differ from the size\n defined by @ref NN_ENGINE_SIZEOF as the later is padded for future API\n extensions while this function returns the actual size currently required.\n\n @return NNEngine structure size as reported by @ref sizeof().\n\n @public @memberof NNEngine\n @since 2.0"]
502    pub fn nn_engine_sizeof() -> usize;
503}
504extern "C" {
505    #[doc = " Initializes the NNEngine structure using the provided memory or allocating a\n new buffer is none was provided.\n\n When providing memory it must be at least the size returned by\n @ref nn_engine_sizeof() and for statically initiallized arrays the\n @ref NN_ENGINE_SIZEOF can be used instead which is padded for future API\n extensions.\n\n @note previous to version 2.4.32 the memory parameter is required otherwise\n NULL will always be returned and no engine structure is created.\n\n @param memory Pointer to the start of where a NNEngine object should be\n initialized.\n\n @return Pointer to the initialized NNEngine structure.\n @return NULL if memory was NULL and malloc using @ref nn_engine_size()\n  returns NULL.\n\n @public @memberof NNEngine\n @since 2.0"]
506    pub fn nn_engine_init(memory: *mut ::std::os::raw::c_void) -> *mut NNEngine;
507}
508extern "C" {
509    #[doc = " Returns handle of the NNEngine object.\n\n @param memory Pointer to the NNEngine structure\n\n @public @memberof NNEngine\n @since 2.0"]
510    pub fn nn_engine_native_handle(engine: *mut NNEngine) -> *mut ::std::os::raw::c_void;
511}
512extern "C" {
513    #[doc = " Releases the memory that was being used by the engine.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
514    pub fn nn_engine_release(engine: *mut NNEngine);
515}
516extern "C" {
517    #[doc = " Loads the plugin to provided engine object.  The plugin should point to an\n engine plugin library either as an absolute or relative path or be found in\n the standard OS search path for shared libraries.\n\n @param engine Pointer to the engine object.\n @param plugin String of the absolute or relative path to the plugin.\n\n @return NN_ERROR_INVALID ENGINE given the engine pointer is NULL or\n the plugin does not have the necessary functions.\n @return NN_ERROR_MISSING_RESOURCE given the plugin dll cannot be found.\n @return The error returned by the plugin's init function given a valid engine\n and dll.\n\n @public @memberof NNEngine\n @since 2.0"]
518    pub fn nn_engine_load(engine: *mut NNEngine, plugin: *const ::std::os::raw::c_char) -> NNError;
519}
520extern "C" {
521    #[doc = " Unloads the plugin from the given engine object.\n\n @param engine Pointer to the engine object.\n\n @return NN_ERROR_INVALID_ENGINE given the engine pointer is NULL.\n @return NN_ERROR_INTERNAL if the plugin dll could not be closed properly.\n @return The NNError from the plugin's cleanup function.\n\n @public @memberof NNEngine\n @since 2.0"]
522    pub fn nn_engine_unload(engine: *mut NNEngine);
523}
524extern "C" {
525    #[doc = " Returns the name of the engine object.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
526    pub fn nn_engine_name(engine: *mut NNEngine) -> *const ::std::os::raw::c_char;
527}
528extern "C" {
529    #[doc = " Returns the version of the engine object.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
530    pub fn nn_engine_version(engine: *mut NNEngine) -> *const ::std::os::raw::c_char;
531}
532extern "C" {
533    #[doc = " Returns the size of the tensor object for preparing memory allocations.\n\n @public @memberof NNTensor\n @since 2.0"]
534    pub fn nn_tensor_sizeof() -> usize;
535}
536extern "C" {
537    #[doc = " Initializes the tensor using provided memory.  The memory MUST be at least\n the size returned by @ref nn_tensor_sizeof().  This size does not include\n the actual tensor data which is allocated separately, either by requesting\n the implementation to allocate the buffer or attaching to externally\n allocated memory.\n\n The tensor created by this function has no data associated to it and is of\n rank-0.\n\n @param memory The pointer to be initialized to a NNTensor object.\n @param engine Pointer to the engine object.\n\n @return NULL given the memory pointer is a null pointer.\n @return Pointer to the newly created NNTensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
538    pub fn nn_tensor_init(
539        memory: *mut ::std::os::raw::c_void,
540        engine: *mut NNEngine,
541    ) -> *mut NNTensor;
542}
543extern "C" {
544    #[doc = " Releases the memory used by the tensor object.\n\n @param tensor Pointer to the tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
545    pub fn nn_tensor_release(tensor: *mut NNTensor);
546}
547extern "C" {
548    #[doc = " Returns the engine owning this tensor, could be NULL.\n\n @param tensor Pointer to the tensor object.\n\n @return Pointer to the engine object to which the tensor is associated.\n\n @public @memberof NNTensor\n @since 2.0"]
549    pub fn nn_tensor_engine(tensor: *mut NNTensor) -> *mut NNEngine;
550}
551extern "C" {
552    #[doc = " Returns the native handle of the tensor object.  This is an internal API for\n access internal structures.\n\n @param tensor Pointer to the tensor object.\n\n @private @memberof NNTensor\n @since 2.0"]
553    pub fn nn_tensor_native_handle(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
554}
555extern "C" {
556    #[doc = " Sets the tensor objects native handle to the one provided.\n\n @param tensor Pointer to the tensor object.\n @param handle Pointer to the handle object.\n\n @private @memberof NNTensor\n @since 2.0"]
557    pub fn nn_tensor_set_native_handle(tensor: *mut NNTensor, handle: *mut ::std::os::raw::c_void);
558}
559#[doc = " Callback function to free an auxiliary object, called from nn_tensor_release.\n\n @private @memberof NNTensor\n @since 2.1"]
560pub type nn_aux_object_free = ::std::option::Option<unsafe extern "C" fn(tensor: *mut NNTensor)>;
561extern "C" {
562    #[doc = " Configures an auxiliary object for the tensor.  This is a private API used\n for attaching auxiliary buffers.\n\n @private @memberof NNTensor\n @since 2.1"]
563    pub fn nn_tensor_set_aux_object(
564        tensor: *mut NNTensor,
565        aux_object: *mut ::std::os::raw::c_void,
566        aux_object_free: nn_aux_object_free,
567    );
568}
569extern "C" {
570    #[doc = " Returns the auxiliary object for the tensor, or NULL if none is attached.\n\n @private @memberof NNTensor\n @since 2.1"]
571    pub fn nn_tensor_aux_object(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
572}
573extern "C" {
574    #[doc = " Returns the auxiliary object's free function, or NULL if none is attached.\n\n @private @memberof NNTensor\n @since 2.3"]
575    pub fn nn_tensor_aux_free(tensor: *mut NNTensor) -> nn_aux_object_free;
576}
577extern "C" {
578    #[doc = " Extended version of the auxiliary object API which allows additional objects\n to be attached to the tensor using name-based indexing.\n\n @private @memberof NNTensor\n @since 2.4"]
579    pub fn nn_tensor_set_aux_object_by_name(
580        tensor: *mut NNTensor,
581        name: *const ::std::os::raw::c_char,
582        aux_object: *mut ::std::os::raw::c_void,
583        aux_object_free: nn_aux_object_free,
584        buffer_ownership: bool,
585        name_ownership: bool,
586    );
587}
588extern "C" {
589    #[doc = " Acquire the auxiliary object associated with the given name parameter.\n\n @private @memberof NNTensor\n @since 2.4"]
590    pub fn nn_tensor_aux_object_by_name(
591        tensor: *mut NNTensor,
592        name: *const ::std::os::raw::c_char,
593    ) -> *mut ::std::os::raw::c_void;
594}
595extern "C" {
596    #[doc = " Frees the auxiliary object associated with the given name parameter.\n\n @private @memberof NNTensor\n @since 2.4"]
597    pub fn nn_tensor_aux_free_by_name(
598        tensor: *mut NNTensor,
599        name: *const ::std::os::raw::c_char,
600    ) -> nn_aux_object_free;
601}
602extern "C" {
603    #[doc = " Retrieves the panel size of the tensor when it has been panel-shuffled for\n improved tiling performance.  The panel size is the vectorization length.\n\n @private @memberof NNTensor\n @since 2.4"]
604    pub fn nn_tensor_panel_size(tensor: *mut NNTensor) -> ::std::os::raw::c_int;
605}
606extern "C" {
607    #[doc = " Sets the panel size of the tensor.  This is primarily an internal API used\n to store the vectorization length when shuffling tensors into an optimized\n tile format.\n\n @private @memberof NNTensor\n @since 2.4"]
608    pub fn nn_tensor_set_panel_size(tensor: *mut NNTensor, panel_size: ::std::os::raw::c_int);
609}
610extern "C" {
611    #[doc = " Synchronize the tensor and all preceeding events in the chain.\n\n This is used for engines which may not immediately evaluate tensor operations\n but instead pass events around, this call will synchronize the event chain\n leading to this tensor.\n\n @param tensor Pointer to the tensor object.\n\n @return NN_SUCCESS if the sync was successful or ignored by engines which do\n not implement this API.\n\n @public @memberof NNTensor\n @since 2.0"]
612    pub fn nn_tensor_sync(tensor: *mut NNTensor) -> NNError;
613}
614extern "C" {
615    #[doc = " Returns the time information stored in the tensor.  The time is returned\n in nanoseconds of the duration of the last operation the wrote into this\n tensor.  causes a nn_tensor_sync on the target tensor.\n\n This is used for measuring the time an operation takes by capturing the time\n the operation took into the destination tensor of the operation.  The time\n is not the time it takes to write to the tensor, this is captured by the\n @ref nn_tensor_io_time() function, but the time it took the operation to\n complete (not including map/unmap times).\n\n @param tensor Pointer to the tensor object.\n\n @return Nanoseconds of processing time for the last operation which wrote\n into this tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
616    pub fn nn_tensor_time(tensor: *mut NNTensor) -> i64;
617}
618extern "C" {
619    #[doc = " Returns the I/O time information stored in the tensor.  The time is returned\n in nanoseconds of the duration of the last map/unmap pair.  When tensors are\n mapped to the CPU (no accelerator engine is loaded) then times are expected\n to be zero time as no mapping is actually required and the internal pointer\n is simply returned.  When an accelerator engine is used, such as OpenVX,\n then the io_time measures the time the map/unmap or copy operations took to\n complete.\n\n @param tensor Pointer to the tensor object.\n\n @return Nanoseconds of time spent in the map/unmap calls.\n\n @public @memberof NNTensor\n @since 2.1"]
620    pub fn nn_tensor_io_time(tensor: *mut NNTensor) -> i64;
621}
622extern "C" {
623    #[doc = " Writes the  tensor inforamtion to the FILE stream provided.  The format is\n \"[D0 D1 D2 D3]\" where D0..D3 are the dimensions provided.  If the data\n parameter is true the format will be followed by \": ...\" where ... is the\n string representation of the tensor's data.\n\n @warning Before version 2.4.32 this function always assumes float32 tensors\n and will therefore lead to segmentation faults when used with integer\n tensors.\n\n @param out Pointer to the FILE stream where the tensor will be written to.\n @param tensor Pointer to the tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
624    pub fn nn_tensor_printf(tensor: *mut NNTensor, data: bool, out: *mut FILE);
625}
626extern "C" {
627    #[doc = " Assigns the tensor parameters and optionally data pointer.  The default\n implementation uses the data buffer as the internal storage for tensor data\n and it MUST outlive the tensor.  For engine plugins they may choose how to\n use the data but for the OpenCL example if data is provided it will be copied\n into the OpenCL buffer then otherwise never used again.  If NULL is provided\n for data the OpenCL engine would create the memory and leave it unassigned.\n\n If using the default implementation and leaving data NULL then all operations\n which require data will fail.  The most dynamic tensor setup with optional\n data would be to call assign to setup the parameters with NULL data, then\n calling @ref nn_tensor_native_handle to see if one was created, if not the\n data buffer can be malloc'ed followed by a call to @ref\n nn_tensor_set_native_handle with this buffer.  One could also call\n nn_tensor_assign a second time with data set to the malloc'ed data.\n\n @param tensor Pointer to the given tensor object.\n @param type The data type that the tensor is storing (The type of the\n provided data).\n @param n_dims The number of dimensions in the provided tensor.\n @param shape The shape of the given tensor.\n @param data The new tensor data to be placed within the tensor provided.\n\n @public @memberof NNTensor\n @since 2.0"]
628    pub fn nn_tensor_assign(
629        tensor: *mut NNTensor,
630        type_: NNTensorType,
631        n_dims: i32,
632        shape: *const i32,
633        data: *mut ::std::os::raw::c_void,
634    ) -> NNError;
635}
636extern "C" {
637    #[doc = " Maps the tensor using the memory from the parent tensor.\n\n @param tensor Pointer to the tensor object where the view will be stored.\n @param type The data type that the tensor is storing.\n @param n_dims The number of dimensions in the provided tensor.\n @param shape The shape of the given tensor.\n @param parent Pointer to the tensor object that holds the original tensor.\n @param offset TO BE DETERMINED.\n\n @public @memberof NNTensor\n @since 2.0"]
638    pub fn nn_tensor_view(
639        tensor: *mut NNTensor,
640        type_: NNTensorType,
641        n_dims: i32,
642        shape: *const i32,
643        parent: *mut NNTensor,
644        offset: i32,
645    ) -> NNError;
646}
647extern "C" {
648    #[doc = " Allocates the internal memory for the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
649    pub fn nn_tensor_alloc(
650        tensor: *mut NNTensor,
651        type_: NNTensorType,
652        n_dims: i32,
653        shape: *const i32,
654    ) -> NNError;
655}
656extern "C" {
657    #[doc = " Returns the shape of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
658    pub fn nn_tensor_shape(tensor: *const NNTensor) -> *const i32;
659}
660extern "C" {
661    #[doc = " Returns the strides of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
662    pub fn nn_tensor_strides(tensor: *const NNTensor) -> *const i32;
663}
664extern "C" {
665    #[doc = " Returns the number of dimensions of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
666    pub fn nn_tensor_dims(tensor: *const NNTensor) -> i32;
667}
668extern "C" {
669    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the read-only version which causes the engine to download buffers to the CPU\n memory space if required but will not flush back to the device on unmap.\n\n If the tensor is already mapped read-only or read-write a pointer is returned\n and the reference count increased, if it was already mapped write-only NULL\n is returned.\n\n @public @memberof NNTensor\n @since 2.0"]
670    pub fn nn_tensor_mapro(tensor: *mut NNTensor) -> *const ::std::os::raw::c_void;
671}
672extern "C" {
673    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the read-write version which causes the engine to download buffers to the CPU\n memory space if required and will also flush back to the device on unmap.\n\n If the tensor is already mapped read-only it needs to be unmapped before\n calling maprw otherwise NULL is returned.  A tensor already mapped as rw will\n simply increase the reference count.  A write-only mapped tensor will also\n return NULL.\n\n @public @memberof NNTensor\n @since 2.0"]
674    pub fn nn_tensor_maprw(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
675}
676extern "C" {
677    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the write-only version which will not cause a download of the buffers to the\n CPU memory space on map but will upload to the device on unmap.\n\n If the tensor is already mapped write-only or read-write a pointer is\n returned and the reference count increased.  If it was previously mapped as\n read-only NULL is returned.\n\n @public @memberof NNTensor\n @since 2.0"]
678    pub fn nn_tensor_mapwo(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
679}
680extern "C" {
681    #[doc = " Returns the tensor's mapping count, 0 means the tensor is unmapped.\n\n @public @memberof NNTensor\n @since 2.0"]
682    pub fn nn_tensor_mapped(tensor: *const NNTensor) -> ::std::os::raw::c_int;
683}
684extern "C" {
685    #[doc = " Releases the tensor mapping, if the reference count reaches 0 it will be\n fully unmapped and will force the flush to the device, if required.\n\n @public @memberof NNTensor\n @since 2.0"]
686    pub fn nn_tensor_unmap(tensor: *mut NNTensor);
687}
688extern "C" {
689    #[doc = " Returns the type of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
690    pub fn nn_tensor_type(tensor: *const NNTensor) -> NNTensorType;
691}
692extern "C" {
693    #[doc = " Sets the type of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.4"]
694    pub fn nn_tensor_set_type(tensor: *mut NNTensor, type_: NNTensorType) -> NNError;
695}
696extern "C" {
697    #[doc = " Returns the element size of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
698    pub fn nn_tensor_element_size(tensor: *const NNTensor) -> usize;
699}
700extern "C" {
701    #[doc = " Calculates the total tensor volume (product of dimensions).\n\n @public @memberof NNTensor\n @since 2.0"]
702    pub fn nn_tensor_volume(tensor: *const NNTensor) -> i32;
703}
704extern "C" {
705    #[doc = " Calculates the total byte size of the tensor (volume * element_size).\n\n @public @memberof NNTensor\n @since 2.0"]
706    pub fn nn_tensor_size(tensor: *const NNTensor) -> i32;
707}
708extern "C" {
709    #[doc = " Returns the natural data axis of the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
710    pub fn nn_tensor_axis(tensor: *const NNTensor) -> ::std::os::raw::c_char;
711}
712extern "C" {
713    #[doc = " Returns the zero-points for the tensor and optionally the number of\n zero-points.\n\n @public @memberof NNTensor\n @since 2.4"]
714    pub fn nn_tensor_zeros(tensor: *const NNTensor, n_zeros: *mut usize) -> *const i32;
715}
716extern "C" {
717    #[doc = " Sets the quantization zero-points for the tensor.  If n_zeros>1 it should\n match the channel dimension (axis) of the tensor.\n\n If own=1 then the tensor will take ownership of the buffer and free it when\n the tensor is released.  Otherwise the buffer must outlive the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
718    pub fn nn_tensor_set_zeros(
719        tensor: *mut NNTensor,
720        n_zeros: usize,
721        zeros: *const i32,
722        own: ::std::os::raw::c_int,
723    );
724}
725extern "C" {
726    #[doc = " Configures the channel axis of the tensor.  This refers to the \"C\" in\n orderings such as NHWC and NCHW.\n\n @public @memberof NNTensor\n @since 2.0"]
727    pub fn nn_tensor_set_axis(tensor: *mut NNTensor, axis: i32);
728}
729extern "C" {
730    #[doc = " Returns the scales array for the tensor and optionally the number of scales.\n\n @public @memberof NNTensor\n @since 2.4"]
731    pub fn nn_tensor_scales(tensor: *const NNTensor, n_scales: *mut usize) -> *const f32;
732}
733extern "C" {
734    #[doc = " Internal API used by the RTM loader to associate quantization parameters to\n the tensor.\n\n @private @memberof NNTensor\n @since 2.4"]
735    pub fn nn_tensor_quant_params(tensor: *const NNTensor, quant_params: *mut NNQuantParam);
736}
737extern "C" {
738    #[doc = " Sets the quantization scales for the tensor.  If n_scales>1 it should match\n the channel dimension (axis) of the tensor.\n\n If own=1 then the tensor will take ownership of the buffer and free it when\n the tensor is released.  Otherwise the buffer must outlive the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
739    pub fn nn_tensor_set_scales(
740        tensor: *mut NNTensor,
741        n_scales: usize,
742        scales: *const f32,
743        own: ::std::os::raw::c_int,
744    );
745}
746extern "C" {
747    #[doc = " Returns the quantization type for the tensor.\n\n @note This API was missing before version 2.4.32 and instead the\n quantization format is inferred as affine when scales and zeros are provided\n and per-tensor vs. per-channel is inferred based on scales/zeros being 1 or\n greater than 1.\n\n @param tensor the tensor object used to query quantization type.\n\n @returns @ref NNQuantizationType_None for tensors which do not provide\n  quantization parameters.\n @returns @ref NNQuantizationType_Affine_PerTensor for tensors which provide\n  quantization parameters which map globally to the tensor.\n @returns @ref NNQuantizationType_Affine_PerChannel for tensors which provide\n  quantization parameters which map to each channel \"C\" of the tensor.\n @returns @ref NNQuantizationType_DFP for tensors which provide DFP\n  parameters.  Currently unsupported.\n\n @public @memberof NNTensor\n @since 2.4.32"]
748    pub fn nn_tensor_quantization_type(tensor: *mut NNTensor) -> NNQuantizationType;
749}
750extern "C" {
751    #[doc = " Tensor shape comparison.\n\n @returns true if both shapes are equal otherwise false.\n\n @since 2.0\n @deprecated 2.3"]
752    pub fn nn_tensor_shape_equal(left: *const i32, right: *const i32) -> bool;
753}
754extern "C" {
755    #[doc = " Copys the source shape array to the destination array.\n\n @since 2.0\n @deprecated 2.3"]
756    pub fn nn_tensor_shape_copy(dst: *mut i32, src: *const i32);
757}
758extern "C" {
759    #[doc = " Returns the offset of a given tensor.  This function can be used to calculate\n the index across numerous dimensions.\n\n @note Avoid using this function as part of inner loops as it requires a\n multiply and add for each dimenions.  Instead it can be used in an outer loop\n to get the starting index then increment this index in the inner loop,\n possibly using the tensor strides.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions provided in the @p shape\n @param shape the multi-dimensional index used to calculate the linear index\n\n @return the element index into the tensor based on the muliple dimenional\n indices provided.\n\n @public @memberof NNTensor\n @since 2.0"]
760    pub fn nn_tensor_offset(
761        tensor: *const NNTensor,
762        n_dims: i32,
763        shape: *const i32,
764    ) -> ::std::os::raw::c_int;
765}
766extern "C" {
767    #[doc = " Returns the offset of a given tensor using variable length dimensions. This\n works the same as @ref nn_tensor_offset() but uses variable arguments. The\n user **must** provide @p n_dims number of parameters after the\n @p n_dims parameter.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions to use when calculating the index\n @param … variable number of shape elements which **must** be of type int32_t\n\n @return the element index into the tensor based on the muliple dimenional\n indices provided.\n\n @public @memberof NNTensor\n @since 2.0"]
768    pub fn nn_tensor_offsetv(tensor: *const NNTensor, n_dims: i32, ...) -> ::std::os::raw::c_int;
769}
770extern "C" {
771    #[doc = " Element-wise comparison of two tensors within a given tolerance, returning\n total number of errors relative to the left tensor.  If the two tensors are\n incompatible the volume of the left tensor is returned (all elements\n invalid).\n\n @public @memberof NNTensor\n @since 2.0\n @deprecated 2.3"]
772    pub fn nn_tensor_compare(
773        left: *mut NNTensor,
774        right: *mut NNTensor,
775        tolerance: f64,
776    ) -> ::std::os::raw::c_int;
777}
778extern "C" {
779    #[doc = " Reshapes the given tensor to the provided new shape.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions which the tensor will contain after\n the operation completes successfully.  It must also match the number of\n elements in @p shape.\n @param shape the new shape for the tensor.  The array must be at least\n @p n_dims elements in size.\n\n @return @ref NN_SUCCESS if the reshape is able to be performed\n @return @ref NN_ERROR_SHAPE_MISMATCH if the new shape cannot be represented\n given the previous shape of the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
780    pub fn nn_tensor_reshape(tensor: *mut NNTensor, n_dims: i32, shape: *const i32) -> NNError;
781}
782extern "C" {
783    #[doc = " Shuffles (transpose) the tensor moving the current dimensions into the\n ordering defined in the order parameter.\n\n For example a traditional matrix transpose is done using order[] = { 1, 0 }\n in other words, the 0 dimension of the output references the 1 dimension of\n the input and the 1 dimension of the output references the 0 dimension of the\n input.\n\n Another example would be shuffling an NCHW tensor to NHWC using order[] = {\n 0, 2, 3, 1 }\n\n @public @memberof NNTensor\n @since 2.0"]
784    pub fn nn_tensor_shuffle(
785        output: *mut NNTensor,
786        input: *mut NNTensor,
787        n_dims: i32,
788        order: *const i32,
789    ) -> NNError;
790}
791extern "C" {
792    #[doc = " Fills the tensor with the provided constant.  The constant is captured\n as double precision (64-bit floating point) which has 53-bits of precision\n on whole numbers.  This means the constant CANNOT represent all 64-bit\n integers but it CAN represent all 32-bit and lower integers.  If full\n 64-bit integer support is required @ref nn_tensor_map can be used though\n it is less efficient with some engines because of the addition memory\n transfer required.\n\n The double will be cast appropriately to the target tensor's type before\n filling the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
793    pub fn nn_tensor_fill(tensor: *mut NNTensor, constant: f64) -> NNError;
794}
795extern "C" {
796    #[doc = " Randomizes the data within the tensor.\n\n @public @memberof NNTensor\n @since 2.0\n @deprecated 2.3"]
797    pub fn nn_tensor_randomize(tensor: *mut NNTensor) -> NNError;
798}
799extern "C" {
800    #[doc = " Copies the contents of source tensor into destination tensor.\n\n This operation only copies the data and does not affect the\n destination tensor's properties.  The destination tensor must\n have an equal or larger volume.  If required data will be converted.\n\n @public @memberof NNTensor\n @since 2.0"]
801    pub fn nn_tensor_copy(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
802}
803extern "C" {
804    #[doc = " Loads a tensor with data from a user buffer\n User has to maintain the buffer and ensure compatibility with NHWC tensor\n Function will return error if there is a size mismatch\n i.e (bufsize != nn_tensor_size(tensor)) or tensor is invalid\n\n @public @memberof NNTensor\n @since 2.4"]
805    pub fn nn_tensor_copy_buffer(
806        tensor: *mut NNTensor,
807        buffer: *const ::std::os::raw::c_void,
808        bufsize: usize,
809    ) -> NNError;
810}
811extern "C" {
812    #[doc = " Requantizes the source tensor into the destination tensor.\n\n The source tensor and destination tensor should be either I8 or U8, and\n per tensor quantized.\n\n @public @memberof NNTensor\n @since 2.4"]
813    pub fn nn_tensor_requantize(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
814}
815extern "C" {
816    #[doc = " Quantizes the source tensor into the destination tensor.\n\n The source tensor should be float and the destination integer.  If the\n destination tensor does not have quantization parameters they will be\n calculated from the source tensor and stored into the destination tensor.\n\n When calculating the quantization parameters if axis is a valid axis* then\n per-channel quantization will be performed along the axis, otherwise\n per-tensor quantization will be performed.  If the destination tensor has\n quantization parameters axis is ignored.\n\n Valid Axis: (axis > 0 && axis < n_dims)\n\n @public @memberof NNTensor\n @since 2.4"]
817    pub fn nn_tensor_quantize(
818        dest: *mut NNTensor,
819        source: *mut NNTensor,
820        axis: ::std::os::raw::c_int,
821    ) -> NNError;
822}
823extern "C" {
824    #[doc = " Quantizes the source buffer into the destination tensor.\n\n The source tensor should be float and the destination integer.  If the\n destination tensor does not have quantization parameters they will be\n calculated from the source buffer and stored into the destination tensor.\n\n When calculating the quantization parameters if axis is a valid axis* then\n per-channel quantization will be performed along the axis, otherwise\n per-tensor quantization will be performed. If the destination tensor has\n quantization parameters axis is ignored.\n\n Valid Axis: (axis > 0 && axis < n_dims)\n\n @public @memberof NNTensor\n @since 2.4"]
825    pub fn nn_tensor_quantize_buffer(
826        dest: *mut NNTensor,
827        buffer_length: usize,
828        buffer: *const f32,
829        axis: ::std::os::raw::c_int,
830    ) -> NNError;
831}
832extern "C" {
833    #[doc = " De-quantizes the source tensor into the destination tensor.\n\n The source tensor should be integer and the destination float.  The source\n tensor must have quantization parameters otherwise the operation will simply\n cast the integer data to float.\n\n @public @memberof NNTensor\n @since 2.4"]
834    pub fn nn_tensor_dequantize(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
835}
836extern "C" {
837    #[doc = " De-quantizes the source tensor into the destination buffer.\n\n The source tensor should be integer and the destination float.  The source\n tensor must have quantization parameters otherwise the operation will simply\n cast the integer data to float.\n\n The buffer must be at least buffer_length*sizeof(float) size in bytes.\n\n @public @memberof NNTensor\n @since 2.4"]
838    pub fn nn_tensor_dequantize_buffer(
839        source: *mut NNTensor,
840        buffer_length: usize,
841        buffer: *mut f32,
842    ) -> NNError;
843}
844extern "C" {
845    #[doc = " nn_tensor_concat concatenates all of the given input tensors into\n the given output tensor.\n\n @output pointer to the output tensor\n @inputs list of pointers to the input tensors\n @n_inputs the number of inputs\n @axis the axis along which to concatenate the inputs\n\n @public @memberof NNTensor\n @since 2.0"]
846    pub fn nn_tensor_concat(
847        output: *mut NNTensor,
848        n_inputs: i32,
849        inputs: *mut *mut NNTensor,
850        axis: i32,
851    ) -> NNError;
852}
853extern "C" {
854    #[doc = " nn_tensor_slice copies a slice of the tensor into output. For a version which\n supports strides see @ref nn_tensor_strided_slice.\n\n The axes, head, and tail must be of length n_axes or NULL.  Calling slice\n with axes==NULL will ignore head/tail and is effectively @ref nn_tensor_copy.\n\n When head is NULL all axes are assumed to start at 0.  When tail is NULL all\n axes are assumed to end at (len(axis) - head) for the given axis.\n\n @public @memberof NNTensor\n @since 2.0"]
855    pub fn nn_tensor_slice(
856        output: *mut NNTensor,
857        input: *mut NNTensor,
858        n_axes: i32,
859        axes: *const i32,
860        head: *const i32,
861        tail: *const i32,
862    ) -> NNError;
863}
864extern "C" {
865    pub fn nn_tensor_strided_slice(
866        output: *mut NNTensor,
867        input: *mut NNTensor,
868        n_axes: i32,
869        axes: *const i32,
870        head_: *const i32,
871        tail_: *const i32,
872        strides_: *const i32,
873    ) -> NNError;
874}
875extern "C" {
876    #[doc = " nn_tensor_padding calculates the paddings for the given tensor, padtype,\n window, stride, and dilation given n_dims being queried from the tensor's\n nn_tensor_dims().\n\n The paddings pointer must point to an array of 2 * n_dims elements into which\n the function will write the head/tail padding tuples for each of the n_dims\n provided dimensions.  The padded_shape parameter must point to an array of\n n_dims elemens which will receive the output (padded) shape.\n\n The padtype can be \"VALID\" or \"SAME\".  When padtype is \"SAME\" padded_shape\n will equal the shape of the input tensor and the paddings will be provided to\n achieve this shape.  When padtype is \"VALID\" then paddings will be all zeros\n and the padded_shape will provide the target output shape given the provided\n parameters.\n\n @public @memberof NNTensor\n @since 2.3"]
877    pub fn nn_tensor_padding(
878        tensor: *mut NNTensor,
879        padtype: *const ::std::os::raw::c_char,
880        window: *const i32,
881        stride: *const i32,
882        dilation: *const i32,
883        padded_shape: *mut i32,
884        paddings: *mut i32,
885    ) -> NNError;
886}
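// Illustrative sketch: query "SAME" padding for a 4-D (NHWC) tensor.  Per the
// documentation above, paddings receives 2 * n_dims head/tail values and
// padded_shape receives n_dims values; n_dims == 4 is assumed here.
pub unsafe fn same_padding_nhwc(
    tensor: *mut NNTensor,
    window: &[i32; 4],
    stride: &[i32; 4],
    dilation: &[i32; 4],
) -> ([i32; 4], [i32; 8], NNError) {
    let padtype = b"SAME\0"; // NUL-terminated padtype string
    let mut padded_shape = [0i32; 4];
    let mut paddings = [0i32; 8];
    let err = nn_tensor_padding(
        tensor,
        padtype.as_ptr() as *const ::std::os::raw::c_char,
        window.as_ptr(),
        stride.as_ptr(),
        dilation.as_ptr(),
        padded_shape.as_mut_ptr(),
        paddings.as_mut_ptr(),
    );
    (padded_shape, paddings, err)
}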
887extern "C" {
888    #[doc = " nn_tensor_pad implements a padded Tensor to Tensor copy.  This can be used to\n achieve the various convolution padding strategies (SAME, FULL).  For example\n SAME conv2d would use the following padded_copy before running the conv2d\n layer.\n\n output_shape = { input_shape[0],\n                  int(ceil(float(input_shape[1]) / strides[1])),\n                  int(ceil(float(input_shape[2]) / strides[2])),\n                  weights_shape[3] };\n\n pad_height = (output_shape[1] - 1) * strides[1] + weights_shape[0] - input_shape[1];\n pad_width  = (output_shape[2] - 1) * strides[2] + weights_shape[1] - input_shape[2];\n\n @output pointer to the output tensor\n @input pointer to the input tensor\n @head lead-in pad length for each dimension (NHWC)\n @tail lead-out pad length for each dimension (NHWC)\n @constant the constant value used to fill the padded region\n\n @public @memberof NNTensor\n @since 2.0"]
889    pub fn nn_tensor_pad(
890        output: *mut NNTensor,
891        input: *mut NNTensor,
892        head: *const i32,
893        tail: *const i32,
894        constant: f64,
895    ) -> NNError;
896}
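// Illustrative sketch of the SAME-padding arithmetic quoted in the comment
// above, written as plain Rust.  Splitting each total evenly into head/tail is
// an assumption of this example; the comment only defines the pad_height and
// pad_width totals.  weights_shape is indexed as in the comment above
// (weights_shape[0] = kernel height, weights_shape[1] = kernel width).
pub fn same_pad_head_tail(
    input_shape: [i32; 4], // NHWC input shape
    weights_shape: [i32; 4],
    strides: [i32; 4],
) -> ([i32; 4], [i32; 4]) {
    let out_h = (input_shape[1] as f32 / strides[1] as f32).ceil() as i32;
    let out_w = (input_shape[2] as f32 / strides[2] as f32).ceil() as i32;
    let pad_h = ((out_h - 1) * strides[1] + weights_shape[0] - input_shape[1]).max(0);
    let pad_w = ((out_w - 1) * strides[2] + weights_shape[1] - input_shape[2]).max(0);
    // head/tail as expected by nn_tensor_pad, one entry per NHWC dimension.
    let head = [0, pad_h / 2, pad_w / 2, 0];
    let tail = [0, pad_h - pad_h / 2, pad_w - pad_w / 2, 0];
    (head, tail)
}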
897extern "C" {
898    #[doc = " Loads an image from file into the provided tensor.\n\n @public @memberof NNTensor\n @since 2.2\n @deprecated 2.3"]
899    pub fn nn_tensor_load_file(
900        tensor: *mut NNTensor,
901        filename: *const ::std::os::raw::c_char,
902    ) -> NNError;
903}
904extern "C" {
905    #[doc = " Loads an image from file into the provided tensor.\n\n @public @memberof NNTensor\n @since 2.2\n @deprecated 2.3"]
906    pub fn nn_tensor_load_file_ex(
907        tensor: *mut NNTensor,
908        filename: *const ::std::os::raw::c_char,
909        proc_: u32,
910    ) -> NNError;
911}
912extern "C" {
913    #[doc = " Loads an image from the provided buffer and decodes it accordingly; the\n function uses the image's headers to find an appropriate decoder.  The\n function will handle any required casting to the target tensor's format.\n\n @public @memberof NNTensor\n @since 2.0"]
914    pub fn nn_tensor_load_image(
915        tensor: *mut NNTensor,
916        image: *const ::std::os::raw::c_void,
917        image_size: usize,
918    ) -> NNError;
919}
920extern "C" {
921    #[doc = " Loads an image from the provided buffer and decodes it accordingly; the\n function uses the image's headers to find an appropriate decoder.  The\n function will handle any required casting to the target tensor's format and\n will apply image standardization (compatible with tensorflow's\n tf.image.per_image_standardization) if the proc parameter is set to\n NN_IMAGE_PROC_WHITENING.\n\n When called with proc==0 it is the same as nn_tensor_load_image().\n\n NN_IMAGE_PROC_UNSIGNED_NORM\n NN_IMAGE_PROC_WHITENING_NORM\n NN_IMAGE_PROC_SIGNED_NORM\n\n @public @memberof NNTensor\n @since 2.1"]
922    pub fn nn_tensor_load_image_ex(
923        tensor: *mut NNTensor,
924        image: *const ::std::os::raw::c_void,
925        image_size: usize,
926        proc_: u32,
927    ) -> NNError;
928}
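// Illustrative sketch: read an encoded image from disk with std::fs and decode
// it into the tensor.  Passing proc_ == 0 behaves like nn_tensor_load_image()
// per the documentation above; the file-path handling here is an assumption of
// this example, not part of the library API.
pub unsafe fn load_image_file_into_tensor(
    tensor: *mut NNTensor,
    path: &str,
    proc_: u32,
) -> ::std::io::Result<NNError> {
    let bytes = ::std::fs::read(path)?;
    Ok(nn_tensor_load_image_ex(
        tensor,
        bytes.as_ptr() as *const ::std::os::raw::c_void,
        bytes.len(),
        proc_,
    ))
}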
929extern "C" {
930    #[doc = " Attempts to validate the model; this is automatically called by nn_model_load\n and nn_model_mmap.  The function returns 0 on success; otherwise it returns\n an error code which can be turned into a string by calling\n @ref nn_model_validate_error() with the return value from\n @ref nn_model_validate().\n\n @public @memberof NNModel\n @since 2.0"]
931    pub fn nn_model_validate(memory: *const NNModel, size: usize) -> ::std::os::raw::c_int;
932}
933extern "C" {
934    #[doc = " Returns the string associated with a given error returned from\n @ref nn_model_validate().\n\n @public @memberof NNModel\n @since 2.0"]
935    pub fn nn_model_validate_error(err: ::std::os::raw::c_int) -> *const ::std::os::raw::c_char;
936}
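// Illustrative sketch: validate an in-memory model blob and, on failure,
// resolve the error code to a printable message with nn_model_validate_error.
pub unsafe fn validate_model_blob(blob: &[u8]) -> Result<(), String> {
    let code = nn_model_validate(blob.as_ptr() as *const NNModel, blob.len());
    if code == 0 {
        return Ok(()); // 0 means the model validated successfully
    }
    let msg = nn_model_validate_error(code);
    if msg.is_null() {
        Err(format!("model validation failed with code {}", code))
    } else {
        Err(::std::ffi::CStr::from_ptr(msg).to_string_lossy().into_owned())
    }
}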
937extern "C" {
938    #[doc = " Returns the name of the given model object.  Names are optional and if the\n model does not contain a name then NULL will be returned.\n\n @public @memberof NNModel\n @since 2.0"]
939    pub fn nn_model_name(model: *const NNModel) -> *const ::std::os::raw::c_char;
940}
941extern "C" {
942    #[doc = " Currently returns NULL (UPDATE WHEN FUNCTION IS UPDATED)\n\n @public @memberof NNModel\n @since 2.0"]
943    pub fn nn_model_uuid(model: *const NNModel) -> *const ::std::os::raw::c_char;
944}
945extern "C" {
946    #[doc = " Currently returns 0\n\n @public @memberof NNModel\n @since 2.0"]
947    pub fn nn_model_serial(model: *const NNModel) -> u32;
948}
949extern "C" {
950    #[doc = " Returns the number of labels within a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
951    pub fn nn_model_label_count(model: *const NNModel) -> ::std::os::raw::c_int;
952}
953extern "C" {
954    #[doc = " Returns the label of the given index within the given model object.  If the\n model contains no labels or the index is out of range then NULL will be\n returned.\n\n @public @memberof NNModel\n @since 2.0"]
955    pub fn nn_model_label(
956        model: *const NNModel,
957        index: ::std::os::raw::c_int,
958    ) -> *const ::std::os::raw::c_char;
959}
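// Illustrative sketch: collect all labels from a model into owned Rust
// strings, skipping indices for which the model returns NULL.
pub unsafe fn model_labels(model: *const NNModel) -> Vec<String> {
    let count = nn_model_label_count(model);
    let mut labels = Vec::new();
    for index in 0..count {
        let label = nn_model_label(model, index);
        if !label.is_null() {
            labels.push(
                ::std::ffi::CStr::from_ptr(label)
                    .to_string_lossy()
                    .into_owned(),
            );
        }
    }
    labels
}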
960extern "C" {
961    #[doc = " Returns an optional icon resource for the provided label index.\n\n @public @memberof NNModel\n @since 2.0"]
962    pub fn nn_model_label_icon(
963        model: *const NNModel,
964        index: ::std::os::raw::c_int,
965        size: *mut usize,
966    ) -> *const u8;
967}
968extern "C" {
969    #[doc = " Returns the list of model input indices and optionally the number of inputs.\n\n If the field is missing from the model NULL is returned.\n\n @public @memberof NNModel\n @since 2.4"]
970    pub fn nn_model_inputs(model: *const NNModel, n_inputs: *mut usize) -> *const u32;
971}
972extern "C" {
973    #[doc = " Returns the list of model output indices and optionally the number of\n outputs.\n\n If the field is missing from the model NULL is returned.\n\n @public @memberof NNModel\n @since 2.4"]
974    pub fn nn_model_outputs(model: *const NNModel, n_outputs: *mut usize) -> *const u32;
975}
976extern "C" {
977    #[doc = " Returns the number of layers within a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
978    pub fn nn_model_layer_count(model: *const NNModel) -> usize;
979}
980extern "C" {
981    #[doc = " Returns the name of a layer at a given index within the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
982    pub fn nn_model_layer_name(
983        model: *const NNModel,
984        index: usize,
985    ) -> *const ::std::os::raw::c_char;
986}
987extern "C" {
988    #[doc = " Returns the index of a given layer with the name provided in the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
989    pub fn nn_model_layer_lookup(
990        model: *const NNModel,
991        name: *const ::std::os::raw::c_char,
992    ) -> ::std::os::raw::c_int;
993}
994extern "C" {
995    #[doc = " Returns the type of a layer at the given index within the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
996    pub fn nn_model_layer_type(
997        model: *const NNModel,
998        index: usize,
999    ) -> *const ::std::os::raw::c_char;
1000}
1001extern "C" {
1002    #[doc = " Returns the type ID of the layer.\n\n @public @memberof NNModel\n @since 2.4"]
1003    pub fn nn_model_layer_type_id(model: *const NNModel, index: usize) -> i16;
1004}
1005extern "C" {
1006    #[doc = " Returns the datatype of a layer at the given index within the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
1007    pub fn nn_model_layer_datatype(
1008        model: *const NNModel,
1009        index: usize,
1010    ) -> *const ::std::os::raw::c_char;
1011}
1012extern "C" {
1013    #[doc = " Returns the datatype ID (as an NNTensorType) of a layer at the given index\n within the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
1014    pub fn nn_model_layer_datatype_id(model: *const NNModel, index: usize) -> NNTensorType;
1015}
1016extern "C" {
1017    #[doc = " Returns the array of quantization zero-points, and optionally the number of\n zero-points in the array.  The length will either be 0, 1, or equal to the\n number of channels in an NHWC/NCHW tensor.\n\n The channel axis can be queried using @ref nn_model_layer_axis().\n\n If no quantization parameters are available then n_zeros will be 0.\n If the tensor is quantized using full tensor quantization n_zeros will be 1.\n If the tensor is quantized using per-channel quantization n_zeros will be C\n which will equal the channel dimension of the tensor.  For an NHWC tensor it\n would equal shape[3].\n\n @public @memberof NNModel\n @since 2.4"]
1018    pub fn nn_model_layer_zeros(
1019        model: *const NNModel,
1020        index: usize,
1021        n_zeros: *mut usize,
1022    ) -> *const i32;
1023}
1024extern "C" {
1025    #[doc = " Returns the array of quantization scales, and optionally the number of scales\n in the array.  The length will either be 0, 1, or equal to the number of\n channels in an NHWC/NCHW tensor.\n\n The channel axis can be queried using @ref nn_model_layer_axis().\n\n If no quantization parameters are available then n_scales will be 0.\n If the tensor is quantized using full tensor quantization n_scales will be 1.\n If the tensor is quantized using per-channel quantization n_scales will be C\n which will equal the channel dimension of the tensor.  For an NHWC tensor it\n would equal shape[3].\n\n @public @memberof NNModel\n @since 2.4"]
1026    pub fn nn_model_layer_scales(
1027        model: *const NNModel,
1028        index: usize,
1029        n_scales: *mut usize,
1030    ) -> *const f32;
1031}
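// Illustrative sketch: copy a layer's quantization zero-points and scales into
// Vecs.  Empty Vecs mean no quantization parameters; a length of 1 means
// per-tensor and a length of C means per-channel, as documented above.
pub unsafe fn layer_quantization(model: *const NNModel, index: usize) -> (Vec<i32>, Vec<f32>) {
    let mut n_zeros = 0usize;
    let mut n_scales = 0usize;
    let zeros = nn_model_layer_zeros(model, index, &mut n_zeros);
    let scales = nn_model_layer_scales(model, index, &mut n_scales);
    let zeros = if zeros.is_null() {
        Vec::new()
    } else {
        ::std::slice::from_raw_parts(zeros, n_zeros).to_vec()
    };
    let scales = if scales.is_null() {
        Vec::new()
    } else {
        ::std::slice::from_raw_parts(scales, n_scales).to_vec()
    };
    (zeros, scales)
}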
1032extern "C" {
1033    #[doc = " Returns the natural data axis for the tensor or -1 if one is not set.\n\n @public @memberof NNModel\n @since 2.4"]
1034    pub fn nn_model_layer_axis(model: *const NNModel, index: usize) -> ::std::os::raw::c_int;
1035}
1036extern "C" {
1037    #[doc = " Returns the shape of a layer at the given index within the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
1038    pub fn nn_model_layer_shape(
1039        model: *const NNModel,
1040        index: usize,
1041        n_dims: *mut usize,
1042    ) -> *const i32;
1043}
1044extern "C" {
1045    #[doc = " Returns the number of inputs to a layer at the given index within the given\n model object.\n\n @public @memberof NNModel\n @since 2.0"]
1046    pub fn nn_model_layer_inputs(
1047        model: *const NNModel,
1048        index: usize,
1049        inputs: *mut *const u32,
1050    ) -> usize;
1051}
1052extern "C" {
1053    #[doc = " Returns an NNModelParameter from the model at the layer index defined by\n layer using the parameter key.  If the layer does not contain this parameter\n NULL is returned.\n\n @public @memberof NNModel\n @since 2.4"]
1054    pub fn nn_model_layer_parameter(
1055        model: *const NNModel,
1056        layer: usize,
1057        key: *const ::std::os::raw::c_char,
1058    ) -> *const NNModelParameter;
1059}
1060extern "C" {
1061    #[doc = " Returns the shape of the model parameter for layer at index <layer>.\n\n @ref nn_model_parameter_shape()\n\n Returns NULL if either the parameter is not found or the shape is missing.\n\n @public @memberof NNModel\n @since 2.4"]
1062    pub fn nn_model_layer_parameter_shape(
1063        model: *const NNModel,
1064        layer: usize,
1065        key: *const ::std::os::raw::c_char,
1066        n_dims: *mut usize,
1067    ) -> *const i32;
1068}
1069extern "C" {
1070    #[doc = " Returns float data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_f32()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
1071    pub fn nn_model_layer_parameter_data_f32(
1072        model: *const NNModel,
1073        layer: usize,
1074        key: *const ::std::os::raw::c_char,
1075        length: *mut usize,
1076    ) -> *const f32;
1077}
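// Illustrative sketch: fetch a named float parameter for a layer and copy it
// into a Vec, returning None when the parameter or its f32 data is missing.
// The key must be a NUL-terminated C string.
pub unsafe fn layer_parameter_f32(
    model: *const NNModel,
    layer: usize,
    key: &::std::ffi::CStr,
) -> Option<Vec<f32>> {
    let mut length = 0usize;
    let data = nn_model_layer_parameter_data_f32(model, layer, key.as_ptr(), &mut length);
    if data.is_null() {
        None
    } else {
        Some(::std::slice::from_raw_parts(data, length).to_vec())
    }
}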
1078extern "C" {
1079    #[doc = " Returns int16 data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_i16()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
1080    pub fn nn_model_layer_parameter_data_i16(
1081        model: *const NNModel,
1082        layer: usize,
1083        key: *const ::std::os::raw::c_char,
1084        length: *mut usize,
1085    ) -> *const i16;
1086}
1087extern "C" {
1088    #[doc = " Returns raw data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_raw()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
1089    pub fn nn_model_layer_parameter_data_raw(
1090        model: *const NNModel,
1091        layer: usize,
1092        key: *const ::std::os::raw::c_char,
1093        length: *mut usize,
1094    ) -> *const u8;
1095}
1096extern "C" {
1097    #[doc = " Returns string data for parameter <key> at layer index <layer> for string\n array element <index>.  This is a convenience wrapper around acquiring the\n parameter followed by acquiring the data.\n\n @ref nn_model_parameter_data_str()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
1098    pub fn nn_model_layer_parameter_data_str(
1099        model: *const NNModel,
1100        layer: usize,
1101        key: *const ::std::os::raw::c_char,
1102        index: usize,
1103    ) -> *const ::std::os::raw::c_char;
1104}
1105extern "C" {
1106    #[doc = " Returns number of string elements in the data_str array for the specified\n layer and parameter key.  This is a convenience wrapper around acquiring the\n parameter followed by acquiring the data.\n\n @ref nn_model_parameter_data_str_len()\n\n Returns number of string elements in the array.\n\n @public @memberof NNModel\n @since 2.4"]
1107    pub fn nn_model_layer_parameter_data_str_len(
1108        model: *const NNModel,
1109        layer: usize,
1110        key: *const ::std::os::raw::c_char,
1111    ) -> usize;
1112}
1113extern "C" {
1114    #[doc = " Returns the memory size of the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
1115    pub fn nn_model_memory_size(model: *const NNModel) -> usize;
1116}
1117extern "C" {
1118    #[doc = " Returns the minimum cache size of a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
1119    pub fn nn_model_cache_minimum_size(model: *const NNModel) -> usize;
1120}
1121extern "C" {
1122    #[doc = " Returns the optimum cache size of a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
1123    pub fn nn_model_cache_optimum_size(model: *const NNModel) -> usize;
1124}
1125extern "C" {
1126    #[doc = " The number of resources defined in the model.\n\n @param model pointer to the RTM model\n\n @returns number of resources defined in the model.\n\n @public @memberof NNModel\n @since 2.4"]
1127    pub fn nn_model_resource_count(model: *const NNModel) -> usize;
1128}
1129extern "C" {
1130    #[doc = " Retrieves a reference to the resource at the provided index.\n\n @param model pointer to the RTM model\n @param index resource index\n\n @returns an @ref NNModelResource pointer for the provided @p index in the\n given model.\n @returns NULL if either the model or index are invalid.\n\n @public @memberof NNModel\n @since 2.4"]
1131    pub fn nn_model_resource_at(model: *const NNModel, index: usize) -> *const NNModelResource;
1132}
1133extern "C" {
1134    #[doc = " Retrieves a reference to the resource with the given name.\n\n @param model pointer to the RTM model\n @param name the unique name of the resource\n\n @returns an @ref NNModelResource pointer for the provided unique @p name.\n @returns NULL if either the @p model or @p name are invalid, NULL, or the\n  @p name is not found.\n\n @public @memberof NNModel\n @since 2.4"]
1135    pub fn nn_model_resource(
1136        model: *const NNModel,
1137        name: *const ::std::os::raw::c_char,
1138    ) -> *const NNModelResource;
1139}
1140extern "C" {
1141    #[doc = " Returns the shape of the parameter data or NULL if no shape was defined.  If\n n_dims is non-NULL the number of dimensions will be stored there.  The shape\n attribute is not required for parameters but can be used either on its own\n or as part of defining layout of data attributes.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1142    pub fn nn_model_parameter_shape(
1143        parameter: *const NNModelParameter,
1144        n_dims: *mut usize,
1145    ) -> *const i32;
1146}
1147extern "C" {
1148    #[doc = " Returns parameter float data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1149    pub fn nn_model_parameter_data_f32(
1150        parameter: *const NNModelParameter,
1151        length: *mut usize,
1152    ) -> *const f32;
1153}
1154extern "C" {
1155    #[doc = " Returns parameter int32_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1156    pub fn nn_model_parameter_data_i32(
1157        parameter: *const NNModelParameter,
1158        length: *mut usize,
1159    ) -> *const i32;
1160}
1161extern "C" {
1162    #[doc = " Returns parameter int16_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1163    pub fn nn_model_parameter_data_i16(
1164        parameter: *const NNModelParameter,
1165        length: *mut usize,
1166    ) -> *const i16;
1167}
1168extern "C" {
1169    #[doc = " Returns parameter int8_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1170    pub fn nn_model_parameter_data_i8(
1171        parameter: *const NNModelParameter,
1172        length: *mut usize,
1173    ) -> *const i8;
1174}
1175extern "C" {
1176    #[doc = " Returns parameter raw data pointer, length of the array is optionally stored\n into the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1177    pub fn nn_model_parameter_data_raw(
1178        parameter: *const NNModelParameter,
1179        length: *mut usize,
1180    ) -> *const u8;
1181}
1182extern "C" {
1183    #[doc = " Returns parameter string data at desired index.  This data handler is\n different from the others which return the array as strings are themselves\n arrays and need special handling. Refer to @ref\n nn_model_parameter_data_str_len() to query the size of the data_str array,\n which refers to the number of strings in this parameter.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1184    pub fn nn_model_parameter_data_str(
1185        parameter: *const NNModelParameter,
1186        index: usize,
1187    ) -> *const ::std::os::raw::c_char;
1188}
1189extern "C" {
1190    #[doc = " Returns the number of strings in the parameter's data_str attribute.\n\n @public @memberof NNModelParameter\n @since 2.4"]
1191    pub fn nn_model_parameter_data_str_len(parameter: *const NNModelParameter) -> usize;
1192}
1193extern "C" {
1194    #[doc = " The unique name of the resource as can be used to retrieve the resource using\n @ref nn_model_resource().\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the name of the resource.\n @returns NULL if the resource or name is NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
1195    pub fn nn_model_resource_name(
1196        resource: *const NNModelResource,
1197    ) -> *const ::std::os::raw::c_char;
1198}
1199extern "C" {
1200    #[doc = " Returns the meta string for the resource.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the meta parameter of the resource.\n @returns NULL if the resource or meta are NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
1201    pub fn nn_model_resource_meta(
1202        resource: *const NNModelResource,
1203    ) -> *const ::std::os::raw::c_char;
1204}
1205extern "C" {
1206    #[doc = " Returns the mime type string for the resource.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the mime parameter of the resource.\n @returns NULL if the resource or mime are NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
1207    pub fn nn_model_resource_mime(
1208        resource: *const NNModelResource,
1209    ) -> *const ::std::os::raw::c_char;
1210}
1211extern "C" {
1212    #[doc = " Returns the raw binary data for the resource, the size of the data will be\n saved in @p data_size if non-NULL.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n @param data_size optional pointer to a size_t to receive the length in bytes\n of the data, if provided.\n\n @returns pointer to the start of the data stream of length @p data_size.\n @returns NULL if resource has no data associated.\n\n @public @memberof NNModelResource\n @since 2.4"]
1213    pub fn nn_model_resource_data(
1214        resource: *const NNModelResource,
1215        data_size: *mut usize,
1216    ) -> *const u8;
1217}
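// Illustrative sketch: enumerate a model's resources and report each one's
// name and data size, skipping entries the library reports as NULL.
pub unsafe fn resource_summary(model: *const NNModel) -> Vec<(String, usize)> {
    let mut summary = Vec::new();
    for index in 0..nn_model_resource_count(model) {
        let resource = nn_model_resource_at(model, index);
        if resource.is_null() {
            continue;
        }
        let name = nn_model_resource_name(resource);
        if name.is_null() {
            continue;
        }
        let mut data_size = 0usize;
        let _data = nn_model_resource_data(resource, &mut data_size);
        summary.push((
            ::std::ffi::CStr::from_ptr(name).to_string_lossy().into_owned(),
            data_size,
        ));
    }
    summary
}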
1218extern "C" {
1219    #[doc = " Returns the actual size of the context structure.  This size will be smaller\n than @ref NN_CONTEXT_SIZEOF which contains additional padding for future\n extension.  Since @ref nn_context_sizeof() is called dynamically at runtime\n it can return the true and unpadded size.\n\n @public @memberof NNContext\n @since 2.0"]
1220    pub fn nn_context_sizeof() -> usize;
1221}
1222extern "C" {
1223    #[doc = " Initializes an NNContext and allocates the required memory.  If any of the\n pointers are NULL, malloc will be called automatically to allocate the memory\n using the provided sizes.  If memory_size or cache_size is 0 the\n corresponding memory will not be initialized.\n\n @public @memberof NNContext\n @since 2.0"]
1224    pub fn nn_context_init(
1225        engine: *mut NNEngine,
1226        memory_size: usize,
1227        memory: *mut ::std::os::raw::c_void,
1228        cache_size: usize,
1229        cache: *mut ::std::os::raw::c_void,
1230    ) -> *mut NNContext;
1231}
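// Illustrative sketch: create a context and let the library allocate both the
// memory pool and the cache by passing NULL, as described above.  The sizes
// below are arbitrary example values, not recommendations.
pub unsafe fn create_context(engine: *mut NNEngine) -> *mut NNContext {
    nn_context_init(
        engine,
        16 * 1024 * 1024,       // memory pool size in bytes (example value)
        ::std::ptr::null_mut(), // NULL: library allocates the pool
        1024 * 1024,            // cache size in bytes (example value)
        ::std::ptr::null_mut(), // NULL: library allocates the cache
    )
}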
1232extern "C" {
1233    #[doc = " Initializes an NNContext into the provided memory which *MUST* be at least\n NN_CONTEXT_SIZEOF bytes.  If any of the pointers are NULL, malloc will be\n called automatically to allocate the memory using the provided sizes.  If\n memory_size or cache_size is 0 the corresponding memory will not be\n initialized.\n\n @public @memberof NNContext\n @since 2.0"]
1234    pub fn nn_context_init_ex(
1235        context_memory: *mut ::std::os::raw::c_void,
1236        engine: *mut NNEngine,
1237        memory_size: usize,
1238        memory: *mut ::std::os::raw::c_void,
1239        cache_size: usize,
1240        cache: *mut ::std::os::raw::c_void,
1241    ) -> *mut NNContext;
1242}
1243extern "C" {
1244    #[doc = " Release the memory being used by the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
1245    pub fn nn_context_release(context: *mut NNContext);
1246}
1247#[doc = " Callback function for custom user ops.\n\n @public @memberof NNContext\n @since 2.4"]
1248pub type nn_user_ops = ::std::option::Option<
1249    unsafe extern "C" fn(
1250        context: *mut NNContext,
1251        opname: *const ::std::os::raw::c_char,
1252        index: usize,
1253    ) -> NNError,
1254>;
1255extern "C" {
1256    #[doc = " @public @memberof NNContext\n @since 2.4"]
1257    pub fn nn_context_user_ops_register(context: *mut NNContext, callback: nn_user_ops) -> NNError;
1258}
1259extern "C" {
1260    #[doc = " @public @memberof NNContext\n @since 2.4"]
1261    pub fn nn_context_user_ops(context: *mut NNContext) -> nn_user_ops;
1262}
1263extern "C" {
1264    #[doc = " @public @memberof NNContext\n @since 2.2"]
1265    pub fn nn_context_cache(context: *mut NNContext) -> *mut NNTensor;
1266}
1267extern "C" {
1268    #[doc = " @public @memberof NNContext\n @since 2.2"]
1269    pub fn nn_context_mempool(context: *mut NNContext) -> *mut NNTensor;
1270}
1271extern "C" {
1272    #[doc = " Returns the engine used by the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
1273    pub fn nn_context_engine(context: *mut NNContext) -> *mut NNEngine;
1274}
1275extern "C" {
1276    #[doc = " Returns the currently loaded model blob for the context.\n\n @public @memberof NNContext\n @since 2.0"]
1277    pub fn nn_context_model(context: *mut NNContext) -> *const NNModel;
1278}
1279extern "C" {
1280    #[doc = " Loads the model provided by the input into the context.\n\n @context pointer to the context object\n @memory pointer to the memory that contains the model\n @memory_size the size of the memory that is used by the model\n\n @public @memberof NNContext\n @since 2.0"]
1281    pub fn nn_context_model_load(
1282        context: *mut NNContext,
1283        memory_size: usize,
1284        memory: *const ::std::os::raw::c_void,
1285    ) -> NNError;
1286}
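// Illustrative sketch: load a model blob into a context.  Whether the context
// copies the blob or references it in place is not stated above, so this
// example assumes the caller keeps `blob` alive while the model is in use.
pub unsafe fn load_model_blob(context: *mut NNContext, blob: &[u8]) -> NNError {
    nn_context_model_load(
        context,
        blob.len(),
        blob.as_ptr() as *const ::std::os::raw::c_void,
    )
}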
1287extern "C" {
1288    #[doc = " Frees the memory used by the model within the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
1289    pub fn nn_context_model_unload(context: *mut NNContext);
1290}
1291extern "C" {
1292    #[doc = " Returns the tensor with the given name within the model provided by the given\n context object.\n\n @public @memberof NNContext\n @since 2.0"]
1293    pub fn nn_context_tensor(
1294        context: *mut NNContext,
1295        name: *const ::std::os::raw::c_char,
1296    ) -> *mut NNTensor;
1297}
1298extern "C" {
1299    #[doc = " Returns the tensor at the given index within the model provided by the given\n context object.\n\n @public @memberof NNContext\n @since 2.0"]
1300    pub fn nn_context_tensor_index(context: *mut NNContext, index: usize) -> *mut NNTensor;
1301}
1302extern "C" {
1303    #[doc = " Runs the model within the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
1304    pub fn nn_context_run(context: *mut NNContext) -> NNError;
1305}
1306extern "C" {
1307    #[doc = " Runs the layer at the given index of the model within the given context\n object.  If the index is invalid NN_ERROR_INVALID_LAYER is returned, which\n can be used to detect when the end of the model has been reached.\n\n @public @memberof NNContext\n @since 2.3"]
1308    pub fn nn_context_step(context: *mut NNContext, index: usize) -> NNError;
1309}
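// Illustrative sketch: run a loaded model one layer at a time, bounding the
// loop with nn_model_layer_count() instead of probing for the
// NN_ERROR_INVALID_LAYER sentinel described above.
pub unsafe fn run_stepwise(context: *mut NNContext) -> Vec<NNError> {
    let model = nn_context_model(context);
    let layers = nn_model_layer_count(model);
    let mut results = Vec::with_capacity(layers);
    for index in 0..layers {
        results.push(nn_context_step(context, index));
    }
    results
}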
1310extern "C" {
1311    #[doc = " Exposes the free() function\n"]
1312    pub fn nn_free(ptr: *mut ::std::os::raw::c_void);
1313}
1314extern "C" {
1315    #[doc = " Exposes the malloc() function\n"]
1316    pub fn nn_malloc(size: usize) -> *mut ::std::os::raw::c_void;
1317}