/* automatically generated by rust-bindgen 0.64.0 */

pub type __off_t = ::std::os::raw::c_long;
pub type __off64_t = ::std::os::raw::c_long;
pub type FILE = _IO_FILE;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_marker {
    _unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_codecvt {
    _unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_wide_data {
    _unused: [u8; 0],
}
pub type _IO_lock_t = ::std::os::raw::c_void;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_FILE {
    pub _flags: ::std::os::raw::c_int,
    pub _IO_read_ptr: *mut ::std::os::raw::c_char,
    pub _IO_read_end: *mut ::std::os::raw::c_char,
    pub _IO_read_base: *mut ::std::os::raw::c_char,
    pub _IO_write_base: *mut ::std::os::raw::c_char,
    pub _IO_write_ptr: *mut ::std::os::raw::c_char,
    pub _IO_write_end: *mut ::std::os::raw::c_char,
    pub _IO_buf_base: *mut ::std::os::raw::c_char,
    pub _IO_buf_end: *mut ::std::os::raw::c_char,
    pub _IO_save_base: *mut ::std::os::raw::c_char,
    pub _IO_backup_base: *mut ::std::os::raw::c_char,
    pub _IO_save_end: *mut ::std::os::raw::c_char,
    pub _markers: *mut _IO_marker,
    pub _chain: *mut _IO_FILE,
    pub _fileno: ::std::os::raw::c_int,
    pub _flags2: ::std::os::raw::c_int,
    pub _old_offset: __off_t,
    pub _cur_column: ::std::os::raw::c_ushort,
    pub _vtable_offset: ::std::os::raw::c_schar,
    pub _shortbuf: [::std::os::raw::c_char; 1usize],
    pub _lock: *mut _IO_lock_t,
    pub _offset: __off64_t,
    pub _codecvt: *mut _IO_codecvt,
    pub _wide_data: *mut _IO_wide_data,
    pub _freeres_list: *mut _IO_FILE,
    pub _freeres_buf: *mut ::std::os::raw::c_void,
    pub __pad5: usize,
    pub _mode: ::std::os::raw::c_int,
    pub _unused2: [::std::os::raw::c_char; 20usize],
}
#[test]
fn bindgen_test_layout__IO_FILE() {
    const UNINIT: ::std::mem::MaybeUninit<_IO_FILE> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<_IO_FILE>(),
        216usize,
        concat!("Size of: ", stringify!(_IO_FILE))
    );
    assert_eq!(
        ::std::mem::align_of::<_IO_FILE>(),
        8usize,
        concat!("Alignment of ", stringify!(_IO_FILE))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._flags) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_flags)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_ptr) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_read_ptr)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_end) as usize - ptr as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_read_end)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_base) as usize - ptr as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_read_base)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_base) as usize - ptr as usize },
        32usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_write_base)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_ptr) as usize - ptr as usize },
        40usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_write_ptr)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_end) as usize - ptr as usize },
        48usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_write_end)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_base) as usize - ptr as usize },
        56usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_buf_base)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_end) as usize - ptr as usize },
        64usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_buf_end)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_save_base) as usize - ptr as usize },
        72usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_save_base)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_backup_base) as usize - ptr as usize },
        80usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_backup_base)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._IO_save_end) as usize - ptr as usize },
        88usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_IO_save_end)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._markers) as usize - ptr as usize },
        96usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_markers)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._chain) as usize - ptr as usize },
        104usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_chain)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._fileno) as usize - ptr as usize },
        112usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_fileno)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._flags2) as usize - ptr as usize },
        116usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_flags2)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._old_offset) as usize - ptr as usize },
        120usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_old_offset)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._cur_column) as usize - ptr as usize },
        128usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_cur_column)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._vtable_offset) as usize - ptr as usize },
        130usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_vtable_offset)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._shortbuf) as usize - ptr as usize },
        131usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_shortbuf)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._lock) as usize - ptr as usize },
        136usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_lock)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._offset) as usize - ptr as usize },
        144usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_offset)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._codecvt) as usize - ptr as usize },
        152usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_codecvt)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._wide_data) as usize - ptr as usize },
        160usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_wide_data)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._freeres_list) as usize - ptr as usize },
        168usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_freeres_list)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._freeres_buf) as usize - ptr as usize },
        176usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_freeres_buf)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).__pad5) as usize - ptr as usize },
        184usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(__pad5)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._mode) as usize - ptr as usize },
        192usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_mode)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr)._unused2) as usize - ptr as usize },
        196usize,
        concat!(
            "Offset of field: ",
            stringify!(_IO_FILE),
            "::",
            stringify!(_unused2)
        )
    );
}
#[doc = " Successfull operation, no error."]
pub const NNError_NN_SUCCESS: NNError = 0;
#[doc = " Internal error without a specific error code, catch-all error."]
pub const NNError_NN_ERROR_INTERNAL: NNError = 1;
#[doc = " The provided handle is invalid.  This error is typically used by NNEngine\n when interfacing with another API such as OpenCL or OpenVX which require\n native handles for their internal API."]
pub const NNError_NN_ERROR_INVALID_HANDLE: NNError = 2;
#[doc = " Out of memory error, returned if a call to malloc returns NULL or similar\n error from an underlying engine plugin."]
pub const NNError_NN_ERROR_OUT_OF_MEMORY: NNError = 3;
#[doc = " Out of resources errors are similar to out of memory though sometimes\n treated separately by underlying engine plugins."]
pub const NNError_NN_ERROR_OUT_OF_RESOURCES: NNError = 4;
#[doc = " Signals an API has not been implemented.  Can be caught by the core\n DeepViewRT library when interfacing with engine plugins to gracefully\n fallback to the native implementation."]
pub const NNError_NN_ERROR_NOT_IMPLEMENTED: NNError = 5;
#[doc = " A required parameter was missing or NULL or simply invalid."]
pub const NNError_NN_ERROR_INVALID_PARAMETER: NNError = 6;
#[doc = " When attempting to run an operation where the input/output tensors are\n of different types and the operation does not support automatic type\n conversions."]
pub const NNError_NN_ERROR_TYPE_MISMATCH: NNError = 7;
#[doc = " When attempting to run an operation and the input/output tensors have\n invalid or unsupported shape combinations.  Some operations require the\n shapes to be the same while others, such as arithmetic broadcasting\n operations, will support various shape combinations but if the provided\n pairs are invalid then the shape mismatch is returned."]
pub const NNError_NN_ERROR_SHAPE_MISMATCH: NNError = 8;
#[doc = " The tensor's shape is invalid for the given operation.  It differs from\n the shape mismatch in that the shape is invalid on its own and not\n relative to another related tensor.  An example would be a shape with\n more than one -1 dimension."]
pub const NNError_NN_ERROR_INVALID_SHAPE: NNError = 9;
#[doc = " The requested ordering was invalid."]
pub const NNError_NN_ERROR_INVALID_ORDER: NNError = 10;
#[doc = " The requested axis for an operation was invalid or unsupported."]
pub const NNError_NN_ERROR_INVALID_AXIS: NNError = 11;
#[doc = " A required resource was missing or the reference invalid."]
pub const NNError_NN_ERROR_MISSING_RESOURCE: NNError = 12;
#[doc = " The requested engine is invalid."]
pub const NNError_NN_ERROR_INVALID_ENGINE: NNError = 13;
#[doc = " The tensor has no data or the data is not currently accessible.  An\n example of the latter would be attempting to call @ref nn_tensor_maprw\n while the tensor was already mapped read-only or write-only."]
pub const NNError_NN_ERROR_TENSOR_NO_DATA: NNError = 14;
#[doc = " The internal kernel or subroutine required to complete an operation using\n the engine plugin was missing.  An example would be OpenCL or OpenVX\n operation where the kernel implementation cannot be located."]
pub const NNError_NN_ERROR_KERNEL_MISSING: NNError = 15;
#[doc = " The operation does not support the tensor's type."]
pub const NNError_NN_ERROR_TENSOR_TYPE_UNSUPPORTED: NNError = 16;
#[doc = " For operations which can operate on an array of inputs, the provided list\n of inputs was too large."]
pub const NNError_NN_ERROR_TOO_MANY_INPUTS: NNError = 17;
#[doc = " A system error occured when interfacing with an operating system\n function.  On some systems errno might be updated with the underlying\n error code."]
pub const NNError_NN_ERROR_SYSTEM_ERROR: NNError = 18;
#[doc = " When working with a model a reference was made to a layer which did not\n exist."]
pub const NNError_NN_ERROR_INVALID_LAYER: NNError = 19;
#[doc = " The model is invalid or corrupted."]
pub const NNError_NN_ERROR_MODEL_INVALID: NNError = 20;
#[doc = " An operation referenced a model but the model was not provided."]
pub const NNError_NN_ERROR_MODEL_MISSING: NNError = 21;
#[doc = " The string was too large."]
pub const NNError_NN_ERROR_STRING_TOO_LARGE: NNError = 22;
#[doc = " The quantization parameters are invalid."]
pub const NNError_NN_ERROR_INVALID_QUANT: NNError = 23;
#[doc = " Failed to generate graph representation of model."]
pub const NNError_NN_ERROR_MODEL_GRAPH_FAILED: NNError = 24;
#[doc = " Failed to verify graph generateed from model."]
pub const NNError_NN_ERROR_GRAPH_VERIFY_FAILED: NNError = 25;
#[doc = " Enumeration of all errors provided by DeepViewRT.  Most functions will\n return an NNError with NN_SUCCESS being zero. A common usage pattern for\n client code is to check for err using `if (err) ...` as any error condition\n will return non-zero."]
pub type NNError = ::std::os::raw::c_uint;
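// Hedged usage sketch (editor's addition, not part of the generated bindings):
// the documentation above notes that client code commonly treats any non-zero
// NNError as a failure.  A small Rust helper mirroring that `if (err) ...`
// pattern could look like this; the helper name is illustrative only.
#[allow(dead_code)]
fn check_nn_error(err: NNError) -> Result<(), NNError> {
    // NN_SUCCESS is zero, so any non-zero value signals an error condition.
    if err == NNError_NN_SUCCESS {
        Ok(())
    } else {
        Err(err)
    }
}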
#[doc = " Raw byte-stream tensor, useful for encoded tensors such as PNG images.\n The size of this tensor would be in bytes."]
pub const NNTensorType_NNTensorType_RAW: NNTensorType = 0;
#[doc = " String tensor data, a single dimension would hold one null-terminated\n string of variable length.  A standard C char* array."]
pub const NNTensorType_NNTensorType_STR: NNTensorType = 1;
#[doc = " Signed 8-bit integer tensor data internally @ref int8_t"]
pub const NNTensorType_NNTensorType_I8: NNTensorType = 2;
#[doc = " Unsigned 8-bit integer tensor data internally @ref uint8_t"]
pub const NNTensorType_NNTensorType_U8: NNTensorType = 3;
#[doc = " Signed 16-bit integer tensor data internally @ref int16_t"]
pub const NNTensorType_NNTensorType_I16: NNTensorType = 4;
#[doc = " Unsigned 16-bit integer tensor data internally @ref uint16_t"]
pub const NNTensorType_NNTensorType_U16: NNTensorType = 5;
#[doc = " Signed 16-bit integer tensor data internally @ref int32_t"]
pub const NNTensorType_NNTensorType_I32: NNTensorType = 6;
#[doc = " Unsigned 16-bit integer tensor data internally @ref uint32_t"]
pub const NNTensorType_NNTensorType_U32: NNTensorType = 7;
#[doc = " Signed 16-bit integer tensor data internally @ref int64_t"]
pub const NNTensorType_NNTensorType_I64: NNTensorType = 8;
#[doc = " Unsigned 16-bit integer tensor data internally @ref uint64_t"]
pub const NNTensorType_NNTensorType_U64: NNTensorType = 9;
#[doc = " Half precision (16-bit) floating point tensor data."]
pub const NNTensorType_NNTensorType_F16: NNTensorType = 10;
#[doc = " Single precision (32-bit) floating point tensor data."]
pub const NNTensorType_NNTensorType_F32: NNTensorType = 11;
#[doc = " Double precision (64-bit) floating point tensor data."]
pub const NNTensorType_NNTensorType_F64: NNTensorType = 12;
#[doc = " @enum NNTensorType\n Enumeration of the data types supported by NNTensors in DeepViewRT."]
pub type NNTensorType = ::std::os::raw::c_uint;
#[doc = " No quantization for tensor."]
pub const NNQuantizationType_NNQuantizationType_None: NNQuantizationType = 0;
#[doc = " Affine quantization with parameters applied globally across the tensor.\n\n The scale term is queried from @ref nn_tensor_scales() while the zero\n term is queried from @ref nn_tensor_zeros().\n\n Quantization: \\f$ f(x) = \\frac{x}{scale} + zero \\f$\n\n Dequantization: \\f$ f(x) = (x - zero) * scale \\f$"]
pub const NNQuantizationType_NNQuantizationType_Affine_PerTensor: NNQuantizationType = 1;
#[doc = " Affine quantization with separate parameters applied to each channel.\n Also known as per-axis where the axis is always the channel \"C\" axis in\n a NCHW, NHWC, and so-on shaped tensor.\n\n Same equation as @ref NNQuantization_Affine_PerTensor but applied\n per-channel.  The scale and zero_point are vectors of channel length."]
pub const NNQuantizationType_NNQuantizationType_Affine_PerChannel: NNQuantizationType = 2;
#[doc = " Quantized using Dynamic Fixed Point."]
pub const NNQuantizationType_NNQuantizationType_DFP: NNQuantizationType = 3;
#[doc = " Enumeration of all quantization type provided by DeepViewRT."]
pub type NNQuantizationType = ::std::os::raw::c_uint;
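// Hedged illustration (editor's addition): the affine per-tensor equations in
// the documentation above, written out as plain Rust arithmetic.  The helper
// names are hypothetical and only demonstrate the documented formulas
// f(x) = x / scale + zero (quantize) and f(x) = (x - zero) * scale
// (dequantize) for an i8 value.
#[allow(dead_code)]
fn affine_quantize_example(x: f32, scale: f32, zero: i32) -> i8 {
    // Quantization: scale the real value, shift by the zero point, round and
    // clamp to the storage type's range.
    (x / scale + zero as f32)
        .round()
        .clamp(i8::MIN as f32, i8::MAX as f32) as i8
}
#[allow(dead_code)]
fn affine_dequantize_example(q: i8, scale: f32, zero: i32) -> f32 {
    // Dequantization: remove the zero point, then rescale to a real value.
    (q as i32 - zero) as f32 * scale
}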
#[doc = " DeepViewRT library initialization options."]
pub type NNOptions = isize;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct nn_engine {
    _unused: [u8; 0],
}
#[doc = " @struct NNEngine\n\n Engine structure provides the means to implement custom tensor and kernel\n implementations which implement the DeepViewRT inference backend. As an\n example the OpenCL backend is provided as a plugin which exposes an NNEngine\n which maps NNTensors to cl_mem objects and kernels as OpenCL kernels."]
pub type NNEngine = nn_engine;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct nn_tensor {
    _unused: [u8; 0],
}
#[doc = " @struct NNTensor\n\n Tensors are represented by the @ref NNTensor class.  The dimensions are\n variable and can be from 1 to 4 dimensions.  Internally the shape of a\n 1-dimensional tensor would be [N 1 1 1] and a scalar [1 1 1 1].\n\n Tensors can exist locally on the CPU or when initialized using an\n @ref NNEngine object the tensors can be mapped to a buffer on a compute\n device such as a GPU or NPU using the DeepViewRT OpenCL or OpenVX engine\n plugins."]
pub type NNTensor = nn_tensor;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct nn_quant_param {
    _unused: [u8; 0],
}
#[doc = " @struct NNQuantParam\n\n Tensor quantization parameter structure."]
pub type NNQuantParam = nn_quant_param;
#[doc = " @struct NNModel\n\n DeepViewRT Models \"RTM\" are reprensted in memory through the NNModel type\n which is meant to point to a static model blob.  This can point directly to\n the memory of the RTM either loaded into memory, accessed through a memmap\n or pointed directly to the flash location.  In other words if the RTM is\n saved into flash which is connected to the memory space then the model does\n not need to be copied into RAM before being loaded.\n\n Models are loaded into an @ref NNContext which handles the dynamic data\n structures required for operation of the model."]
pub type NNModel = ::std::os::raw::c_void;
#[doc = " @struct NNModelResource\n\n DeepViewRT Models may have resources embedded into them and this datatype is\n their handle."]
pub type NNModelResource = ::std::os::raw::c_void;
#[doc = " @struct NNModelParameter\n\n DeepViewRT Models use parameters to store various configuration information\n such as layer parameters."]
pub type NNModelParameter = ::std::os::raw::c_void;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct nn_context {
    _unused: [u8; 0],
}
#[doc = " @struct NNContext\n\n DeepViewRT models can be loaded with an NNContext and numerous contexts can\n be loaded at once.  The context manages the runtime portion of the model\n including the tensors required to hold intermediate buffers.\n\n A context itself requires @ref NN_CONTEXT_SIZEOF bytes though it will also\n allocate on the heap additional tensor handles required to support models on\n @ref nn_context_model_load() and these will then be released on a call to\n @ref nn_context_model_unload().\n\n When a context is created an @ref NNEngine plugin may optionally be provided\n which will take over the management of tensors through the engine plugin and\n attempting to run models and operators on the compute device enabled by this\n engine plugin.  If an engine is not provided DeepViewRT will use the default\n implementation which is optimized for CPU and MCU devices."]
pub type NNContext = nn_context;
extern "C" {
    #[doc = " DeepViewRT library version as \"MAJOR.MINOR.PATCH\".\n\n @return library version string\n\n @since 2.0"]
    pub fn nn_version() -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the string associated with a given error.\n\n @see NNError\n\n @param error The NNError to be represented as a string.\n\n @return The string representation when the error is valid.\n @return NULL when the error is not valid.\n\n @since 2.0"]
    pub fn nn_strerror(error: NNError) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Initializes the library with optional parameters.  This function _MUST_ be\n called before any others (though nn_version and nn_strerror are safe) and\n _MUST_ not be called again unless care is taken to protect this call.\n\n @note As of DeepViewRT 2.4.32 this function does not do anything except on\n RaspberryPi platforms.  This could change in the future so it is safer to\n call the function for future compatibility.\n\n @return NN_SUCCESS after successfully initializing the library.\n @return NN_ERROR_INTERNAL if the library fails to initialize.\n\n @since 2.4"]
    pub fn nn_init(options: *const NNOptions) -> NNError;
}
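// Hedged usage sketch (editor's addition): querying the library version and
// initializing the runtime through the bindings above.  Passing a NULL options
// pointer to nn_init is this sketch's assumption for requesting the default
// behaviour; the function name is illustrative only.
#[allow(dead_code)]
unsafe fn example_init() -> Result<(), NNError> {
    // nn_version and nn_strerror are documented as safe to call before nn_init.
    let version = ::std::ffi::CStr::from_ptr(nn_version());
    println!("DeepViewRT {}", version.to_string_lossy());
    let err = nn_init(::std::ptr::null());
    if err != NNError_NN_SUCCESS {
        let msg = ::std::ffi::CStr::from_ptr(nn_strerror(err));
        eprintln!("nn_init failed: {}", msg.to_string_lossy());
        return Err(err);
    }
    Ok(())
}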
extern "C" {
    #[doc = " The actual size of the NNEngine structure.  This will differ from the size\n defined by @ref NN_ENGINE_SIZEOF as the later is padded for future API\n extensions while this function returns the actual size currently required.\n\n @return NNEngine structure size as reported by @ref sizeof().\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_sizeof() -> usize;
}
extern "C" {
    #[doc = " Initializes the NNEngine structure using the provided memory or allocating a\n new buffer is none was provided.\n\n When providing memory it must be at least the size returned by\n @ref nn_engine_sizeof() and for statically initiallized arrays the\n @ref NN_ENGINE_SIZEOF can be used instead which is padded for future API\n extensions.\n\n @note previous to version 2.4.32 the memory parameter is required otherwise\n NULL will always be returned and no engine structure is created.\n\n @param memory Pointer to the start of where a NNEngine object should be\n initialized.\n\n @return Pointer to the initialized NNEngine structure.\n @return NULL if memory was NULL and malloc using @ref nn_engine_size()\n  returns NULL.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_init(memory: *mut ::std::os::raw::c_void) -> *mut NNEngine;
}
extern "C" {
    #[doc = " Returns handle of the NNEngine object.\n\n @param memory Pointer to the NNEngine structure\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_native_handle(engine: *mut NNEngine) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Releases the memory that was being used by the engine.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_release(engine: *mut NNEngine);
}
extern "C" {
    #[doc = " Loads the plugin to provided engine object.  The plugin should point to an\n engine plugin library either as an absolute or relative path or be found in\n the standard OS search path for shared libraries.\n\n @param engine Pointer to the engine object.\n @param plugin String of the absolute or relative path to the plugin.\n\n @return NN_ERROR_INVALID ENGINE given the engine pointer is NULL or\n the plugin does not have the necessary functions.\n @return NN_ERROR_MISSING_RESOURCE given the plugin dll cannot be found.\n @return The error returned by the plugin's init function given a valid engine\n and dll.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_load(engine: *mut NNEngine, plugin: *const ::std::os::raw::c_char) -> NNError;
}
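// Hedged usage sketch (editor's addition): creating an engine in caller-owned
// memory sized by nn_engine_sizeof() and loading a plugin, following the
// documentation above.  The plugin file name "libdeepview-opencl.so" is an
// assumption used only for illustration.
#[allow(dead_code)]
unsafe fn example_engine_with_plugin() -> Result<(), NNError> {
    // u64-backed storage keeps the caller-provided buffer 8-byte aligned; it
    // must outlive the engine, so it lives for the whole scope of this example.
    let mut storage = vec![0u64; (nn_engine_sizeof() + 7) / 8];
    let engine = nn_engine_init(storage.as_mut_ptr() as *mut ::std::os::raw::c_void);
    if engine.is_null() {
        return Err(NNError_NN_ERROR_OUT_OF_MEMORY);
    }
    let plugin = ::std::ffi::CString::new("libdeepview-opencl.so").unwrap();
    let err = nn_engine_load(engine, plugin.as_ptr());
    if err == NNError_NN_SUCCESS {
        let name = ::std::ffi::CStr::from_ptr(nn_engine_name(engine));
        println!("loaded engine: {}", name.to_string_lossy());
        nn_engine_unload(engine);
    }
    nn_engine_release(engine);
    if err == NNError_NN_SUCCESS { Ok(()) } else { Err(err) }
}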
extern "C" {
    #[doc = " Unloads the plugin from the given engine object.\n\n @param engine Pointer to the engine object.\n\n @return NN_ERROR_INVALID_ENGINE given the engine pointer is NULL.\n @return NN_ERROR_INTERNAL if the plugin dll could not be closed properly.\n @return The NNError from the plugin's cleanup function.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_unload(engine: *mut NNEngine);
}
extern "C" {
    #[doc = " Returns the name of the engine object.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_name(engine: *mut NNEngine) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the version of the engine object.\n\n @param engine Pointer to the engine object.\n\n @public @memberof NNEngine\n @since 2.0"]
    pub fn nn_engine_version(engine: *mut NNEngine) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the size of the tensor object for preparing memory allocations.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_sizeof() -> usize;
}
extern "C" {
    #[doc = " Initializes the tensor using provided memory.  The memory MUST be at least\n the size returned by @ref nn_tensor_sizeof().  This size does not include\n the actual tensor data which is allocated separately, either by requesting\n the implementation to allocate the buffer or attaching to externally\n allocated memory.\n\n The tensor created by this function has no data associated to it and is of\n rank-0.\n\n @param memory The pointer to be initialized to a NNTensor object.\n @param engine Pointer to the engine object.\n\n @return NULL given the memory pointer is a null pointer.\n @return Pointer to the newly created NNTensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_init(
        memory: *mut ::std::os::raw::c_void,
        engine: *mut NNEngine,
    ) -> *mut NNTensor;
}
extern "C" {
    #[doc = " Releases the memory used by the tensor object.\n\n @param tensor Pointer to the tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_release(tensor: *mut NNTensor);
}
extern "C" {
    #[doc = " Returns the engine owning this tensor, could be NULL.\n\n @param tensor Pointer to the tensor object.\n\n @return Pointer to the engine object to which the tensor is associated.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_engine(tensor: *mut NNTensor) -> *mut NNEngine;
}
extern "C" {
    #[doc = " Returns the native handle of the tensor object.  This is an internal API for\n access internal structures.\n\n @param tensor Pointer to the tensor object.\n\n @private @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_native_handle(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Sets the tensor objects native handle to the one provided.\n\n @param tensor Pointer to the tensor object.\n @param handle Pointer to the handle object.\n\n @private @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_set_native_handle(tensor: *mut NNTensor, handle: *mut ::std::os::raw::c_void);
}
#[doc = " Callback function to free an auxiliary object, called from nn_tensor_release.\n\n @private @memberof NNTensor\n @since 2.1"]
pub type nn_aux_object_free = ::std::option::Option<unsafe extern "C" fn(tensor: *mut NNTensor)>;
extern "C" {
    #[doc = " Configures an auxiliary object for the tensor.  This is a private API used\n for attaching auxiliary buffers.\n\n @private @memberof NNTensor\n @since 2.1"]
    pub fn nn_tensor_set_aux_object(
        tensor: *mut NNTensor,
        aux_object: *mut ::std::os::raw::c_void,
        aux_object_free: nn_aux_object_free,
    );
}
extern "C" {
    #[doc = " Returns the auxiliary object for the tensor, or NULL if none is attached.\n\n @private @memberof NNTensor\n @since 2.1"]
    pub fn nn_tensor_aux_object(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Returns the auxiliary object's free function, or NULL if none is attached.\n\n @private @memberof NNTensor\n @since 2.3"]
    pub fn nn_tensor_aux_free(tensor: *mut NNTensor) -> nn_aux_object_free;
}
extern "C" {
    #[doc = " Extended version of the auxiliary object API which allows additional objects\n to be attached to the tensor using name-based indexing.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_set_aux_object_by_name(
        tensor: *mut NNTensor,
        name: *const ::std::os::raw::c_char,
        aux_object: *mut ::std::os::raw::c_void,
        aux_object_free: nn_aux_object_free,
        buffer_ownership: bool,
        name_ownership: bool,
    );
}
extern "C" {
    #[doc = " Acquire the auxiliary object associated with the given name parameter.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_aux_object_by_name(
        tensor: *mut NNTensor,
        name: *const ::std::os::raw::c_char,
    ) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Frees the auxiliary object associated with the given name parameter.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_aux_free_by_name(
        tensor: *mut NNTensor,
        name: *const ::std::os::raw::c_char,
    ) -> nn_aux_object_free;
}
extern "C" {
    #[doc = " Retrieves the panel size of the tensor when it has been panel-shuffled for\n improved tiling performance.  The panel size is the vectorization length.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_panel_size(tensor: *mut NNTensor) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Sets the panel size of the tensor.  This is primarily an internal API used\n to store the vectorization length when shuffling tensors into an optimized\n tile format.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_set_panel_size(tensor: *mut NNTensor, panel_size: ::std::os::raw::c_int);
}
extern "C" {
    #[doc = " Synchronize the tensor and all preceeding events in the chain.\n\n This is used for engines which may not immediately evaluate tensor operations\n but instead pass events around, this call will synchronize the event chain\n leading to this tensor.\n\n @param tensor Pointer to the tensor object.\n\n @return NN_SUCCESS if the sync was successful or ignored by engines which do\n not implement this API.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_sync(tensor: *mut NNTensor) -> NNError;
}
extern "C" {
    #[doc = " Returns the time information stored in the tensor.  The time is returned\n in nanoseconds of the duration of the last operation the wrote into this\n tensor.  causes a nn_tensor_sync on the target tensor.\n\n This is used for measuring the time an operation takes by capturing the time\n the operation took into the destination tensor of the operation.  The time\n is not the time it takes to write to the tensor, this is captured by the\n @ref nn_tensor_io_time() function, but the time it took the operation to\n complete (not including map/unmap times).\n\n @param tensor Pointer to the tensor object.\n\n @return Nanoseconds of processing time for the last operation which wrote\n into this tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_time(tensor: *mut NNTensor) -> i64;
}
extern "C" {
    #[doc = " Returns the I/O time information stored in the tensor.  The time is returned\n in nanoseconds of the duration of the last map/unmap pair.  When tensors are\n mapped to the CPU (no accelerator engine is loaded) then times are expected\n to be zero time as no mapping is actually required and the internal pointer\n is simply returned.  When an accelerator engine is used, such as OpenVX,\n then the io_time measures the time the map/unmap or copy operations took to\n complete.\n\n @param tensor Pointer to the tensor object.\n\n @return Nanoseconds of time spent in the map/unmap calls.\n\n @public @memberof NNTensor\n @since 2.1"]
    pub fn nn_tensor_io_time(tensor: *mut NNTensor) -> i64;
}
extern "C" {
    #[doc = " Writes the  tensor inforamtion to the FILE stream provided.  The format is\n \"[D0 D1 D2 D3]\" where D0..D3 are the dimensions provided.  If the data\n parameter is true the format will be followed by \": ...\" where ... is the\n string representation of the tensor's data.\n\n @warning Before version 2.4.32 this function always assumes float32 tensors\n and will therefore lead to segmentation faults when used with integer\n tensors.\n\n @param out Pointer to the FILE stream where the tensor will be written to.\n @param tensor Pointer to the tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_printf(tensor: *mut NNTensor, data: bool, out: *mut FILE);
}
extern "C" {
    #[doc = " Assigns the tensor parameters and optionally data pointer.  The default\n implementation uses the data buffer as the internal storage for tensor data\n and it MUST outlive the tensor.  For engine plugins they may choose how to\n use the data but for the OpenCL example if data is provided it will be copied\n into the OpenCL buffer then otherwise never used again.  If NULL is provided\n for data the OpenCL engine would create the memory and leave it unassigned.\n\n If using the default implementation and leaving data NULL then all operations\n which require data will fail.  The most dynamic tensor setup with optional\n data would be to call assign to setup the parameters with NULL data, then\n calling @ref nn_tensor_native_handle to see if one was created, if not the\n data buffer can be malloc'ed followed by a call to @ref\n nn_tensor_set_native_handle with this buffer.  One could also call\n nn_tensor_assign a second time with data set to the malloc'ed data.\n\n @param tensor Pointer to the given tensor object.\n @param type The data type that the tensor is storing (The type of the\n provided data).\n @param n_dims The number of dimensions in the provided tensor.\n @param shape The shape of the given tensor.\n @param data The new tensor data to be placed within the tensor provided.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_assign(
        tensor: *mut NNTensor,
        type_: NNTensorType,
        n_dims: i32,
        shape: *const i32,
        data: *mut ::std::os::raw::c_void,
    ) -> NNError;
}
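// Hedged sketch (editor's addition) of the "most dynamic" setup described in
// the nn_tensor_assign documentation above: assign the parameters with NULL
// data, check whether a backing buffer was created, and otherwise attach one
// allocated by the caller.  The shape and function name are illustrative only,
// and the returned buffer must be kept alive for as long as the tensor is.
#[allow(dead_code)]
unsafe fn example_assign_with_optional_data(tensor: *mut NNTensor) -> Result<Vec<u8>, NNError> {
    let shape: [i32; 4] = [1, 224, 224, 3];
    let err = nn_tensor_assign(
        tensor,
        NNTensorType_NNTensorType_U8,
        shape.len() as i32,
        shape.as_ptr(),
        ::std::ptr::null_mut(),
    );
    if err != NNError_NN_SUCCESS {
        return Err(err);
    }
    // If no backing buffer was created, allocate one and attach it.
    let mut backing = Vec::new();
    if nn_tensor_native_handle(tensor).is_null() {
        backing = vec![0u8; nn_tensor_size(tensor) as usize];
        nn_tensor_set_native_handle(tensor, backing.as_mut_ptr() as *mut ::std::os::raw::c_void);
    }
    Ok(backing)
}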
extern "C" {
    #[doc = " Maps the tensor using the memory from the parent tensor.\n\n @param tensor Pointer to the tensor object where the view will be stored.\n @param type The data type that the tensor is storing.\n @param n_dims The number of dimensions in the provided tensor.\n @param shape The shape of the given tensor.\n @param parent Pointer to the tensor object that holds the original tensor.\n @param offset TO BE DETERMINED.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_view(
        tensor: *mut NNTensor,
        type_: NNTensorType,
        n_dims: i32,
        shape: *const i32,
        parent: *mut NNTensor,
        offset: i32,
    ) -> NNError;
}
extern "C" {
    #[doc = " Allocates the internal memory for the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_alloc(
        tensor: *mut NNTensor,
        type_: NNTensorType,
        n_dims: i32,
        shape: *const i32,
    ) -> NNError;
}
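// Hedged usage sketch (editor's addition): creating a tensor in caller-owned
// memory and letting the library allocate its data buffer, based on
// nn_tensor_sizeof(), nn_tensor_init(), and nn_tensor_alloc() above.  Passing
// a NULL engine is assumed here to select the default CPU implementation.
#[allow(dead_code)]
unsafe fn example_tensor_alloc() -> Result<(), NNError> {
    // u64-backed storage keeps the tensor object 8-byte aligned.
    let mut storage = vec![0u64; (nn_tensor_sizeof() + 7) / 8];
    let tensor = nn_tensor_init(
        storage.as_mut_ptr() as *mut ::std::os::raw::c_void,
        ::std::ptr::null_mut(),
    );
    if tensor.is_null() {
        return Err(NNError_NN_ERROR_OUT_OF_MEMORY);
    }
    let shape: [i32; 2] = [4, 8];
    let err = nn_tensor_alloc(tensor, NNTensorType_NNTensorType_F32, 2, shape.as_ptr());
    if err != NNError_NN_SUCCESS {
        nn_tensor_release(tensor);
        return Err(err);
    }
    // Fill with a constant so the freshly allocated buffer has defined contents.
    let err = nn_tensor_fill(tensor, 1.0);
    nn_tensor_release(tensor);
    if err == NNError_NN_SUCCESS { Ok(()) } else { Err(err) }
}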
extern "C" {
    #[doc = " Returns the shape of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_shape(tensor: *const NNTensor) -> *const i32;
}
extern "C" {
    #[doc = " Returns the strides of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_strides(tensor: *const NNTensor) -> *const i32;
}
extern "C" {
    #[doc = " Returns the number of dimensions of the given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_dims(tensor: *const NNTensor) -> i32;
}
extern "C" {
    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the read-only version which causes the engine to download buffers to the CPU\n memory space if required but will not flush back to the device on unmap.\n\n If the tensor is already mapped read-only or read-write a pointer is returned\n and the reference count increased, if it was already mapped write-only NULL\n is returned.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_mapro(tensor: *mut NNTensor) -> *const ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the read-write version which causes the engine to download buffers to the CPU\n memory space if required and will also flush back to the device on unmap.\n\n If the tensor is already mapped read-only it needs to be unmapped before\n calling maprw otherwise NULL is returned.  A tensor already mapped as rw will\n simply increase the reference count.  A write-only mapped tensor will also\n return NULL.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_maprw(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Maps the tensor's memory and returns the client accessible pointer.  This is\n the write-only version which will not cause a download of the buffers to the\n CPU memory space on map but will upload to the device on unmap.\n\n If the tensor is already mapped write-only or read-write a pointer is\n returned and the reference count increased.  If it was previously mapped as\n read-only NULL is returned.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_mapwo(tensor: *mut NNTensor) -> *mut ::std::os::raw::c_void;
}
extern "C" {
    #[doc = " Returns the tensor's mapping count, 0 means the tensor is unmapped.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_mapped(tensor: *const NNTensor) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Releases the tensor mapping, if the reference count reaches 0 it will be\n fully unmapped and will force the flush to the device, if required.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_unmap(tensor: *mut NNTensor);
}
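// Hedged usage sketch (editor's addition): reading tensor data through the
// reference-counted map/unmap API described above.  Assumes an F32 tensor that
// is not currently mapped write-only.
#[allow(dead_code)]
unsafe fn example_read_tensor(tensor: *mut NNTensor) -> Option<Vec<f32>> {
    // A read-only mapping downloads data to the CPU if an accelerator engine
    // is in use, but nothing is flushed back to the device on unmap.
    let data = nn_tensor_mapro(tensor) as *const f32;
    if data.is_null() {
        // NULL indicates the tensor is mapped write-only or has no data.
        return None;
    }
    let volume = nn_tensor_volume(tensor) as usize;
    let copy = ::std::slice::from_raw_parts(data, volume).to_vec();
    // Every successful map must be balanced by an unmap.
    nn_tensor_unmap(tensor);
    Some(copy)
}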
extern "C" {
    #[doc = " Returns the type of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_type(tensor: *const NNTensor) -> NNTensorType;
}
extern "C" {
    #[doc = " Sets the type of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_set_type(tensor: *mut NNTensor, type_: NNTensorType) -> NNError;
}
extern "C" {
    #[doc = " Returns the element size of a given tensor object.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_element_size(tensor: *const NNTensor) -> usize;
}
extern "C" {
    #[doc = " Calculates the total tensor volume (product of dimensions).\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_volume(tensor: *const NNTensor) -> i32;
}
extern "C" {
    #[doc = " Calculates the total byte size of the tensor (volume * element_size).\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_size(tensor: *const NNTensor) -> i32;
}
extern "C" {
    #[doc = " Returns the natural data axis of the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_axis(tensor: *const NNTensor) -> ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the zero-points for the tensor and optionally the number of\n zero-points.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_zeros(tensor: *const NNTensor, n_zeros: *mut usize) -> *const i32;
}
extern "C" {
    #[doc = " Sets the quantization zero-points for the tensor.  If n_zeros>1 it should\n match the channel dimension (axis) of the tensor.\n\n If own=1 then the tensor will take ownership of the buffer and free it when\n the tensor is released.  Otherwise the buffer must outlive the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_set_zeros(
        tensor: *mut NNTensor,
        n_zeros: usize,
        zeros: *const i32,
        own: ::std::os::raw::c_int,
    );
}
extern "C" {
    #[doc = " Configures the channel axis of the tensor.  This refers to the \"C\" in\n orderings such as NHWC and NCHW.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_set_axis(tensor: *mut NNTensor, axis: i32);
}
extern "C" {
    #[doc = " Returns the scales array for the tensor and optionally the number of scales.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_scales(tensor: *const NNTensor, n_scales: *mut usize) -> *const f32;
}
extern "C" {
    #[doc = " Internal API used by the RTM loader to associate quantization parameters to\n the tensor.\n\n @private @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_quant_params(tensor: *const NNTensor, quant_params: *mut NNQuantParam);
}
extern "C" {
    #[doc = " Sets the quantization scales for the tensor.  If n_scales>1 it should match\n the channel dimension (axis) of the tensor.\n\n If own=1 then the tensor will take ownership of the buffer and free it when\n the tensor is released.  Otherwise the buffer must outlive the tensor.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_set_scales(
        tensor: *mut NNTensor,
        n_scales: usize,
        scales: *const f32,
        own: ::std::os::raw::c_int,
    );
}
extern "C" {
    #[doc = " Returns the quantization type for the tensor.\n\n @note This API was missing before version 2.4.32 and instead the\n quantization format is inferred as affine when scales and zeros are provided\n and per-tensor vs. per-channel is inferred based on scales/zeros being 1 or\n greater than 1.\n\n @param tensor the tensor object used to query quantization type.\n\n @returns @ref NNQuantizationType_None for tensors which do not provide\n  quantization parameters.\n @returns @ref NNQuantizationType_Affine_PerTensor for tensors which provide\n  quantization parameters which map globally to the tensor.\n @returns @ref NNQuantizationType_Affine_PerChannel for tensors which provide\n  quantization parameters which map to each channel \"C\" of the tensor.\n @returns @ref NNQuantizationType_DFP for tensors which provide DFP\n  parameters.  Currently unsupported.\n\n @public @memberof NNTensor\n @since 2.4.32"]
    pub fn nn_tensor_quantization_type(tensor: *mut NNTensor) -> NNQuantizationType;
}
extern "C" {
    #[doc = " Tensor shape comparison.\n\n @returns true if both shapes are equal otherwise false.\n\n @since 2.0\n @deprecated 2.3"]
    pub fn nn_tensor_shape_equal(left: *const i32, right: *const i32) -> bool;
}
extern "C" {
    #[doc = " Copys the source shape array to the destination array.\n\n @since 2.0\n @deprecated 2.3"]
    pub fn nn_tensor_shape_copy(dst: *mut i32, src: *const i32);
}
extern "C" {
    #[doc = " Returns the offset of a given tensor.  This function can be used to calculate\n the index across numerous dimensions.\n\n @note Avoid using this function as part of inner loops as it requires a\n multiply and add for each dimenions.  Instead it can be used in an outer loop\n to get the starting index then increment this index in the inner loop,\n possibly using the tensor strides.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions provided in the @p shape\n @param shape the multi-dimensional index used to calculate the linear index\n\n @return the element index into the tensor based on the muliple dimenional\n indices provided.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_offset(
        tensor: *const NNTensor,
        n_dims: i32,
        shape: *const i32,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Returns the offset of a given tensor using variable length dimensions. This\n works the same as @ref nn_tensor_offset() but uses variable arguments. The\n user **must** provide @p n_dims number of parameters after the\n @p n_dims parameter.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions to use when calculating the index\n @param … variable number of shape elements which **must** be of type int32_t\n\n @return the element index into the tensor based on the muliple dimenional\n indices provided.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_offsetv(tensor: *const NNTensor, n_dims: i32, ...) -> ::std::os::raw::c_int;
}
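// Hedged sketch (editor's addition): using nn_tensor_offset() outside the
// inner loop, as the note above recommends, and then walking the innermost
// dimension by incrementing the element index.  Assumes a rank-4 NHWC F32
// tensor whose innermost (channel) dimension is stored contiguously.
#[allow(dead_code)]
unsafe fn example_channel_sum(tensor: *mut NNTensor, n: i32, h: i32, w: i32) -> f32 {
    let index: [i32; 4] = [n, h, w, 0];
    // Element offset of the first channel at (n, h, w, 0).
    let base = nn_tensor_offset(tensor, 4, index.as_ptr()) as usize;
    let shape = ::std::slice::from_raw_parts(nn_tensor_shape(tensor), 4);
    let channels = shape[3] as usize;
    let mut sum = 0.0f32;
    let data = nn_tensor_mapro(tensor) as *const f32;
    if !data.is_null() {
        for c in 0..channels {
            sum += *data.add(base + c);
        }
        nn_tensor_unmap(tensor);
    }
    sum
}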
extern "C" {
    #[doc = " Element-wise comparison of two tensors within a given tolerance, returning\n total number of errors relative to the left tensor.  If the two tensors are\n incompatible the volume of the left tensor is returned (all elements\n invalid).\n\n @public @memberof NNTensor\n @since 2.0\n @deprecated 2.3"]
    pub fn nn_tensor_compare(
        left: *mut NNTensor,
        right: *mut NNTensor,
        tolerance: f64,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Reshapes the given tensor to the provided new shape.\n\n @param tensor the tensor object used in the operation\n @param n_dims the number of dimensions which the tensor will contain after\n the operation completes successfully.  It must also match the number of\n elements in @p shape.\n @param shape the new shape for the tensor.  The array must be at least\n @p n_dims elements in size.\n\n @return @ref NN_SUCCESS if the reshape is able to be performed\n @return @ref NN_ERROR_SHAPE_MISMATCH if the new shape cannot be represented\n given the previous shape of the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_reshape(tensor: *mut NNTensor, n_dims: i32, shape: *const i32) -> NNError;
}
extern "C" {
    #[doc = " Shuffles (transpose) the tensor moving the current dimensions into the\n ordering defined in the order parameter.\n\n For example a traditional matrix transpose is done using order[] = { 1, 0 }\n in other words, the 0 dimension of the output references the 1 dimension of\n the input and the 1 dimension of the output references the 0 dimension of the\n input.\n\n Another example would be shuffling an NCHW tensor to NHWC using order[] = {\n 0, 2, 3, 1 }\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_shuffle(
        output: *mut NNTensor,
        input: *mut NNTensor,
        n_dims: i32,
        order: *const i32,
    ) -> NNError;
}
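// Hedged sketch (editor's addition): the NCHW -> NHWC shuffle mentioned in the
// documentation above, using order = {0, 2, 3, 1}.  The output tensor is
// assumed to already be allocated with the permuted shape.
#[allow(dead_code)]
unsafe fn example_nchw_to_nhwc(output: *mut NNTensor, input: *mut NNTensor) -> NNError {
    // Output dimension i takes its data from input dimension order[i].
    let order: [i32; 4] = [0, 2, 3, 1];
    nn_tensor_shuffle(output, input, 4, order.as_ptr())
}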
extern "C" {
    #[doc = " Fills the tensor with the provided constant.  The constant is captured\n as double precision (64-bit floating point) which has 53-bits of precision\n on whole numbers.  This means the constant CANNOT represent all 64-bit\n integers but it CAN represent all 32-bit and lower integers.  If full\n 64-bit integer support is required @ref nn_tensor_map can be used though\n it is less efficient with some engines because of the addition memory\n transfer required.\n\n The double will be cast appropriately to the target tensor's type before\n filling the tensor.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_fill(tensor: *mut NNTensor, constant: f64) -> NNError;
}
extern "C" {
    #[doc = " Randomizes the data within the tensor.\n\n @public @memberof NNTensor\n @since 2.0\n @deprecated 2.3"]
    pub fn nn_tensor_randomize(tensor: *mut NNTensor) -> NNError;
}
extern "C" {
    #[doc = " Copies the contents of source tensor into destination tensor.\n\n This operation only copies the data and does not affect the\n destination tensor's properties.  The destination tensor must\n have an equal or larger volume.  If required data will be converted.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_copy(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
}
extern "C" {
    #[doc = " Loads a tensor with data from a user buffer\n User has to maintain the buffer and ensure compatibility with NHWC tensor\n Function will return error if there is a size mismatch\n i.e (bufsize != nn_tensor_size(tensor)) or tensor is invalid\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_copy_buffer(
        tensor: *mut NNTensor,
        buffer: *const ::std::os::raw::c_void,
        bufsize: usize,
    ) -> NNError;
}
extern "C" {
    #[doc = " Requantizes the source tensor into the destination tensor.\n\n The source tensor and destination tensor should be either I8 or U8, and\n per tensor quantized.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_requantize(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
}
extern "C" {
    #[doc = " Quantizes the source tensor into the destination tensor.\n\n The source tensor should be float and the destination integer.  If the\n destination tensor does not have quantization parameters they will be\n calculated from the source tensor and stored into the destination tensor.\n\n When calculating the quantization parameters if axis is a valid axis* then\n per-channel quantization will be performed along the axis, otherwise\n per-tensor quantization will be performed.  If the destination tensor has\n quantization parameters axis is ignored.\n\n Valid Axis: (axis > 0 && axis < n_dims)\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_quantize(
        dest: *mut NNTensor,
        source: *mut NNTensor,
        axis: ::std::os::raw::c_int,
    ) -> NNError;
}
extern "C" {
    #[doc = " Quantizes the source buffer into the destination tensor.\n\n The source tensor should be float and the destination integer.  If the\n destination tensor does not have quantization parameters they will be\n calculated from the source buffer and stored into the destination tensor.\n\n When calculating the quantization parameters if axis is a valid axis* then\n per-channel quantization will be performed along the axis, otherwise\n per-tensor quantization will be performed. If the destination tensor has\n quantization parameters axis is ignored.\n\n Valid Axis: (axis > 0 && axis < n_dims)\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_quantize_buffer(
        dest: *mut NNTensor,
        buffer_length: usize,
        buffer: *const f32,
        axis: ::std::os::raw::c_int,
    ) -> NNError;
}
extern "C" {
    #[doc = " De-quantizes the source tensor into the destination tensor.\n\n The source tensor should be integer and the destination float.  The source\n tensor must have quantization parameters otherwise the operation will simply\n cast the integer data to float.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_dequantize(dest: *mut NNTensor, source: *mut NNTensor) -> NNError;
}
extern "C" {
    #[doc = " De-quantizes the source tensor into the destination buffer.\n\n The source tensor should be integer and the destination float.  The source\n tensor must have quantization parameters otherwise the operation will simply\n cast the integer data to float.\n\n The buffer must be at least buffer_length*sizeof(float) size in bytes.\n\n @public @memberof NNTensor\n @since 2.4"]
    pub fn nn_tensor_dequantize_buffer(
        source: *mut NNTensor,
        buffer_length: usize,
        buffer: *mut f32,
    ) -> NNError;
}
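// Example sketch (not part of the generated bindings): de-quantizing a tensor
// into a caller-provided Vec<f32>.  `element_count` is assumed to be the
// tensor's element count (volume), so the Vec provides at least
// element_count * sizeof(f32) bytes as the doc comment above requires.
pub unsafe fn example_dequantize_to_vec(source: *mut NNTensor, element_count: usize) -> Vec<f32> {
    let mut out = vec![0.0f32; element_count];
    // Error handling is omitted; a real caller would check the returned NNError.
    let _err = nn_tensor_dequantize_buffer(source, out.len(), out.as_mut_ptr());
    out
}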
extern "C" {
    #[doc = " nn_tensor_concat concatenates all of the given input tensors into\n the given output tensor.\n\n @output pointer to the output tensor\n @inputs list of pointers to the input tensors\n @n_inputs the number of inputs\n @axis the axis along which to concatenate the inputs\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_concat(
        output: *mut NNTensor,
        n_inputs: i32,
        inputs: *mut *mut NNTensor,
        axis: i32,
    ) -> NNError;
}
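// Example sketch (not part of the generated bindings): concatenating two
// tensors along the channel axis.  Axis 3 assumes NHWC, and the output tensor
// is assumed to already have the combined shape.
pub unsafe fn example_concat_channels(
    output: *mut NNTensor,
    a: *mut NNTensor,
    b: *mut NNTensor,
) -> NNError {
    let mut inputs = [a, b];
    nn_tensor_concat(output, inputs.len() as i32, inputs.as_mut_ptr(), 3)
}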
extern "C" {
    #[doc = " nn_tensor_slice copies a slice of the tensor into output. For a version which\n supports strides see @ref nn_tensor_strided_slice.\n\n The axes, head, and tail must be of length n_axes or NULL.  Calling slice\n with axes==NULL will ignore head/tail and is effectively @ref nn_tensor_copy.\n\n When head is NULL all axes are assumed to start at 0.  When tail is NULL all\n axes are assumed to end at (len(axis) - head) for the given axis.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_slice(
        output: *mut NNTensor,
        input: *mut NNTensor,
        n_axes: i32,
        axes: *const i32,
        head: *const i32,
        tail: *const i32,
    ) -> NNError;
}
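// Example sketch (not part of the generated bindings): copying rows 2..10 of
// axis 1 (the H axis of an NHWC tensor) into the output.  The header does not
// state whether tail is inclusive or exclusive, so the exact bounds shown here
// are an assumption.
pub unsafe fn example_slice_rows(output: *mut NNTensor, input: *mut NNTensor) -> NNError {
    let axes: [i32; 1] = [1];
    let head: [i32; 1] = [2];
    let tail: [i32; 1] = [10];
    nn_tensor_slice(output, input, 1, axes.as_ptr(), head.as_ptr(), tail.as_ptr())
}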
extern "C" {
    pub fn nn_tensor_strided_slice(
        output: *mut NNTensor,
        input: *mut NNTensor,
        n_axes: i32,
        axes: *const i32,
        head_: *const i32,
        tail_: *const i32,
        strides_: *const i32,
    ) -> NNError;
}
extern "C" {
    #[doc = " nn_tensor_padding calculates the paddings for the given tensor, padtype,\n window, stride, and dilation given n_dims being queried from the tensor's\n nn_tensor_dims().\n\n The paddings pointer must point to an array of 2 * n_dims elements into which\n the function will write the head/tail padding tuples for each of the n_dims\n provided dimensions.  The padded_shape parameter must point to an array of\n n_dims elemens which will receive the output (padded) shape.\n\n The padtype can be \"VALID\" or \"SAME\".  When padtype is \"SAME\" padded_shape\n will equal the shape of the input tensor and the paddings will be provided to\n achieve this shape.  When padtype is \"VALID\" then paddings will be all zeros\n and the padded_shape will provide the target output shape given the provided\n parameters.\n\n @public @memberof NNTensor\n @since 2.3"]
    pub fn nn_tensor_padding(
        tensor: *mut NNTensor,
        padtype: *const ::std::os::raw::c_char,
        window: *const i32,
        stride: *const i32,
        dilation: *const i32,
        padded_shape: *mut i32,
        paddings: *mut i32,
    ) -> NNError;
}
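// Example sketch (not part of the generated bindings): querying SAME padding
// for a 3x3 window with stride and dilation of 1 on a 4-D tensor.  The
// paddings array holds a head/tail pair per dimension (2 * n_dims entries).
// Laying out window/stride/dilation per NHWC dimension is an assumption of
// this sketch.
pub unsafe fn example_same_padding(tensor: *mut NNTensor) -> ([i32; 4], [i32; 8]) {
    let window = [1i32, 3, 3, 1];
    let stride = [1i32, 1, 1, 1];
    let dilation = [1i32, 1, 1, 1];
    let mut padded_shape = [0i32; 4];
    let mut paddings = [0i32; 8];
    let padtype = b"SAME\0";
    // Error handling is omitted; a real caller would check the returned NNError.
    let _err = nn_tensor_padding(
        tensor,
        padtype.as_ptr() as *const ::std::os::raw::c_char,
        window.as_ptr(),
        stride.as_ptr(),
        dilation.as_ptr(),
        padded_shape.as_mut_ptr(),
        paddings.as_mut_ptr(),
    );
    (padded_shape, paddings)
}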
extern "C" {
    #[doc = " nn_tensor_pad implements a padded Tensor to Tensor copy.  This can be used to\n achieve the various convolution padding strategies (SAME, FULL).  For example\n SAME conv2d would use the following padded_copy before running the conv2d\n layer.\n\n output_shape = { input_shape[0],\n                  int(ceil(float(input_shape[1]) /\n strides[1])), int(ceil(float(input_shape[2]) / strides[2])), weights_shape[3]\n };\n\n pad_height = (output_shape[1] - 1) * strides[1] + weights_shape[0] -\n input_shape[1]; pad_width  = (output_shape[2] - 1) * strides[2] +\n weights_shape[1] - input_shape[2];\n\n @output pointer to the output tensor\n @input pointer to the input tensor\n @head lead-in length of the pad for dimension NHWC\n @tail lead-out length of the pad for dimension NHWC\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_pad(
        output: *mut NNTensor,
        input: *mut NNTensor,
        head: *const i32,
        tail: *const i32,
        constant: f64,
    ) -> NNError;
}
extern "C" {
    #[doc = " Loads an image from file into the provided tensor.\n\n @public @memberof NNTensor\n @since 2.2\n @deprecated 2.3"]
    pub fn nn_tensor_load_file(
        tensor: *mut NNTensor,
        filename: *const ::std::os::raw::c_char,
    ) -> NNError;
}
extern "C" {
    #[doc = " Loads an image from file into the provided tensor.\n\n @public @memberof NNTensor\n @since 2.2\n @deprecated 2.3"]
    pub fn nn_tensor_load_file_ex(
        tensor: *mut NNTensor,
        filename: *const ::std::os::raw::c_char,
        proc_: u32,
    ) -> NNError;
}
extern "C" {
    #[doc = " Loads an image from the provided buffer and decodes it accordingly, the\n function uses the images headers to find an appropriate decoder.  The\n function will handle any required casting to the target tensor's format.\n\n @public @memberof NNTensor\n @since 2.0"]
    pub fn nn_tensor_load_image(
        tensor: *mut NNTensor,
        image: *const ::std::os::raw::c_void,
        image_size: usize,
    ) -> NNError;
}
extern "C" {
    #[doc = " Loads an image from the provided buffer and decodes it accordingly, the\n function uses the images headers to find an appropriate decoder.  The\n function will handle any required casting to the target tensor's format and\n will apply image standardization (compatible with tensorflow's\n tf.image.per_image_standardization) if the proc parameter is set to\n NN_IMAGE_PROC_WHITENING.\n\n When called with proc==0 it is the same as nn_tensor_load_image().\n\n NN_IMAGE_PROC_UNSIGNED_NORM\n NN_IMAGE_PROC_WHITENING_NORM\n NN_IMAGE_PROC_SIGNED_NORM\n\n @public @memberof NNTensor\n @since 2.1"]
    pub fn nn_tensor_load_image_ex(
        tensor: *mut NNTensor,
        image: *const ::std::os::raw::c_void,
        image_size: usize,
        proc_: u32,
    ) -> NNError;
}
extern "C" {
    #[doc = " Attempts to validate model, this is automatically called by nn_model_load and\n nn_model_mmap.  The function returns 0 on success, otherwise it will return\n an error code which can be turned into a string by calling\n @ref nn_model_validate_error() with the return value from\n @ref nn_model_validate().\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_validate(memory: *const NNModel, size: usize) -> ::std::os::raw::c_int;
}
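// Example sketch (not part of the generated bindings): validating a raw model
// blob and printing the human-readable reason on failure.  Treating a non-zero
// return as failure follows the doc comment above.
pub unsafe fn example_validate(blob: &[u8]) -> bool {
    let err = nn_model_validate(blob.as_ptr() as *const NNModel, blob.len());
    if err == 0 {
        return true;
    }
    let msg = nn_model_validate_error(err);
    if !msg.is_null() {
        eprintln!(
            "model rejected: {}",
            ::std::ffi::CStr::from_ptr(msg).to_string_lossy()
        );
    }
    false
}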
extern "C" {
    #[doc = " Returns the string associated with a given error returned from\n @ref nn_model_validate().\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_validate_error(err: ::std::os::raw::c_int) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the name of the given model object.  Names are optional and if the\n model does not contain a name then NULL will be returned.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_name(model: *const NNModel) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Currently returns NULL (UPDATE WHEN FUNCTION IS UPDATED)\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_uuid(model: *const NNModel) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Currently returns 0\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_serial(model: *const NNModel) -> u32;
}
extern "C" {
    #[doc = " Returns the number of labels within a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_label_count(model: *const NNModel) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Returns the label of the given index within the given model object.  If the\n model contains no labels or the index is out of range then NULL will be\n returned.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_label(
        model: *const NNModel,
        index: ::std::os::raw::c_int,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns an optional icon resource for the provided label index.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_label_icon(
        model: *const NNModel,
        index: ::std::os::raw::c_int,
        size: *mut usize,
    ) -> *const u8;
}
extern "C" {
    #[doc = " Returns the list of model input indices and optionally the number of inputs.\n\n If the field is missing from the model NULL is returned.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_inputs(model: *const NNModel, n_inputs: *mut usize) -> *const u32;
}
extern "C" {
    #[doc = " Returns the list of model output indices and optionally the number of\n outputs.\n\n If the field is missing from the model 0 is returned.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_outputs(model: *const NNModel, n_outputs: *mut usize) -> *const u32;
}
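// Example sketch (not part of the generated bindings): collecting the model's
// input tensor indices into a Vec, returning an empty Vec when the field is
// absent (NULL).
pub unsafe fn example_input_indices(model: *const NNModel) -> Vec<u32> {
    let mut n_inputs: usize = 0;
    let indices = nn_model_inputs(model, &mut n_inputs);
    if indices.is_null() || n_inputs == 0 {
        return Vec::new();
    }
    ::std::slice::from_raw_parts(indices, n_inputs).to_vec()
}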
extern "C" {
    #[doc = " Returns the number of layers within a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_count(model: *const NNModel) -> usize;
}
extern "C" {
    #[doc = " Returns the name of a layer at a given index within the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_name(
        model: *const NNModel,
        index: usize,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the index of a given layer with the name provided in the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_lookup(
        model: *const NNModel,
        name: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Returns the type of a layer at the given index within the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_type(
        model: *const NNModel,
        index: usize,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the type ID of the layer.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_type_id(model: *const NNModel, index: usize) -> i16;
}
extern "C" {
    #[doc = " Returns the datatype of a layer at the given index within the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_datatype(
        model: *const NNModel,
        index: usize,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the datatype of a layer at the given index within the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_datatype_id(model: *const NNModel, index: usize) -> NNTensorType;
}
extern "C" {
    #[doc = " Returns the array of quantization zero-points, and optionally the number of\n zero-points in the array.  The length will either be 0, 1, or equal to the\n number of channels in an NHWC/NCHW tensor.\n\n The channel axis can be queried using @ref nn_model_layer_axis().\n\n If no quantization parameters are available then n_zeros will be 0.\n If the tensor is quantized using full tensor quantization n_zeros will be 1.\n If the tensor is quantized using per-channel quantization n_zeros will be C\n which will equal the channel dimension of the tensor.  For an NHWC tensor it\n would equal shape[3].\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_zeros(
        model: *const NNModel,
        index: usize,
        n_zeros: *mut usize,
    ) -> *const i32;
}
extern "C" {
    #[doc = " Returns the array of quantization scales, and optionally the number of scales\n in the array.  The length will either be 0, 1, or equal to the number of\n channels in an NHWC/NCHW tensor.\n\n The channel axis can be queried using @ref nn_model_layer_axis().\n\n If no quantization parameters are available then n_scales will be 0.\n If the tensor is quantized using full tensor quantization n_scales will be 1.\n If the tensor is quantized using per-channel quantization n_scales will be C\n which will equal the channel dimension of the tensor.  For an NHWC tensor it\n would equal shape[3].\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_scales(
        model: *const NNModel,
        index: usize,
        n_scales: *mut usize,
    ) -> *const f32;
}
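// Example sketch (not part of the generated bindings): reading a layer's
// quantization scales.  Per the doc comment above, a length of 0 means no
// quantization parameters, 1 means per-tensor quantization, and C (the channel
// count) means per-channel quantization along nn_model_layer_axis().
pub unsafe fn example_layer_scales(model: *const NNModel, index: usize) -> Vec<f32> {
    let mut n_scales: usize = 0;
    let scales = nn_model_layer_scales(model, index, &mut n_scales);
    if scales.is_null() || n_scales == 0 {
        return Vec::new();
    }
    ::std::slice::from_raw_parts(scales, n_scales).to_vec()
}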
extern "C" {
    #[doc = " Returns the natural data axis for the tensor or -1 if one is not set.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_axis(model: *const NNModel, index: usize) -> ::std::os::raw::c_int;
}
extern "C" {
    #[doc = " Returns the shape of a layer at the given index within the given model\n object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_shape(
        model: *const NNModel,
        index: usize,
        n_dims: *mut usize,
    ) -> *const i32;
}
extern "C" {
    #[doc = " Returns the number of inputs to a layer at the given index within the given\n model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_layer_inputs(
        model: *const NNModel,
        index: usize,
        inputs: *mut *const u32,
    ) -> usize;
}
extern "C" {
    #[doc = " Returns an NNModelParameter from the model at the layer index defined by\n layer using the parameter key.  If the layer does not contain this parameter\n NULL is returned.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
    ) -> *const NNModelParameter;
}
extern "C" {
    #[doc = " Returns the shape of the model parameter for layer at index <layer>.\n\n @ref nn_model_parameter_shape()\n\n Returns NULL if either the parameter is not found or the shape is missing.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_shape(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
        n_dims: *mut usize,
    ) -> *const i32;
}
extern "C" {
    #[doc = " Returns float data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_f32()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_data_f32(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
        length: *mut usize,
    ) -> *const f32;
}
extern "C" {
    #[doc = " Returns int16 data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_i16()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_data_i16(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
        length: *mut usize,
    ) -> *const i16;
}
extern "C" {
    #[doc = " Returns raw data for parameter <key> at layer index <layer>.  This is a\n convenience wrapper around acquiring the parameter followed by acquiring the\n data.\n\n @ref nn_model_parameter_data_raw()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_data_raw(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
        length: *mut usize,
    ) -> *const u8;
}
extern "C" {
    #[doc = " Returns string data for parameter <key> at layer index <layer> for string\n array element <index>.  This is a convenience wrapper around acquiring the\n parameter followed by acquiring the data.\n\n @ref nn_model_parameter_data_str()\n\n Returns NULL if either the parameter is not found or the data is missing.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_data_str(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
        index: usize,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns number of string elements in the data_str array for the specified\n layer and parameter key.  This is a convenience wrapper around acquiring the\n parameter followed by acquiring the data.\n\n @ref nn_model_parameter_data_str_len()\n\n Returns number of string elements in the array.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_layer_parameter_data_str_len(
        model: *const NNModel,
        layer: usize,
        key: *const ::std::os::raw::c_char,
    ) -> usize;
}
extern "C" {
    #[doc = " Returns the memory size of the given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_memory_size(model: *const NNModel) -> usize;
}
extern "C" {
    #[doc = " Returns the minimum cache size of a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_cache_minimum_size(model: *const NNModel) -> usize;
}
extern "C" {
    #[doc = " Returns the optimum cache size of a given model object.\n\n @public @memberof NNModel\n @since 2.0"]
    pub fn nn_model_cache_optimum_size(model: *const NNModel) -> usize;
}
extern "C" {
    #[doc = " The number of resources defined in the model.\n\n @param model pointer to the RTM model\n\n @returns number of resources defined in the model.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_resource_count(model: *const NNModel) -> usize;
}
extern "C" {
    #[doc = " Retrieves a reference to the resource at the provided index.\n\n @param model pointer to the RTM model\n @param index resource index\n\n @returns an @ref NNModelResource pointer for the provided @p index in the\n given model.\n @returns NULL if either the model or index are invalid.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_resource_at(model: *const NNModel, index: usize) -> *const NNModelResource;
}
extern "C" {
    #[doc = " Retrieves a reference to the resource with the given name.\n\n @param model pointer to the RTM model\n @param name the unique name of the resource\n\n @returns an @ref NNModelResource pointer for the provided unique @p name.\n @returns NULL if either the @p model or @p name are invalid, NULL, or the\n  @p name is not found.\n\n @public @memberof NNModel\n @since 2.4"]
    pub fn nn_model_resource(
        model: *const NNModel,
        name: *const ::std::os::raw::c_char,
    ) -> *const NNModelResource;
}
extern "C" {
    #[doc = " Returns the shape of the parameter data or NULL if no shape was defined.  If\n n_dims is non-NULL the number of dimensions will be stored there.  The shape\n attribute is not required for parameters but can be used either on its own\n or as part of defining layout of data attributes.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_shape(
        parameter: *const NNModelParameter,
        n_dims: *mut usize,
    ) -> *const i32;
}
extern "C" {
    #[doc = " Returns parameter float data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_f32(
        parameter: *const NNModelParameter,
        length: *mut usize,
    ) -> *const f32;
}
extern "C" {
    #[doc = " Returns parameter int32_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_i32(
        parameter: *const NNModelParameter,
        length: *mut usize,
    ) -> *const i32;
}
extern "C" {
    #[doc = " Returns parameter int16_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_i16(
        parameter: *const NNModelParameter,
        length: *mut usize,
    ) -> *const i16;
}
extern "C" {
    #[doc = " Returns parameter int8_t data, length of the array is optionally stored into\n the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_i8(
        parameter: *const NNModelParameter,
        length: *mut usize,
    ) -> *const i8;
}
extern "C" {
    #[doc = " Returns parameter raw data pointer, length of the array is optionally stored\n into the length parameter if non-NULL.\n\n If parameter does not have this data type, then NULL is returned.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_raw(
        parameter: *const NNModelParameter,
        length: *mut usize,
    ) -> *const u8;
}
extern "C" {
    #[doc = " Returns parameter string data at desired index.  This data handler is\n different from the others which return the array as strings are themselves\n arrays and need special handling. Refer to @ref\n nn_model_parameter_data_str_len() to query the size of the data_str array,\n which refers to the number of strings in this parameter.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_str(
        parameter: *const NNModelParameter,
        index: usize,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the number of strings in the parameter's data_str attribute.\n\n @public @memberof NNModelParameter\n @since 2.4"]
    pub fn nn_model_parameter_data_str_len(parameter: *const NNModelParameter) -> usize;
}
extern "C" {
    #[doc = " The unique name of the resource as can be used to retrieve the resource using\n @ref nn_model_resource().\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the name of the resource.\n @returns NULL if the resource or name is NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
    pub fn nn_model_resource_name(
        resource: *const NNModelResource,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the meta string for the resource.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the meta parameter of the resource.\n @returns NULL if the resource or meta are NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
    pub fn nn_model_resource_meta(
        resource: *const NNModelResource,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the mime type string for the resource.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n\n @returns A string with the mime parameter of the resource.\n @returns NULL if the resource or mime are NULL.\n\n @public @memberof NNModelResource\n @since 2.4"]
    pub fn nn_model_resource_mime(
        resource: *const NNModelResource,
    ) -> *const ::std::os::raw::c_char;
}
extern "C" {
    #[doc = " Returns the raw binary data for the resource, the size of the data will be\n saved in @p data_size if non-NULL.\n\n @param resource pointer to a @ref NNModelResource retrieved from the model.\n @param data_size optional pointer to a size_t to receive the length in bytes\n of the data, if provided.\n\n @returns pointer to the start of the data stream of length @p data_size.\n @returns NULL if resource has no data associated.\n\n @public @memberof NNModelResource\n @since 2.4"]
    pub fn nn_model_resource_data(
        resource: *const NNModelResource,
        data_size: *mut usize,
    ) -> *const u8;
}
extern "C" {
    #[doc = " Returns the actual size of the context structure.  This size will be smaller\n than @ref NN_CONTEXT_SIZEOF which contains additional padding for future\n extension.  Since @ref nn_context_sizeof() is called dynamically at runtime\n it can return the true and unpadded size.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_sizeof() -> usize;
}
extern "C" {
    #[doc = " Initializes an NNContext and allocates required memories.  If any of the\n pointers are NULL malloc will be called automatically to create the memory\n using the provided sizes.  For memory_size and cache_size if these are 0\n then they will not be initialized.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_init(
        engine: *mut NNEngine,
        memory_size: usize,
        memory: *mut ::std::os::raw::c_void,
        cache_size: usize,
        cache: *mut ::std::os::raw::c_void,
    ) -> *mut NNContext;
}
extern "C" {
    #[doc = " Initializes an NNContext into the provided memory which *MUST* be at least\n NN_CONTEXT_SIZEOF bytes.  If any of the pointers are NULL malloc will be\n called automatically to create the memory using the provided sizes.  For\n memory_size and cache_size if these are 0 then they will not be initialized.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_init_ex(
        context_memory: *mut ::std::os::raw::c_void,
        engine: *mut NNEngine,
        memory_size: usize,
        memory: *mut ::std::os::raw::c_void,
        cache_size: usize,
        cache: *mut ::std::os::raw::c_void,
    ) -> *mut NNContext;
}
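// Example sketch (not part of the generated bindings): creating a context that
// lets the library malloc both the memory pool and the cache.  The sizes are
// placeholders; real values would normally come from nn_model_memory_size()
// and nn_model_cache_optimum_size() for the model being deployed.
pub unsafe fn example_context_new(engine: *mut NNEngine) -> *mut NNContext {
    nn_context_init(
        engine,
        16 * 1024 * 1024,       // memory pool size in bytes (placeholder)
        ::std::ptr::null_mut(), // let the library allocate the pool
        512 * 1024,             // cache size in bytes (placeholder)
        ::std::ptr::null_mut(), // let the library allocate the cache
    )
}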
extern "C" {
    #[doc = " Release the memory being used by the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_release(context: *mut NNContext);
}
#[doc = " Callback function for custom user ops.\n\n @public @memberof NNContext\n @since 2.4"]
pub type nn_user_ops = ::std::option::Option<
    unsafe extern "C" fn(
        context: *mut NNContext,
        opname: *const ::std::os::raw::c_char,
        index: usize,
    ) -> NNError,
>;
extern "C" {
    #[doc = " @public @memberof NNContext\n @since 2.4"]
    pub fn nn_context_user_ops_register(context: *mut NNContext, callback: nn_user_ops) -> NNError;
}
extern "C" {
    #[doc = " @public @memberof NNContext\n @since 2.4"]
    pub fn nn_context_user_ops(context: *mut NNContext) -> nn_user_ops;
}
extern "C" {
    #[doc = " @public @memberof NNContext\n @since 2.2"]
    pub fn nn_context_cache(context: *mut NNContext) -> *mut NNTensor;
}
extern "C" {
    #[doc = " @public @memberof NNContext\n @since 2.2"]
    pub fn nn_context_mempool(context: *mut NNContext) -> *mut NNTensor;
}
extern "C" {
    #[doc = " Returns the engine used by the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_engine(context: *mut NNContext) -> *mut NNEngine;
}
extern "C" {
    #[doc = " Returns the currently loaded model blob for the context.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_model(context: *mut NNContext) -> *const NNModel;
}
extern "C" {
    #[doc = " Loads the model provided by the input into the context.\n\n @context pointer to the context object\n @memory pointer to the memory that contains the model\n @memory_size the size of the memory that is used by the model\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_model_load(
        context: *mut NNContext,
        memory_size: usize,
        memory: *const ::std::os::raw::c_void,
    ) -> NNError;
}
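// Example sketch (not part of the generated bindings): loading a model blob
// that has already been read into memory, then looking up a tensor by name.
// The tensor name "input" is purely illustrative.
pub unsafe fn example_load_and_find_input(context: *mut NNContext, blob: &[u8]) -> *mut NNTensor {
    // Error handling is omitted; a real caller would check the returned NNError.
    let _err = nn_context_model_load(
        context,
        blob.len(),
        blob.as_ptr() as *const ::std::os::raw::c_void,
    );
    nn_context_tensor(context, b"input\0".as_ptr() as *const ::std::os::raw::c_char)
}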
extern "C" {
    #[doc = " Frees the memory used by the model within the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_model_unload(context: *mut NNContext);
}
extern "C" {
    #[doc = " Returns the tensor with the given name within the model provided by the given\n context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_tensor(
        context: *mut NNContext,
        name: *const ::std::os::raw::c_char,
    ) -> *mut NNTensor;
}
extern "C" {
    #[doc = " Returns the tensor at the given index with the model provided by the given\n context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_tensor_index(context: *mut NNContext, index: usize) -> *mut NNTensor;
}
extern "C" {
    #[doc = " Runs the model within the given context object.\n\n @public @memberof NNContext\n @since 2.0"]
    pub fn nn_context_run(context: *mut NNContext) -> NNError;
}
extern "C" {
    #[doc = " Runs layer with index from model within the given context object.\n If index is invalid NN_ERROR_INVALID_LAYER is returned, this can be\n used to determine when at the end of the model.\n\n @public @memberof NNContext\n @since 2.3"]
    pub fn nn_context_step(context: *mut NNContext, index: usize) -> NNError;
}
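// Example sketch (not part of the generated bindings): stepping through every
// layer of the loaded model one at a time, which can be useful for profiling.
// Iterating up to nn_model_layer_count() avoids relying on the exact generated
// name of the NN_ERROR_INVALID_LAYER constant, which is not shown here.
pub unsafe fn example_run_stepwise(context: *mut NNContext) {
    let n_layers = nn_model_layer_count(nn_context_model(context));
    for index in 0..n_layers {
        // A non-success NNError would normally abort the loop; error handling
        // is omitted in this sketch.
        let _err = nn_context_step(context, index);
    }
}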
extern "C" {
    #[doc = " Exposes the free() function\n"]
    pub fn nn_free(ptr: *mut ::std::os::raw::c_void);
}
extern "C" {
    #[doc = " Exposes the malloc() function\n"]
    pub fn nn_malloc(size: usize) -> *mut ::std::os::raw::c_void;
}