#![cfg_attr(docsrs, feature(doc_cfg))]
#[cfg(feature = "raw")]
#[cfg_attr(docsrs, doc(cfg(feature = "raw")))]
/// Raw, unprocessed bindgen output. Public only when the `raw` feature is
/// enabled, so downstream crates can reach the full C API surface directly.
pub mod bindings {
#![allow(unused_imports)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
// The `fallback` cfg is presumably set by the build script when bindgen
// cannot run (e.g. docs builds without the vendor SDK headers) — TODO confirm.
// In that case a pre-generated bindings file from the source tree is used.
#[cfg(fallback)]
include!("bindings_docs.rs");
// Normal path: splice in the bindings bindgen generated into OUT_DIR at
// build time.
#[cfg(not(fallback))]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
#[cfg(not(feature = "raw"))]
/// Same bindgen output as the `raw`-feature variant, but kept crate-private:
/// without `raw`, only the curated re-exports (e.g. in `rknn`) are public.
mod bindings {
#![allow(unused_imports)]
// dead_code is additionally allowed here because, unlike the public variant,
// unreferenced private items would otherwise warn.
#![allow(dead_code)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
// `fallback` cfg (presumably set by build.rs when bindgen can't run — TODO
// confirm) selects a pre-generated bindings file checked into the tree.
#[cfg(fallback)]
include!("bindings_docs.rs");
// Normal path: use the bindings generated at build time into OUT_DIR.
#[cfg(not(fallback))]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
#[cfg(feature = "rknn")]
#[cfg_attr(docsrs, doc(cfg(feature = "rknn")))]
pub mod rknn {
    //! Curated view of the core RKNN runtime API.
    //!
    //! The raw bindgen items are regrouped into topical namespaces
    //! ([`flag`], [`error`], [`limits`]) with the primary types and entry
    //! points re-exported at the module root, plus two small helpers that
    //! render tensor type enums as display strings.

    /// `RKNN_FLAG_*` / `RKNN_MEM_FLAG_*` bit-flag constants passed at context
    /// initialization or memory allocation time.
    pub mod flag {
        pub use crate::bindings::{
            RKNN_FLAG_ASYNC_MASK,
            RKNN_FLAG_COLLECT_MODEL_INFO_ONLY,
            RKNN_FLAG_COLLECT_PERF_MASK,
            RKNN_FLAG_DISABLE_FLUSH_INPUT_MEM_CACHE,
            RKNN_FLAG_DISABLE_FLUSH_OUTPUT_MEM_CACHE,
            RKNN_FLAG_DISABLE_PROC_HIGH_PRIORITY,
            RKNN_FLAG_ENABLE_SRAM,
            RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU,
            RKNN_FLAG_FENCE_IN_OUTSIDE,
            RKNN_FLAG_FENCE_OUT_OUTSIDE,
            RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE,
            RKNN_FLAG_MEM_ALLOC_OUTSIDE,
            RKNN_FLAG_MODEL_BUFFER_ZERO_COPY,
            RKNN_FLAG_PRIOR_HIGH,
            RKNN_FLAG_PRIOR_LOW,
            RKNN_FLAG_PRIOR_MEDIUM,
            RKNN_FLAG_SHARE_SRAM,
            RKNN_FLAG_SHARE_WEIGHT_MEM,
            RKNN_MEM_FLAG_ALLOC_NO_CONTEXT,
        };
    }

    /// Status codes returned by the runtime (`RKNN_SUCC` on success,
    /// `RKNN_ERR_*` otherwise).
    pub mod error {
        pub use crate::bindings::{
            RKNN_ERR_CTX_INVALID,
            RKNN_ERR_DEVICE_UNAVAILABLE,
            RKNN_ERR_DEVICE_UNMATCH,
            RKNN_ERR_FAIL,
            RKNN_ERR_INCOMPATILE_OPTIMIZATION_LEVEL_VERSION,
            RKNN_ERR_INCOMPATILE_PRE_COMPILE_MODEL,
            RKNN_ERR_INPUT_INVALID,
            RKNN_ERR_MALLOC_FAIL,
            RKNN_ERR_MODEL_INVALID,
            RKNN_ERR_OUTPUT_INVALID,
            RKNN_ERR_PARAM_INVALID,
            RKNN_ERR_TARGET_PLATFORM_UNMATCH,
            RKNN_ERR_TIMEOUT,
            RKNN_SUCC,
        };
    }

    /// Fixed-size limits baked into the SDK's structs.
    pub mod limits {
        pub use crate::bindings::{
            RKNN_MAX_DIMS,
            RKNN_MAX_DYNAMIC_SHAPE_NUM,
            RKNN_MAX_NAME_LEN,
            RKNN_MAX_NUM_CHANNEL,
        };
    }

    // Core data types used across the API surface.
    pub use crate::bindings::{
        rknn_context,
        rknn_core_mask,
        rknn_custom_string,
        rknn_init_extend,
        rknn_input,
        rknn_input_output_num,
        rknn_input_range,
        rknn_mem_alloc_flags,
        rknn_mem_size,
        rknn_mem_sync_mode,
        rknn_output,
        rknn_output_extend,
        rknn_perf_detail,
        rknn_perf_run,
        rknn_query_cmd,
        rknn_run_extend,
        rknn_sdk_version,
        rknn_tensor_attr,
        rknn_tensor_format,
        rknn_tensor_mem,
        rknn_tensor_mem_flags,
        rknn_tensor_qnt_type,
        rknn_tensor_type,
    };
    // Entry points: context lifecycle, inference, and zero-copy memory.
    pub use crate::bindings::{
        rknn_create_mem,
        rknn_destroy,
        rknn_destroy_mem,
        rknn_init,
        rknn_inputs_set,
        rknn_outputs_get,
        rknn_outputs_release,
        rknn_query,
        rknn_run,
        rknn_set_io_mem,
    };

    /// Human-readable label for a tensor element type.
    ///
    /// Unrecognized values yield `"UNKNOW"` (sic — this matches the vendor
    /// SDK's own spelling, which callers may compare against, so it is kept
    /// verbatim).
    pub fn get_type_string(t: rknn_tensor_type) -> &'static str {
        type T = rknn_tensor_type;
        match t {
            T::RKNN_TENSOR_FLOAT32 => "FP32",
            T::RKNN_TENSOR_FLOAT16 => "FP16",
            T::RKNN_TENSOR_INT8 => "INT8",
            T::RKNN_TENSOR_UINT8 => "UINT8",
            T::RKNN_TENSOR_INT16 => "INT16",
            T::RKNN_TENSOR_UINT16 => "UINT16",
            T::RKNN_TENSOR_INT32 => "INT32",
            T::RKNN_TENSOR_UINT32 => "UINT32",
            T::RKNN_TENSOR_INT64 => "INT64",
            T::RKNN_TENSOR_BOOL => "BOOL",
            T::RKNN_TENSOR_INT4 => "INT4",
            T::RKNN_TENSOR_BFLOAT16 => "BF16",
            _ => "UNKNOW",
        }
    }

    /// Human-readable label for a tensor quantization scheme.
    ///
    /// Unrecognized values yield `"UNKNOW"` (sic — vendor SDK spelling, kept
    /// verbatim for compatibility).
    pub fn get_qnt_type_string(t: rknn_tensor_qnt_type) -> &'static str {
        type Q = rknn_tensor_qnt_type;
        match t {
            Q::RKNN_TENSOR_QNT_NONE => "NONE",
            Q::RKNN_TENSOR_QNT_DFP => "DFP",
            Q::RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC => "AFFINE",
            _ => "UNKNOW",
        }
    }
}
#[cfg(feature = "matmul")]
#[cfg_attr(docsrs, doc(cfg(feature = "matmul")))]
/// Matmul API surface (feature `matmul`). Currently an empty placeholder —
/// presumably to be populated with re-exports of the SDK's matmul bindings;
/// TODO confirm intended contents.
pub mod matmul {}
#[cfg(feature = "custom-op")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-op")))]
/// Custom-operator API surface (feature `custom-op`). Currently an empty
/// placeholder — presumably to be populated with re-exports of the SDK's
/// custom-op bindings; TODO confirm intended contents.
pub mod custom_op {}