#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
pub const __bool_true_false_are_defined: u32 = 1;
pub const true_: u32 = 1;
pub const false_: u32 = 0;
pub const __WORDSIZE: u32 = 64;
pub const __DARWIN_ONLY_64_BIT_INO_T: u32 = 1;
pub const __DARWIN_ONLY_UNIX_CONFORMANCE: u32 = 1;
pub const __DARWIN_ONLY_VERS_1050: u32 = 1;
pub const __DARWIN_UNIX03: u32 = 1;
pub const __DARWIN_64_BIT_INO_T: u32 = 1;
pub const __DARWIN_VERS_1050: u32 = 1;
pub const __DARWIN_NON_CANCELABLE: u32 = 0;
pub const __DARWIN_SUF_EXTSN: &[u8; 14usize] = b"$DARWIN_EXTSN\0";
pub const __DARWIN_C_ANSI: u32 = 4096;
pub const __DARWIN_C_FULL: u32 = 900000;
pub const __DARWIN_C_LEVEL: u32 = 900000;
pub const __STDC_WANT_LIB_EXT1__: u32 = 1;
pub const __DARWIN_NO_LONG_LONG: u32 = 0;
pub const _DARWIN_FEATURE_64_BIT_INODE: u32 = 1;
pub const _DARWIN_FEATURE_ONLY_64_BIT_INODE: u32 = 1;
pub const _DARWIN_FEATURE_ONLY_VERS_1050: u32 = 1;
pub const _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE: u32 = 1;
pub const _DARWIN_FEATURE_UNIX_CONFORMANCE: u32 = 3;
pub const __has_ptrcheck: u32 = 0;
pub const __PTHREAD_SIZE__: u32 = 8176;
pub const __PTHREAD_ATTR_SIZE__: u32 = 56;
pub const __PTHREAD_MUTEXATTR_SIZE__: u32 = 8;
pub const __PTHREAD_MUTEX_SIZE__: u32 = 56;
pub const __PTHREAD_CONDATTR_SIZE__: u32 = 8;
pub const __PTHREAD_COND_SIZE__: u32 = 40;
pub const __PTHREAD_ONCE_SIZE__: u32 = 8;
pub const __PTHREAD_RWLOCK_SIZE__: u32 = 192;
pub const __PTHREAD_RWLOCKATTR_SIZE__: u32 = 16;
pub const INT8_MAX: u32 = 127;
pub const INT16_MAX: u32 = 32767;
pub const INT32_MAX: u32 = 2147483647;
pub const INT64_MAX: u64 = 9223372036854775807;
pub const INT8_MIN: i32 = -128;
pub const INT16_MIN: i32 = -32768;
pub const INT32_MIN: i32 = -2147483648;
pub const INT64_MIN: i64 = -9223372036854775808;
pub const UINT8_MAX: u32 = 255;
pub const UINT16_MAX: u32 = 65535;
pub const UINT32_MAX: u32 = 4294967295;
pub const UINT64_MAX: u64 = 18446744073709551615;
pub const INT_LEAST8_MIN: i32 = -128;
pub const INT_LEAST16_MIN: i32 = -32768;
pub const INT_LEAST32_MIN: i32 = -2147483648;
pub const INT_LEAST64_MIN: i64 = -9223372036854775808;
pub const INT_LEAST8_MAX: u32 = 127;
pub const INT_LEAST16_MAX: u32 = 32767;
pub const INT_LEAST32_MAX: u32 = 2147483647;
pub const INT_LEAST64_MAX: u64 = 9223372036854775807;
pub const UINT_LEAST8_MAX: u32 = 255;
pub const UINT_LEAST16_MAX: u32 = 65535;
pub const UINT_LEAST32_MAX: u32 = 4294967295;
pub const UINT_LEAST64_MAX: u64 = 18446744073709551615;
pub const INT_FAST8_MIN: i32 = -128;
pub const INT_FAST16_MIN: i32 = -32768;
pub const INT_FAST32_MIN: i32 = -2147483648;
pub const INT_FAST64_MIN: i64 = -9223372036854775808;
pub const INT_FAST8_MAX: u32 = 127;
pub const INT_FAST16_MAX: u32 = 32767;
pub const INT_FAST32_MAX: u32 = 2147483647;
pub const INT_FAST64_MAX: u64 = 9223372036854775807;
pub const UINT_FAST8_MAX: u32 = 255;
pub const UINT_FAST16_MAX: u32 = 65535;
pub const UINT_FAST32_MAX: u32 = 4294967295;
pub const UINT_FAST64_MAX: u64 = 18446744073709551615;
pub const INTPTR_MAX: u64 = 9223372036854775807;
pub const INTPTR_MIN: i64 = -9223372036854775808;
pub const UINTPTR_MAX: u64 = 18446744073709551615;
pub const SIZE_MAX: u64 = 18446744073709551615;
pub const RSIZE_MAX: u64 = 9223372036854775807;
pub const WINT_MIN: i32 = -2147483648;
pub const WINT_MAX: u32 = 2147483647;
pub const SIG_ATOMIC_MIN: i32 = -2147483648;
pub const SIG_ATOMIC_MAX: u32 = 2147483647;
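// Illustrative sanity check (not bindgen output): a minimal sketch verifying
// that the fixed-width limit constants above agree with Rust's built-in
// integer limits on this 64-bit Darwin target.
#[test]
fn limit_constants_match_rust() {
    assert_eq!(INT64_MAX, i64::MAX as u64);
    assert_eq!(INT64_MIN, i64::MIN);
    assert_eq!(UINT64_MAX, u64::MAX);
    assert_eq!(SIZE_MAX, u64::MAX);
}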
pub type wchar_t = ::std::os::raw::c_int;
pub type max_align_t = f64;
pub type int_least8_t = i8;
pub type int_least16_t = i16;
pub type int_least32_t = i32;
pub type int_least64_t = i64;
pub type uint_least8_t = u8;
pub type uint_least16_t = u16;
pub type uint_least32_t = u32;
pub type uint_least64_t = u64;
pub type int_fast8_t = i8;
pub type int_fast16_t = i16;
pub type int_fast32_t = i32;
pub type int_fast64_t = i64;
pub type uint_fast8_t = u8;
pub type uint_fast16_t = u16;
pub type uint_fast32_t = u32;
pub type uint_fast64_t = u64;
pub type __int8_t = ::std::os::raw::c_schar;
pub type __uint8_t = ::std::os::raw::c_uchar;
pub type __int16_t = ::std::os::raw::c_short;
pub type __uint16_t = ::std::os::raw::c_ushort;
pub type __int32_t = ::std::os::raw::c_int;
pub type __uint32_t = ::std::os::raw::c_uint;
pub type __int64_t = ::std::os::raw::c_longlong;
pub type __uint64_t = ::std::os::raw::c_ulonglong;
pub type __darwin_intptr_t = ::std::os::raw::c_long;
pub type __darwin_natural_t = ::std::os::raw::c_uint;
pub type __darwin_ct_rune_t = ::std::os::raw::c_int;
#[repr(C)]
#[derive(Copy, Clone)]
pub union __mbstate_t {
pub __mbstate8: [::std::os::raw::c_char; 128usize],
pub _mbstateL: ::std::os::raw::c_longlong,
}
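// Illustrative example (not bindgen output): mirroring the C idiom
// `mbstate_t state = {0};`, an all-zero bit pattern is a valid initial
// conversion state, and either union field can then be read under `unsafe`.
#[test]
fn mbstate_zero_init() {
    let state: __mbstate_t = unsafe { ::std::mem::zeroed() };
    // Both fields share offset 0, so the zeroed integer view reads as 0.
    assert_eq!(unsafe { state._mbstateL }, 0);
}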
#[test]
fn bindgen_test_layout___mbstate_t() {
const UNINIT: ::std::mem::MaybeUninit<__mbstate_t> = ::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<__mbstate_t>(),
128usize,
concat!("Size of: ", stringify!(__mbstate_t))
);
assert_eq!(
::std::mem::align_of::<__mbstate_t>(),
8usize,
concat!("Alignment of ", stringify!(__mbstate_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__mbstate8) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t),
"::",
stringify!(__mbstate8)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr)._mbstateL) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t),
"::",
stringify!(_mbstateL)
)
);
}
pub type __darwin_mbstate_t = __mbstate_t;
pub type __darwin_ptrdiff_t = ::std::os::raw::c_long;
pub type __darwin_size_t = ::std::os::raw::c_ulong;
pub type __darwin_va_list = __builtin_va_list;
pub type __darwin_wchar_t = ::std::os::raw::c_int;
pub type __darwin_rune_t = __darwin_wchar_t;
pub type __darwin_wint_t = ::std::os::raw::c_int;
pub type __darwin_clock_t = ::std::os::raw::c_ulong;
pub type __darwin_socklen_t = __uint32_t;
pub type __darwin_ssize_t = ::std::os::raw::c_long;
pub type __darwin_time_t = ::std::os::raw::c_long;
pub type __darwin_blkcnt_t = __int64_t;
pub type __darwin_blksize_t = __int32_t;
pub type __darwin_dev_t = __int32_t;
pub type __darwin_fsblkcnt_t = ::std::os::raw::c_uint;
pub type __darwin_fsfilcnt_t = ::std::os::raw::c_uint;
pub type __darwin_gid_t = __uint32_t;
pub type __darwin_id_t = __uint32_t;
pub type __darwin_ino64_t = __uint64_t;
pub type __darwin_ino_t = __darwin_ino64_t;
pub type __darwin_mach_port_name_t = __darwin_natural_t;
pub type __darwin_mach_port_t = __darwin_mach_port_name_t;
pub type __darwin_mode_t = __uint16_t;
pub type __darwin_off_t = __int64_t;
pub type __darwin_pid_t = __int32_t;
pub type __darwin_sigset_t = __uint32_t;
pub type __darwin_suseconds_t = __int32_t;
pub type __darwin_uid_t = __uint32_t;
pub type __darwin_useconds_t = __uint32_t;
pub type __darwin_uuid_t = [::std::os::raw::c_uchar; 16usize];
pub type __darwin_uuid_string_t = [::std::os::raw::c_char; 37usize];
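// Illustrative note (not bindgen output): the 37-byte string form holds the
// canonical 36-character UUID text ("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
// plus a trailing NUL, while the raw form is the 16-byte binary UUID.
#[test]
fn uuid_string_is_36_chars_plus_nul() {
    assert_eq!(::std::mem::size_of::<__darwin_uuid_string_t>(), 37);
    assert_eq!(::std::mem::size_of::<__darwin_uuid_t>(), 16);
}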
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __darwin_pthread_handler_rec {
pub __routine: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
pub __arg: *mut ::std::os::raw::c_void,
pub __next: *mut __darwin_pthread_handler_rec,
}
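// Illustrative example (not bindgen output): a cleanup handler record is a
// singly linked list node pairing an `extern "C"` routine with its argument,
// as maintained by pthread_cleanup_push(). A minimal construction sketch:
#[test]
fn build_pthread_handler_rec() {
    unsafe extern "C" fn noop(_arg: *mut ::std::os::raw::c_void) {}
    let rec = __darwin_pthread_handler_rec {
        __routine: Some(noop as unsafe extern "C" fn(*mut ::std::os::raw::c_void)),
        __arg: ::std::ptr::null_mut(),
        __next: ::std::ptr::null_mut(), // end of the handler list
    };
    assert!(rec.__routine.is_some());
}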
#[test]
fn bindgen_test_layout___darwin_pthread_handler_rec() {
const UNINIT: ::std::mem::MaybeUninit<__darwin_pthread_handler_rec> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<__darwin_pthread_handler_rec>(),
24usize,
concat!("Size of: ", stringify!(__darwin_pthread_handler_rec))
);
assert_eq!(
::std::mem::align_of::<__darwin_pthread_handler_rec>(),
8usize,
concat!("Alignment of ", stringify!(__darwin_pthread_handler_rec))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__routine) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__darwin_pthread_handler_rec),
"::",
stringify!(__routine)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__arg) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(__darwin_pthread_handler_rec),
"::",
stringify!(__arg)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__next) as usize - ptr as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(__darwin_pthread_handler_rec),
"::",
stringify!(__next)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_attr_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 56usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_attr_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_attr_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_attr_t>(),
64usize,
concat!("Size of: ", stringify!(_opaque_pthread_attr_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_attr_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_attr_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_attr_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_attr_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_cond_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 40usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_cond_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_cond_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_cond_t>(),
48usize,
concat!("Size of: ", stringify!(_opaque_pthread_cond_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_cond_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_cond_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_cond_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_cond_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_condattr_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 8usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_condattr_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_condattr_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_condattr_t>(),
16usize,
concat!("Size of: ", stringify!(_opaque_pthread_condattr_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_condattr_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_condattr_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_condattr_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_condattr_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_mutex_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 56usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_mutex_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_mutex_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_mutex_t>(),
64usize,
concat!("Size of: ", stringify!(_opaque_pthread_mutex_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_mutex_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_mutex_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_mutex_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_mutex_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_mutexattr_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 8usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_mutexattr_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_mutexattr_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_mutexattr_t>(),
16usize,
concat!("Size of: ", stringify!(_opaque_pthread_mutexattr_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_mutexattr_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_mutexattr_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_mutexattr_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_mutexattr_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_once_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 8usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_once_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_once_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_once_t>(),
16usize,
concat!("Size of: ", stringify!(_opaque_pthread_once_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_once_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_once_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_once_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_once_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_rwlock_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 192usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_rwlock_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_rwlock_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_rwlock_t>(),
200usize,
concat!("Size of: ", stringify!(_opaque_pthread_rwlock_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_rwlock_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_rwlock_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_rwlock_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_rwlock_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_rwlockattr_t {
pub __sig: ::std::os::raw::c_long,
pub __opaque: [::std::os::raw::c_char; 16usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_rwlockattr_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_rwlockattr_t> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_rwlockattr_t>(),
24usize,
concat!("Size of: ", stringify!(_opaque_pthread_rwlockattr_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_rwlockattr_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_rwlockattr_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_rwlockattr_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_rwlockattr_t),
"::",
stringify!(__opaque)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _opaque_pthread_t {
pub __sig: ::std::os::raw::c_long,
pub __cleanup_stack: *mut __darwin_pthread_handler_rec,
pub __opaque: [::std::os::raw::c_char; 8176usize],
}
#[test]
fn bindgen_test_layout__opaque_pthread_t() {
const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_t> = ::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<_opaque_pthread_t>(),
8192usize,
concat!("Size of: ", stringify!(_opaque_pthread_t))
);
assert_eq!(
::std::mem::align_of::<_opaque_pthread_t>(),
8usize,
concat!("Alignment of ", stringify!(_opaque_pthread_t))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_t),
"::",
stringify!(__sig)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__cleanup_stack) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_t),
"::",
stringify!(__cleanup_stack)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(_opaque_pthread_t),
"::",
stringify!(__opaque)
)
);
}
pub type __darwin_pthread_attr_t = _opaque_pthread_attr_t;
pub type __darwin_pthread_cond_t = _opaque_pthread_cond_t;
pub type __darwin_pthread_condattr_t = _opaque_pthread_condattr_t;
pub type __darwin_pthread_key_t = ::std::os::raw::c_ulong;
pub type __darwin_pthread_mutex_t = _opaque_pthread_mutex_t;
pub type __darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t;
pub type __darwin_pthread_once_t = _opaque_pthread_once_t;
pub type __darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t;
pub type __darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t;
pub type __darwin_pthread_t = *mut _opaque_pthread_t;
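// Illustrative FFI usage (not bindgen output): on Darwin, a pthread_t is a
// pointer to the opaque thread record above, so the handle for the calling
// thread is never null. This sketch assumes a macOS host, where
// pthread_self() is provided by the always-linked libSystem.
extern "C" {
    fn pthread_self() -> __darwin_pthread_t;
}
#[test]
fn pthread_self_returns_nonnull_handle() {
    assert!(!unsafe { pthread_self() }.is_null());
}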
pub type u_int8_t = ::std::os::raw::c_uchar;
pub type u_int16_t = ::std::os::raw::c_ushort;
pub type u_int32_t = ::std::os::raw::c_uint;
pub type u_int64_t = ::std::os::raw::c_ulonglong;
pub type register_t = i64;
pub type user_addr_t = u_int64_t;
pub type user_size_t = u_int64_t;
pub type user_ssize_t = i64;
pub type user_long_t = i64;
pub type user_ulong_t = u_int64_t;
pub type user_time_t = i64;
pub type user_off_t = i64;
pub type syscall_arg_t = u_int64_t;
pub type intmax_t = ::std::os::raw::c_long;
pub type uintmax_t = ::std::os::raw::c_ulong;
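// Illustrative sanity check (not bindgen output): on LP64 Darwin targets,
// `intmax_t`/`uintmax_t` come out as C `long`/`unsigned long`, i.e. 64 bits.
#[test]
fn intmax_is_64_bits() {
    assert_eq!(::std::mem::size_of::<intmax_t>(), 8);
    assert_eq!(::std::mem::size_of::<uintmax_t>(), 8);
}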
#[doc = " Operand types.\n\n The type of an operand in a model.\n\n Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors\n with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent\n scalar values and must have no dimensions.\n\n Although we define many types, most operators accept just a few\n types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n and {@link ANEURALNETWORKS_INT32}.\n\n Available since API level 27."]
#[repr(C)]
pub enum OperandCode {
#[doc = " A 32 bit floating point scalar value."]
ANEURALNETWORKS_FLOAT32 = 0,
#[doc = " A signed 32 bit integer scalar value."]
ANEURALNETWORKS_INT32 = 1,
#[doc = " An unsigned 32 bit integer scalar value."]
ANEURALNETWORKS_UINT32 = 2,
#[doc = " A tensor of 32 bit floating point values."]
ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
#[doc = " A tensor of 32 bit signed integers."]
ANEURALNETWORKS_TENSOR_INT32 = 4,
#[doc = " A tensor of 8 bit unsigned integers."]
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
#[doc = " A tensor of 8 bit signed integers."]
ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 6,
#[doc = " A tensor of 8 bit signed integers."]
ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 7,
#[doc = " A tensor of 16 bit floating point values."]
ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
#[doc = " A tensor of 8 bit boolean values."]
ANEURALNETWORKS_TENSOR_BOOL8 = 9,
#[doc = " A tensor of 16 bit unsigned integers."]
ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 10,
#[doc = " A tensor of 16 bit signed integers."]
ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 11,
#[doc = " A tensor of 8 bit signed integers."]
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
#[doc = " A reference to a model."]
ANEURALNETWORKS_MODEL = 15,
}
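// Illustrative usage (not bindgen output): with `#[repr(C)]`, an operand
// code is laid out as a plain C enum, so it can cross the FFI boundary as
// its integer discriminant. A minimal sketch:
#[test]
fn operand_code_discriminants() {
    assert_eq!(OperandCode::ANEURALNETWORKS_TENSOR_FLOAT32 as i32, 3);
    assert_eq!(
        OperandCode::ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL as i32,
        11
    );
    assert_eq!(OperandCode::ANEURALNETWORKS_MODEL as i32, 15);
}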
#[doc = " Operation types.\n\n The type of an operation in a model.\n\n Available since API level 27."]
#[repr(C)]
pub enum OperationCode {
#[doc = " Adds two tensors, element-wise.\n\n Takes two input tensors of identical {@link OperandCode} and compatible\n dimensions. The output is the sum of both input tensors, optionally\n modified by an activation function.\n\n Two dimensions are compatible when:\n 1. they are equal, or\n 2. one of them is 1\n\n The size of the output is the maximum size along each dimension of the\n input operands. It starts with the trailing dimensions, and works its\n way forward.\n\n Example:\n\n input1.dimension = {4, 1, 2}\n input2.dimension = {5, 4, 3, 1}\n output.dimension = {5, 4, 3, 2}\n\n Since API level 29, generic zero-sized input tensor is supported. Zero\n dimension is only compatible with 0 or 1. The size of the output\n dimension is zero if either of corresponding input dimension is zero.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode}, and compatible dimensions\n as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scales and zeroPoint can be different from input0 scale and zeroPoint.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,\n the {@link FuseCode} must be \"NONE\".\n\n Outputs:\n * 0: The sum, a tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 27."]
ANEURALNETWORKS_ADD = 0,
#[doc = " Performs a 2-D average pooling operation.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, channel] =\n sum_{di, dj}(\n input[b, strides[1] * i + di, strides[2] * j + dj, channel]\n ) / sum(1)\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. 
Specifies the activation to\n invoke on the result.\n * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
#[doc = " Concatenates the input tensors along the given dimension.\n\n The input tensors must have identical {@link OperandCode} and the same\n dimensions except the dimension along the concatenation axis.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n (full support since API level 29, see the input section)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0 ~ n-1: The list of n input tensors, of shape\n [D0, D1, ..., Daxis(i), ..., Dm].\n Before API level 29, all input tensors of\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n must have the same scale and zeroPoint as the output tensor.\n Input tensors of\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n are allowed to have different scale and zeroPoint.\n Since API level 29, zero-sized tensors are supported.\n * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the\n concatenation axis.\n\n Outputs:\n * 0: The output, a tensor of the same {@link OperandCode} as the input\n tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].\n Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint values can be different from\n input tensors. Before API level 29 they have to be the same as for the input tensors.\n\n Available since API level 27."]
ANEURALNETWORKS_CONCATENATION = 2,
#[doc = " Performs a 2-D convolution operation.\n\n The CONV_2D op sweeps a 2-D filter that can mix channels together over a\n batch of images, applying the filter to each window of each image of the\n appropriate size.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, channel] =\n sum_{di, dj, k} (\n input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n filter[channel, di, dj, k]\n ) + bias[channel]\n\n Supported tensor {@link OperandCode} configurations:\n * 32 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.\n\n * Quantized:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n Available since API level 29:\n * 16 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.\n\n * Quantized with symmetric per channel quantization for the filter:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Available since API level 30:\n * Quantized signed (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized signed with filter symmetric per channel quantization (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_in], specifying the\n filter.\n For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)\n must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 
For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on width dimension. If this input is set,\n input 12 (dilation factor for height) must be specified as well.\n Available since API level 29.\n * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on height dimension. If this input is set,\n input 11 (dilation factor for width) must be specified as well.\n Available since API level 29.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_in], specifying the\n filter.\n For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)\n must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same\n type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. 
The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on width dimension. If this input is set,\n input 9 (dilation factor for height) must be specified as well.\n Available since API level 29.\n * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on height dimension. If this input is set,\n input 8 (dilation factor for width) must be specified as well.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth_out].\n Before API level 29, for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the following condition must be satisfied: output_scale > input_scale * filter_scale\n\n Available since API level 27."]
ANEURALNETWORKS_CONV_2D = 3,
#[doc = " Performs a depthwise 2-D convolution operation.\n\n Given an input tensor of shape [batches, height, width, depth_in] and a\n filter tensor of shape [1, filter_height, filter_width, depth_out]\n containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV\n applies a different filter to each input channel (expanding from 1\n channel to channel_multiplier channels for each), then concatenates the\n results together.\n\n The output has depth_out = depth_in * depth_multiplier channels.\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, k * channel_multiplier + q] =\n sum_{di, dj} (\n input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n filter[1, di, dj, k * channel_multiplier + q]\n ) + bias[k * channel_multiplier + q]\n\n Supported tensor {@link OperandCode} configurations:\n * 32 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.\n\n * Quantized:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n Available since API level 29:\n * 16 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.\n\n * Quantized with symmetric per channel quantization for the filter:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Available since API level 30:\n * Quantized signed (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized signed with filter symmetric per channel quantization (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],\n specifying the filter.\n For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)\n must be set to 3.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 
For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise\n multiplier.\n * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on width dimension. If this input is set,\n input 13 (dilation factor for height) must be specified as well.\n Available since API level 29.\n * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on height dimension. If this input is set,\n input 12 (dilation factor for width) must be specified as well.\n Available since API level 29.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],\n specifying the filter.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. 
The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise\n multiplier.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on width dimension. If this input is set,\n input 10 (dilation factor for height) must be specified as well.\n Available since API level 29.\n * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation\n factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped\n cells between each filter element on height dimension. If this input is set,\n input 9 (dilation factor for width) must be specified as well.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth_out]. Before API level 29, for\n output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the following condition must be satisfied:\n output_scale > input_scale * filter_scale\n\n Available since API level 27."]
ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
#[doc = " Rearranges data from depth into blocks of spatial data.\n\n More specifically, this op outputs a copy of the input tensor where\n values from the depth dimension are moved in spatial blocks to the height\n and width dimensions. The value block_size indicates the input block size\n and how the data is moved.\n\n Chunks of data of size block_size * block_size from depth are rearranged\n into non-overlapping blocks of size block_size x block_size.\n\n The width of the output tensor is input_depth * block_size, whereas the\n height is input_height * block_size. The depth of the input tensor must\n be divisible by block_size * block_size\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Inputs:\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.\n block_size must be >=1 and block_size * block_size must be a divisor\n of the input depth.\n * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape [batch, height*block_size,\n width*block_size, depth/(block_size*block_size)].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
#[doc = " Dequantizes the input tensor.\n\n The formula is:\n\n output = (input - zeroPoint) * scale.\n\n Supported input tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported output tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: A tensor.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: A tensor with the same shape as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_DEQUANTIZE = 6,
#[doc = " Looks up sub-tensors in the input tensor.\n\n This operator takes for input a tensor of values (Values) and\n a one-dimensional tensor of selection indices (Lookups).\n The output tensor is the concatenation of sub-tensors of Values as\n selected by Lookups.\n\n Think of Values as being sliced along its first dimension:\n The entries in Lookups select which slices are concatenated together\n to create the output tensor.\n\n For example, if Values has shape of [40, 200, 300] and\n Lookups has shape of [3], all three values found in Lookups are\n expected to be between 0 and 39. The resulting tensor must\n have shape of [3, 200, 300].\n\n If a value in Lookups is out of bounds, the operation must fail\n and an error must be reported.\n\n Supported value tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 30)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported value tensor rank: from 2\n\n Inputs:\n * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.\n The values are indices into the first dimension of Values.\n * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are\n extracted.\n\n Output:\n * 0: A n-D tensor with the same rank and shape as the Values\n tensor, except for the first dimension which has the same size\n as Lookups' only dimension.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input1.\n\n Available since API level 27."]
ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
#[doc = " Computes element-wise floor() on the input tensor.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor, of the same {@link OperandCode} and dimensions as\n the input tensor.\n\n Available since API level 27."]
ANEURALNETWORKS_FLOOR = 8,
#[doc = " Denotes a fully (densely) connected layer, which connects all elements\n in the input tensor with each element in the output tensor.\n\n This layer implements the operation:\n\n outputs = activation(inputs * weights’ + bias)\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor of at least rank 2, specifying the input. If rank is\n greater than 2, then it gets flattened to a 2-D Tensor. The\n (flattened) 2-D Tensor is reshaped (if necessary) to\n [batch_size, input_size], where \"input_size\" corresponds to the\n number of inputs to the layer, matching the second dimension of\n weights, and \"batch_size\" is calculated by dividing the number of\n elements by \"input_size\".\n Since API level 29, zero batch_size is supported for this tensor.\n * 1: A 2-D tensor, specifying the weights, of shape\n [num_units, input_size], where \"num_units\" corresponds to the number\n of output nodes.\n * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input\n tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should\n also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},\n with zeroPoint of 0 and bias_scale == input_scale * filter_scale.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n\n Outputs:\n * 0: The output tensor, of shape [batch_size, num_units]. Before API level 29, for\n output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following\n condition must be satisfied: output_scale > input_scale * filter_scale.\n\n Available since API level 27."]
ANEURALNETWORKS_FULLY_CONNECTED = 9,
#[doc = " Looks up sub-tensors in the input tensor using a key-value map.\n\n This operator takes for input a tensor of values (Values),\n a one-dimensional tensor of selection values (Lookups) and\n a one-dimensional tensor that maps these values to Values\n indexes. The output tensor is the concatenation of sub-tensors of\n Values as selected by Lookups via Keys.\n\n Think of Values as being sliced along its outer-most dimension.\n The output is a concatenation of selected slices, with one slice\n for each entry of Lookups. The slice selected is the one at the\n same index as the Maps entry that matches the value in Lookups.\n\n For a hit, the corresponding sub-tensor of Values is included\n in the Output tensor. For a miss, the corresponding sub-tensor in\n Output must have zero values.\n\n For example, if Values has shape of [40, 200, 300],\n Keys should have a shape of [40]. If Lookups tensor has shape\n of [3], three slices are being concatenated, so the resulting tensor\n must have the shape of [3, 200, 300]. If the first entry in Lookups\n has the value 123456, that value must be located in Keys tensor.\n If the sixth entry of Keys contains 123456, the sixth slice of Values\n must be selected. If no entry in Keys has 123456, a slice of zeroes\n must be concatenated.\n\n Supported value tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n\n Supported value tensor rank: from 2\n\n Inputs:\n * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with\n shape [ k ].\n * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape\n [ n ]; Keys and Values pair represent a map, i.e., the ith element\n in Keys (Keys[i]) is the key to select the ith sub-tensor in Values\n (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in\n ascending order.\n * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension\n must be n.\n\n Outputs:\n * 0: Output. A tensor with shape [ k …].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint must be the same as input2.\n * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup\n hits (True) or not (False).\n Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0\n and scale 1.0f.\n A non-zero byte represents True, a hit. A zero indicates otherwise.\n\n Available since API level 27."]
ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
#[doc = " Applies L2 normalization along the axis dimension.\n\n The values in the output tensor are computed as:\n\n output[batch, row, col, channel] =\n input[batch, row, col, channel] /\n sqrt(sum_{c} pow(input[batch, row, col, c], 2))\n\n By default the axis dimension is the last dimension of the input tensor.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n Tensors with rank less than 4 are only supported since API level 29.\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be normalized.\n * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,\n specifying the dimension normalization would be performed on.\n Negative index is used to specify axis from the end (e.g. -1 for\n the last axis). Must be in the range [-n, n).\n Available since API level 29.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} and same shape as input0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the scale must be 1.f / 128 and the zeroPoint must be 128.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the scale must be 1.f / 128 and the zeroPoint must be 0.\n\n NOTE: Before API level 30, if the elements along an axis are all zeros,\n the result is undefined. Since API level 30, if the elements along an axis\n are all zeros, the result is logical zero.\n\n Available since API level 27."]
ANEURALNETWORKS_L2_NORMALIZATION = 11,
#[doc = " Performs an 2-D L2 pooling operation.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, c] =\n sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /\n sum(1))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth].\n\n Available since API level 27."]
ANEURALNETWORKS_L2_POOL_2D = 12,
#[doc = " Applies Local Response Normalization along the depth dimension.\n\n The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the\n last dimension), and each vector is normalized independently. Within a\n given vector, each component is divided by the weighted, squared sum of\n inputs within depth_radius.\n\n The output is calculated using this formula:\n\n sqr_sum[a, b, c, d] = sum(\n pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))\n output = input / pow((bias + alpha * sqr_sum), beta)\n\n For input tensor with rank less than 4, independently normalizes each\n 1-D slice along specified dimension.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: up to 4\n Tensors with rank less than 4 are only supported since API level 29.\n\n Inputs:\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of\n the normalization window.\n * 2: A scalar, specifying the bias, must not be zero.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias\n value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias\n value must be of {@link ANEURALNETWORKS_FLOAT32}.\n * 3: A scalar, specifying the scale factor, alpha.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the\n alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the\n alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.\n * 4: A scalar, specifying the exponent, beta.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta\n value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta\n value must be of {@link ANEURALNETWORKS_FLOAT32}.\n * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,\n specifying the dimension normalization would be performed on.\n Negative index is used to specify axis from the end (e.g. -1 for\n the last axis). Must be in the range [-n, n).\n Available since API level 29.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
#[doc = " Computes sigmoid activation on the input tensor element-wise.\n\n The output is calculated using this formula:\n\n output = 1 / (1 + exp(-input))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the input.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the scale must be 1.f / 256 and the zeroPoint must be 0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the scale must be 1.f / 256 and the zeroPoint must be -128.\n\n Available since API level 27."]
ANEURALNETWORKS_LOGISTIC = 14,
#[doc = " Projects an input to a bit vector via locality senstive hashing.\n\n Supported input tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n\n Supported input tensor rank: from 1\n\n Inputs:\n * 0: Hash functions. Dim.size == 2, DataType: Float.\n Tensor[0].Dim[0]: Number of hash functions.\n Tensor[0].Dim[1]: Number of projected output bits generated by each\n hash function.\n If the projection type is Sparse:\n Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32\n\n * 1: Input. Dim.size >= 1, no restriction on DataType.\n * 2: Weight. Optional. Dim.size == 1, DataType: Float.\n If not set, each input element is considered to have the same weight\n of 1.0.\n Tensor[1].Dim[0] == Tensor[2].Dim[0]\n * 3: Type:\n Sparse:\n Value LSHProjectionType_SPARSE(=3) (since API level 29).\n Computed bit vector is considered to be sparse.\n Each output element is an int32 made up of multiple bits\n computed from hash functions.\n\n NOTE: To avoid collisions across hash functions, an offset value\n of k * (1 << Tensor[0].Dim[1]) will be added to each signature,\n where k is the index of the hash function.\n\n Value LSHProjectionType_SPARSE_DEPRECATED(=1).\n Legacy behavior that does not include the offset value.\n\n Dense:\n Value LSHProjectionType_DENSE(=2).\n Computed bit vector is considered to be dense. Each output\n element represents a bit and can take the value of either\n 0 or 1.\n\n Outputs:\n * 0: If the projection type is Sparse:\n Output.Dim == { Tensor[0].Dim[0] }\n A tensor of int32 that represents hash signatures.\n\n If the projection type is Dense:\n Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }\n A flattened tensor that represents projected bit vectors.\n\n Available since API level 27.\n The offset value for sparse projections was added in API level 29."]
ANEURALNETWORKS_LSH_PROJECTION = 15,
#[doc = " Performs a single time step in a Long Short-Term Memory (LSTM) layer\n\n The LSTM operation is described by the following equations.\n\n \\f{eqnarray*}{\n i_t =& \\sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\\\\n f_t =& \\sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\\\\n C_t =& clip(f_t \\odot C_{t-1} + i_t \\odot\n g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\\ t_{cell}) & \\\\\n o_t =& \\sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\\\\n & & \\\\\n & clip(W_{proj}(o_t \\odot g(C_t))+b_{proj},\\ t_{proj})\n & if\\ there\\ is\\ a\\ projection; \\\\\n h_t =& & \\\\\n & o_t \\odot g(C_t) & otherwise. \\\\\n \\f}\n Where:\n * \\f$x_t\\f$ is the input,\n * \\f$i_t\\f$ is the input gate,\n * \\f$f_t\\f$ is the forget gate,\n * \\f$C_t\\f$ is the cell state,\n * \\f$o_t\\f$ is the output,\n * \\f$h_t\\f$ is the output state,\n * \\f$\\sigma\\f$ is the logistic sigmoid function,\n * \\f$g\\f$ is the cell input and cell output activation function, usually\n \\f$tahn\\f$,\n * \\f$W_{xi}\\f$ is the input-to-input weight matrix,\n * \\f$W_{hi}\\f$ is the recurrent to input weight matrix,\n * \\f$W_{ci}\\f$ is the cell-to-input weight matrix,\n * \\f$b_i\\f$ is the input gate bias,\n * \\f$W_{xf}\\f$ is the input-to-forget weight matrix,\n * \\f$W_{hf}\\f$ is the recurrent-to-forget weight matrix,\n * \\f$W_{cf}\\f$ is the cell-to-forget weight matrix,\n * \\f$b_f\\f$ is the forget gate bias,\n * \\f$W_{xc}\\f$ is the input-to-cell weight matrix,\n * \\f$W_{hc}\\f$ is the recurrent-to-cell weight matrix,\n * \\f$b_c\\f$ is the cell bias,\n * \\f$W_{xo}\\f$ is the input-to-output weight matrix,\n * \\f$W_{ho}\\f$ is the recurrent-to-output weight matrix,\n * \\f$W_{co}\\f$ is the cell-to-output weight matrix,\n * \\f$b_o\\f$ is the output gate bias,\n * \\f$W_{proj}\\f$ is the projection weight matrix,\n * \\f$b_{proj}\\f$ is the projection bias,\n * \\f$t_{cell}\\f$ is the threshold for clipping the cell state, and\n * \\f$t_{proj}\\f$ is the threshold for clipping the projected output.\n * \\f$\\odot\\f$ is the\n <a href=\"https://en.wikipedia.org/wiki/Hadamard_product_(matrices)\">\n Hadamard product</a> that takes two matrices and produces another\n matrix, each element of which is the product of the corresponding\n elements of the input matrices.\n\n Since API level 29 LSTM supports layer normalization.\n In case layer normalization is used, the inputs to internal activation\n functions (sigmoid and \\f$g\\f$) are normalized, rescaled and recentered\n following an approach from section 3.1 from\n https://arxiv.org/pdf/1607.06450.pdf\n\n The operation has the following independently optional inputs:\n * The cell-to-input weights (\\f$W_{ci}\\f$), cell-to-forget weights\n (\\f$W_{cf}\\f$) and cell-to-output weights (\\f$W_{co}\\f$) either all\n have values or neither of them have values (i.e., all set to null). If\n they have values, the peephole optimization is used.\n * The input-to-input weights (\\f$W_{xi}\\f$), recurrent-to-input weights\n (\\f$W_{hi}\\f$) and input gate bias (\\f$b_i\\f$) either all have values,\n or none of them have values. If they have no values, coupling of input\n and forget gates (CIFG) is used, in which case the input gate\n (\\f$i_t\\f$) is calculated using the following equation instead.\n \\f{eqnarray*}{\n i_t = 1 - f_t\n \\f}\n In case peephole optimization is used and CIFG is not used\n cell-to-input (\\f$W_{ci}\\f$) weights must be present. 
Otherwise, the\n cell-to-input weights must have no value.\n * The projection weights (\\f$W_{proj}\\f$) is required only for the\n recurrent projection layer, and should otherwise have no value.\n * The projection bias (\\f$b_{proj}\\f$) may (but not required to) have a\n value if the recurrent projection layer exists, and should otherwise\n have no value.\n * (API level 29 or later) The four layer normalization weights either all have\n values or none of them have values. Additionally, if CIFG is used,\n input layer normalization weights tensor is omitted and the other layer\n normalization weights either all have values or none of them have\n values. Layer normalization is used when the values of all the layer\n normalization weights are present.\n\n References:\n\n The default non-peephole non-CIFG implementation is based on:\n http://www.bioinf.jku.at/publications/older/2604.pdf\n S. Hochreiter and J. Schmidhuber. \"Long Short-Term Memory\". Neural\n Computation, 9(8):1735-1780, 1997.\n\n The peephole implementation and projection layer is based on:\n https://research.google.com/pubs/archive/43905.pdf\n Hasim Sak, Andrew Senior, and Francoise Beaufays. \"Long short-term memory\n recurrent neural network architectures for large scale acoustic\n modeling.\" INTERSPEECH, 2014.\n (However, the concept of peephole optimization was introduced in work\n prior to this paper.)\n\n The coupling of input and forget gate (CIFG) is based on:\n http://arxiv.org/pdf/1503.04069.pdf\n Greff et al. \"LSTM: A Search Space Odyssey\"\n\n The layer normalization is based on:\n https://arxiv.org/pdf/1607.06450.pdf\n Jimmy Ba et al. \"Layer Normalization\"\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n All input and output tensors must be of the same type.\n\n Inputs:\n * 0: The input (\\f$x_t\\f$).\n A 2-D tensor of shape [batch_size, input_size], where “batch_size”\n corresponds to the batching dimension, and “input_size” is the size\n of the input.\n * 1: The input-to-input weights (\\f$W_{xi}\\f$). Optional.\n A 2-D tensor of shape [num_units, input_size], where “num_units”\n corresponds to the number of cell units.\n * 2: The input-to-forget weights (\\f$W_{xf}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 3: The input-to-cell weights (\\f$W_{xc}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 4: The input-to-output weights (\\f$W_{xo}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 5: The recurrent-to-input weights (\\f$W_{hi}\\f$). Optional.\n A 2-D tensor of shape [num_units, output_size], where “output_size”\n corresponds to either the number of cell units (i.e., “num_units”),\n or the second dimension of the “projection_weights”, if defined.\n * 6: The recurrent-to-forget weights (\\f$W_{hf}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 7: The recurrent-to-cell weights (\\f$W_{hc}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 8: The recurrent-to-output weights (\\f$W_{ho}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 9: The cell-to-input weights (\\f$W_{ci}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 10:The cell-to-forget weights (\\f$W_{cf}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 11:The cell-to-output weights (\\f$W_{co}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 12:The input gate bias (\\f$b_i\\f$). 
Optional.\n A 1-D tensor of shape [num_units].\n * 13:The forget gate bias (\\f$b_f\\f$).\n A 1-D tensor of shape [num_units].\n * 14:The cell bias (\\f$b_c\\f$).\n A 1-D tensor of shape [num_units].\n * 15:The output gate bias (\\f$b_o\\f$).\n A 1-D tensor of shape [num_units].\n * 16:The projection weights (\\f$W_{proj}\\f$). Optional.\n A 2-D tensor of shape [output_size, num_units].\n * 17:The projection bias (\\f$b_{proj}\\f$). Optional.\n A 1-D tensor of shape [output_size].\n * 18:The output state (in) (\\f$h_{t-1}\\f$).\n A 2-D tensor of shape [batch_size, output_size].\n * 19:The cell state (in) (\\f$C_{t-1}\\f$).\n A 2-D tensor of shape [batch_size, num_units].\n * 20:The activation function (\\f$g\\f$).\n A value indicating the activation function:\n <ul>\n <li>0: None;\n <li>1: Relu;\n <li>3: Relu6;\n <li>4: Tanh;\n <li>6: Sigmoid.\n </ul>\n * 21:The clipping threshold (\\f$t_{cell}\\f$) for the cell state, such\n that values are bound within [-cell_clip, cell_clip]. If set to 0.0\n then clipping is disabled.\n Until API level 29 this scalar must be of type {@link\n ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input\n tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this\n scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},\n otherwise if all the input tensors have the type {@link\n ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link\n ANEURALNETWORKS_FLOAT16}.\n * 22:The clipping threshold (\\f$t_{proj}\\f$) for the output from the\n projection layer, such that values are bound within\n [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.\n Until API level 29 this scalar must be of type {@link\n ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input\n tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this\n scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},\n otherwise if all the input tensors have the type {@link\n ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link\n ANEURALNETWORKS_FLOAT16}.\n Since API level 29 there are additional inputs to this op:\n * 23:The input layer normalization weights.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at input gate.\n * 24:The forget layer normalization weights.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at forget gate.\n * 25:The cell layer normalization weights.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at cell gate.\n * 26:The output layer normalization weights.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at output gate.\n\n Outputs:\n * 0: The scratch buffer.\n A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or\n [batch_size, num_units * 4] without CIFG.\n * 1: The output state (out) (\\f$h_t\\f$).\n A 2-D tensor of shape [batch_size, output_size].\n * 2: The cell state (out) (\\f$C_t\\f$).\n A 2-D tensor of shape [batch_size, num_units].\n * 3: The output (\\f$o_t\\f$).\n A 2-D tensor of shape [batch_size, output_size]. This is effectively\n the same as the current “output state (out)” value.\n\n Available since API level 27."]
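#[doc = " A heavily simplified sketch of one cell update per unit, assuming the four affine gate pre-activations (W_x x_t + W_h h_{t-1} + b) have already been computed, with g = tanh and no peephole, projection, clipping or layer normalization (illustrative only, not part of the NNAPI API):\n\n ```\n fn lstm_unit(i_pre: f32, f_pre: f32, c_pre: f32, o_pre: f32, c_prev: f32) -> (f32, f32) {\n     let sigma = |x: f32| 1.0 / (1.0 + (-x).exp());\n     let i_t = sigma(i_pre);                      // input gate\n     let f_t = sigma(f_pre);                      // forget gate\n     let c_t = f_t * c_prev + i_t * c_pre.tanh(); // new cell state\n     let o_t = sigma(o_pre);                      // output gate\n     let h_t = o_t * c_t.tanh();                  // new output state\n     (c_t, h_t)\n }\n ```"]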
ANEURALNETWORKS_LSTM = 16,
#[doc = " Performs an 2-D max pooling operation.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, channel] =\n max_{di, dj} (\n input[b, strides[1] * i + di, strides[2] * j + dj, channel]\n )\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n width.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter\n height.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. 
Specifies the activation to\n invoke on the result.\n * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
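#[doc = " The same window walk as L2 pooling above, but taking the maximum (illustrative only; assumes valid indices and no padding):\n\n ```\n fn max_pool_window(input: &[Vec<f32>], i: usize, j: usize,\n                    stride: (usize, usize), filter: (usize, usize)) -> f32 {\n     let mut m = f32::NEG_INFINITY;\n     for di in 0..filter.0 {\n         for dj in 0..filter.1 {\n             m = m.max(input[stride.0 * i + di][stride.1 * j + dj]);\n         }\n     }\n     m\n }\n ```"]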
ANEURALNETWORKS_MAX_POOL_2D = 17,
#[doc = " Multiplies two tensors, element-wise.\n\n Takes two input tensors of identical {@link OperandCode} and compatible\n dimensions. The output is the product of both input tensors, optionally\n modified by an activation function.\n\n Two dimensions are compatible when:\n 1. they are equal, or\n 2. one of them is 1\n\n The size of the resulting output is the maximum size along each dimension\n of the input operands. It starts with the trailing dimensions, and works\n its way forward.\n\n Since API level 29, generic zero-sized input tensor is supported. Zero\n dimension is only compatible with 0 or 1. The size of the output\n dimension is zero if either of corresponding input dimension is zero.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode}, and compatible dimensions\n as input0.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,\n the {@link FuseCode} must be \"NONE\".\n\n Outputs:\n * 0: The product, a tensor of the same {@link OperandCode} as input0.\n For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the following condition must be satisfied:\n output_scale > input1_scale * input2_scale.\n\n Available since API level 27."]
ANEURALNETWORKS_MUL = 18,
#[doc = " Computes rectified linear activation on the input tensor element-wise.\n\n The output is calculated using this formula:\n\n output = max(0, input)\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the input.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_RELU = 19,
#[doc = " Computes rectified linear 1 activation on the input tensor element-wise.\n\n The output is calculated using this formula:\n\n output = min(1.f, max(-1.f, input))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the input.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: The output tensor of the same shape as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_RELU1 = 20,
#[doc = " Computes rectified linear 6 activation on the input tensor element-wise.\n\n The output is calculated using this formula:\n\n output = min(6, max(0, input))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the input.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_RELU6 = 21,
#[doc = " Reshapes a tensor.\n\n Given tensor, this operation returns a tensor that has the same values as\n tensor, but with a newly specified shape.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the tensor to be reshaped.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the\n shape of the output tensor. The number of elements implied by shape\n must be the same as the number of elements in the input tensor.\n\n If one component of shape is the special value -1, the size of that\n dimension is computed so that the total size remains constant. In\n particular, a shape of [-1] flattens into 1-D. At most one component\n of shape can be -1.\n\n Outputs:\n * 0: The output tensor, of shape specified by the input shape.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_RESHAPE = 22,
#[doc = " Resizes images to given size using the bilinear interpretation.\n\n Resized images must be distorted if their output aspect ratio is not the\n same as input aspect ratio. The corner pixels of output may not be the\n same as corner pixels of input.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Both resizing by shape and resizing by scale are supported.\n\n Inputs (resizing by shape):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n width of the output tensor.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n height of the output tensor.\n * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the centers of the 4 corner\n pixels of the input and output tensors are aligned, preserving the\n values at the corner pixels.\n Available since API level 30.\n * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the pixel centers are assumed to\n be at (0.5, 0.5). This is the default behavior of image.resize in\n TF 2.0. If this parameter is True, then align_corners parameter\n must be False.\n Available since API level 30.\n\n Inputs (resizing by scale, since API level 29):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input. Zero batches is supported for this tensor.\n * 1: A scalar, specifying width_scale, the scaling factor of the width\n dimension from the input tensor to the output tensor. The output\n width is calculated as new_width = floor(width * width_scale).\n The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is\n of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} otherwise.\n * 2: A scalar, specifying height_scale, the scaling factor of the height\n dimension from the input tensor to the output tensor. The output\n height is calculated as new_height = floor(height * height_scale).\n The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is\n of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} otherwise.\n * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the centers of the 4 corner\n pixels of the input and output tensors are aligned, preserving the\n values at the corner pixels.\n Available since API level 30.\n * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. 
If True, the pixel centers are assumed to\n be at (0.5, 0.5). This is the default behavior of image.resize in\n TF 2.0. If this parameter is True, then align_corners parameter\n must be False.\n Available since API level 30.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, new_height, new_width, depth].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
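#[doc = " The output-size computation for resizing by scale, as defined above (illustrative only):\n\n ```\n fn resized_dims(width: usize, height: usize, width_scale: f32, height_scale: f32) -> (usize, usize) {\n     let new_width = (width as f32 * width_scale).floor() as usize;\n     let new_height = (height as f32 * height_scale).floor() as usize;\n     (new_width, new_height)\n }\n ```"]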
ANEURALNETWORKS_RESIZE_BILINEAR = 23,
#[doc = " A basic recurrent neural network layer.\n\n This layer implements the operation:\n outputs = state = activation(inputs * input_weights +\n state * recurrent_weights + bias)\n\n Where:\n * “input_weights” is a weight matrix that multiplies the inputs;\n * “recurrent_weights” is a weight matrix that multiplies the current\n “state” which itself is the output from the previous time step\n computation;\n * “bias” is a bias vector (added to each output vector in the batch);\n * “activation” is the function passed as the “fused_activation_function”\n argument (if not “NONE”).\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n The input tensors must all be the same type.\n\n Inputs:\n * 0: input.\n A 2-D tensor of shape [batch_size, input_size], where “batch_size”\n corresponds to the batching dimension, and “input_size” is the size\n of the input.\n * 1: weights.\n A 2-D tensor of shape [num_units, input_size], where “num_units”\n corresponds to the number of units.\n * 2: recurrent_weights.\n A 2-D tensor of shape [num_units, num_units], with columns\n corresponding to the weights from each unit.\n * 3: bias.\n A 1-D tensor of shape [num_units].\n * 4: hidden state (in).\n A 2-D tensor of shape [batch_size, num_units].\n * 5: fused_activation_function.\n An optional {@link FuseCode} value indicating the\n activation function. If “NONE” is specified then it results in a\n linear activation.\n\n Outputs:\n * 0: hidden state (out).\n A 2-D tensor of shape [batch_size, num_units].\n\n * 1: output.\n A 2-D tensor of shape [batch_size, num_units]. This is effectively\n the same as the current state value.\n\n Available since API level 27."]
ANEURALNETWORKS_RNN = 24,
#[doc = " Computes the softmax activation on the input tensor element-wise, per\n batch, by normalizing the input vector so the maximum coefficient is\n zero.\n\n The output is calculated using this formula:\n\n output[batch, i] =\n exp((input[batch, i] - max(input[batch, :])) * beta) /\n sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}\n\n For input tensor with rank other than 2, the activation will be applied\n independently on each 1-D slice along specified dimension.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n Tensors with rank other than 2 or 4 are only supported since API level 29.\n\n Inputs:\n * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.\n Since API level 29, this tensor may be zero-sized.\n * 1: A scalar, specifying the positive scaling factor for the exponent,\n beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar\n must be of {@link ANEURALNETWORKS_FLOAT32}.\n If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the\n scalar must be of {@link ANEURALNETWORKS_FLOAT16}.\n * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,\n specifying the dimension the activation would be performed on.\n Negative index is used to specify axis from the end (e.g. -1 for\n the last axis). Must be in the range [-n, n).\n Available since API level 29.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the scale must be 1.f / 256 and the zeroPoint must be 0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the scale must be 1.f / 256 and the zeroPoint must be -128.\n\n Available since API level 27."]
ANEURALNETWORKS_SOFTMAX = 25,
#[doc = " Rearranges blocks of spatial data, into depth.\n\n More specifically, this op outputs a copy of the input tensor where\n values from the height and width dimensions are moved to the depth\n dimension. The value block_size indicates the input block size and how\n the data is moved.\n\n Chunks of data of size block_size * block_size from depth are rearranged\n into non-overlapping blocks of size block_size x block_size.\n\n The depth of the output tensor is input_depth * block_size * block_size.\n The input tensor's height and width must be divisible by block_size.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Inputs:\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.\n block_size must be >=1 and block_size must be a divisor of both the\n input height and width.\n * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: The output 4-D tensor, of shape [batches, height/block_size,\n width/block_size, depth_in*block_size*block_size].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 27."]
ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
#[doc = " SVDF op is a kind of stateful layer derived from the notion that a\n densely connected layer that's processing a sequence of input frames can\n be approximated by using a singular value decomposition of each of its\n nodes. The implementation is based on:\n\n https://research.google.com/pubs/archive/43813.pdf\n\n P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.\n “Compressing Deep Neural Networks using a Rank-Constrained Topology”.\n INTERSPEECH, 2015.\n\n It processes the incoming input using a 2-stage filtering mechanism:\n * stage 1 performs filtering on the \"features\" dimension, whose outputs\n get pushed into a memory of fixed-size memory_size.\n * stage 2 performs filtering on the \"time\" dimension of the memory_size\n memoized outputs of stage 1.\n\n Specifically, for rank 1, this layer implements the operation:\n\n memory = push(conv1d(inputs, weights_feature, feature_dim,\n \"ANEURALNETWORKS_PADDING_VALID\"));\n outputs = activation(memory * weights_time + bias);\n\n Where:\n * “weights_feature” is a weights matrix that processes the inputs (by\n convolving the input with every “feature filter”), and whose outputs\n get pushed, stacked in order, into the fixed-size “memory” (the oldest\n entry gets dropped);\n * “weights_time” is a weights matrix that processes the “memory” (by a\n batched matrix multiplication on the num_units);\n * “bias” is an optional bias vector (added to each output vector in the\n batch); and\n * “activation” is the function passed as the “fused_activation_function”\n argument (if not “NONE”).\n\n Each rank adds a dimension to the weights matrices by means of stacking\n the filters.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n All input tensors must be the same type.\n\n Inputs:\n * 0: input.\n A 2-D tensor of shape [batch_size, input_size], where “batch_size”\n corresponds to the batching dimension, and “input_size” is the size\n of the input.\n * 1: weights_feature.\n A 2-D tensor of shape [num_units, input_size], where “num_units”\n corresponds to the number of units.\n * 2: weights_time.\n A 2-D tensor of shape [num_units, memory_size], where “memory_size”\n corresponds to the fixed-size of the memory.\n * 3: bias.\n An optional 1-D tensor of shape [num_units].\n * 4: state (in).\n A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].\n * 5: rank.\n The rank of the SVD approximation.\n * 6: fused_activation_function.\n An optional {@link FuseCode} value indicating the\n activation function. If “NONE” is specified then it results in a\n linear activation.\n\n Outputs:\n * 0: state (out).\n A 2-D tensor of the same {@link OperandCode} as the inputs, with shape\n [batch_size, (memory_size - 1) * num_units * rank].\n * 1: output.\n A 2-D tensor of the same {@link OperandCode} as the inputs, with shape\n [batch_size, num_units].\n\n Available since API level 27."]
ANEURALNETWORKS_SVDF = 27,
#[doc = " Computes hyperbolic tangent of input tensor element-wise.\n\n The output is calculated using this formula:\n\n output = tanh(input)\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4.\n\n Inputs:\n * 0: A tensor, specifying the input.\n Since API level 29, this tensor may be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n the scale must be 1.f / 128 and the zeroPoint must be 128.\n For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the scale must be 1.f / 128 and the zeroPoint must be 0.\n\n Available since API level 27."]
ANEURALNETWORKS_TANH = 28,
#[doc = " BatchToSpace for N-dimensional tensors.\n\n This operation reshapes the batch dimension (dimension 0) into M + 1\n dimensions of shape block_shape + [batch], interleaves these blocks back\n into the grid defined by the spatial dimensions [1, ..., M], to obtain a\n result with the same rank as the input.\n\n This is the reverse of SpaceToBatch.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be reshaped\n * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block\n sizes for each spatial dimension of the input tensor. All values\n must be >= 1.\n * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 28."]
ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
#[doc = " Element-wise division of two tensors.\n\n Takes two input tensors of identical {@link OperandCode} and compatible\n dimensions. The output is the result of dividing the first input tensor\n by the second, optionally modified by an activation function.\n\n For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs\n \"floor division\" (\"//\" in Python). For example,\n 5 // 2 = 2\n -5 // 2 = -3\n\n Two dimensions are compatible when:\n 1. they are equal, or\n 2. one of them is 1\n\n The size of the output is the maximum size along each dimension of the\n input operands. It starts with the trailing dimensions, and works its way\n forward.\n\n Example:\n input1.dimension = {4, 1, 2}\n input2.dimension = {5, 4, 3, 1}\n output.dimension = {5, 4, 3, 2}\n\n Since API level 29, generic zero-sized input tensor is supported. Zero\n dimension is only compatible with 0 or 1. The size of the output\n dimension is zero if either of corresponding input dimension is zero.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the first input.\n * 1: A tensor of the same {@link OperandCode}, and compatible dimensions\n as input0.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,\n the {@link FuseCode} must be \"NONE\".\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n\n Available since API level 28."]
ANEURALNETWORKS_DIV = 30,
#[doc = " Computes the mean of elements across dimensions of a tensor.\n\n Reduces the input tensor along the given dimensions to reduce. Unless\n keep_dims is true, the rank of the tensor is reduced by 1 for each entry\n in axis. If keep_dims is true, the reduced dimensions are retained with\n length 1.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: A tensor, specifying the input.\n * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Must be in the range\n [-rank(input_tensor), rank(input_tensor)).\n\n NOTE: When the operation was introduced, the documentation\n incorrectly stated that if dimensions were empty, the operation\n would reduce across all dimensions. This behavior was never\n implemented.\n\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n\n Available since API level 28."]
ANEURALNETWORKS_MEAN = 31,
#[doc = " Pads a tensor.\n\n This operation pads a tensor according to the specified paddings.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n (full support since API level 29, see the output section)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be padded.\n * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings\n for each spatial dimension of the input tensor. The shape of the\n tensor must be {rank(input0), 2}.\n padding[i, 0] specifies the number of elements to be padded in the\n front of dimension i.\n padding[i, 1] specifies the number of elements to be padded after the\n end of dimension i.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0. The\n output tensor has the same rank as input0, and each\n dimension of the output tensor has the same size as the\n corresponding dimension of the input tensor plus the size\n of the padding:\n output0.dimension[i] =\n padding[i, 0] + input0.dimension[i] + padding[i, 1]\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n NOTE: Before API level 29, the pad value for\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.\n Since API level 29, the pad value is always the logical zero.\n\n Available since API level 28."]
ANEURALNETWORKS_PAD = 32,
#[doc = " SpaceToBatch for N-Dimensional tensors.\n\n This operation divides \"spatial\" dimensions [1, ..., M] of the input into\n a grid of blocks of shape block_shape, and interleaves these blocks with\n the \"batch\" dimension (0) such that in the output, the spatial dimensions\n [1, ..., M] correspond to the position within the grid, and the batch\n dimension combines both the position within a spatial block and the\n original batch position. Prior to division into blocks, the spatial\n dimensions of the input are optionally zero padded according to paddings.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n (full support since API level 29, see the output section)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n NCHW is supported since API level 29.\n\n Inputs:\n * 0: An n-D tensor, specifying the input.\n * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block\n sizes for each spatial dimension of the input tensor. All values\n must be >= 1.\n * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings\n for each spatial dimension of the input tensor. All values must be\n >= 0. The shape of the tensor must be {M, 2}, where M is the number\n of spatial dimensions.\n padding[i, 0] specifies the number of element to be padded in the\n front of dimension i.\n padding[i, 1] specifies the number of element to be padded after the\n end of dimension i.\n * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n Available since API level 29.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n NOTE: Before API level 29, the pad value for\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.\n Since API level 29, the pad value is always the logical zero.\n\n Available since API level 28."]
ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
#[doc = " Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor input, this operation returns a tensor of the same\n {@link OperandCode} with all dimensions of size 1 removed. If you don't\n want to remove all size 1 dimensions, you can remove specific size 1\n dimensions by specifying the axes (input1).\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, the tensor to be squeezed.\n * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The\n dimensions to squeeze. If specified only squeezes the dimensions\n listed. Otherwise, squeezes all dimensions. The dimension index\n starts at 0. An error must be reported if squeezing a dimension that\n is not 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0. Contains the\n same data as input, but has one or more dimensions of size 1\n removed.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n If all input dimensions are equal to 1 and are to be squeezed, the\n output shape is [1].\n\n Available since API level 28."]
ANEURALNETWORKS_SQUEEZE = 34,
#[doc = " Extracts a strided slice of a tensor.\n\n Roughly speaking, this op extracts a slice of size (end - begin) / stride\n from the given input tensor. Starting at the location specified by begin\n the slice continues by adding stride to the index until all dimensions\n are not less than end. Note that a stride can be negative, which causes a\n reverse slice.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be sliced.\n * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The\n starts of the dimensions of the input tensor to be sliced. The\n length must be of rank(input0).\n * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The\n ends of the dimensions of the input tensor to be sliced. The length\n must be of rank(input0).\n * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The\n strides of the dimensions of the input tensor to be sliced. The\n length must be of rank(input0). The entries must be non-zero.\n * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit\n of begin_mask is set, begin[i] is ignored and the fullest possible\n range in that dimension is used instead.\n * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of\n end_mask is set, end[i] is ignored and the fullest possible range in\n that dimension is used instead.\n * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the\n ith bit of shrink_axis_mask is set, the ith dimension specification\n shrinks the dimensionality by 1, taking on the value at index\n begin[i]. In this case, the ith specification must define a\n slice of size 1, e.g. begin[i] = x, end[i] = x + 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),\n where k is the number of bits set in shrink_axis_mask.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n If shrink_axis_mask is true for all input dimensions, the output\n shape is [1].\n\n Available since API level 28."]
ANEURALNETWORKS_STRIDED_SLICE = 35,
#[doc = " Element-wise subtraction of two tensors.\n\n Takes two input tensors of identical {@link OperandCode} and compatible\n dimensions. The output is the result of subtracting the second input\n tensor from the first one, optionally modified by an activation function.\n\n Two dimensions are compatible when:\n 1. they are equal, or\n 2. one of them is 1\n\n The size of the output is the maximum size along each dimension of the\n input operands. It starts with the trailing dimensions, and works its way\n forward.\n\n Example:\n input1.dimension = {4, 1, 2}\n input2.dimension = {5, 4, 3, 1}\n output.dimension = {5, 4, 3, 2}\n\n Since API level 29, generic zero-sized input tensor is supported. Zero\n dimension is only compatible with 0 or 1. The size of the output\n dimension is zero if either of corresponding input dimension is zero.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the first input.\n * 1: A tensor of the same {@link OperandCode}, and compatible dimensions\n as input0.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,\n the {@link FuseCode} must be \"NONE\".\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 28."]
ANEURALNETWORKS_SUB = 36,
#[doc = " Transposes the input tensor, permuting the dimensions according to the\n perm tensor.\n\n The returned tensor's dimension i corresponds to the input dimension\n perm[i]. If perm is not given, it is set to (n-1...0), where n is the\n rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be transposed.\n Since API level 29, this tensor may be zero-sized.\n * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},\n the permutation of the dimensions of the input tensor.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 28."]
ANEURALNETWORKS_TRANSPOSE = 37,
#[doc = " Computes the absolute value of a tensor, element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_ABS = 38,
#[doc = " Returns the index of the largest element along an axis.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor specifying the input. Must be non-empty.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to\n reduce across. Negative index is used to specify axis from the\n end (e.g. -1 for the last axis). Must be in the range [-n, n).\n\n Outputs:\n * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.\n If input is 1-dimensional, the output shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_ARGMAX = 39,
#[doc = " Returns the index of the smallest element along an axis.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor specifying the input. Must be non-empty.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to\n reduce across. Negative index is used to specify axis from the\n end (e.g. -1 for the last axis). Must be in the range [-n, n).\n\n Outputs:\n * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.\n If input is 1-dimensional, the output shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_ARGMIN = 40,
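// A small sketch (illustrative only; `normalize_axis` is our name) of the
// negative-axis convention shared by ANEURALNETWORKS_ARGMAX/ARGMIN and the
// other axis-taking operations in this enum: an axis in the documented range
// [-n, n) is normalized by adding n when negative, so -1 always selects the
// last axis.
//
//     fn normalize_axis(axis: i32, rank: usize) -> Option<usize> {
//         let n = rank as i32;
//         if axis < -n || axis >= n {
//             return None; // outside the documented range [-n, n)
//         }
//         let idx = if axis < 0 { axis + n } else { axis };
//         Some(idx as usize)
//     }
//
// normalize_axis(-1, 4) yields Some(3).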
#[doc = " Transform axis-aligned bounding box proposals using bounding box deltas.\n\n Given the positions of bounding box proposals and the corresponding\n bounding box deltas for each class, return the refined bounding box\n regions. The resulting bounding boxes are cliped against the edges of\n the image.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}\n\n Inputs:\n * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the\n bounding box proposals, each line with format [x1, y1, x2, y2].\n For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},\n the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois\n is supported for this tensor.\n * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the\n bounding box delta for each region of interest and each class. The\n bounding box deltas are organized in the following order\n [dx, dy, dw, dh], where dx and dy is the relative correction factor\n for the center position of the bounding box with respect to the width\n and height, dw and dh is the log-scale relative correction factor\n for the width and height. For input0 of type\n {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be\n of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is\n supported for this tensor.\n * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_rois], specifying the batch index of each box. Boxes with\n the same batch index are grouped together. Zero num_rois is\n supported for this tensor.\n * 3: A 2-D Tensor of shape [batches, 2], specifying the information of\n each image in the batch, each line with format\n [image_height, image_width].\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0, with shape\n [num_rois, num_classes * 4], specifying the coordinates of each\n output bounding box for each class, with format [x1, y1, x2, y2].\n For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the\n scale must be 0.125 and the zero point must be 0.\n\n Available since API level 29."]
ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
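// A hedged sketch of one plausible reading of the delta application
// described above, assuming dx/dy shift the box center relative to the box
// width/height and dw/dh rescale the extent on a log scale; clipping to the
// image and quantization handling are omitted. Not the NNAPI implementation.
//
//     fn apply_bbox_delta(b: [f32; 4], d: [f32; 4]) -> [f32; 4] {
//         let [x1, y1, x2, y2] = b;
//         let (w, h) = (x2 - x1, y2 - y1);
//         let (cx, cy) = (x1 + 0.5 * w, y1 + 0.5 * h);
//         // d = [dx, dy, dw, dh]: shift the center, rescale the extent.
//         let (ncx, ncy) = (cx + d[0] * w, cy + d[1] * h);
//         let (nw, nh) = (w * d[2].exp(), h * d[3].exp());
//         [ncx - 0.5 * nw, ncy - 0.5 * nh, ncx + 0.5 * nw, ncy + 0.5 * nh]
//     }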
#[doc = " A recurrent neural network layer that applies an LSTM cell to a\n sequence of inputs in forward and backward directions.\n\n The op supports cross-linking via an auxiliary input. Regular cell feeds\n one input into the two RNN cells in the following way:\n\n INPUT (INPUT_REVERSED)\n | |\n ---------------------\n | FW_LSTM BW_LSTM |\n ---------------------\n | |\n FW_OUT BW_OUT\n\n An op with cross-linking takes two inputs and feeds them into the RNN\n cells in the following way:\n\n AUX_INPUT (AUX_INPUT_REVERSED)\n | |\n INPUT | (INPUT_R'D.)|\n | | | |\n -----------------------\n | \\ / \\ / |\n | FW_LSTM BW_LSTM |\n -----------------------\n | |\n FW_OUT BW_OUT\n\n The cross-linking mode is enabled iff auxiliary input and auxiliary\n weights are present. While stacking this op on top of itself, this\n allows to connect both forward and backward outputs from previous cell\n to the next cell's input.\n\n Since API level 30 parallel linking mode is supported. The mode is\n enabled if auxiliary input is present but auxiliary weights are omitted.\n In this case, the cell feeds inputs into the RNN in the following way:\n\n INPUT (AUX_INPUT_REVERSED)\n | |\n ---------------------\n | FW_LSTM BW_LSTM |\n ---------------------\n | |\n FW_OUT BW_OUT\n\n While stacking this op on top of itself, this allows to connect both\n forward and backward outputs from previous cell to the next cell's\n corresponding inputs.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: 3, either time-major or batch-major.\n\n All input and output tensors must be of the same type.\n\n Inputs:\n * 0: The input.\n A 3-D tensor of shape:\n If time-major: [max_time, batch_size, input_size]\n If batch-major: [batch_size, max_time, input_size]\n where \"max_time\" is the number of timesteps (sequence length),\n \"batch_size\" corresponds to the batching dimension, and\n \"input_size\" is the size of the input.\n * 1: The forward input-to-input weights. Optional.\n A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”\n corresponds to the number of forward cell units.\n * 2: The forward input-to-forget weights.\n A 2-D tensor of shape [fw_num_units, input_size].\n * 3: The forward input-to-cell weights.\n A 2-D tensor of shape [fw_num_units, input_size].\n * 4: The forward input-to-output weights.\n A 2-D tensor of shape [fw_num_units, input_size].\n * 5: The forward recurrent-to-input weights. Optional.\n A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”\n corresponds to either the number of cell units (i.e., fw_num_units),\n or the second dimension of the “fw_projection_weights”, if defined.\n * 6: The forward recurrent-to-forget weights.\n A 2-D tensor of shape [fw_num_units, fw_output_size].\n * 7: The forward recurrent-to-cell weights.\n A 2-D tensor of shape [fw_num_units, fw_output_size].\n * 8: The forward recurrent-to-output weights.\n A 2-D tensor of shape [fw_num_units, fw_output_size].\n * 9: The forward cell-to-input weights. Optional.\n A 1-D tensor of shape [fw_num_units].\n * 10: The forward cell-to-forget weights. Optional.\n A 1-D tensor of shape [fw_num_units].\n * 11: The forward cell-to-output weights. Optional.\n A 1-D tensor of shape [fw_num_units].\n * 12: The forward input gate bias. 
Optional.\n A 1-D tensor of shape [fw_num_units].\n * 13: The forward forget gate bias.\n A 1-D tensor of shape [fw_num_units].\n * 14: The forward cell gate bias.\n A 1-D tensor of shape [fw_num_units].\n * 15: The forward output gate bias.\n A 1-D tensor of shape [fw_num_units].\n * 16: The forward projection weights. Optional.\n A 2-D tensor of shape [fw_output_size, fw_num_units].\n * 17: The forward projection bias. Optional.\n A 1-D tensor of shape [fw_output_size].\n * 18: The backward input-to-input weights. Optional.\n A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”\n corresponds to the number of backward cell units.\n * 19: The backward input-to-forget weights.\n A 2-D tensor of shape [bw_num_units, input_size].\n * 20: The backward input-to-cell weights.\n A 2-D tensor of shape [bw_num_units, input_size].\n * 21: The backward input-to-output weights.\n A 2-D tensor of shape [bw_num_units, input_size].\n * 22: The backward recurrent-to-input weights. Optional.\n A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”\n corresponds to either the number of cell units (i.e., “bw_num_units”),\n or the second dimension of the “bw_projection_weights”, if defined.\n * 23: The backward recurrent-to-forget weights.\n A 2-D tensor of shape [bw_num_units, bw_output_size].\n * 24: The backward recurrent-to-cell weights.\n A 2-D tensor of shape [bw_num_units, bw_output_size].\n * 25: The backward recurrent-to-output weights.\n A 2-D tensor of shape [bw_num_units, bw_output_size].\n * 26: The backward cell-to-input weights. Optional.\n A 1-D tensor of shape [bw_num_units].\n * 27: The backward cell-to-forget weights. Optional.\n A 1-D tensor of shape [bw_num_units].\n * 28: The backward cell-to-output weights. Optional.\n A 1-D tensor of shape [bw_num_units].\n * 29: The backward input gate bias. Optional.\n A 1-D tensor of shape [bw_num_units].\n * 30: The backward forget gate bias.\n A 1-D tensor of shape [bw_num_units].\n * 31: The backward cell gate bias.\n A 1-D tensor of shape [bw_num_units].\n * 32: The backward output gate bias.\n A 1-D tensor of shape [bw_num_units].\n * 33: The backward projection weights. Optional.\n A 2-D tensor of shape [bw_output_size, bw_num_units].\n * 34: The backward projection bias. Optional.\n A 1-D tensor of shape [bw_output_size].\n * 35: The forward input activation state.\n A 2-D tensor of shape [batch_size, bw_output_size].\n * 36: The forward input cell state.\n A 2-D tensor of shape [batch_size, bw_num_units].\n * 37: The backward input activation state.\n A 2-D tensor of shape [batch_size, bw_output_size].\n * 38: The backward input cell state.\n A 2-D tensor of shape [batch_size, bw_num_units].\n * 39: The auxiliary input. Optional.\n A 3-D tensor of shape [max_time, batch_size, aux_input_size],\n where “batch_size” corresponds to the batching dimension, and\n “aux_input_size” is the size of the auxiliary input. Optional. See\n the docs above for the usage modes explanation.\n * 40: The forward auxiliary input-to-input weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [fw_num_units, aux_input_size].\n * 41: The forward auxiliary input-to-forget weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [fw_num_units, aux_input_size].\n * 42: The forward auxiliary input-to-cell weights.\n Optional. 
See the docs above for the usage modes explanation.\n A 2-D tensor of shape [fw_num_units, aux_input_size].\n * 43: The forward auxiliary input-to-output weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [fw_num_units, aux_input_size].\n * 44: The backward auxiliary input-to-input weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [bw_num_units, aux_input_size].\n * 45: The backward auxiliary input-to-forget weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [bw_num_units, aux_input_size].\n * 46: The backward auxiliary input-to-cell weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [bw_num_units, aux_input_size].\n * 47: The backward auxiliary input-to-output weights.\n Optional. See the docs above for the usage modes explanation.\n A 2-D tensor of shape [bw_num_units, aux_input_size].\n * 48: The activation function.\n A value indicating the activation function:\n <ul>\n <li>0: None;\n <li>1: Relu;\n <li>3: Relu6;\n <li>4: Tanh;\n <li>6: Sigmoid.\n </ul>\n * 49: The clipping threshold for the cell state, such\n that values are bound within [-cell_clip, cell_clip]. If set to 0.0\n then clipping is disabled.\n If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},\n otherwise if all the input tensors have the type\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be\n of type {@link ANEURALNETWORKS_FLOAT16}.\n * 50: The clipping threshold for the output from the\n projection layer, such that values are bound within\n [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.\n If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},\n otherwise if all the input tensors have the type\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be\n of type {@link ANEURALNETWORKS_FLOAT16}.\n * 51: merge_outputs\n An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs\n from forward and backward cells should be merged.\n * 52: time_major\n An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format\n of input and output tensors.\n * 53: The forward input layer normalization weights. Optional.\n A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs\n to activation at input gate.\n * 54: The forward forget layer normalization weights. Optional.\n A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs\n to activation at forget gate.\n * 55: The forward cell layer normalization weights. Optional.\n A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs\n to activation at cell gate.\n * 56: The forward output layer normalization weights. Optional.\n A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs\n to activation at output gate.\n * 57: The backward input layer normalization weights. Optional.\n A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs\n to activation at input gate.\n * 58: The backward forget layer normalization weights. Optional.\n A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs\n to activation at forget gate.\n * 59: The backward cell layer normalization weights. Optional.\n A 1-D tensor of shape [bw_num_units]. 
Used to rescale normalized inputs\n to activation at cell gate.\n * 60: The backward output layer normalization weights. Optional.\n A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs\n to activation at output gate.\n\n Outputs:\n * 0: The forward output.\n A 3-D tensor of shape:\n If time-major and not merge_outputs:\n [max_time, batch_size, fw_output_size]\n If time-major and merge_outputs:\n [max_time, batch_size, fw_output_size + bw_output_size]\n If batch-major and not merge_outputs:\n [batch_size, max_time, fw_output_size]\n If batch-major and merge_outputs:\n [batch_size, max_time, fw_output_size + bw_output_size]\n * 1: The backward output. Unused if merge_outputs is true.\n A 3-D tensor of shape:\n If time-major: [max_time, batch_size, bw_output_size]\n If batch-major: [batch_size, max_time, bw_output_size]\n * 2: The forward activation state output.\n A 2-D tensor of shape [batch_size, fw_output_size] containing an\n activation state from the last time step in the sequence. This\n output is optional and can be omitted. If this output is present\n then outputs 3-5 must be present as well.\n Available since API level 30.\n * 3: The forward cell state output.\n A tensor of shape [batch_size, fw_cell_size] containing a cell state\n from the last time step in the sequence. This output is optional\n and can be omitted. If this output is present\n then outputs 2, 4, 5 must be present as well.\n Available since API level 30.\n * 4: The backward activation state output.\n A 2-D tensor of shape [batch_size, bw_output_size] containing an\n activation state from the last time step in the sequence. This\n output is optional and can be omitted. If this output is present\n then outputs 2, 3, 5 must be present as well.\n Available since API level 30.\n * 5: The backward cell state output.\n A tensor of shape [batch_size, bw_cell_size] containing a cell state\n from the last time step in the sequence. This output is optional\n and can be omitted. If this output is present\n then outputs 2-4 must be present as well.\n Available since API level 30.\n\n Available since API level 29.\n\n Important: As of API level 29, there is no way to get the output state tensors out and NNAPI\n does not maintain internal states. This operator does not support the usage pattern in which\n multiple cells are chained and state tensors are propagated."]
ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
#[doc = " A recurrent neural network layer that applies a basic RNN cell to a\n sequence of inputs in forward and backward directions.\n\n This Op unrolls the input along the sequence dimension, and implements\n the following operation for each element in the sequence s =\n 1...sequence_length:\n fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +\n fw_state * fw_recurrent_weights’ + fw_bias)\n\n And for each element in sequence t = sequence_length : 1\n bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +\n bw_state * bw_recurrent_weights’ + bw_bias)\n\n Where:\n * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;\n * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the\n current “state” which itself is the output from the previous time step\n computation;\n * “{fw,bw}_bias” is a bias vector (added to each output vector in the\n batch);\n * “activation” is the function passed as the “fused_activation_function”\n argument (if not “NONE”).\n\n The op supports cross-linking via an auxiliary input. Regular cell feeds\n one input into the two RNN cells in the following way:\n\n INPUT (INPUT_REVERSED)\n | |\n ---------------------\n | FW_RNN BW_RNN |\n ---------------------\n | |\n FW_OUT BW_OUT\n\n An op with cross-linking takes two inputs and feeds them into the RNN\n cells in the following way:\n\n AUX_INPUT (AUX_INPUT_REVERSED)\n | |\n INPUT | (INPUT_R'D.)|\n | | | |\n -----------------------\n | \\ / \\ / |\n | FW_RNN BW_RNN |\n -----------------------\n | |\n FW_OUT BW_OUT\n\n The cross-linking mode is enabled iff auxiliary input and auxiliary\n weights are present. While stacking this op on top of itself, this\n allows to connect both forward and backward outputs from previous cell\n to the next cell's input.\n\n Since API level 30 parallel linking mode is supported. The mode is\n enabled if auxiliary input is present but auxiliary weights are omitted.\n In this case, the cell feeds inputs into the RNN in the following way:\n\n INPUT (AUX_INPUT_REVERSED)\n | |\n ---------------------\n | FW_RNN BW_RNN |\n ---------------------\n | |\n FW_OUT BW_OUT\n\n While stacking this op on top of itself, this allows to connect both\n forward and backward outputs from previous cell to the next cell's\n corresponding inputs.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n The input tensors must all be the same type.\n\n Inputs:\n * 0: input.\n A 3-D tensor. The shape is defined by the input 6 (timeMajor). If\n it is set to true, then the input has a shape [maxTime, batchSize,\n inputSize], otherwise the input has a shape [batchSize, maxTime,\n inputSize].\n * 1: fwWeights.\n A 2-D tensor of shape [fwNumUnits, inputSize].\n * 2: fwRecurrentWeights.\n A 2-D tensor of shape [fwNumUnits, fwNumUnits].\n * 3: fwBias.\n A 1-D tensor of shape [fwNumUnits].\n * 4: fwHiddenState.\n A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden\n state input for the first time step of the computation.\n * 5: bwWeights.\n A 2-D tensor of shape [bwNumUnits, inputSize].\n * 6: bwRecurrentWeights.\n A 2-D tensor of shape [bwNumUnits, bwNumUnits].\n * 7: bwBias.\n A 1-D tensor of shape [bwNumUnits].\n * 8: bwHiddenState\n A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden\n state input for the first time step of the computation.\n * 9: auxInput.\n A 3-D tensor. The shape is defined by the input 6 (timeMajor). 
If\n it is set to true, then the input has a shape [maxTime, batchSize,\n auxInputSize], otherwise the input has a shape [batchSize, maxTime,\n auxInputSize]. Can be omitted. See the docs above for the usage\n modes explanation.\n * 10:fwAuxWeights.\n A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.\n See the docs above for the usage modes explanation.\n * 11:bwAuxWeights.\n A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.\n See the docs above for the usage modes explanation.\n * 12:fusedActivationFunction.\n A {@link FuseCode} value indicating the activation function. If\n “NONE” is specified then it results in a linear activation.\n * 13:timeMajor\n An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format\n of input and output tensors.\n * 14:mergeOutputs\n An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs\n from forward and backward cells are separate (if set to false) or\n concatenated (if set to true).\n\n Outputs:\n * 0: fwOutput.\n A 3-D tensor. The first two dimensions of the shape are defined by\n the input 13 (timeMajor) and the third dimension is defined by the\n input 14 (mergeOutputs). If timeMajor is set to true, then the first\n two dimensions are [maxTime, batchSize], otherwise they are set to\n [batchSize, maxTime]. If mergeOutputs is set to true, then the third\n dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set\n to fwNumUnits.\n * 1: bwOutput.\n A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then\n this tensor is not produced. The shape is defined by the input 13\n (timeMajor). If it is set to true, then the shape is set to\n [maxTime, batchSize, bwNumUnits], otherwise the shape is set to\n [batchSize, maxTime, bwNumUnits].\n * 2: The forward hidden state output.\n A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden\n state from the last time step in the sequence. This output is\n optional and can be omitted. If this output is present then output\n 3 must be present as well.\n Available since API level 30.\n * 3: The backward hidden state output.\n A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden\n state from the last time step in the sequence. This output is\n optional and can be omitted. If this output is present then output\n 2 must be present as well.\n Available since API level 30.\n\n Available since API level 29.\n\n Important: As of API level 29, there is no way to get the output state tensors out and NNAPI\n does not maintain internal states. This operator does not support the usage pattern in which\n multiple cells are chained and state tensors are propagated."]
ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
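// A minimal single-step sketch of the recurrence quoted above,
// state_new = activation(input * weights' + state * recurrent_weights' + bias),
// applied per direction. The dense row-major matrices and the name
// `rnn_step` are illustrative assumptions, not NNAPI symbols.
//
//     fn rnn_step(
//         x: &[f32],      // [input_size]
//         state: &[f32],  // [num_units]
//         w: &[Vec<f32>], // [num_units][input_size]
//         r: &[Vec<f32>], // [num_units][num_units]
//         b: &[f32],      // [num_units]
//         act: impl Fn(f32) -> f32,
//     ) -> Vec<f32> {
//         (0..b.len())
//             .map(|u| {
//                 let xw: f32 = x.iter().zip(&w[u]).map(|(a, c)| a * c).sum();
//                 let sr: f32 = state.iter().zip(&r[u]).map(|(a, c)| a * c).sum();
//                 act(xw + sr + b[u])
//             })
//             .collect()
//     }
//
// The forward pass feeds the sequence in order and the backward pass in
// reverse; each direction keeps its own weights, bias, and hidden state.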
#[doc = " Greedily selects a subset of bounding boxes in descending order of score.\n\n This op applies NMS algorithm to each class. In each loop of execution,\n the box with maximum score gets selected and removed from the pending set.\n The scores of the rest of boxes are lowered according to the\n intersection-over-union (IOU) overlapping with the previously selected\n boxes and a specified NMS kernel method. Any boxes with score less\n than a threshold are removed from the pending set.\n\n Three NMS kernels are supported:\n * Hard: score_new = score_old * (1 if IoU < threshold else 0)\n * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)\n * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)\n\n Axis-aligned bounding boxes are represented by its upper-left corner\n coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid\n bounding box should satisfy x1 <= x2 and y1 <= y2.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Inputs:\n * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score\n of each bounding box proposal. The boxes are grouped by batches in the\n first dimension. Zero num_rois is supported for this tensor.\n * 1: A 2-D Tensor specifying the bounding boxes of shape\n [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].\n The boxes are grouped by batches in the first dimension. The sequential\n order of the boxes corresponds with input0. For input0 of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of\n {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and\n scale of 0.125.\n For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},\n with zeroPoint of -128 and scale of 0.125.\n Zero num_rois is supported for this tensor.\n * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_rois], specifying the batch index of each box. Boxes with\n the same batch index are grouped together.\n * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes\n with scores lower than the threshold are filtered before sending\n to the NMS algorithm.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum\n number of selected bounding boxes for each image. Set to a negative\n value for unlimited number of output bounding boxes.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS\n kernel method, options are 0:hard, 1:linear, 2:gaussian.\n * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU\n threshold in hard and linear NMS kernel. This field is ignored if\n gaussian kernel is selected.\n * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in\n gaussian NMS kernel. This field is ignored if gaussian kernel is\n not selected.\n * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.\n Boxes with scores lower than the threshold are dropped during the\n score updating phase in soft NMS.\n\n Outputs:\n * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape\n [num_output_rois], specifying the score of each output box. The boxes\n are grouped by batches, but the sequential order in each batch is not\n guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n guaranteed. 
For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the scale and zero point must be the same as input0.\n * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape\n [num_output_rois, 4], specifying the coordinates of each\n output bounding box with the same format as input1. The sequential\n order of the boxes corresponds with output0. For type of\n {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be\n 0.125 and the zero point must be 0.\n * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_output_rois], specifying the class of each output box. The\n sequential order of the boxes corresponds with output0.\n * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_output_rois], specifying the batch index of each box. Boxes\n with the same batch index are grouped together.\n\n Available since API level 29."]
ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
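// A sketch (not the NNAPI implementation) of the three score-update kernels
// listed above, given the IoU of a pending box against the selected box;
// `nms_rescore` is an illustrative name.
//
//     fn nms_rescore(kernel: u32, score: f32, iou: f32, threshold: f32, sigma: f32) -> f32 {
//         match kernel {
//             // Hard: keep the score below the IoU threshold, else zero it.
//             0 => if iou < threshold { score } else { 0.0 },
//             // Linear: below the threshold keep it, else scale by (1 - IoU).
//             1 => if iou < threshold { score } else { score * (1.0 - iou) },
//             // Gaussian: always decay by exp(-IoU^2 / sigma).
//             2 => score * (-iou * iou / sigma).exp(),
//             _ => score,
//         }
//     }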
#[doc = " Casts a tensor to a type.\n\n This operation ignores the scale and zeroPoint of quanized tensors,\n e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input\n as a tensor of uint8 values.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n Since API level 30, casting tensors of the following\n {@link OperandCode} to the same {@link OperandCode} is supported:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: A tensor with the same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_CAST = 45,
#[doc = " Shuffle the channels of the input tensor.\n\n Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE\n divide the channel dimension into num_groups groups, and reorganize the\n channels by grouping channels with the same index in each group.\n\n Along the channel dimension, the output is calculated using this formula:\n\n output_channel[k * num_groups + g] = input_channel[g * group_size + k]\n\n where group_size = num_channels / num_groups\n\n The number of channels must be divisible by num_groups.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be shuffled.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n groups.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension\n channel shuffle would be performed on. Negative index is used to\n specify axis from the end (e.g. -1 for the last axis). Must be in\n the range [-n, n).\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} and same shape as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
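// A small sketch of the shuffle formula above, applied to a flat vector of
// channel values (illustrative only; `channel_shuffle` is not an NNAPI
// symbol):
//
//     fn channel_shuffle(channels: &[f32], num_groups: usize) -> Vec<f32> {
//         // num_channels must be divisible by num_groups.
//         let group_size = channels.len() / num_groups;
//         (0..channels.len())
//             .map(|out| {
//                 // output_channel[k * num_groups + g] = input_channel[g * group_size + k]
//                 let (k, g) = (out / num_groups, out % num_groups);
//                 channels[g * group_size + k]
//             })
//             .collect()
//     }
//
// With 6 channels and num_groups = 2, the channel order [0 1 2 | 3 4 5]
// becomes [0 3 1 4 2 5].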
#[doc = " Apply postprocessing steps to bounding box detections.\n\n Bounding box detections are generated by applying transformation on a set\n of predefined anchors with the bounding box deltas from bounding box\n regression. A final step of hard NMS is applied to limit the number of\n returned boxes.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Inputs:\n * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying\n the score of each anchor with each class. Class 0 for each\n [batches, num_anchors, 0] is background and will be ignored.\n * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with\n the first four values in length_box_encoding specifying the bounding\n box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],\n where dy and dx is the linear-scale relative correction factor for the\n center position of the bounding box with respect to the width and height,\n dh and dw is the log-scale relative correction factor for the width and\n height. All the entries in length_box_encoding beyond the first four\n values are ignored in this operation.\n * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each\n predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and\n ctr_x are the center position of the box, and h and w are the height\n and the width.\n * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling\n factor for dy in bounding box deltas.\n * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling\n factor for dx in bounding box deltas.\n * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling\n factor for dh in bounding box deltas.\n * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling\n factor for dw in bounding box deltas.\n * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular\n multi-class NMS algorithm that do NMS separately for each class,\n set to false for a faster algorithm that only do one single NMS\n using the highest class score..\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying\n the maximum number of boxes for the output. Boxes with the lowest\n scores are discarded to meet the limit.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is\n set to false, specifying the maximum number of classes per detection.\n * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is\n set to true, specifying the maximum number of detections when\n applying NMS algorithm for each single class.\n * 11: A scalar, score_threshold. Boxes with scores lower than the\n threshold are filtered before sending to the NMS algorithm. The\n scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar\n must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include\n background class in the list of label map for the output, set\n to false to not include the background. 
When the background\n class is included, it has label 0 and the output classes start\n at 1 in the label map, otherwise, the output classes start at 0.\n\n Outputs:\n * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape\n [batches, max_num_detections], specifying the score of each output\n detection.\n * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the\n coordinates of each output bounding box, with format\n [y1, x1, y2, x2].\n * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [batches, max_num_detections], specifying the class label for each\n output detection.\n * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],\n specifying the number of valid output detections for each batch.\n\n Available since API level 29."]
ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
#[doc = " For input tensors x and y, computes x == y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_EQUAL = 48,
#[doc = " Computes exponential of x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_EXP = 49,
#[doc = " Inserts a dimension of 1 into a tensor's shape.\n\n Given a tensor input, this operation inserts a dimension of 1 at the\n given dimension index of input's shape. The dimension index starts at\n zero; if you specify a negative dimension index, it is counted backward\n from the end.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension\n index to expand. Must be in the range [-(n + 1), (n + 1)).\n\n Outputs:\n * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as\n input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_EXPAND_DIMS = 50,
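// A hedged sketch of the shape rule for ANEURALNETWORKS_EXPAND_DIMS: a 1 is
// inserted at the (possibly negative) dimension index, which the docs above
// constrain to [-(n + 1), (n + 1)). Illustrative only; range checking is
// omitted.
//
//     fn expand_dims_shape(shape: &[u32], axis: i32) -> Vec<u32> {
//         let n = shape.len() as i32;
//         let idx = if axis < 0 { axis + n + 1 } else { axis };
//         let mut out = shape.to_vec();
//         out.insert(idx as usize, 1);
//         out
//     }
//
// expand_dims_shape(&[3, 4], -1) yields vec![3, 4, 1].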
#[doc = " Gathers values along an axis.\n\n Produces an output tensor with shape\n input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]\n where:\n # Vector indices (output is rank(input0)).\n output[a_0, ..., a_n, i, b_0, ..., b_n] =\n input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]\n\n # Higher rank indices (output is rank(input0) + rank(indices) - 1).\n output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =\n input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor from which to gather values.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.\n Negative index is used to specify axis from the end\n (e.g. -1 for the last axis). Must be in the range [-n, n).\n * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.\n The values must be in the bounds of the corresponding dimensions\n of input0.\n\n Outputs:\n * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_GATHER = 51,
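// An illustrative special case (axis = 0, vector indices) of the gather rule
// documented above: output[i, ...] = input[indices[i], ...]. `gather_rows`
// is our name for the sketch, not an NNAPI symbol.
//
//     fn gather_rows(input: &[Vec<f32>], indices: &[usize]) -> Vec<Vec<f32>> {
//         indices.iter().map(|&i| input[i].clone()).collect()
//     }
//
// gather_rows(&[vec![1.0, 2.0], vec![3.0, 4.0]], &[1, 0, 1]) yields the rows
// [3.0, 4.0], [1.0, 2.0], [3.0, 4.0]; with higher-rank indices the output
// rank grows to rank(input0) + rank(indices) - 1 as described above.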
#[doc = " Generate aixs-aligned bounding box proposals.\n\n Bounding box proposals are generated by applying transformation on a set\n of predefined anchors with the bounding box deltas from bounding box\n regression. A final step of hard NMS is applied to limit the number of\n returned boxes.\n\n Axis-aligned bounding boxes are represented by its upper-left corner\n coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid\n bounding box should satisfy x1 <= x2 and y1 <= y2.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Inputs:\n * 0: A 4-D Tensor specifying the score of each anchor at each\n location. With \"NHWC\" data layout, the tensor shape is\n [batches, height, width, num_anchors]. With \"NCHW\" data layout,\n the tensor shape is [batches, num_anchors, height, width].\n * 1: A 4-D Tensor specifying the bounding box deltas. With \"NHWC\" data\n layout, the tensor shape is [batches, height, width, num_anchors * 4].\n With \"NCHW\" data layout, the tensor shape is\n [batches, num_anchors * 4, height, width]. The box deltas are encoded\n in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale\n relative correction factor for the center position of the bounding box\n with respect to the width and height, dw and dh is the log-scale\n relative correction factor for the width and height. The last\n dimensions is the channel dimension.\n * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each\n predefined anchor, with format [x1, y1, x2, y2]. For input0 of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of\n {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.\n * 3: A 2-D Tensor of shape [batches, 2], specifying the size of\n each image in the batch, with format [image_height, image_width].\n For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this\n tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with\n scale of 0.125.\n * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the height of original image to the height of feature map.\n * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the width of original image to the width of feature map.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum\n number of boxes before going into the hard NMS algorithm. Boxes\n with the lowest scores are discarded to meet the limit. Set to\n a non-positive value for unlimited number.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum\n number of boxes returning from the hard NMS algorithm. Boxes\n with the lowest scores are discarded to meet the limit. Set to\n a non-positive value for unlimited number.\n * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU\n threshold for hard NMS.\n * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with\n height or width lower than the absolute threshold are filtered out.\n * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and input1. 
Set to false for NHWC.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0, of shape\n [num_output_rois], specifying the score of each output box.\n The boxes are grouped by batches, but the sequential order in\n each batch is not guaranteed. For type of\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero\n point must be the same as input0.\n * 1: A tensor of the same {@link OperandCode} as input3, of shape\n [num_output_rois, 4], specifying the coordinates of each output\n bounding box for each class, with format [x1, y1, x2, y2].\n The sequential order of the boxes corresponds with output0.\n For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the\n scale must be 0.125 and the zero point must be 0.\n * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_output_rois], specifying the batch index of each box. Boxes\n with the same batch index are grouped together.\n\n Available since API level 29."]
ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
#[doc = " For input tensors x and y, computes x > y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_GREATER = 53,
#[doc = " For input tensors x and y, computes x >= y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_GREATER_EQUAL = 54,
#[doc = " Performs a grouped 2-D convolution operation.\n\n Given an input tensor of shape [batches, height, width, depth_in] and a\n filter tensor of shape [depth_out, filter_height, filter_width, depth_group]\n containing depth_out convolutional filters of depth depth_group, GROUPED_CONV\n applies a group of different filters to each input channel group, then\n concatenates the results together.\n\n Specifically, the input channels are divided into num_groups groups, each with\n depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional\n filters are also divided into num_groups groups, i.e. depth_out is divisible\n by num_groups. GROUPED_CONV applies each group of filters to the corresponding\n input channel group, and the result are concatenated together.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n The values in the output tensor are computed as:\n\n output[b, i, j, g * channel_multiplier + q] =\n sum_{di, dj, dk} (\n input[b, strides[1] * i + di, strides[2] * j + dj,\n g * depth_group + dk] *\n filter[g * channel_multiplier + q, di, dj, dk]\n ) + bias[channel]\n\n where channel_multiplier = depth_out / num_groups\n\n Supported tensor {@link OperandCode} configurations:\n * 16 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.\n\n * 32 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.\n\n * Quantized:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized signed (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized with symmetric per channel quantization for the filter:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n * Quantized signed with filter symmetric per channel quantization (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input, where depth_in = num_groups * depth_group.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_group], specifying\n the filter, where depth_out must be divisible by num_groups. 
For\n tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n the channel dimension (channelDim at\n {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale. For filter tensor\n of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias\n should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of\n 0 and bias_scale of 0. The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n groups.\n * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input, where depth_in = num_groups * depth_group.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_group], specifying\n the filter, where depth_out must be divisible by num_groups. For\n tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)\n must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint\n of 0 and bias_scale == input_scale * filter_scale. For filter tensor\n of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias\n should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of\n 0 and bias_scale of 0. 
The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n groups.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth_out].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_GROUPED_CONV_2D = 55,
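// A small sketch of the grouping arithmetic described above (illustrative
// names, not NNAPI symbols): output channel c belongs to group
// g = c / channel_multiplier, where channel_multiplier = depth_out / num_groups,
// and convolves only input channels [g * depth_group, (g + 1) * depth_group).
//
//     fn input_channel_range(
//         c: usize, depth_out: usize, num_groups: usize, depth_group: usize,
//     ) -> std::ops::Range<usize> {
//         let channel_multiplier = depth_out / num_groups;
//         let g = c / channel_multiplier;
//         g * depth_group..(g + 1) * depth_group
//     }
//
// E.g. with depth_out = 8, num_groups = 2, depth_group = 3 (depth_in = 6),
// output channel 5 falls in group 1 and reads input channels 3..6.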
#[doc = " Localize the maximum keypoints from heatmaps.\n\n This operation approximates the accurate maximum keypoint scores and\n indices after bicubic upscaling by using Taylor expansion up to the\n quadratic term.\n\n The bounding box is represented by its upper-left corner coordinate\n (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.\n A valid bounding box should satisfy x1 <= x2 and y1 <= y2.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Inputs:\n * 0: A 4-D Tensor of shape\n [num_boxes, heatmap_size, heatmap_size, num_keypoints],\n specifying the heatmaps, the height and width of heatmaps should\n be the same, and must be greater than or equal to 2.\n * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,\n each with format [x1, y1, x2, y2]. For input0 of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should\n be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint\n of 0 and scale of 0.125.\n For input0 of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor\n should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with\n zeroPoint of -128 and scale of 0.125.\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0. Set to false for NHWC.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0, with shape\n [num_boxes, num_keypoints], specifying score of the keypoints.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from input0 scale and zeroPoint.\n * 1: A tensor of the same {@link OperandCode} as input1, with shape\n [num_boxes, num_keypoints, 2], specifying the location of\n the keypoints, the second dimension is organized as\n [keypoint_x, keypoint_y].\n For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the\n scale must be 0.125 and the zero point must be 0.\n\n Available since API level 29."]
ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
#[doc = " Applies instance normalization to the input tensor.\n\n The values in the output tensor are computed as:\n\n output[b, h, w, c] =\n (input[b, h, w, c] - mean[b, c]) * gamma /\n sqrt(var[b, c] + epsilon) + beta\n\n Where the mean and variance are computed across the spatial dimensions:\n\n mean[b, c] =\n sum_{h, w}(input[b, h, w, c]) / sum(1)\n\n var[b, c] =\n sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be normalized.\n * 1: A scalar, specifying gamma, the scale applied to the normalized\n tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if\n input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n * 2: A scalar, specifying beta, the offset applied to the normalized\n tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if\n input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n * 3: A scalar, specifying epsilon, the small value added to variance to\n avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if\n input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} if input0 is of\n {@link ANEURALNETWORKS_TENSOR_FLOAT32}.\n * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} and same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
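// A hedged sketch of the per-(batch, channel) normalization defined above,
// applied to one spatial slice (the height*width values of a single
// batch/channel pair); `instance_norm_slice` is an illustrative name.
//
//     fn instance_norm_slice(x: &[f32], gamma: f32, beta: f32, epsilon: f32) -> Vec<f32> {
//         let n = x.len() as f32;
//         let mean = x.iter().sum::<f32>() / n;
//         let var = x.iter().map(|v| (v - mean).powi(2)).sum::<f32>() / n;
//         x.iter()
//             .map(|v| (v - mean) * gamma / (var + epsilon).sqrt() + beta)
//             .collect()
//     }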
#[doc = " For input tensors x and y, computes x < y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_LESS = 58,
#[doc = " For input tensors x and y, computes x <= y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_LESS_EQUAL = 59,
#[doc = " Computes natural logarithm of x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_LOG = 60,
#[doc = " Returns the truth value of x AND y element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions\n compatible with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_LOGICAL_AND = 61,
#[doc = " Computes the truth value of NOT x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_LOGICAL_NOT = 62,
#[doc = " Returns the truth value of x OR y element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions\n compatible with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_LOGICAL_OR = 63,
#[doc = " Computes the log softmax activations given logits.\n\n The output is calculated using this formula:\n\n output = logits * beta - log(reduce_sum(exp(logits * beta), axis))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor specifying the input logits.\n * 1: A scalar, specifying the positive scaling factor for the exponent,\n beta.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta\n value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta\n value must be of {@link ANEURALNETWORKS_FLOAT32}.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to\n reduce across. Negative index is used to specify axis from the\n end (e.g. -1 for the last axis). Must be in the range [-n, n).\n\n Outputs:\n * 0: The output tensor of the same {@link OperandCode} and shape as\n input0.\n\n Available since API level 29."]
ANEURALNETWORKS_LOG_SOFTMAX = 64,
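// Illustrative sketch (not from the NNAPI header): the log-softmax formula
// above for a rank-1 slice (axis = -1 reduces over the whole slice). Written
// for clarity, not numerical stability; a real kernel subtracts the maximum
// before exponentiating.
//
// fn log_softmax(logits: &[f32], beta: f32) -> Vec<f32> {
//     let sum: f32 = logits.iter().map(|&x| (x * beta).exp()).sum();
//     let log_sum = sum.ln(); // log(reduce_sum(exp(logits * beta), axis))
//     logits.iter().map(|&x| x * beta - log_sum).collect()
// }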
#[doc = " Returns the element-wise maximum of two tensors.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and compatible dimensions\n with input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scales and zeroPoint can be different from input0 scale and zeroPoint.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_MAXIMUM = 65,
#[doc = " Returns the element-wise minimum of two tensors.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and compatible dimensions\n with input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scales and zeroPoint can be different from input0 scale and zeroPoint.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_MINIMUM = 66,
#[doc = " Computes numerical negative value element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_NEG = 67,
#[doc = " For input tensors x and y, computes x != y elementwise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n This operation supports broadcasting.\n\n Inputs:\n * 0: A tensor.\n * 1: A tensor of the same {@link OperandCode} and dimensions compatible\n with input0.\n\n Outputs:\n * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.\n\n Available since API level 29."]
ANEURALNETWORKS_NOT_EQUAL = 68,
#[doc = " Pads a tensor with the given constant value according to the specified\n paddings.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor, specifying the tensor to be padded.\n * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings\n for each spatial dimension of the input tensor. The shape of the\n tensor must be {rank(input0), 2}.\n padding[i, 0] specifies the number of elements to be padded in the\n front of dimension i.\n padding[i, 1] specifies the number of elements to be padded after\n the end of dimension i.\n * 2: An scalar specifying the value to use for padding input0.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the\n pad value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the\n pad value must be of {@link ANEURALNETWORKS_FLOAT32}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the pad value must be of {@link ANEURALNETWORKS_INT32}. The\n scale and zeroPoint are assumed to be the same as in input0.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0. The\n output tensor has the same rank as input0, and each\n dimension of the output tensor has the same size as the\n corresponding dimension of the input tensor plus the size\n of the padding:\n output0.dimension[i] =\n padding[i, 0] + input0.dimension[i] + padding[i, 1]\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_PAD_V2 = 69,
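// Illustrative sketch (not from the NNAPI header): the output-shape rule
// quoted above, output0.dimension[i] = padding[i, 0] + input0.dimension[i] +
// padding[i, 1].
//
// fn pad_v2_output_shape(input: &[u32], padding: &[[u32; 2]]) -> Vec<u32> {
//     input.iter().zip(padding).map(|(&d, p)| p[0] + d + p[1]).collect()
// }
// // e.g. shape [2, 3] with paddings [[1, 1], [0, 2]] -> [4, 5]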
#[doc = " Computes the power of one value to another.\n\n Given a tensor base and a tensor exponent, this operation computes\n base^exponent elementwise.\n\n This operations supports broadcasting. The size of the output is the\n maximum size along each dimension of the input operands. It starts with\n the trailing dimensions, and works its way forward.\n\n For example:\n base.dimension = {4, 1, 2}\n exponent.dimension = {5, 4, 3, 1}\n output.dimension = {5, 4, 3, 2}\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: A tensor specifying the base.\n * 1: A tensor specifying the exponent.\n\n Outputs:\n * 0: An output tensor.\n\n Available since API level 29."]
ANEURALNETWORKS_POW = 70,
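// Illustrative sketch (not from the NNAPI header): the broadcast-shape rule
// described above, matching dimensions from the trailing end; a missing
// dimension counts as 1, and dimensions that are neither equal nor 1 yield
// None.
//
// fn broadcast_shape(a: &[u32], b: &[u32]) -> Option<Vec<u32>> {
//     let n = a.len().max(b.len());
//     let dims: Option<Vec<u32>> = (0..n)
//         .map(|i| {
//             let x = if i < a.len() { a[a.len() - 1 - i] } else { 1 };
//             let y = if i < b.len() { b[b.len() - 1 - i] } else { 1 };
//             if x == y || y == 1 { Some(x) } else if x == 1 { Some(y) } else { None }
//         })
//         .collect();
//     dims.map(|mut v| { v.reverse(); v })
// }
// // broadcast_shape(&[4, 1, 2], &[5, 4, 3, 1]) == Some(vec![5, 4, 3, 2])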
#[doc = " Parametric Rectified Linear Unit.\n\n It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha\n is a learned array with the same {@link OperandCode} and compatible\n dimensions as input x.\n\n Two dimensions are compatible when:\n 1. they are equal, or\n 2. one of them is 1\n\n The size of the output is the maximum size along each dimension of the\n input operands. It starts with the trailing dimensions, and works its way\n forward.\n\n Example:\n input.dimension = {4, 1, 2}\n alpha.dimension = {5, 4, 3, 1}\n output.dimension = {5, 4, 3, 2}\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: A tensor, specifying the input.\n * 1: A tensor of the same {@link OperandCode}, and compatible dimensions\n as input0, specifying the alpha.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scales and zeroPoint can be different from input0 scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_PRELU = 71,
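// Illustrative sketch (not from the NNAPI header): the PReLU formula above,
// elementwise over equally shaped slices (broadcasting of `alpha` elided).
//
// fn prelu(x: &[f32], alpha: &[f32]) -> Vec<f32> {
//     x.iter()
//         .zip(alpha)
//         .map(|(&v, &a)| if v < 0.0 { a * v } else { v }) // f(x) = alpha*x | x
//         .collect()
// }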
#[doc = " Quantizes the input tensor.\n\n The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:\n\n output = max(0, min(255, round(input / scale) + zeroPoint)\n\n The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output\n tensor is:\n\n output = max(-128, min(127, round(input / scale) + zeroPoint)\n\n Supported input tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported output tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: A tensor, may be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape as input0, but with\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or.\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.\n\n Available since API level 29."]
ANEURALNETWORKS_QUANTIZE = 72,
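// Illustrative sketch (not from the NNAPI header): the two quantization
// formulas above, with `scale` and `zero_point` taken from the output
// operand's quantization parameters.
//
// fn quantize_u8(x: f32, scale: f32, zero_point: i32) -> u8 {
//     ((x / scale).round() as i32 + zero_point).clamp(0, 255) as u8
// }
//
// fn quantize_i8(x: f32, scale: f32, zero_point: i32) -> i8 {
//     ((x / scale).round() as i32 + zero_point).clamp(-128, 127) as i8
// }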
#[doc = " A version of quantized LSTM, using 16 bit quantization for internal\n state.\n\n There is no projection layer, so cell state size is equal to the output\n size.\n\n Inputs:\n * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [numBatches, inputSize] specifying the input to the LSTM\n cell. Tensor is quantized with a fixed quantization range of\n [-1, 127/128] (scale = 1/128, zeroPoint = 128).\n * 1: The input-to-input weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, inputSize] specifying input-to-input part of\n weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 2: The input-to-forget weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, inputSize] specifying input-to-forget part of\n weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 3: The input-to-cell weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, inputSize] specifying input-to-cell part of\n weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 4: The input-to-output weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, inputSize] specifying input-to-output part of\n weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 5: The recurrent-to-input weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, outputSize] specifying recurrent-to-input part\n of weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 6: The recurrent-to-forget weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, outputSize] specifying recurrent-to-forget\n part of weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 7: The recurrent-to-cell weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, outputSize] specifying recurrent-to-cell part\n of weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 8: The recurrent-to-output weights.\n A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [outputSize, outputSize] specifying recurrent-to-output\n part of weights for fully-connected layer inside the LSTM cell.\n Quantization zero point and scale must be the same across all the\n weights.\n * 9: The input gate bias.\n A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape\n [outputSize] specifying the bias for the fully-connected layer\n inside the LSTM cell. Bias is quantized with scale being a product\n of input and weights scales and zeroPoint equal to 0.\n * 10:The forget gate bias.\n A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape\n [outputSize] specifying the bias for the fully-connected layer\n inside the LSTM cell. 
Bias is quantized with scale being a product\n of input and weights scales and zeroPoint equal to 0.\n * 11:The cell bias.\n A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape\n [outputSize] specifying the bias for the fully-connected layer\n inside the LSTM cell. Bias is quantized with scale being a product\n of input and weights scales and zeroPoint equal to 0.\n * 12:The output gate bias.\n A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape\n [outputSize] specifying the bias for the fully-connected layer\n inside the LSTM cell. Bias is quantized with scale being a product\n of input and weights scales and zeroPoint equal to 0.\n * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n and shape [numBatches, outputSize] specifying the cell state from the\n previous time step of the LSTM cell. It is quantized using a\n quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /\n 32768, zeroPoint = 0).\n * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [numBathes, outputSize] specifying the output of the LSTM\n cell from previous time-step. Tensor is quantized with a fixed\n quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =\n 128).\n\n\n Outputs:\n * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n and shape [numBatches, outputSize] which contains a cell state from\n the current time step. Tensor is quantized using a quantization\n range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =\n 0).\n * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and shape [numBathes, outputSize] which contains the output value.\n Tensor is quantized with a fixed quantization range of [-1, 127/128]\n (scale = 1/128, zeroPoint = 128)."]
ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
#[doc = " Draws samples from a multinomial distribution.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Inputs:\n * 0: A 2-D tensor with shape [batches, classes], specifying the\n unnormalized log-probabilities for all classes.\n * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of\n independent samples to draw for each row slice.\n * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],\n specifying seeds used to initialize the random distribution. If both\n provided seeds are 0, both will be randomly generated.\n Outputs:\n * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape\n [batches, samples], containing the drawn samples.\n\n Available since API level 29."]
ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
#[doc = " Reduces a tensor by computing the \"logical and\" of elements along given\n dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_ALL = 75,
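// Illustrative sketch (not from the NNAPI header): the output shape produced
// by this and the five REDUCE_* operations that follow, given the (already
// non-negative) dimensions to reduce and the keep_dims flag.
//
// fn reduce_output_shape(input: &[u32], dims: &[usize], keep_dims: bool) -> Vec<u32> {
//     let out: Vec<u32> = input
//         .iter()
//         .enumerate()
//         .filter_map(|(i, &d)| match (dims.contains(&i), keep_dims) {
//             (true, true) => Some(1),   // reduced dimension retained as 1
//             (true, false) => None,     // reduced dimension dropped
//             (false, _) => Some(d),
//         })
//         .collect();
//     if out.is_empty() { vec![1] } else { out } // all-reduced => shape [1]
// }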
#[doc = " Reduces a tensor by computing the \"logical or\" of elements along given\n dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_ANY = 76,
#[doc = " Reduces a tensor by computing the maximum of elements along given\n dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_MAX = 77,
#[doc = " Reduces a tensor by computing the minimum of elements along given\n dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_MIN = 78,
#[doc = " Reduces a tensor by multiplying elements along given dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_PROD = 79,
#[doc = " Reduces a tensor by summing elements along given dimensions.\n\n If keep_dims is true, the reduced dimensions are\n retained with length 1. Otherwise, the rank of the tensor is reduced by\n 1 for each entry in dimensions.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: up to 4\n\n Inputs:\n * 0: An n-D tensor.\n * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions\n to reduce. Dimension values must be in the range [-n, n).\n * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,\n retains reduced dimensions with length 1.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0.\n If all dimensions are reduced and keep_dims is false, the output\n shape is [1].\n\n Available since API level 29."]
ANEURALNETWORKS_REDUCE_SUM = 80,
#[doc = " Select and scale the feature map of each region of interest to a unified\n output size by average pooling sampling points from bilinear interpolation.\n\n The region of interest is represented by its upper-left corner coordinate\n (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.\n A spatial scaling factor is applied to map into feature map coordinate.\n A valid region of interest should satisfy x1 <= x2 and y1 <= y2.\n\n No rounding is applied in this operation. The sampling points are unified\n distributed in the pooling bin and their values are calculated by bilinear\n interpolation.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Inputs:\n * 0: A 4-D tensor, specifying the feature map.\n * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of\n the regions of interest, each line with format [x1, y1, x2, y2].\n For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},\n this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},\n with zeroPoint of 0 and scale of 0.125. Zero num_rois is\n supported for this tensor.\n * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_rois], specifying the batch index of each box. Boxes with\n the same batch index are grouped together. Zero num_rois is\n supported for this tensor.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n height of the output tensor.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n width of the output tensor.\n * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the height of original image to the height of feature map.\n * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the width of original image to the width of feature map.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n sampling points in height dimension used to compute the output.\n Set to 0 for adaptive value of ceil(roi_height/out_height).\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n sampling points in width dimension used to compute the output.\n Set to 0 for adaptive value of ceil(roi_width/out_width).\n * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0. The output\n shape is [num_rois, out_height, out_width, depth].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from the input0 scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_ROI_ALIGN = 81,
#[doc = " Select and scale the feature map of each region of interest to a unified\n output size by max-pooling.\n\n The region of interest is represented by its upper-left corner coordinate\n (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.\n A spatial scaling factor is applied to map into feature map coordinate.\n A valid region of interest should satisfy x1 <= x2 and y1 <= y2.\n\n Rounding is applied in this operation to ensure integer boundary for\n regions of interest and pooling bins.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Inputs:\n * 0: A 4-D tensor, specifying the feature map.\n * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of\n the regions of interest, each line with format [x1, y1, x2, y2].\n For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},\n with zeroPoint of 0 and scale of 0.125.\n * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape\n [num_rois], specifying the batch index of each box. Boxes with\n the same batch index are grouped together.\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n height of the output tensor.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n width of the output tensor.\n * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the height of original image to the height of feature map.\n * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio\n from the width of original image to the width of feature map.\n * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Outputs:\n * 0: A tensor of the same {@link OperandCode} as input0. The output\n shape is [num_rois, out_height, out_width, depth].\n For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_ROI_POOLING = 82,
#[doc = " Computes reciprocal of square root of x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_RSQRT = 83,
#[doc = " Using a tensor of booleans c and input tensors x and y select values\n elementwise from both input tensors:\n\n O[i] = C[i] ? x[i] : y[i].\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a\n mask that chooses, based on the value at each element, whether the\n corresponding element in the output should be taken from input1 (if\n true) or input2 (if false).\n * 1: An input tensor of the same shape as input0.\n * 2: An input tensor of the same shape and type as input1.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scales and zeroPoint can be different from input1 scale and zeroPoint.\n\n Outputs:\n * 0: A tensor of the same type and shape as input1 and input2.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_SELECT = 84,
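// Illustrative sketch (not from the NNAPI header): the elementwise selection
// above, output[i] = c[i] ? x[i] : y[i], over equally shaped slices.
//
// fn select(c: &[bool], x: &[f32], y: &[f32]) -> Vec<f32> {
//     c.iter()
//         .zip(x.iter().zip(y))
//         .map(|(&cond, (&a, &b))| if cond { a } else { b })
//         .collect()
// }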
#[doc = " Computes sin of x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_SIN = 85,
#[doc = " Extracts a slice of specified size from the input tensor starting at a\n specified location.\n\n The starting location is specified as a 1-D tensor containing offsets\n for each dimension. The size is specified as a 1-D tensor containing\n either size of a slice along corresponding dimension or -1. In the latter\n case, all the remaining elements in dimension are included in the slice.\n\n A sum of begin offset and a size of a slice must not exceed size of a\n corresponding dimension.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor to take slice from, may be zero-sized.\n * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying\n the beginning indices of the slice in each dimension.\n * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying\n the size of the slice in each dimension.\n\n Outputs:\n * 0: An n-D tensor of the same type as the input containing the slice.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n its scale and zeroPoint has to be same as the input0 scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_SLICE = 86,
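// Illustrative sketch (not from the NNAPI header): resolving the size operand,
// where -1 means "all remaining elements in the dimension", and checking the
// constraint begin + size <= dim stated above.
//
// fn resolve_slice_sizes(dims: &[i32], begin: &[i32], size: &[i32]) -> Option<Vec<i32>> {
//     dims.iter()
//         .zip(begin.iter().zip(size))
//         .map(|(&d, (&b, &s))| {
//             let s = if s == -1 { d - b } else { s }; // -1: slice to the end
//             if b >= 0 && s >= 0 && b + s <= d { Some(s) } else { None }
//         })
//         .collect()
// }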
#[doc = " Splits a tensor along a given axis into num_splits subtensors.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: An n-D tensor to split.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along\n which to split.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of\n splits along given axis. Must evenly divide axis size.\n\n Outputs:\n * 0 ~ (num_splits - 1): Resulting subtensors.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_SPLIT = 87,
#[doc = " Computes square root of x element-wise.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor.\n\n Outputs:\n * 0: The output tensor of same shape as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_SQRT = 88,
#[doc = " Constructs a tensor by tiling a given tensor.\n\n This operation creates a new tensor by replicating `input` `multiples`\n times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`\n elements, and the values of `input` are replicated `multiples[i]` times\n along the i-th dimension.\n For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: input, an n-D tensor specifying the input.\n * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.\n The length of multiples must be n.\n\n Outputs:\n * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_TILE = 89,
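// Illustrative sketch (not from the NNAPI header): the 1-D case from the doc
// above; tiling [a, b, c, d] by [2] yields [a, b, c, d, a, b, c, d].
//
// fn tile_1d<T: Clone>(input: &[T], multiple: usize) -> Vec<T> {
//     input.iter().cloned().cycle().take(input.len() * multiple).collect()
// }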
#[doc = " Finds values and indices of the k largest entries for the last dimension.\n\n Resulting values in each dimensions are sorted in descending order. If\n two values are equal, the one with larger index appears first.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: from 1\n\n Inputs:\n * 0: input, an n-D tensor specifying the input.\n * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of\n top elements to look for along the last dimension.\n\n Outputs:\n * 0: An n-D tensor of the same type as the input, containing the k\n largest elements along each last dimensional slice.\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}\n containing the indices of values within the last dimension of input.\n\n Available since API level 29."]
ANEURALNETWORKS_TOPK_V2 = 90,
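// Illustrative sketch (not from the NNAPI header): top-k over one 1-D slice,
// returning (values, indices) in descending value order with the larger index
// first on ties, as specified above. Assumes no NaNs in the slice.
//
// fn top_k(slice: &[f32], k: usize) -> (Vec<f32>, Vec<i32>) {
//     let mut idx: Vec<usize> = (0..slice.len()).collect();
//     idx.sort_by(|&a, &b| {
//         slice[b].partial_cmp(&slice[a]).unwrap().then(b.cmp(&a))
//     });
//     idx.truncate(k);
//     (idx.iter().map(|&i| slice[i]).collect(),
//      idx.iter().map(|&i| i as i32).collect())
// }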
#[doc = " Performs the transpose of 2-D convolution operation.\n\n This operation is sometimes called \"deconvolution\" after Deconvolutional\n Networks, but is actually the transpose (gradient) of\n {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.\n\n The output dimensions are functions of the filter dimensions, stride, and\n padding.\n\n Supported tensor {@link OperandCode} configurations:\n * 16 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.\n\n * 32 bit floating point:\n * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.\n\n * Quantized:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized with symmetric per channel quantization for the filter:\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Available since API level 30:\n * Quantized signed (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to\n * * input.scale * filter.scale).\n\n * Quantized signed with filter symmetric per channel quantization (since API level 30):\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.\n * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.\n * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,\n * * each value scaling is separate and equal to input.scale * filter.scales[channel]).\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Both explicit padding and implicit padding are supported.\n\n Inputs (explicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_in], specifying the\n filter. For tensor of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel\n dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the\n same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},\n with zeroPoint of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. 
The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the left, in the ‘width’ dimension.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the right, in the ‘width’ dimension.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the top, in the ‘height’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on\n the bottom, in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Inputs (implicit padding):\n * 0: A 4-D tensor, of shape [batches, height, width, depth_in],\n specifying the input.\n Since API level 29, zero batches is supported for this tensor.\n * 1: A 4-D tensor, of shape\n [depth_out, filter_height, filter_width, depth_in], specifying the\n filter. For tensor of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel\n dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.\n * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input\n tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or\n {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the\n same type.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},\n the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},\n with zeroPoint of 0 and bias_scale == input_scale * filter_scale.\n For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},\n the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0\n and bias_scale of 0. The actual scale of each value 'i' is equal to\n bias_scale[i] = input_scale * filter_scale[i].\n * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output\n tensor shape.\n * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit\n padding scheme, has to be one of the\n {@link PaddingCode} values.\n * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘width’ dimension.\n * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when\n walking through input in the ‘height’ dimension.\n * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the\n {@link FuseCode} values. Specifies the activation to\n invoke on the result.\n * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify\n NCHW data layout for input0 and output0. Set to false for NHWC.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, out_height, out_width, depth_out].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint can be different from inputs' scale and zeroPoint.\n\n Available since API level 29."]
ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
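// Illustrative sketch, stated as an assumption: the header only says the
// output dimensions are functions of the filter dimensions, stride, and
// padding. For explicit padding, the standard transposed-convolution
// relationship per spatial axis is:
//
// fn transpose_conv_out_size(input: i32, filter: i32, stride: i32,
//                            pad_begin: i32, pad_end: i32) -> i32 {
//     (input - 1) * stride + filter - pad_begin - pad_end
// }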
#[doc = " A recurrent neural network specified by an LSTM cell.\n\n Performs (fully) dynamic unrolling of input.\n\n This Op unrolls the input along the time dimension, and implements the\n following operation for each element in the sequence\n s = 1...sequence_length:\n outputs[s] = projection(state = activation(LSTMOp(inputs[s])))\n\n Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},\n the \"projection\" is an optional projection layer from state and output\n and the “activation” is the function passed as the\n “fused_activation_function” argument (if not “NONE”).\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: 3, either time-major or batch-major.\n\n All input and output tensors must be of the same type.\n\n Inputs:\n * 0: The input (\\f$x_t\\f$).\n A 3-D tensor of shape:\n If time-major: [max_time, batch_size, input_size]\n If batch-major: [batch_size, max_time, input_size]\n where “max_time” is the number of timesteps (sequence length),\n “batch_size” corresponds to the batching dimension, and\n “input_size” is the size of the input.\n * 1: The input-to-input weights (\\f$W_{xi}\\f$). Optional.\n A 2-D tensor of shape [num_units, input_size], where “num_units”\n corresponds to the number of cell units.\n * 2: The input-to-forget weights (\\f$W_{xf}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 3: The input-to-cell weights (\\f$W_{xc}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 4: The input-to-output weights (\\f$W_{xo}\\f$).\n A 2-D tensor of shape [num_units, input_size].\n * 5: The recurrent-to-input weights (\\f$W_{hi}\\f$). Optional.\n A 2-D tensor of shape [num_units, output_size], where “output_size”\n corresponds to either the number of cell units (i.e., “num_units”),\n or the second dimension of the “projection_weights”, if defined.\n * 6: The recurrent-to-forget weights (\\f$W_{hf}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 7: The recurrent-to-cell weights (\\f$W_{hc}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 8: The recurrent-to-output weights (\\f$W_{ho}\\f$).\n A 2-D tensor of shape [num_units, output_size].\n * 9: The cell-to-input weights (\\f$W_{ci}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 10:The cell-to-forget weights (\\f$W_{cf}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 11:The cell-to-output weights (\\f$W_{co}\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 12:The input gate bias (\\f$b_i\\f$). Optional.\n A 1-D tensor of shape [num_units].\n * 13:The forget gate bias (\\f$b_f\\f$).\n A 1-D tensor of shape [num_units].\n * 14:The cell bias (\\f$b_c\\f$).\n A 1-D tensor of shape [num_units].\n * 15:The output gate bias (\\f$b_o\\f$).\n A 1-D tensor of shape [num_units].\n * 16:The projection weights (\\f$W_{proj}\\f$). Optional.\n A 2-D tensor of shape [output_size, num_units].\n * 17:The projection bias (\\f$b_{proj}\\f$). 
Optional.\n A 1-D tensor of shape [output_size].\n * 18:The output state (in) (\\f$h_{t-1}\\f$).\n A 2-D tensor of shape [batch_size, output_size].\n * 19:The cell state (in) (\\f$C_{t-1}\\f$).\n A 2-D tensor of shape [batch_size, num_units].\n * 20:The activation function (\\f$g\\f$).\n A value indicating the activation function:\n <ul>\n <li>0: None;\n <li>1: Relu;\n <li>3: Relu6;\n <li>4: Tanh;\n <li>6: Sigmoid.\n </ul>\n * 21:The clipping threshold (\\f$t_{cell}\\f$) for the cell state, such\n that values are bound within [-cell_clip, cell_clip]. If set to 0.0\n then clipping is disabled.\n * 22:The clipping threshold (\\f$t_{proj}\\f$) for the output from the\n projection layer, such that values are bound within\n [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.\n * 23:Time-major if true, batch-major if false.\n * 24:The input layer normalization weights. Optional.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at input gate.\n * 25:The forget layer normalization weights. Optional.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at forget gate.\n * 26:The cell layer normalization weights. Optional.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at cell gate.\n * 27:The output layer normalization weights. Optional.\n A 1-D tensor of shape [num_units]. Used to rescale normalized inputs\n to activation at output gate.\n\n Outputs:\n * 0: The output (\\f$o_t\\f$).\n A 3-D tensor of shape:\n If time-major: [max_time, batch_size, output_size]\n If batch-major: [batch_size, max_time, output_size]\n * 1: A tensor of shape [batch_size, output_size] containing a hidden\n state from the last time step in the sequence. This output is\n optional and can be omitted. If this output is present then\n output #2 must be present as well.\n Available since API level 30.\n * 2: A tensor of shape [batch_size, cell_size] containing a cell state\n from the last time step in the sequence. This output is optional\n and can be omitted.\n Available since API level 30.\n\n Available since API level 29.\n\n Important: As of API level 29, there is no way to get the output state tensors out and NNAPI\n does not maintain internal states. This operator does not support the usage pattern in which\n multiple cells are chained and state tensors are propagated."]
ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
#[doc = " A recurrent neural network layer that applies a basic RNN cell to a\n sequence of inputs.\n\n This layer unrolls the input along the sequence dimension, and implements\n the following operation\n for each element in the sequence s = 1...sequence_length:\n outputs[s] = state = activation(inputs[s] * input_weights’ + state *\n recurrent_weights’ + bias)\n\n Where:\n * “input_weights” is a weight matrix that multiplies the inputs;\n * “recurrent_weights” is a weight matrix that multiplies the current\n “state” which itself is the output from the previous time step\n computation;\n * “bias” is a bias vector (added to each output vector in the batch);\n * “activation” is the function passed as the “fused_activation_function”\n argument (if not “NONE”).\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n The input tensors must all be the same type.\n\n Inputs:\n * 0: input.\n A 3-D tensor. The shape is defined by the input 6 (timeMajor). If\n it is set to 1, then the input has a shape [maxTime, batchSize,\n inputSize], otherwise the input has a shape [batchSize, maxTime,\n inputSize].\n * 1: weights.\n A 2-D tensor of shape [numUnits, inputSize].\n * 2: recurrent_weights.\n A 2-D tensor of shape [numUnits, numUnits].\n * 3: bias.\n A 1-D tensor of shape [numUnits].\n * 4: hidden state\n A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden\n state input for the first time step of the computation.\n * 5: fusedActivationFunction.\n A {@link FuseCode} value indicating the activation function. If\n “NONE” is specified then it results in a linear activation.\n * 6: timeMajor\n An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format\n of input and output tensors. Must be set to either 0 or 1.\n Outputs:\n * 0: output.\n A 3-D tensor. The shape is defined by the input 6 (timeMajor). If\n it is set to 1, then the output has a shape [maxTime, batchSize,\n numUnits], otherwise the output has a shape [batchSize, maxTime,\n numUnits].\n * 1: A tensor of shape [batchSize, numUnits] containing hidden state\n from the last time step in the sequence. This output is optional\n and can be omitted.\n Available since API level 30.\n\n Available since API level 29.\n\n Important: As of API level 29, there is no way to get the output state tensors out and NNAPI\n does not maintain internal states. This operator does not support the usage pattern in which\n multiple cells are chained and state tensors are propagated."]
ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
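// Illustrative sketch (not from the NNAPI header): one step of the recurrence
// above for a single batch element. `w` and `u` are hypothetical row-major
// weight matrices ([numUnits][inputSize] and [numUnits][numUnits]).
//
// fn rnn_step(input: &[f32], state: &[f32], w: &[Vec<f32>], u: &[Vec<f32>],
//             bias: &[f32], activation: impl Fn(f32) -> f32) -> Vec<f32> {
//     (0..bias.len())
//         .map(|j| {
//             let x: f32 = w[j].iter().zip(input).map(|(a, b)| a * b).sum();
//             let h: f32 = u[j].iter().zip(state).map(|(a, b)| a * b).sum();
//             // state = activation(input*W' + state*U' + bias)
//             activation(x + h + bias[j])
//         })
//         .collect()
// }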
#[doc = " Resizes images to given size using the nearest neighbor interpretation.\n\n Resized images must be distorted if their output aspect ratio is not the\n same as input aspect ratio. The corner pixels of output may not be the\n same as corner pixels of input.\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)\n\n Supported tensor rank: 4, with \"NHWC\" or \"NCHW\" data layout.\n With the default data layout NHWC, the data is stored in the order of:\n [batch, height, width, channels]. Alternatively, the data layout could\n be NCHW, the data storage order of: [batch, channels, height, width].\n\n Both resizing by shape and resizing by scale are supported.\n\n Inputs (resizing by shape):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input. Zero batches is supported for this tensor.\n * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n width of the output tensor.\n * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output\n height of the output tensor.\n * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the centers of the 4 corner\n pixels of the input and output tensors are aligned, preserving the\n values at the corner pixels.\n Available since API level 30.\n * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the pixel centers are assumed to\n be at (0.5, 0.5). This is the default behavior of image.resize in\n TF 2.0. If this parameter is True, then align_corners parameter\n must be False.\n Available since API level 30.\n\n Inputs (resizing by scale):\n * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying\n the input. Zero batches is supported for this tensor.\n * 1: A scalar, specifying width_scale, the scaling factor of the width\n dimension from the input tensor to the output tensor. The output\n width is calculated as new_width = floor(width * width_scale).\n The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is\n of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} otherwise.\n * 2: A scalar, specifying height_scale, the scaling factor of the height\n dimension from the input tensor to the output tensor. The output\n height is calculated as new_height = floor(height * height_scale).\n The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is\n of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of\n {@link ANEURALNETWORKS_FLOAT32} otherwise.\n * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.\n Set to true to specify NCHW data layout for input0 and output0.\n * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the centers of the 4 corner\n pixels of the input and output tensors are aligned, preserving the\n values at the corner pixels.\n Available since API level 30.\n * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}\n scalar, default to false. If True, the pixel centers are assumed to\n be at (0.5, 0.5). This is the default behavior of image.resize in\n TF 2.0. 
If this parameter is True, then align_corners parameter\n must be False.\n Available since API level 30.\n\n Outputs:\n * 0: The output 4-D tensor, of shape\n [batches, new_height, new_width, depth].\n For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and\n {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,\n the scale and zeroPoint must be the same as input0.\n\n Available since API level 29."]
ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
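// Illustrative sketch (not from the NNAPI header): the output extent rule for
// resizing by scale quoted above, new_extent = floor(extent * scale).
//
// fn resized_extent(extent: u32, scale: f32) -> u32 {
//     (extent as f32 * scale).floor() as u32
// }
// // e.g. width 10 with width_scale 1.5 -> new_width 15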
#[doc = " Quantized version of {@link ANEURALNETWORKS_LSTM}.\n\n The input and the output use asymmetric quantized types, while the rest\n use symmetric ones.\n\n Inputs:\n * 0: The input to the LSTM cell.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n Shape: [batchSize, inputSize]\n * 1: The input-to-input weights. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, inputSize]\n * 2: The input-to-forget weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, inputSize]\n * 3: The input-to-cell weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, inputSize]\n * 4: The input-to-output weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, inputSize]\n * 5: The recurrent-to-input weights. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, outputSize]\n * 6: The recurrent-to-forget weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, outputSize]\n * 7: The recurrent-to-cell weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, outputSize]\n * 8: The recurrent-to-output weights.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [numUnits, outputSize]\n * 9: The cell-to-input weights (for peephole). Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 10: The cell-to-forget weights (for peephole). Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 11: The cell-to-output weights (for peephole). Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 12: The input gate bias. Quantized with scale being the\n product of input and weights scales and zeroPoint equal to 0.\n Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_INT32}\n Shape: [numUnits]\n * 13: The forget gate bias. Quantized with scale being the\n product of input and weights scales and zeroPoint equal to 0.\n Type: {@link ANEURALNETWORKS_TENSOR_INT32}\n Shape: [numUnits]\n * 14: The cell bias. Quantized with scale being the\n product of input and weights scales and zeroPoint equal to 0.\n Type: {@link ANEURALNETWORKS_TENSOR_INT32}\n Shape: [numUnits]\n * 15: The output gate bias. Quantized with scale being the\n product of input and weights scales and zeroPoint equal to 0.\n Type: {@link ANEURALNETWORKS_TENSOR_INT32}\n Shape: [numUnits]\n * 16: The projection weights. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n Shape: [outputSize, numUnits]\n * 17: The projection bias. Quantized with scale being the\n product of input and weights scales and zeroPoint equal to 0.\n Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_INT32}\n Shape: [outputSize]\n * 18: The output from the previous time step.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n Shape: [batchSize, outputSize]\n * 19: The cell state from the previous time step.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [batchSize, numUnits]\n * 20: The input layer normalization weights. Used to rescale\n normalized inputs to activation at input gate. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 21: The forget layer normalization weights. Used to\n rescale normalized inputs to activation at forget gate. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 22: The cell layer normalization weights. Used to rescale\n normalized inputs to activation at cell gate. 
Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 23: The output layer normalization weights. Used to\n rescale normalized inputs to activation at output gate. Optional.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [numUnits]\n * 24: The cell clip. If provided the cell state is clipped\n by this value prior to the cell output activation. Optional.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 25: The projection clip. If provided and projection is enabled,\n this is used for clipping the projected values. Optional.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 26: The scale of the intermediate result of matmul,\n i.e. input to layer normalization, at input gate.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 27: The scale of the intermediate result of matmul,\n i.e. input to layer normalization, at forget gate.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 28: The scale of the intermediate result of matmul,\n i.e. input to layer normalization, at cell gate.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 29: The scale of the intermediate result of matmul,\n i.e. input to layer normalization, at output gate.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n * 30: The zero point of the hidden state, i.e. input to\n projection.\n Type: {@link ANEURALNETWORKS_INT32}.\n * 31: The scale of the hidden state, i.e. input to\n projection.\n Type: {@link ANEURALNETWORKS_FLOAT32}.\n\n Outputs:\n * 0: The output state (out).\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n Shape: [batchSize, outputSize]\n * 1: The cell state (out).\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n Shape: [batchSize, numUnits]\n * 2: The output. This is effectively the same as the current\n \"output state (out)\" value.\n Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n Shape: [batchSize, outputSize]\n\n Available since API level 30."]
ANEURALNETWORKS_QUANTIZED_LSTM = 95,
#[doc = " Executes one of the two referenced models as determined by a boolean\n value.\n\n The inputs and outputs of the two referenced models must agree with the\n signature of this operation. That is, if the operation has (3 + n) inputs\n and m outputs, both models must have n inputs and m outputs with the same\n types, ranks (if specified), dimensions (if specified), scales,\n zeroPoints, and other operand parameters as the corresponding operation\n inputs and outputs.\n\n Inputs:\n * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]\n that determines which of the two referenced models to execute.\n The operand must have fully specified dimensions.\n * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be\n executed if the condition is true.\n * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be\n executed if the condition is false.\n * 3 ~ (n + 2): Inputs to be passed to the model selected for execution.\n\n Outputs:\n * 0 ~ (m - 1): Outputs produced by the selected model.\n\n Available since API level 30."]
ANEURALNETWORKS_IF = 96,
#[doc = " Executes the body model until the condition model outputs false.\n\n The inputs to this operation are the condition model, the body model,\n and operand values for the first iteration of the loop. The values are\n implicitly split into three groups of input-output, state-only, and\n input-only values, as described below.\n\n The outputs of this operation are the final values of input-output\n operands.\n\n Both the condition and body model receive (m + k + n) inputs.\n * The first m (m >= 1) inputs are input-output operands. For the first\n iteration, these are initialized from the corresponding inputs of the\n WHILE operation. In subsequent iterations, their values come from the\n corresponding outputs of the body model produced during the previous\n iteration.\n * The next k (k >= 0) inputs are state-only operands. They are similar to\n the input-output operands, except that their values are no longer\n available after the loop terminates.\n * The last n (n >= 0) inputs are input-only operands. Their values come\n from the corresponding inputs of the WHILE operation.\n\n The body model produces (m + k) outputs.\n * The first m outputs are input-output operands. They become the outputs\n of the WHILE operation when a termination condition is reached.\n * The last k outputs are state-only operands. Their values are no longer\n available after the loop terminates.\n\n The numbers m, k, and n are inferred by the runtime as follows:\n m = (WHILE operation output count)\n k = (body model output count) - m\n n = (body model input count) - m - k\n\n The pseudo-code below illustrates the flow of a WHILE operation with\n inputs condition, body, initial_input_output, initial_state, input_only\n (m = 1, k = 1, n = 1):\n\n input_output = initial_input_output\n state = initial_state\n while condition(input_output, state, input_only):\n input_output, state = body(input_output, state, input_only)\n return input_output\n\n To prevent infinite loops, there is an implicit execution timeout\n associated with each loop (\"loop timeout duration\"). See {@link\n ANeuralNetworksExecution_setLoopTimeout}.\n\n Inputs:\n * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition\n model. The model must have (m + k + n) inputs with\n the same types, ranks (if specified), dimensions (if specified),\n scales, zeroPoints, and other operand parameters as the\n corresponding inputs of the WHILE operation and exactly one output\n of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].\n The output operand must have fully specified dimensions.\n * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.\n The model must have (m + k + n) inputs and (m + k) outputs with\n the same types, ranks (if specified), dimensions (if specified),\n scales, zeroPoints, and other operand parameters as the\n corresponding inputs and outputs of the WHILE operation.\n * (m inputs): Initial values for input-output operands.\n * (k inputs): Initial values for state-only operands.\n * (n inputs): Values for input-only operands.\n\n Outputs:\n * 0 ~ (m - 1): Outputs produced by the loop.\n\n Available since API level 30."]
ANEURALNETWORKS_WHILE = 97,
#[doc = " Computes exponential linear activation on the input tensor element-wise.\n\n The output is calculated using the following formula:\n\n ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor, specifying the input. May be zero-sized.\n * 1: A scalar, specifying the alpha parameter.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},\n the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.\n For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.\n\n Outputs:\n * 0: The output tensor of same shape and type as input0.\n\n Available since API level 30."]
ANEURALNETWORKS_ELU = 98,
#[doc = " Computes hard-swish activation on the input tensor element-wise.\n\n Hard swish activation is introduced in\n https://arxiv.org/pdf/1905.02244.pdf\n\n The output is calculated using the following formula:\n\n h-swish(x) = x * max(0, min(6, (x + 3))) / 6\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A tensor, specifying the input. May be zero-sized.\n\n Outputs:\n * 0: The output tensor of same shape and type as input0.\n Scale and zero point of this tensor may be different from the input\n tensor's parameters.\n\n Available since API level 30."]
ANEURALNETWORKS_HARD_SWISH = 99,
#[doc = " Creates a tensor filled with a scalar value.\n\n Supported output tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: A 1-D tensor, specifying the desired output tensor shape.\n * 1: A scalar, specifying the value to fill the output tensors with.\n For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},\n the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.\n For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},\n the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.\n For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},\n the scalar must be of {@link ANEURALNETWORKS_INT32}.\n\n Outputs:\n * 0: The output tensor.\n\n Available since API level 30."]
ANEURALNETWORKS_FILL = 100,
#[doc = " Returns the rank of a tensor.\n\n The rank of a tensor is the number of dimensions in it. Also known as\n \"order\", \"degree\", \"ndims\".\n\n Supported tensor {@link OperandCode}:\n * {@link ANEURALNETWORKS_TENSOR_FLOAT16}\n * {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n * {@link ANEURALNETWORKS_TENSOR_INT32}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}\n * {@link ANEURALNETWORKS_TENSOR_BOOL8}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}\n * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}\n * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}\n\n Supported tensor rank: from 1.\n\n Inputs:\n * 0: The input tensor.\n\n Outputs:\n * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank\n of the input tensor.\n\n Available since API level 30."]
ANEURALNETWORKS_RANK = 101,
}
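// A minimal host-side reference sketch (our helpers, not part of the NNAPI
// bindings) of the scalar formulas quoted in the ANEURALNETWORKS_ELU and
// ANEURALNETWORKS_HARD_SWISH docs above. Useful for sanity-checking
// accelerator output on the CPU.
pub fn elu_reference(x: f32, alpha: f32) -> f32 {
    // ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
    x.max(0.0) + (alpha * (x.exp() - 1.0)).min(0.0)
}
pub fn hard_swish_reference(x: f32) -> f32 {
    // h-swish(x) = x * max(0, min(6, (x + 3))) / 6
    x * (x + 3.0).clamp(0.0, 6.0) / 6.0
}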
#[doc = " NO fused activation function."]
pub const ANEURALNETWORKS_FUSED_NONE: FuseCode = 0;
#[doc = " Fused ReLU activation function."]
pub const ANEURALNETWORKS_FUSED_RELU: FuseCode = 1;
#[doc = " Fused ReLU1 activation function."]
pub const ANEURALNETWORKS_FUSED_RELU1: FuseCode = 2;
#[doc = " Fused ReLU6 activation function."]
pub const ANEURALNETWORKS_FUSED_RELU6: FuseCode = 3;
#[doc = " Fused activation function types.\n\n\n Available since API level 27."]
pub type FuseCode = ::std::os::raw::c_uint;
#[doc = " SAME padding.\n Padding on both ends are the \"same\":\n padding_to_beginning = total_padding / 2\n padding_to_end = (total_padding + 1)/2.\n i.e., for even number of padding, padding to both ends are exactly\n the same; for odd number of padding, padding to the ending is bigger\n than the padding to the beginning by 1.\n\n total_padding is a function of input, stride, dilation and filter size.\n It could be computed as follows:\n out_size = (input + stride - 1) / stride\n effective_filter_size = (filter_size - 1) * dilation + 1\n needed_input = (out_size - 1) * stride + effective_filter_size\n total_padding = max(0, needed_input - input_size)\n The computation is the same for the horizontal and vertical directions."]
pub const ANEURALNETWORKS_PADDING_SAME: PaddingCode = 1;
#[doc = " VALID padding.\n No padding. When the input size is not evenly divisible by\n the filter size, the input at the end that could not fill\n the whole filter tile will simply be ignored."]
pub const ANEURALNETWORKS_PADDING_VALID: PaddingCode = 2;
#[doc = " Implicit padding algorithms.\n\n\n Available since API level 27."]
pub type PaddingCode = ::std::os::raw::c_int;
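// A host-side sketch of the SAME-padding arithmetic quoted in the
// ANEURALNETWORKS_PADDING_SAME doc above (helper name is ours; assumes
// stride >= 1 and dilation >= 1). Returns (padding_to_beginning,
// padding_to_end) for one spatial axis.
pub fn same_padding(input_size: u32, stride: u32, filter_size: u32, dilation: u32) -> (u32, u32) {
    let out_size = (input_size + stride - 1) / stride;
    let effective_filter_size = (filter_size - 1) * dilation + 1;
    let needed_input = (out_size - 1) * stride + effective_filter_size;
    // saturating_sub implements max(0, needed_input - input_size) on u32.
    let total_padding = needed_input.saturating_sub(input_size);
    (total_padding / 2, (total_padding + 1) / 2)
}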
#[doc = " Prefer executing in a way that minimizes battery drain.\n This is desirable for compilations that will be executed often."]
pub const ANEURALNETWORKS_PREFER_LOW_POWER: PreferenceCode = 0;
#[doc = " Prefer returning a single answer as fast as possible, even if this causes\n more power consumption."]
pub const ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER: PreferenceCode = 1;
#[doc = " Prefer maximizing the throughput of successive frames, for example when\n processing successive frames coming from the camera."]
pub const ANEURALNETWORKS_PREFER_SUSTAINED_SPEED: PreferenceCode = 2;
#[doc = " Execution preferences.\n\n Available since API level 27."]
pub type PreferenceCode = ::std::os::raw::c_int;
#[doc = " The device type cannot be provided."]
pub const ANEURALNETWORKS_DEVICE_UNKNOWN: DeviceTypeCode = 0;
#[doc = " The device does not fall into any category below."]
pub const ANEURALNETWORKS_DEVICE_OTHER: DeviceTypeCode = 1;
#[doc = " The device runs NNAPI models on single or multi-core CPU."]
pub const ANEURALNETWORKS_DEVICE_CPU: DeviceTypeCode = 2;
#[doc = " The device can run NNAPI models and also accelerate graphics APIs such\n as OpenGL ES and Vulkan."]
pub const ANEURALNETWORKS_DEVICE_GPU: DeviceTypeCode = 3;
#[doc = " Dedicated accelerator for Machine Learning workloads."]
pub const ANEURALNETWORKS_DEVICE_ACCELERATOR: DeviceTypeCode = 4;
#[doc = " Device types.\n\n The type of NNAPI device."]
pub type DeviceTypeCode = ::std::os::raw::c_int;
#[doc = " Result codes.\n\n <p>Any NNAPI function can return any result code, including result codes not\n currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}\n indicates a failure of some kind.</p>\n\n <p>Additional information about the nature of a failure can be obtained from\n the device log after enabling NNAPI debugging by setting the debug.nn.vlog\n property to 1, e.g., by calling \"adb shell setprop debug.nn.vlog 1\".</p>\n\n Available since API level 27."]
#[repr(C)]
pub enum ResultCode {
#[doc = " Operation was succesful."]
ANEURALNETWORKS_NO_ERROR = 0,
#[doc = " Failure caused by not enough available memory."]
ANEURALNETWORKS_OUT_OF_MEMORY = 1,
#[doc = " Failure caused by not enough available memory."]
ANEURALNETWORKS_INCOMPLETE = 2,
#[doc = " Failure caused by unexpected null argument."]
ANEURALNETWORKS_UNEXPECTED_NULL = 3,
#[doc = " Failure caused by invalid function arguments, invalid model definition,\n invalid execution definition or invalid data at execution time."]
ANEURALNETWORKS_BAD_DATA = 4,
#[doc = " Failure caused by failed model execution."]
ANEURALNETWORKS_OP_FAILED = 5,
#[doc = " Failure caused by object being in the wrong state."]
ANEURALNETWORKS_BAD_STATE = 6,
#[doc = " Failure caused by not being able to map a file into memory.\n This may be caused by a file descriptor not being mappable, or an AHardwareBuffer\n not supported by the device.\n Mitigate by reading its content into memory."]
ANEURALNETWORKS_UNMAPPABLE = 7,
#[doc = " Failure caused by insufficient buffer size provided to a model output."]
ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
#[doc = " Failure caused by a device not being available."]
ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
#[doc = " Failure because a deadline could not be met for a task, but future\n deadlines may still be met for the same task after a short delay.\n\n Available since API level 30."]
ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
#[doc = " Failure because a deadline could not be met for a task, and future\n deadlines will likely also not be met for the same task even after a\n short delay.\n\n Available since API level 30."]
ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
#[doc = " Failure because of a resource limitation within the driver, but future\n calls for the same task may still succeed after a short delay.\n\n Available since API level 30."]
ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
#[doc = " Failure because of a resource limitation within the driver, and future\n calls for the same task will likely also fail even after a short\n delay.\n\n Available since API level 30."]
ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
#[doc = " Failure indicating an object is in a dead state.\n\n Available since API level 30."]
ANEURALNETWORKS_DEAD_OBJECT = 14,
#[doc = " This error code is not defined in NNAPI."]
INVALID_ERROR = 1000,
}
impl From<i32> for ResultCode {
fn from(value: i32) -> Self {
match value {
0 => ResultCode::ANEURALNETWORKS_NO_ERROR,
1 => ResultCode::ANEURALNETWORKS_OUT_OF_MEMORY,
2 => ResultCode::ANEURALNETWORKS_INCOMPLETE,
3 => ResultCode::ANEURALNETWORKS_UNEXPECTED_NULL,
4 => ResultCode::ANEURALNETWORKS_BAD_DATA,
5 => ResultCode::ANEURALNETWORKS_OP_FAILED,
6 => ResultCode::ANEURALNETWORKS_BAD_STATE,
7 => ResultCode::ANEURALNETWORKS_UNMAPPABLE,
8 => ResultCode::ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE,
9 => ResultCode::ANEURALNETWORKS_UNAVAILABLE_DEVICE,
10 => ResultCode::ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT,
11 => ResultCode::ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT,
12 => ResultCode::ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT,
13 => ResultCode::ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT,
14 => ResultCode::ANEURALNETWORKS_DEAD_OBJECT,
_ => ResultCode::INVALID_ERROR,
}
}
}
impl Debug for ResultCode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "ResultCode::{}", self)
    }
}
impl std::fmt::Display for ResultCode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let description = match self {
            ResultCode::ANEURALNETWORKS_NO_ERROR => " Operation was successful.",
            ResultCode::ANEURALNETWORKS_OUT_OF_MEMORY => " Failure caused by not enough available memory.",
            ResultCode::ANEURALNETWORKS_INCOMPLETE => " Failure caused by the operation being incomplete.",
            ResultCode::ANEURALNETWORKS_UNEXPECTED_NULL => " Failure caused by unexpected null argument.",
            ResultCode::ANEURALNETWORKS_BAD_DATA => " Failure caused by invalid function arguments, invalid model definition,\n invalid execution definition or invalid data at execution time.",
            ResultCode::ANEURALNETWORKS_OP_FAILED => " Failure caused by failed model execution.",
            ResultCode::ANEURALNETWORKS_BAD_STATE => " Failure caused by object being in the wrong state.",
            ResultCode::ANEURALNETWORKS_UNMAPPABLE => " Failure caused by not being able to map a file into memory.\n This may be caused by a file descriptor not being mappable, or an AHardwareBuffer\n not supported by the device.\n Mitigate by reading its content into memory.",
            ResultCode::ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE => " Failure caused by insufficient buffer size provided to a model output.",
            ResultCode::ANEURALNETWORKS_UNAVAILABLE_DEVICE => " Failure caused by a device not being available.",
            ResultCode::ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT => " Failure because a deadline could not be met for a task, but future\n deadlines may still be met for the same task after a short delay.\n\n Available since API level 30.",
            ResultCode::ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT => " Failure because a deadline could not be met for a task, and future\n deadlines will likely also not be met for the same task even after a\n short delay.\n\n Available since API level 30.",
            ResultCode::ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT => " Failure because of a resource limitation within the driver, but future\n calls for the same task may still succeed after a short delay.\n\n Available since API level 30.",
            ResultCode::ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT => " Failure because of a resource limitation within the driver, and future\n calls for the same task will likely also fail even after a short\n delay.\n\n Available since API level 30.",
            ResultCode::ANEURALNETWORKS_DEAD_OBJECT => " Failure indicating an object is in a dead state.\n\n Available since API level 30.",
            ResultCode::INVALID_ERROR => " This error code is not defined in NNAPI.",
        };
        f.write_str(description)
    }
}
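// A small convenience sketch: fold a raw status code returned by the C API
// into a Result, using the From<i32> conversion above. The helper name
// `nnapi_check` is ours, not part of NNAPI; the examples further down use it.
pub fn nnapi_check(status: ::std::os::raw::c_int) -> Result<(), ResultCode> {
    match ResultCode::from(status) {
        ResultCode::ANEURALNETWORKS_NO_ERROR => Ok(()),
        err => Err(err),
    }
}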
pub const ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES: _bindgen_ty_1 = 128;
#[doc = " For {@link ANeuralNetworksModel_setOperandValue}, values with a\n length smaller or equal to this will be immediately copied into\n the model. The size is in bytes.\n\n Available since API level 27."]
pub type _bindgen_ty_1 = ::std::os::raw::c_uint;
pub const ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN: _bindgen_ty_2 = 32;
#[doc = " For {@link ANeuralNetworksCompilation_setCaching}, specify the size\n of the cache token required from the application. The size is in bytes.\n\n Available since API level 29."]
pub type _bindgen_ty_2 = ::std::os::raw::c_uint;
pub const DurationCode_ANEURALNETWORKS_DURATION_ON_HARDWARE: DurationCode = 0;
pub const DurationCode_ANEURALNETWORKS_DURATION_IN_DRIVER: DurationCode = 1;
pub const DurationCode_ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE: DurationCode = 2;
pub const DurationCode_ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER: DurationCode = 3;
#[doc = " Different duration measurements.\n\n Durations are measured in nanoseconds.\n\n Available since API level 29."]
pub type DurationCode = ::std::os::raw::c_uint;
pub const PriorityCode_ANEURALNETWORKS_PRIORITY_LOW: PriorityCode = 90;
pub const PriorityCode_ANEURALNETWORKS_PRIORITY_MEDIUM: PriorityCode = 100;
pub const PriorityCode_ANEURALNETWORKS_PRIORITY_HIGH: PriorityCode = 110;
pub const PriorityCode_ANEURALNETWORKS_PRIORITY_DEFAULT: PriorityCode = 100;
#[doc = " Relative execution priority.\n\n Available since API level 30."]
pub type PriorityCode = ::std::os::raw::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksMemory {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksModel {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksCompilation {
_unused: [u8; 0],
}
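// A hedged usage sketch for compilation caching (see
// ANeuralNetworksCompilation_setCaching in the extern block below). The token
// must be exactly ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN (32) bytes and
// unique to the model within the application; `cache_dir` is app-chosen. The
// helper name is ours.
pub unsafe fn enable_caching(
    compilation: *mut ANeuralNetworksCompilation,
    cache_dir: &std::ffi::CStr,
    token: &[u8; ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN as usize],
) -> Result<(), ResultCode> {
    nnapi_check(ANeuralNetworksCompilation_setCaching(
        compilation,
        cache_dir.as_ptr(),
        token.as_ptr(),
    ))
}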
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksExecution {
_unused: [u8; 0],
}
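// Hedged usage sketches (our helpers, not NNAPI entry points) for the
// execution functions declared in the extern block below. `execution` is
// assumed to be fully prepared (inputs and outputs bound) and, for the
// dimension query, to have already completed.
pub unsafe fn compute_sync(execution: *mut ANeuralNetworksExecution) -> Result<(), ResultCode> {
    // Blocks until the execution finishes; see ANeuralNetworksExecution_compute.
    nnapi_check(ANeuralNetworksExecution_compute(execution))
}
pub unsafe fn output_dimensions(
    execution: *mut ANeuralNetworksExecution,
    index: i32,
) -> Result<Vec<u32>, ResultCode> {
    // Query the rank first so the dimension buffer can be sized exactly.
    let mut rank: u32 = 0;
    nnapi_check(ANeuralNetworksExecution_getOutputOperandRank(
        execution, index, &mut rank,
    ))?;
    let mut dims = vec![0u32; rank as usize];
    nnapi_check(ANeuralNetworksExecution_getOutputOperandDimensions(
        execution,
        index,
        dims.as_mut_ptr(),
    ))?;
    Ok(dims)
}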
#[doc = " Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksSymmPerChannelQuantParams {
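#[doc = " The index of the channel dimension."]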
pub channelDim: u32,
#[doc = " The size of the scale array. Should be equal to dimension[channelDim] of the Operand."]
pub scaleCount: u32,
#[doc = " The array of scaling values for each channel. Each value must be greater than zero."]
pub scales: *const f32,
}
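// A hedged construction sketch (helper name is ours): build per-channel
// quantization parameters for a weights tensor quantized along
// `channel_dim`. Only a raw pointer is stored, so the caller must keep
// `scales` alive and unmoved for as long as the returned struct is in use.
pub fn per_channel_params(
    scales: &[f32],
    channel_dim: u32,
) -> ANeuralNetworksSymmPerChannelQuantParams {
    ANeuralNetworksSymmPerChannelQuantParams {
        channelDim: channel_dim,
        scaleCount: scales.len() as u32,
        scales: scales.as_ptr(),
    }
}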
#[test]
fn bindgen_test_layout_ANeuralNetworksSymmPerChannelQuantParams() {
const UNINIT: ::std::mem::MaybeUninit<ANeuralNetworksSymmPerChannelQuantParams> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<ANeuralNetworksSymmPerChannelQuantParams>(),
16usize,
concat!(
"Size of: ",
stringify!(ANeuralNetworksSymmPerChannelQuantParams)
)
);
assert_eq!(
::std::mem::align_of::<ANeuralNetworksSymmPerChannelQuantParams>(),
8usize,
concat!(
"Alignment of ",
stringify!(ANeuralNetworksSymmPerChannelQuantParams)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).channelDim) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksSymmPerChannelQuantParams),
"::",
stringify!(channelDim)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).scaleCount) as usize - ptr as usize },
4usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksSymmPerChannelQuantParams),
"::",
stringify!(scaleCount)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).scales) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksSymmPerChannelQuantParams),
"::",
stringify!(scales)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksBurst {
_unused: [u8; 0],
}
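// A hedged lifecycle sketch for burst execution (see the extern declarations
// below): create a burst from a finished compilation, run one execution on
// it, and free it on every path. The helper name is ours; the execution must
// come from the same compilation as the burst.
pub unsafe fn run_on_burst(
    compilation: *mut ANeuralNetworksCompilation,
    execution: *mut ANeuralNetworksExecution,
) -> Result<(), ResultCode> {
    let mut burst: *mut ANeuralNetworksBurst = std::ptr::null_mut();
    nnapi_check(ANeuralNetworksBurst_create(compilation, &mut burst))?;
    let status = ANeuralNetworksExecution_burstCompute(execution, burst);
    ANeuralNetworksBurst_free(burst); // always release, even on failure
    nnapi_check(status)
}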
#[doc = " ANeuralNetworksOperandType describes the type of an operand.\n\n This structure is used to describe both scalars and tensors.\n\n A tensor operand type with all dimensions specified is \"fully\n specified\". Whenever possible (i.e., whenever the dimensions are\n known at model construction time), a tensor operand type should be\n (but is not required to be) fully specified, in order to enable the\n best possible performance.\n\n If a tensor operand's type is not fully specified, the dimensions\n of the operand are deduced from the operand types and values of the\n operation for which that operand is an output or from the corresponding\n {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input\n operand type in the case of referenced model input operands.\n\n <p>In the following situations, a tensor operand type must be fully\n specified:<ul>\n <li>The operand has a constant value, set by\n {@link ANeuralNetworksModel_setOperandValue} (with a\n non-nullptr buffer) or\n {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>\n <li>The operand is a model input (see\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main\n model within a compilation. A fully specified tensor operand type\n must either be provided to {@link ANeuralNetworksModel_addOperand};\n or it must be provided to the corresponding\n {@link ANeuralNetworksExecution_setInput}, or\n {@link ANeuralNetworksExecution_setInputFromMemory}.\n EXCEPTION: If the input is optional and omitted\n (by passing nullptr for buffer to\n {@link ANeuralNetworksExecution_setInput}) then it need\n not have a fully specified tensor operand type.</li>\n <li>The operand is a model output (see\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main\n model within a compilation and is to be used with {@link\n ANeuralNetworksExecution_startComputeWithDependencies}.\n A fully specified tensor operand type must either be provided\n to {@link ANeuralNetworksModel_addOperand}; or it must be\n provided to the corresponding\n {@link ANeuralNetworksExecution_setOutput}, or\n {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>\n\n A tensor operand type of specified rank but some number of\n unspecified dimensions is represented by setting dimensionCount to\n the rank and each unspecified dimension to 0.\n\n Available since API level 27.\n\n Starting at API level 29, a tensor operand type of unspecified rank is\n represented by setting dimensionCount to 0 and dimensions to NULL (just as if\n it were a scalar operand type)."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksOperandType {
#[doc = " The data type, e.g ANEURALNETWORKS_FLOAT32."]
pub type_: i32,
#[doc = " The number of dimensions (rank).\n\n Must be 0 for scalars."]
pub dimensionCount: u32,
#[doc = " The dimensions of the tensor.\n\n Must be nullptr for scalars."]
pub dimensions: *const u32,
#[doc = " The quantization scale.\n\n Must be 0 when not applicable to an operand type.\n\n See {@link OperandCode}."]
pub scale: f32,
#[doc = " The quantization zero point.\n\n Must be 0 when not applicable to an operand type.\n\n See {@link OperandCode}."]
pub zeroPoint: i32,
}
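// A hedged sketch of filling in ANeuralNetworksOperandType by hand (helper
// name is ours). The operand-code value 3 is ANEURALNETWORKS_TENSOR_FLOAT32
// in the NNAPI headers. As with per_channel_params above, `dimensions` is
// held only as a raw pointer and must outlive the returned struct.
pub fn float32_tensor_type(dimensions: &[u32]) -> ANeuralNetworksOperandType {
    ANeuralNetworksOperandType {
        type_: 3, // ANEURALNETWORKS_TENSOR_FLOAT32
        dimensionCount: dimensions.len() as u32,
        dimensions: dimensions.as_ptr(),
        scale: 0.0,   // not applicable to float operands
        zeroPoint: 0, // not applicable to float operands
    }
}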
#[test]
fn bindgen_test_layout_ANeuralNetworksOperandType() {
const UNINIT: ::std::mem::MaybeUninit<ANeuralNetworksOperandType> =
::std::mem::MaybeUninit::uninit();
let ptr = UNINIT.as_ptr();
assert_eq!(
::std::mem::size_of::<ANeuralNetworksOperandType>(),
24usize,
concat!("Size of: ", stringify!(ANeuralNetworksOperandType))
);
assert_eq!(
::std::mem::align_of::<ANeuralNetworksOperandType>(),
8usize,
concat!("Alignment of ", stringify!(ANeuralNetworksOperandType))
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksOperandType),
"::",
stringify!(type_)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).dimensionCount) as usize - ptr as usize },
4usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksOperandType),
"::",
stringify!(dimensionCount)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).dimensions) as usize - ptr as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksOperandType),
"::",
stringify!(dimensions)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).scale) as usize - ptr as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksOperandType),
"::",
stringify!(scale)
)
);
assert_eq!(
unsafe { ::std::ptr::addr_of!((*ptr).zeroPoint) as usize - ptr as usize },
20usize,
concat!(
"Offset of field: ",
stringify!(ANeuralNetworksOperandType),
"::",
stringify!(zeroPoint)
)
);
}
pub type ANeuralNetworksOperationType = i32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksEvent {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksDevice {
_unused: [u8; 0],
}
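// A hedged enumeration sketch using the device-query functions declared in
// the extern block below: walk all devices and collect (name, feature level)
// pairs. Device handles are owned by the runtime and are not freed by the
// caller. The helper name is ours.
pub unsafe fn list_devices() -> Result<Vec<(String, i64)>, ResultCode> {
    let mut count: u32 = 0;
    nnapi_check(ANeuralNetworks_getDeviceCount(&mut count))?;
    let mut devices = Vec::with_capacity(count as usize);
    for i in 0..count {
        let mut device: *mut ANeuralNetworksDevice = std::ptr::null_mut();
        nnapi_check(ANeuralNetworks_getDevice(i, &mut device))?;
        let mut name: *const ::std::os::raw::c_char = std::ptr::null();
        nnapi_check(ANeuralNetworksDevice_getName(device, &mut name))?;
        let mut level: i64 = 0;
        nnapi_check(ANeuralNetworksDevice_getFeatureLevel(device, &mut level))?;
        // The returned name is a null-terminated UTF-8 string owned by the runtime.
        let name = std::ffi::CStr::from_ptr(name).to_string_lossy().into_owned();
        devices.push((name, level));
    }
    Ok(devices)
}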
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ANeuralNetworksMemoryDesc {
_unused: [u8; 0],
}
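// A hedged lifecycle sketch for device memory (functions declared in the
// extern block below): describe a memory object that will serve as output
// `index` of executions created from `compilation` (which must already be
// finished), then allocate it. The descriptor is freed on every path, since
// it need not outlive the memory object. The helper name is ours.
pub unsafe fn create_output_memory(
    compilation: *const ANeuralNetworksCompilation,
    index: u32,
) -> Result<*mut ANeuralNetworksMemory, ResultCode> {
    let mut desc = std::ptr::null_mut();
    nnapi_check(ANeuralNetworksMemoryDesc_create(&mut desc))?;
    // frequency 1.0: the memory is expected to be used in this role on every execution.
    if let Err(e) = nnapi_check(ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, index, 1.0)) {
        ANeuralNetworksMemoryDesc_free(desc);
        return Err(e);
    }
    if let Err(e) = nnapi_check(ANeuralNetworksMemoryDesc_finish(desc)) {
        ANeuralNetworksMemoryDesc_free(desc);
        return Err(e);
    }
    let mut memory = std::ptr::null_mut();
    let result = nnapi_check(ANeuralNetworksMemory_createFromDesc(desc, &mut memory));
    ANeuralNetworksMemoryDesc_free(desc);
    result.map(|_| memory)
}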
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct AHardwareBuffer {
_unused: [u8; 0],
}
pub type __builtin_va_list = *mut ::std::os::raw::c_char;
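// A hedged sketch combining the device-selection entry points declared below:
// check that `model` (already finished) is fully supported by the chosen
// devices, then compile it for exactly that set, as createForDevices requires
// full support. `op_count` is assumed to be the number of operations that were
// added to the model; NNAPI does not expose it here, and the BAD_DATA fallback
// is our own convention, not an NNAPI return value from this path.
pub unsafe fn compile_for_devices(
    model: *mut ANeuralNetworksModel,
    devices: &[*const ANeuralNetworksDevice],
    op_count: usize,
) -> Result<*mut ANeuralNetworksCompilation, ResultCode> {
    let mut supported = vec![false; op_count];
    nnapi_check(ANeuralNetworksModel_getSupportedOperationsForDevices(
        model,
        devices.as_ptr(),
        devices.len() as u32,
        supported.as_mut_ptr(),
    ))?;
    if !supported.iter().all(|&s| s) {
        return Err(ResultCode::ANEURALNETWORKS_BAD_DATA);
    }
    let mut compilation = std::ptr::null_mut();
    nnapi_check(ANeuralNetworksCompilation_createForDevices(
        model,
        devices.as_ptr(),
        devices.len() as u32,
        &mut compilation,
    ))?;
    Ok(compilation)
}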
#[link(name = "neuralnetworks")]
extern "C" {
#[doc = " Create a {@link ANeuralNetworksMemoryDesc} with no properties.\n\n This only creates the memory descriptor. Its properties should be set with calls to\n {@link ANeuralNetworksMemoryDesc_addInputRole},\n {@link ANeuralNetworksMemoryDesc_addOutputRole}, and\n {@link ANeuralNetworksMemoryDesc_setDimensions}.\n\n {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set.\n\n {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor\n is no longer needed.\n\n Available since API level 30.\n\n @param desc The {@link ANeuralNetworksMemoryDesc} to be created.\n Set to NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemoryDesc_create(
desc: *mut *mut ANeuralNetworksMemoryDesc,
) -> ::std::os::raw::c_int;
#[doc = " Destroy a memory descriptor.\n\n The memory descriptor need not have been finished by a call to\n {@link ANeuralNetworksMemoryDesc_finish}.\n\n See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksMemoryDesc_free(desc: *mut ANeuralNetworksMemoryDesc);
#[doc = " Specify that a memory object will be playing the role of an input to an execution created from a\n particular compilation.\n\n The compilation and the input index fully specify an input operand. This function\n may be invoked multiple times on the same memory descriptor with different input operands,\n and the same input operand may be specified on multiple memory descriptors. However,\n specifying the same input operand on the same memory descriptor more than once will\n return an error.\n\n The dimensions of the corresponding model operands of all the roles specified by\n {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two\n dimensions are incompatible if both ranks are fully specified but have different values, or if\n there is at least one axis that is fully specified in both but has different values.\n\n At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor\n before invoking {@link ANeuralNetworksMemoryDesc_finish}.\n\n Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param desc The memory descriptor to be modified.\n @param compilation The compilation object. It must already have been finished by calling\n {@link ANeuralNetworksCompilation_finish}, and must outlive the memory\n descriptor.\n @param index The index of the input argument we are referencing from the compilation. It is\n an index into the inputs list passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the\n memory is to be used in the specified role. This is provided as a hint to\n optimize the case when different roles prefer different memory locations or data\n layouts.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemoryDesc_addInputRole(
desc: *mut ANeuralNetworksMemoryDesc,
compilation: *const ANeuralNetworksCompilation,
index: u32,
frequency: f32,
) -> ::std::os::raw::c_int;
#[doc = " Specify that a memory object will be playing the role of an output to an execution created from a\n particular compilation.\n\n The compilation and the output index fully specify an output operand. This function\n may be invoked multiple times on the same memory descriptor with different output operands,\n and the same output operand may be specified on multiple memory descriptors. However,\n specifying the same output operand on the same memory descriptor object more than once will\n return an error.\n\n The dimensions of the corresponding model operands of all the roles specified by\n {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two\n dimensions are incompatible if both ranks are fully specified but have different values, or if\n there is at least one axis that is fully specified in both but has different values.\n\n At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor\n before invoking {@link ANeuralNetworksMemoryDesc_finish}.\n\n Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param desc The memory descriptor to be modified.\n @param compilation The compilation object. It must already have been finished by calling\n {@link ANeuralNetworksCompilation_finish}, and must outlive the memory\n descriptor.\n @param index The index of the output argument we are referencing from the compilation. It is\n an index into the outputs list passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the\n memory is to be used in the specified role. This is provided as a hint to\n optimize the case when multiple roles prefer different memory locations or data\n layouts.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemoryDesc_addOutputRole(
desc: *mut ANeuralNetworksMemoryDesc,
compilation: *const ANeuralNetworksCompilation,
index: u32,
frequency: f32,
) -> ::std::os::raw::c_int;
#[doc = " Set the dimensional information of the memory descriptor.\n\n The specified dimensions must be compatible with the dimensions of the corresponding model\n operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks\n are fully specified but have different values, or if there is at least one axis that is fully\n specified in both but has different values.\n\n Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param desc The memory descriptor to be modified.\n @param rank The number of dimensions. Must be 0 for scalars.\n @param dimensions An array of dimensions. An entry with the value 0 indicates that the\n corresponding axis has an unknown size.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemoryDesc_setDimensions(
desc: *mut ANeuralNetworksMemoryDesc,
rank: u32,
dimensions: *const u32,
) -> ::std::os::raw::c_int;
#[doc = " Indicate that we have finished modifying a memory descriptor. Required before calling\n {@link ANeuralNetworksMemory_createFromDesc}.\n\n This function must only be called once for a given memory descriptor.\n\n See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param desc The memory descriptor to be finished.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemoryDesc_finish(
desc: *mut ANeuralNetworksMemoryDesc,
) -> ::std::os::raw::c_int;
#[doc = " Creates a memory object from a memory descriptor.\n\n The memory object is created with an uninitialized buffer. A memory object with an uninitialized\n buffer may only be used according to the roles specified by {@link\n ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link\n ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object\n is used as an output in a successful execution, or used as the destination memory in a successful\n {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used\n according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or\n destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will\n return to the uninitialized state if the memory object is used as an output in a failed\n execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.\n\n The dimensions of the memory descriptor are deduced from the dimensions of the corresponding\n model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and\n {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to\n {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have\n unspecified dimensions or rank. In such a case, the same memory object may be used with different\n shapes of outputs in different executions. When the memory is used as an input, the input shape\n must be the same as the output shape from the last execution using this memory object as an\n output, or the last {@link ANeuralNetworkMemory_copy} using this memory object as the destination\n memory. Creating a memory object with unspecified dimensions or rank may fail for certain sets of\n roles.\n\n Using the memory in roles or shapes that are not compatible with the rules specified above will\n return an error.\n\n When calling {@link ANeuralNetworksExecution_setInputFromMemory} or\n {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,\n both offset and length must be set to zero and the entire memory region will be\n associated with the specified input or output operand.\n\n Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this\n function will return an error.\n\n {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.\n\n Attempting to create memory from an unfinished memory descriptor will return an error.\n\n The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}\n object.\n\n Available since API level 30.\n\n @param desc The memory descriptor.\n @param memory The memory object to be created.\n Set to NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is\n created with unspecified dimensions or rank and it is not supported for this set of\n roles."]
pub fn ANeuralNetworksMemory_createFromDesc(
desc: *const ANeuralNetworksMemoryDesc,
memory: *mut *mut ANeuralNetworksMemory,
) -> ::std::os::raw::c_int;
#[doc = " Copies data from one memory object to another.\n\n If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc},\n the src and dst must have the same logical size:\n - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created\n from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of\n AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.\n - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a\n format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is\n no padding and the data is tightly packed. This function may fail if the AHardwareBuffer\n cannot be accessed.\n - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size\n equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This\n function will fail if the number of elements is unknown.\n\n If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have\n compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but\n have different values, or if there is at least one axis that is fully specified in both but has\n different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions\n of dst will get updated according to the dimensions of the src.\n\n In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must\n have been used as an output in a successful execution, or used as the destination memory in a\n successful {@link ANeuralNetworksMemory_copy}.\n\n The src and dst may have different data layout, in which case the data copying is performed\n logically with data layout transformation.\n\n Available since API level 30.\n\n @param src The source memory object.\n @param dst The destination memory object.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksMemory_copy(
src: *const ANeuralNetworksMemory,
dst: *const ANeuralNetworksMemory,
) -> ::std::os::raw::c_int;
#[doc = " Get the number of available devices.\n\n @param numDevices Used to return the number of devices.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworks_getDeviceCount(numDevices: *mut u32) -> ::std::os::raw::c_int;
#[doc = " Get the representation of the specified device.\n\n @param devIndex The index of the specified device. Must be less than the\nnumber of available devices.\n @param device The representation of the specified device.\n The same representation will always be returned for the specified\n device.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworks_getDevice(
devIndex: u32,
device: *mut *mut ANeuralNetworksDevice,
) -> ::std::os::raw::c_int;
#[doc = " Get the name of the specified device.\n\n @param device The representation of the specified device.\n @param name The returned name of the specified device. The name will be in UTF-8\n and will be null-terminated. It will be recognizable as a known device name\n rather than a cryptic string. For devices with feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is 29 and above, the\n format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28\n or lower, the format of the name is undefined.\n The name will remain valid for the duration of the application.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksDevice_getName(
device: *const ANeuralNetworksDevice,
name: *mut *const ::std::os::raw::c_char,
) -> ::std::os::raw::c_int;
#[doc = " Get the type of a given device.\n\n The device type can be used to help application developers to distribute Machine Learning\n workloads and other workloads such as graphical rendering.\n E.g., for an app which renders AR scenes based on real time object detection results,\n the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU\n for graphical rendering.\n\n @param device The representation of the specified device.\n @param type The returned {@link DeviceTypeCode} of the specified device.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksDevice_getType(
device: *const ANeuralNetworksDevice,
type_: *mut i32,
) -> ::std::os::raw::c_int;
#[doc = " Get the version of the driver implementation of the specified device.\n\n It’s the responsibility of the driver implementor to insure that this version string\n uniquely distinguishes this implementation from all previous implementations.\n\n This version string must not be confused with the feature level which is solely defined\n by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.\n For example, it is not possible to filter all drivers older than a certain version.\n\n Application developers may use this version string to avoid or prefer specific driver\n implementations. For example, an application may want to do so because:\n - A specific version of the driver does not provide the required performance,\n perhaps because of a performance regression.\n - A specific version of the driver has a bug or returns results that don’t match\n the minimum precision requirement for the application.\n\n @param device The representation of the specified device.\n @param version The returned version string of the driver for the specified device. The\n string will be in UTF-8 and will be null-terminated. For devices with feature\n level 28 or lower, \"UNKNOWN\" will be returned. The version string will remain\n valid for the duration of the application.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksDevice_getVersion(
device: *const ANeuralNetworksDevice,
version: *mut *const ::std::os::raw::c_char,
) -> ::std::os::raw::c_int;
#[doc = " Get the supported NNAPI version of the specified device.\n\n Each device has a supported feature level, which is the most advanced feature this driver\n implements. For example, if the driver implements the features introduced in Android P,\n but does not implement the features introduced after Android P, the value would be 28.\n Developers could decide whether or not the specified device should be used for a Model that\n has certain feature requirements.\n\n @param device The representation of the specified device.\n @param featureLevel The API level of the most advanced feature this driver implements.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksDevice_getFeatureLevel(
device: *const ANeuralNetworksDevice,
featureLevel: *mut i64,
) -> ::std::os::raw::c_int;
#[doc = " Wait until the device is in a live state.\n\n A device may encounter internal errors and temporarily enter a dead state. A\n call that uses a device in such a state will return with the error\n {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until\n the device is in a live state.\n\n @param device The representation of the specified device.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 30."]
pub fn ANeuralNetworksDevice_wait(
device: *const ANeuralNetworksDevice,
) -> ::std::os::raw::c_int;
#[doc = " Get the supported operations for a specified set of devices. If multiple devices\n are selected, the supported operation list is a union of supported operations of all\n selected devices.\n\n @param model The model to be queried.\n @param devices The set of devices. Must not contain duplicates.\n @param numDevices The number of devices in the set.\n @param supportedOps The boolean array to be filled. True means supported. The size of the\n boolean array must be at least as large as the number of operations\n in the model. The order of elements in the supportedOps array matches\n the order in which the corresponding operations were added to the model.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksModel_getSupportedOperationsForDevices(
model: *const ANeuralNetworksModel,
devices: *const *const ANeuralNetworksDevice,
numDevices: u32,
supportedOps: *mut bool,
) -> ::std::os::raw::c_int;
#[doc = " Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set\n of devices. If more than one device is specified, the compilation will\n distribute the workload automatically across the devices. The model must be fully\n supported by the specified set of devices. This means that\n ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every\n operation for that model/devices pair.\n\n The user must handle all compilation and execution failures from the\n specified set of devices. This is in contrast to a use of {@link\n ANeuralNetworksCompilation_create}, where the runtime will attempt to recover\n from such failures.\n\n The model passed to this function is termed the \"main model\" of the\n compilation, to distinguish it from other models referred to by an Operand\n of type {@link ANEURALNETWORKS_MODEL} within this compilation.\n\n @param model The {@link ANeuralNetworksModel} to be compiled.\n @param devices The set of devices. Must not contain duplicates.\n @param numDevices The number of devices in the set.\n @param compilation The newly created object or NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA\n if the model is invalid.\n\n Available since API level 29."]
pub fn ANeuralNetworksCompilation_createForDevices(
model: *mut ANeuralNetworksModel,
devices: *const *const ANeuralNetworksDevice,
numDevices: u32,
compilation: *mut *mut ANeuralNetworksCompilation,
) -> ::std::os::raw::c_int;
#[doc = " Sets the compilation caching signature and the cache directory.\n\n Provides optional caching information to the runtime for faster repeated\n compilation.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n @param compilation The compilation to be modified.\n @param cacheDir The cache directory for the runtime to store and retrieve caching\n data. It is recommended to use the code cache directory provided\n by the Android runtime. If not using the code cache directory, the\n user should choose a directory local to the application, and is\n responsible for managing the cache entries.\n @param token The token provided by the user to specify a model must be of length\n ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that\n the token is unique to a model within the application. The NNAPI\n runtime cannot detect token collisions; a collision will result in a\n failed execution or in a successful execution that produces incorrect\n output values.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksCompilation_setCaching(
compilation: *mut ANeuralNetworksCompilation,
cacheDir: *const ::std::os::raw::c_char,
token: *const u8,
) -> ::std::os::raw::c_int;
#[doc = " Schedule synchronous evaluation of the execution.\n\n <p>Schedules synchronous evaluation of the execution. Returns once the\n execution has completed and the outputs are ready to be consumed.\n </p>\n\n If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,\n and the execution is not able to complete before the timeout duration is\n exceeded, then execution may be aborted, in which case\n {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. If the device has\n a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel}\n that is lower than 30, then the timeout duration hint will be ignored.\n\n If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and\n the condition model does not output false within the loop timeout duration,\n then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}\n will be returned.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.\n See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.\n See {@link ANeuralNetworksExecution_startComputeWithDependencies} for\n asynchronous execution with dependencies.\n\n Available since API level 29.\n\n @param execution The execution to be scheduled and executed.\n\n @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.\n ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot\n be properly mapped."]
pub fn ANeuralNetworksExecution_compute(
execution: *mut ANeuralNetworksExecution,
) -> ::std::os::raw::c_int;
#[doc = " Get the dimensional information of the specified output operand of the model of the\n {@link ANeuralNetworksExecution}.\n\n The execution must have completed. On asynchronous execution initiated by\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies},\n {@link ANeuralNetworksEvent_wait} must be called prior to this function.\n\n @param execution The execution to be queried.\n @param index The index of the output argument we are querying. It is\n an index into the lists passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param rank The rank of the output operand.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE\n if the target output is provided an insufficient buffer at execution time,\n ANEURALNETWORKS_BAD_DATA if the index is invalid.\n\n Available since API level 29."]
pub fn ANeuralNetworksExecution_getOutputOperandRank(
execution: *mut ANeuralNetworksExecution,
index: i32,
rank: *mut u32,
) -> ::std::os::raw::c_int;
#[doc = " Get the dimensional information of the specified output operand of the model of the\n {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar.\n\n The execution must have completed. On asynchronous execution initiated by\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies},\n {@link ANeuralNetworksEvent_wait} must be called prior to this function.\n\n @param execution The execution to be queried.\n @param index The index of the output argument we are querying. It is an index into the lists\n passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param dimensions The dimension array to be filled. The size of the array must be exactly as\n large as the rank of the output operand to be queried in the model.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE\n if the target output is provided an insufficient buffer at execution time,\n ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar.\n\n Available since API level 29."]
pub fn ANeuralNetworksExecution_getOutputOperandDimensions(
execution: *mut ANeuralNetworksExecution,
index: i32,
dimensions: *mut u32,
) -> ::std::os::raw::c_int;
#[doc = " Create a {@link ANeuralNetworksBurst} to apply the given compilation.\n This only creates the burst object. Computation is only performed once\n {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid\n {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.\n\n <p>The provided compilation must outlive the burst object.</p>\n\n Available since API level 29.\n\n @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.\n @param burst The newly created object or NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA\n if the compilation is invalid."]
pub fn ANeuralNetworksBurst_create(
compilation: *mut ANeuralNetworksCompilation,
burst: *mut *mut ANeuralNetworksBurst,
) -> ::std::os::raw::c_int;
#[doc = " Destroys the burst object.\n\n Available since API level 29.\n\n @param burst The burst object to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksBurst_free(burst: *mut ANeuralNetworksBurst);
#[doc = " Schedule synchronous evaluation of the execution on a burst object.\n\n <p>Schedules synchronous evaluation of the execution. Returns once the\n execution has completed and the outputs are ready to be consumed.</p>\n\n If {@link ANeuralNetworksExecution_setTimeout} was called on the execution,\n and the execution is not able to complete before the timeout duration is\n exceeded, then execution may be aborted, in which case\n {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned.\n\n If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and\n the condition model does not output false within the loop timeout duration,\n then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}\n will be returned. If the device has a feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the\n timeout duration hint will be ignored.\n\n <p>There must be at most one {@link ANeuralNetworksExecution} processing at\n any given time for any given burst object. Any\n {@link ANeuralNetworksExecution} launched before the previous has finished\n will result in ANEURALNETWORKS_BAD_STATE.</p>\n\n See {@link ANeuralNetworksExecution_compute} for synchronous execution.\n See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.\n See {@link ANeuralNetworksExecution_startComputeWithDependencies} for\n asynchronous execution with dependencies.\n\n Available since API level 29.\n\n @param burst The burst object to execute on.\n @param execution The execution to be scheduled and executed. The execution\n must be created from the same {@link\n ANeuralNetworksCompilation} as the burst object.\n\n @return ANEURALNETWORKS_NO_ERROR if the execution completed normally."]
pub fn ANeuralNetworksExecution_burstCompute(
execution: *mut ANeuralNetworksExecution,
burst: *mut ANeuralNetworksBurst,
) -> ::std::os::raw::c_int;
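    // Burst workflow sketch (comment only; an illustrative example, not part of
    // the generated bindings). Assumes `compilation` has been finished and that
    // `execution` was created from that same compilation; 0 is
    // ANEURALNETWORKS_NO_ERROR.
    //
    // unsafe {
    //     let mut burst: *mut ANeuralNetworksBurst = ::std::ptr::null_mut();
    //     assert_eq!(ANeuralNetworksBurst_create(compilation, &mut burst), 0);
    //     // Repeated inferences on one burst amortize per-execution overhead;
    //     // only one execution may be in flight on a burst at a time.
    //     assert_eq!(ANeuralNetworksExecution_burstCompute(execution, burst), 0);
    //     ANeuralNetworksBurst_free(burst);
    // }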
#[doc = " Creates a shared memory object from an AHardwareBuffer handle.\n\n If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB\n format, it can be used the same way as shared memory created from a file handle. See\n {@link ANeuralNetworksMemory} for a description on how to use this shared memory.\n\n If the shared memory is backed by an AHardwareBuffer of a format other than\n AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs.\n When calling {@link ANeuralNetworksExecution_setInputFromMemory} or\n {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both\n offset and length must be set to zero and the entire memory region will be\n associated with the specified input or output operand. There is no guarantee\n that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination\n can be used by arbitrary devices. The execution will fail if the selected set of\n devices cannot consume the buffer.\n\n Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory\n backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is\n disallowed.\n\n The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.\n\n Available since API level 29.\n\n @param ahwb The AHardwareBuffer handle.\n @param memory The memory object to be created.\n Set to NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if the request completed normally.\n\n @see AHardwareBuffer"]
pub fn ANeuralNetworksMemory_createFromAHardwareBuffer(
ahwb: *const AHardwareBuffer,
memory: *mut *mut ANeuralNetworksMemory,
) -> ::std::os::raw::c_int;
#[doc = " Specifies whether duration of the {@link ANeuralNetworksExecution} is to be\n measured. Evaluation of the execution must not have been scheduled.\n\n By default, duration is not measured.\n\n The {@link ANeuralNetworksExecution} must have been created from an\n {@link ANeuralNetworksCompilation} which in turn was created from\n {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.\n If the device has a feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 29, then the\n duration will not be measured.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 29.\n\n @param execution The execution to be modified.\n @param measure 'true' if duration is to be measured, 'false' if not.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksExecution_setMeasureTiming(
execution: *mut ANeuralNetworksExecution,
measure: bool,
) -> ::std::os::raw::c_int;
#[doc = " Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds.\n\n The execution must have completed. On asynchronous execution initiated by\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies},\n {@link ANeuralNetworksEvent_wait} must be called prior to this function.\n\n @param execution The execution to be queried.\n @param durationCode The measurement to be queried, specified by {@link DurationCode}.\n @param duration The returned duration. If no measurement was requested by\n {@link ANeuralNetworksExecution_setMeasureTiming}, if the\n device is has a feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is lower\n than 29, or for some other reason the duration is not\n available, UINT64_MAX will be returned. A particular device\n need not support any given measurement.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 29."]
pub fn ANeuralNetworksExecution_getDuration(
execution: *const ANeuralNetworksExecution,
durationCode: i32,
duration: *mut u64,
) -> ::std::os::raw::c_int;
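    // Timing sketch (comment only). Measurement must be requested before the
    // execution is scheduled; the duration is read back afterwards. The value 0
    // is used here as a hypothetical stand-in for a DurationCode constant
    // (e.g. ANEURALNETWORKS_DURATION_ON_HARDWARE) generated elsewhere in these
    // bindings; UINT64_MAX means the measurement is unavailable.
    //
    // unsafe {
    //     assert_eq!(ANeuralNetworksExecution_setMeasureTiming(execution, true), 0);
    //     // ... schedule the execution and wait for completion ...
    //     let mut nanos: u64 = 0;
    //     let code: i32 = 0; // hypothetical DurationCode
    //     assert_eq!(ANeuralNetworksExecution_getDuration(execution, code, &mut nanos), 0);
    //     if nanos != u64::MAX {
    //         println!("measured duration: {} ns", nanos);
    //     }
    // }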
#[doc = " Creates a shared memory object from a file descriptor.\n\n The shared memory is backed by a file descriptor via mmap.\n See {@link ANeuralNetworksMemory} for a description on how to use\n this shared memory.\n\n Available since API level 27.\n\n @param size The requested size in bytes.\n Must not be larger than the file size.\n @param prot The desired memory protection for the mapping.\n It is either PROT_NONE or the bitwise OR of one or\n more of the following flags: PROT_READ, PROT_WRITE.\n @param fd The requested file descriptor.\n The file descriptor has to be mmap-able. The file\n descriptor will be duplicated.\n @param offset The offset to the beginning of the file of the area to map.\n The offset has to be aligned to a page size.\n @param memory The memory object to be created.\n Set to NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if the request completed normally."]
pub fn ANeuralNetworksMemory_createFromFd(
size: usize,
protect: ::std::os::raw::c_int,
fd: ::std::os::raw::c_int,
offset: usize,
memory: *mut *mut ANeuralNetworksMemory,
) -> ::std::os::raw::c_int;
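    // Shared-memory sketch (comment only). Maps a whole file read-only and wraps
    // it in an ANeuralNetworksMemory. The path "weights.bin" is hypothetical, and
    // PROT_READ is assumed to carry its usual Linux/Android value (1); real code
    // should take the constant from the `libc` crate instead.
    //
    // use std::os::unix::io::AsRawFd;
    // unsafe {
    //     let file = std::fs::File::open("weights.bin").unwrap(); // hypothetical path
    //     let size = file.metadata().unwrap().len() as usize;
    //     let mut memory: *mut ANeuralNetworksMemory = ::std::ptr::null_mut();
    //     let status = ANeuralNetworksMemory_createFromFd(
    //         size, 1 /* assumed PROT_READ */, file.as_raw_fd(), 0, &mut memory);
    //     assert_eq!(status, 0); // ANEURALNETWORKS_NO_ERROR
    //     // The fd is duplicated internally, so `file` may be dropped here.
    //     ANeuralNetworksMemory_free(memory);
    // }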
#[doc = " Delete a memory object.\n\n Destroys the object used by the run time to keep track of the memory.\n This will free the underlying actual memory if no other code has open\n handles to this memory.\n\n Available since API level 27.\n\n @param memory The memory object to be freed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksMemory_free(memory: *mut ANeuralNetworksMemory);
#[doc = " Create an empty {@link ANeuralNetworksModel}.\n\n <p>This only creates the object. Computation is performed once\n {@link ANeuralNetworksExecution_burstCompute},\n {@link ANeuralNetworksExecution_compute},\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.\n\n The model should be constructed with calls to\n {@link ANeuralNetworksModel_addOperation} and\n {@link ANeuralNetworksModel_addOperand}\n\n <p>{@link ANeuralNetworksModel_finish} should be called once the model\n has been fully constructed.</p>\n\n <p>{@link ANeuralNetworksModel_free} should be called once the model\n is no longer needed.</p>\n\n Available since API level 27.\n\n @param model The {@link ANeuralNetworksModel} to be created.\n Set to NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_create(
model: *mut *mut ANeuralNetworksModel,
) -> ::std::os::raw::c_int;
#[doc = " Destroy a model.\n\n The model need not have been finished by a call to\n {@link ANeuralNetworksModel_finish}.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param model The model to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksModel_free(model: *mut ANeuralNetworksModel);
#[doc = " Indicate that we have finished modifying a model. Required before\n calling {@link ANeuralNetworksCompilation_create} and\n {@link ANeuralNetworksCompilation_createForDevices}.\n\n An application must ensure that no other thread uses the model at the same\n time.\n\n This function must only be called once for a given model.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param model The model to be finished.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_finish(model: *mut ANeuralNetworksModel) -> ::std::os::raw::c_int;
#[doc = " Add an operand to a model.\n\n The order in which the operands are added is important. The first one added\n to a model will have the index value 0, the second 1, etc. These indexes are\n used as operand identifiers in\n {@link ANeuralNetworksModel_addOperation},\n {@link ANeuralNetworksModel_identifyInputsAndOutputs},\n {@link ANeuralNetworksModel_setOperandValue},\n {@link ANeuralNetworksModel_setOperandValueFromMemory},\n {@link ANeuralNetworksExecution_setInput},\n {@link ANeuralNetworksExecution_setInputFromMemory},\n {@link ANeuralNetworksExecution_setOutput},\n {@link ANeuralNetworksExecution_setOutputFromMemory} and\n {@link ANeuralNetworksExecution_setOperandValue}.\n\n <p>Every operand must be referenced in exactly one of the following\n ways:<ul>\n <li>It is identified as a model input with\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>\n <li>It is identified as a constant with\n {@link ANeuralNetworksModel_setOperandValue} or\n {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>\n <li>It is identified as an output of exactly one operation with\n {@link ANeuralNetworksModel_addOperation}.</li></p>\n <p>An operand that is identified as a model input or as a constant\n must not also be identified as a model output with\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>\n\n To build a model that can accommodate inputs of various sizes, as\n you may want to do for a CNN, leave unspecified the dimensions that\n will vary at run time. If you do so, fully specify dimensions\n when calling {@link ANeuralNetworksExecution_setInput} or\n {@link ANeuralNetworksExecution_setInputFromMemory}.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param model The model to be modified.\n @param type The {@link ANeuralNetworksOperandType} that describes the shape\n of the operand. Neither the {@link ANeuralNetworksOperandType}\n nor the dimensions it points to need to outlive the call to\n {@link ANeuralNetworksModel_addOperand}.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_addOperand(
model: *mut ANeuralNetworksModel,
type_: *const ANeuralNetworksOperandType,
) -> ::std::os::raw::c_int;
#[doc = " Sets an operand to a constant value.\n\n Values of length smaller or equal to\n {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}\n are immediately copied into the model.\n\n For values of length greater than\n {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to\n the buffer is stored within the model. The application must not change the\n content of this region until all executions using this model have\n completed. As the data may be copied during processing, modifying the data\n after this call yields undefined results. The provided buffer must outlive\n this model.\n\n For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}\n is likely to be more efficient.\n\n To indicate that an optional operand should be considered missing,\n pass nullptr for buffer and 0 for length.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param model The model to be modified.\n @param index The index of the model operand we're setting.\n @param buffer A pointer to the data to use.\n @param length The size in bytes of the data value.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_setOperandValue(
model: *mut ANeuralNetworksModel,
index: i32,
buffer: *const ::std::os::raw::c_void,
length: usize,
) -> ::std::os::raw::c_int;
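    // Operand sketch (comment only). Adds a scalar INT32 operand and gives it a
    // small constant value, which is copied into the model immediately. The
    // ANeuralNetworksOperandType field names and the value 1 for
    // ANEURALNETWORKS_INT32 follow what bindgen typically emits from
    // NeuralNetworks.h; treat both, and the in-scope `model`, as assumptions.
    //
    // unsafe {
    //     let ty = ANeuralNetworksOperandType {
    //         type_: 1, // assumed ANEURALNETWORKS_INT32
    //         dimensionCount: 0,
    //         dimensions: ::std::ptr::null(),
    //         scale: 0.0,
    //         zeroPoint: 0,
    //     };
    //     assert_eq!(ANeuralNetworksModel_addOperand(model, &ty), 0);
    //     let value: i32 = 0; // first operand added, so its index is 0
    //     assert_eq!(ANeuralNetworksModel_setOperandValue(
    //         model, 0,
    //         &value as *const i32 as *const ::std::os::raw::c_void,
    //         ::std::mem::size_of::<i32>()), 0);
    // }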
#[doc = " Sets an operand's per channel quantization parameters.\n\n Sets parameters required by a tensor of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.\n This function must be called for every tensor of type\n {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before\n calling {@link ANeuralNetworksModel_finish}.\n\n Available since API level 29.\n\n @param model The model to be modified.\n @param index The index of the model operand we're setting.\n @param channelQuant The per channel quantization parameters for the operand.\n No memory in this struct needs to outlive the call to\n this function.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
model: *mut ANeuralNetworksModel,
index: i32,
channelQuant: *const ANeuralNetworksSymmPerChannelQuantParams,
) -> ::std::os::raw::c_int;
#[doc = " Sets an operand to a value stored in a memory object.\n\n The content of the memory is not copied. A reference to that memory is stored\n inside the model. The application must not change the content of the memory\n region until all executions using this model have completed. As the data may\n be copied during processing, modifying the data after this call yields\n undefined results.\n\n <p>The provided memory must outlive this model.</p>\n\n To indicate that an optional operand should be considered missing,\n use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.\n\n It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer\n of a format other than AHARDWAREBUFFER_FORMAT_BLOB.\n\n It is disallowed to set an operand value with memory created from\n {@link ANeuralNetworksMemory_createFromDesc}.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on\n AHardwareBuffer usage.\n\n Available since API level 27.\n\n @param model The model to be modified.\n @param index The index of the model operand we're setting.\n @param buffer A pointer to the data to use.\n @param memory The memory containing the data.\n @param offset This specifies the location of the data within the memory.\n The offset is in bytes from the start of memory.\n @param length The size in bytes of the data value.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_setOperandValueFromMemory(
model: *mut ANeuralNetworksModel,
index: i32,
memory: *const ANeuralNetworksMemory,
offset: usize,
length: usize,
) -> ::std::os::raw::c_int;
#[doc = " Sets an operand to a value that is a reference to another NNAPI model.\n\n The referenced model must already have been finished by a call to\n {@link ANeuralNetworksModel_finish}.\n\n The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of\n referenced models is overridden by that setting of the main model of a\n compilation.\n\n The referenced model must outlive the model referring to it.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has\n been called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param model The model to be modified.\n @param index The index of the model operand we're setting.\n @param value The model to be referenced.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_setOperandValueFromModel(
model: *mut ANeuralNetworksModel,
index: i32,
value: *const ANeuralNetworksModel,
) -> ::std::os::raw::c_int;
#[doc = " Add an operation to a model.\n\n @param model The model to be modified.\n @param type The {@link ANeuralNetworksOperationType} of the operation.\n @param inputCount The number of entries in the inputs array.\n @param inputs An array of indexes identifying each operand.\n @param outputCount The number of entries in the outputs array.\n @param outputs An array of indexes identifying each operand.\n\n The operands specified by inputs and outputs must have been\n previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksModel_addOperation(
model: *mut ANeuralNetworksModel,
type_: ANeuralNetworksOperationType,
inputCount: u32,
inputs: *const u32,
outputCount: u32,
outputs: *const u32,
) -> ::std::os::raw::c_int;
#[doc = " Specifies which operands will be the model's inputs and\n outputs. Every model must have at least one input and one output.\n\n An operand cannot be used for both input and output. Doing so will\n return an error.\n\n @param model The model to be modified.\n @param inputCount The number of entries in the inputs array.\n @param inputs An array of indexes identifying the input operands.\n @param outputCount The number of entries in the outputs array.\n @param outputs An array of indexes identifying the output operands.\n\n The operands specified by inputs and outputs must have been\n previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage.\n\n Available since API level 27.\n"]
pub fn ANeuralNetworksModel_identifyInputsAndOutputs(
model: *mut ANeuralNetworksModel,
inputCount: u32,
inputs: *const u32,
outputCount: u32,
outputs: *const u32,
) -> ::std::os::raw::c_int;
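    // Graph-assembly sketch (comment only). Wires two tensor inputs and a
    // fused-activation scalar into an ADD operation, marks the model I/O, and
    // finishes the model. Operand indexes 0-3 refer to operands assumed to have
    // been added beforehand, and 0 is assumed to be ANEURALNETWORKS_ADD per
    // NeuralNetworks.h; both are assumptions of this sketch.
    //
    // unsafe {
    //     // Operands 0,1: TENSOR_FLOAT32 inputs; 2: INT32 fuse code; 3: output.
    //     let op_inputs: [u32; 3] = [0, 1, 2];
    //     let op_outputs: [u32; 1] = [3];
    //     assert_eq!(ANeuralNetworksModel_addOperation(
    //         model, 0 /* assumed ANEURALNETWORKS_ADD */,
    //         op_inputs.len() as u32, op_inputs.as_ptr(),
    //         op_outputs.len() as u32, op_outputs.as_ptr()), 0);
    //     let model_inputs: [u32; 2] = [0, 1];
    //     let model_outputs: [u32; 1] = [3];
    //     assert_eq!(ANeuralNetworksModel_identifyInputsAndOutputs(
    //         model, 2, model_inputs.as_ptr(), 1, model_outputs.as_ptr()), 0);
    //     assert_eq!(ANeuralNetworksModel_finish(model), 0);
    // }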
#[doc = " Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be\n calculated with range and/or precision as low as that of the IEEE 754 16-bit\n floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}\n must be calculated using at least the range and precision of the IEEE 754\n 32-bit floating-point format.\n\n The relaxComputationFloat32toFloat16 setting of the main model of\n a compilation overrides the values of the referenced models.\n\n @param model The model to be modified.\n @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be\n calculated with range and/or precision as low as that of the\n IEEE 754 16-bit floating point format. 'false' indicates\n {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using\n at least the range and precision of the IEEE 754 32-bit floating\n point format.\n\n Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been\n called will return an error.\n\n Available since API level 28.\n\n See {@link ANeuralNetworksModel} for information on multithreaded usage."]
pub fn ANeuralNetworksModel_relaxComputationFloat32toFloat16(
model: *mut ANeuralNetworksModel,
allow: bool,
) -> ::std::os::raw::c_int;
#[doc = " Create a {@link ANeuralNetworksCompilation} to compile the given model.\n\n The model passed to this function is termed the \"main model\" of the\n compilation, to distinguish it from other models referred to by an Operand\n of type {@link ANEURALNETWORKS_MODEL} within this compilation.\n\n <p>This function only creates the object. Compilation is only performed once\n {@link ANeuralNetworksCompilation_finish} is invoked.</p>\n\n <p>{@link ANeuralNetworksCompilation_finish} should be called once\n all desired properties have been set on the compilation.</p>\n\n <p>{@link ANeuralNetworksModel_free} should be called once the compilation\n is no longer needed.</p>\n\n <p>The provided model must outlive the compilation.</p>\n\n The model must already have been finished by a call to\n {@link ANeuralNetworksModel_finish}.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param model The {@link ANeuralNetworksModel} to be compiled.\n @param compilation The newly created object or NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA\n if the model is invalid."]
pub fn ANeuralNetworksCompilation_create(
model: *mut ANeuralNetworksModel,
compilation: *mut *mut ANeuralNetworksCompilation,
) -> ::std::os::raw::c_int;
#[doc = " Destroy a compilation.\n\n The compilation need not have been finished by a call to\n {@link ANeuralNetworksCompilation_finish}.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param compilation The compilation to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksCompilation_free(compilation: *mut ANeuralNetworksCompilation);
#[doc = " Sets the execution preference.\n\n <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime\n uses PREFER_SINGLE_FAST_ANSWER</p>\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param compilation The compilation to be modified.\n @param preference Either {@link PREFER_LOW_POWER},\n {@link PREFER_SINGLE_FAST_ANSWER}, or\n {@link PREFER_SUSTAINED_SPEED}.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksCompilation_setPreference(
compilation: *mut ANeuralNetworksCompilation,
preference: i32,
) -> ::std::os::raw::c_int;
#[doc = " Indicate that we have finished modifying a compilation. Required before\n calling {@link ANeuralNetworksBurst_create} or\n {@link ANeuralNetworksExecution_create}.\n\n An application must ensure that no other thread uses the compilation at the\n same time.\n\n This function must only be called once for a given compilation.\n\n If {@link ANeuralNetworksCompilation_setTimeout} was called on this\n compilation, and the compilation is not able to be finished before the\n timeout duration is exceeded, then compilation may be aborted, in which case\n {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param compilation The compilation to be finished.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksCompilation_finish(
compilation: *mut ANeuralNetworksCompilation,
) -> ::std::os::raw::c_int;
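    // Compilation sketch (comment only). The model must already be finished.
    // The literal 1 is assumed to be ANEURALNETWORKS_PREFER_SINGLE_FAST_ANSWER,
    // matching NeuralNetworks.h; treat it, and the in-scope `model`, as
    // assumptions of this sketch.
    //
    // unsafe {
    //     let mut compilation: *mut ANeuralNetworksCompilation = ::std::ptr::null_mut();
    //     assert_eq!(ANeuralNetworksCompilation_create(model, &mut compilation), 0);
    //     assert_eq!(ANeuralNetworksCompilation_setPreference(compilation, 1), 0);
    //     assert_eq!(ANeuralNetworksCompilation_finish(compilation), 0);
    //     // ... create and run executions against `compilation` ...
    //     ANeuralNetworksCompilation_free(compilation);
    // }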
#[doc = " Set the execution priority.\n\n Execution priorities are relative to other executions created by the same\n application (specifically same uid) for the same device. Specifically,\n priorities of executions from one application will not affect executions from\n another application. Similarly, priorities of executions on one device will\n not affect executions on another device.\n\n Higher priority executions may use more compute resources than lower priority\n executions, and may preempt or starve lower priority executions.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n Available since API level 30.\n\n @param compilation The compilation to be modified.\n @param priority The relative priority of the execution compared to other\n executions created by the application. Must be one of\n ANEURALNETWORKS_PRIORITY_*.\n\n @return ANEURALNETWORKS_NO_ERROR if successful."]
pub fn ANeuralNetworksCompilation_setPriority(
compilation: *mut ANeuralNetworksCompilation,
priority: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
#[doc = " Set the maximum expected duration for compiling the model.\n\n If the device is not able to complete the compilation within the specified\n duration, the compilation may be aborted. The timeout duration begins at the\n call to {@link ANeuralNetworksCompilation_finish}.\n\n This timeout duration acts as a hint to drivers, and can be used to both free\n up compute resources within the driver and return control back to the\n application quicker than is possible without the hint. It enables drivers\n that are able to estimate how long a compilation will take to abort the\n compilation before it has even started if the driver believes the compilation\n cannot be completed within the timeout duration. Similarly, it enables\n drivers to abort an ongoing compilation if it is taking too long. However,\n this call does not guarantee that the compilation will complete or abort\n within the timeout duration.\n\n By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called),\n the timeout duration for compiling the model is considered infinite.\n\n The {@link ANeuralNetworksCompilation} must have been created with\n {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,\n otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the\n device has a feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the\n timeout duration hint will be ignored.\n\n See {@link ANeuralNetworksCompilation} for information on multithreaded usage.\n\n @param compilation The compilation to be modified.\n @param duration The maximum amount of time in nanoseconds that is expected to\n be spent finishing a compilation. If this duration is exceeded, the\n compilation may be aborted. If set to 0, the timeout duration is\n considered infinite.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 30."]
pub fn ANeuralNetworksCompilation_setTimeout(
compilation: *mut ANeuralNetworksCompilation,
duration: u64,
) -> ::std::os::raw::c_int;
#[doc = " Create a {@link ANeuralNetworksExecution} to apply the given compilation.\n This only creates the object. Computation is only performed once\n {@link ANeuralNetworksExecution_burstCompute},\n {@link ANeuralNetworksExecution_compute},\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.\n\n <p>The provided compilation must outlive the execution.</p>\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.\n @param execution The newly created object or NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA\n if the compilation is invalid."]
pub fn ANeuralNetworksExecution_create(
compilation: *mut ANeuralNetworksCompilation,
execution: *mut *mut ANeuralNetworksExecution,
) -> ::std::os::raw::c_int;
#[doc = " Destroy an execution.\n\n <p>The execution need not have been scheduled by a call to\n {@link ANeuralNetworksExecution_burstCompute},\n {@link ANeuralNetworksExecution_compute},\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled,\n then the application must not call {@link ANeuralNetworksExecution_free}\n until the execution has completed (i.e.,\n {@link ANeuralNetworksExecution_burstCompute},\n {@link ANeuralNetworksExecution_compute}, or\n {@link ANeuralNetworksEvent_wait} has returned).\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param execution The execution to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksExecution_free(execution: *mut ANeuralNetworksExecution);
#[doc = " Associate a user buffer with an input of the model of the\n {@link ANeuralNetworksExecution}. Evaluation of the execution must not have\n been scheduled. Once evaluation of the execution has been scheduled, the\n application must not change the content of the buffer until the execution has\n completed. Evaluation of the execution will not change the content of the\n buffer.\n\n <p>The provided buffer must outlive the execution.</p>\n\n If the input is optional, you can indicate that it is omitted by\n passing nullptr for buffer and 0 for length.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param execution The execution to be modified.\n @param index The index of the input argument we are setting. It is\n an index into the lists passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with\n {@link ANeuralNetworksModel_addOperand}.\n @param type The {@link ANeuralNetworksOperandType} of the\n operand. Unless the input is omitted, this should be\n used to specify the dimensions that were left\n unspecified when the operand was added to the\n model. All other properties of the type must be the\n same as specified in the model. If the type is the same\n as specified when the model was built, NULL can be\n passed. Neither the {@link ANeuralNetworksOperandType}\n nor the dimensions it points to need to outlive the call\n to {@link ANeuralNetworksExecution_setInput}.\n @param buffer The buffer containing the data.\n @param length The length in bytes of the buffer.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the\n name is not recognized or the buffer is too small for the input."]
pub fn ANeuralNetworksExecution_setInput(
execution: *mut ANeuralNetworksExecution,
index: i32,
type_: *const ANeuralNetworksOperandType,
buffer: *const ::std::os::raw::c_void,
length: usize,
) -> ::std::os::raw::c_int;
#[doc = " Associate a region of a memory object with an input of the model of the\n {@link ANeuralNetworksExecution}. Evaluation of the execution must not have\n been scheduled. Once evaluation of the execution has been scheduled, the\n application must not change the content of the region until the execution has\n completed. Evaluation of the execution will not change the content of the\n region.\n\n <p>The provided memory must outlive the execution.</p>\n\n If the input is optional, you can indicate that it is omitted by\n using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for\n buffer and 0 for length.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on\n AHardwareBuffer usage.\n See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects\n created from memory descriptors.\n\n Available since API level 27.\n\n @param execution The execution to be modified.\n @param index The index of the input argument we are setting. It is\n an index into the lists passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param type The {@link ANeuralNetworksOperandType} of the\n operand. This should be used to specify the dimensions\n that were left unspecified when the operand was added\n to the model. All other properties of the type must be\n the same as specified in the model. If the type is the\n same as specified when the model was built, NULL can be\n passed. Neither the {@link ANeuralNetworksOperandType}\n nor the dimensions it points to need to outlive the call\n to {@link ANeuralNetworksExecution_setInputFromMemory}.\n @param memory The memory containing the data.\n @param offset This specifies the location of the data within the memory.\n The offset is in bytes from the start of memory.\n @param length The size in bytes of the data value.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the\n name is not recognized or the buffer is too small for the input."]
pub fn ANeuralNetworksExecution_setInputFromMemory(
execution: *mut ANeuralNetworksExecution,
index: i32,
type_: *const ANeuralNetworksOperandType,
memory: *const ANeuralNetworksMemory,
offset: usize,
length: usize,
) -> ::std::os::raw::c_int;
#[doc = " Associate a user buffer with an output of the model of the\n {@link ANeuralNetworksExecution}. Evaluation of the execution must not have\n been scheduled. Once evaluation of the execution has been scheduled, the\n application must not change the content of the buffer until the execution has\n completed.\n\n If the output is optional, you can indicate that it is omitted by\n passing nullptr for buffer and 0 for length.\n\n <p>The provided buffer must outlive the execution.</p>\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param execution The execution to be modified.\n @param index The index of the output argument we are setting. It is\n an index into the lists passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param type The {@link ANeuralNetworksOperandType} of the\n operand. Unless the output is omitted, this should be\n used to specify the dimensions that were left\n unspecified when the operand was added to the\n model. All other properties of the type must be the\n same as specified in the model. If the type is the same\n as specified when the model was built, NULL can be\n passed. Neither the {@link ANeuralNetworksOperandType}\n nor the dimensions it points to need to outlive the call\n to {@link ANeuralNetworksExecution_setOutput}.\n Since API level 29, the output operand can have unspecified\n dimensions or rank to be deduced dynamically during the execution.\n However, the user must provide a large enough buffer. The user\n can retrieve the output dimensional information after the execution\n by {@link ANeuralNetworksExecution_getOutputOperandRank} and\n {@link ANeuralNetworksExecution_getOutputOperandDimensions}.\n @param buffer The buffer where the data is to be written.\n @param length The length in bytes of the buffer.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the\n name is not recognized or the buffer is too small for the output."]
pub fn ANeuralNetworksExecution_setOutput(
execution: *mut ANeuralNetworksExecution,
index: i32,
type_: *const ANeuralNetworksOperandType,
buffer: *mut ::std::os::raw::c_void,
length: usize,
) -> ::std::os::raw::c_int;
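    // I/O binding sketch (comment only). Binds plain user buffers to input 0 and
    // output 0 of an in-scope `execution` (an assumption of this sketch). Passing
    // NULL for the type reuses the operand types recorded in the model, which
    // requires all dimensions to be fully specified already; 0 is
    // ANEURALNETWORKS_NO_ERROR.
    //
    // unsafe {
    //     let input: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
    //     let mut output: [f32; 4] = [0.0; 4];
    //     assert_eq!(ANeuralNetworksExecution_setInput(
    //         execution, 0, ::std::ptr::null(),
    //         input.as_ptr() as *const ::std::os::raw::c_void,
    //         ::std::mem::size_of_val(&input)), 0);
    //     assert_eq!(ANeuralNetworksExecution_setOutput(
    //         execution, 0, ::std::ptr::null(),
    //         output.as_mut_ptr() as *mut ::std::os::raw::c_void,
    //         ::std::mem::size_of_val(&output)), 0);
    // }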
#[doc = " Associate a region of a memory object with an output of the model of the\n {@link ANeuralNetworksExecution}. Evaluation of the execution must not have\n been scheduled. Once evaluation of the execution has been scheduled, the\n application must not change the content of the region until the execution has\n completed.\n\n If the output is optional, you can indicate that it is omitted by\n using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for\n buffer and 0 for length.\n\n <p>The provided memory must outlive the execution.</p>\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on\n AHardwareBuffer usage.\n See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects\n created from memory descriptors.\n\n Available since API level 27.\n\n @param execution The execution to be modified.\n @param index The index of the output argument we are setting. It is\n an index into the lists passed to\n {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not\n the index associated with {@link ANeuralNetworksModel_addOperand}.\n @param type The {@link ANeuralNetworksOperandType} of the operand. This should be\n used to specify the dimensions that were left\n unspecified when the operand was added to the\n model. All other properties of the type must be the\n same as specified in the model. If the type is the same\n as specified when the model was built, NULL can be\n passed. Neither the {@link ANeuralNetworksOperandType}\n nor the dimensions it points to need to outlive the call\n to {@link ANeuralNetworksExecution_setOutputFromMemory}.\n Since API level 29, the output operand can have unspecified\n dimensions or rank to be deduced dynamically during the execution.\n However, the user must provide a large enough memory. The user\n can retrieve the output dimensional information after the execution\n by {@link ANeuralNetworksExecution_getOutputOperandRank} and\n {@link ANeuralNetworksExecution_getOutputOperandDimensions}.\n @param memory The memory where the data is to be stored.\n @param offset This specifies the location of the data within the memory.\n The offset is in bytes from the start of memory.\n @param length The length in bytes of the data value.\n\n @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the\n name is not recognized or the buffer is too small for the output."]
pub fn ANeuralNetworksExecution_setOutputFromMemory(
execution: *mut ANeuralNetworksExecution,
index: i32,
type_: *const ANeuralNetworksOperandType,
memory: *const ANeuralNetworksMemory,
offset: usize,
length: usize,
) -> ::std::os::raw::c_int;
#[doc = " Schedule asynchronous evaluation of the execution.\n\n <p>Schedules asynchronous evaluation of the execution. Once the execution\n has completed and the outputs are ready to be consumed, the returned event\n will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that\n event.\n </p>\n\n ANeuralNetworksEvent_wait must be called to recuperate the resources used\n by the execution.\n\n If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,\n and the execution is not able to complete before the timeout duration is\n exceeded, then execution may be aborted, in which case\n {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned through\n {@link ANeuralNetworksExecution_startCompute} or\n {@link ANeuralNetworksEvent_wait} on the event object. If the device has a\n feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that\n is lower than 30, then the timeout duration hint will be ignored.\n\n If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and\n the condition model does not output false within the loop timeout duration,\n then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}\n will be returned through {@link ANeuralNetworksEvent_wait} on the event\n object.\n\n If the device can detect before the execution has started that the execution\n will not complete within the timeout duration, the device may choose to skip\n the execution and instead return {@link ANEURALNETWORKS_MISSED_DEADLINE_*}.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n See {@link ANeuralNetworksExecution_compute} for synchronous execution.\n See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.\n See {@link ANeuralNetworksExecution_startComputeWithDependencies} for\n asynchronous execution with dependencies.\n\n Available since API level 27.\n\n @param execution The execution to be scheduled and executed.\n @param event The event that will be signaled on completion. event is set to\n NULL if there's an error.\n\n @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled."]
pub fn ANeuralNetworksExecution_startCompute(
execution: *mut ANeuralNetworksExecution,
event: *mut *mut ANeuralNetworksEvent,
) -> ::std::os::raw::c_int;
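    // Async sketch (comment only). Schedules the execution, then blocks on the
    // returned event; ANeuralNetworksEvent_wait both synchronizes and reclaims
    // the execution's resources. Assumes `execution` is fully set up; 0 is
    // ANEURALNETWORKS_NO_ERROR.
    //
    // unsafe {
    //     let mut event: *mut ANeuralNetworksEvent = ::std::ptr::null_mut();
    //     assert_eq!(ANeuralNetworksExecution_startCompute(execution, &mut event), 0);
    //     assert_eq!(ANeuralNetworksEvent_wait(event), 0); // outputs are ready now
    //     ANeuralNetworksEvent_free(event);
    //     ANeuralNetworksExecution_free(execution);
    // }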
#[doc = " Set the maximum expected duration of the specified execution.\n\n If the device is not able to complete the execution within the specified\n duration, the execution may be aborted. The timeout duration begins at a\n call to one of:\n - {@link ANeuralNetworksExecution_burstCompute}\n - {@link ANeuralNetworksExecution_compute}\n - {@link ANeuralNetworksExecution_startCompute}\n - {@link ANeuralNetworksExecution_startComputeWithDependencies}\n\n This timeout duration acts as a hint to drivers, and can be used to both free\n up compute resources within the driver and return control back to the\n application quicker than is possible without the hint. It enables drivers\n that are able to estimate how long an execution will take to abort the\n execution before it has even started if the driver believes the execution\n cannot be completed within the timeout duration. Similarly, it enables\n drivers to abort an ongoing execution if it is taking too long. However, this\n call does not guarantee that the execution will complete or abort within the\n timeout duration.\n\n By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),\n the timeout duration for execution is considered infinite.\n\n The {@link ANeuralNetworksExecution} must have been created from an\n {@link ANeuralNetworksCompilation} which in turn was created from\n {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,\n otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the\n device has a feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the\n timeout duration hint will be ignored.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n @param execution The execution to be modified.\n @param duration The maximum amount of time in nanoseconds that is expected to\n be spent executing a model. If this duration is exceeded, the execution\n may be aborted. If set to 0, the timeout duration is considered infinite.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 30."]
pub fn ANeuralNetworksExecution_setTimeout(
execution: *mut ANeuralNetworksExecution,
duration: u64,
) -> ::std::os::raw::c_int;
#[doc = " Set the maximum duration of WHILE loops in the specified execution.\n\n This is a fuzzy per-loop timeout intended to prevent infinite loops.\n\n If a WHILE loop condition model does not output false within the specified\n duration, the execution will be aborted.\n\n See {@link ANeuralNetworks_getDefaultLoopTimeout} and\n {@link ANeuralNetworks_getMaximumLoopTimeout} for the default\n and maximum timeout values.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n @param execution The execution to be modified.\n @param duration The maximum amount of time in nanoseconds that can be spent\n executing a WHILE loop. If the specified duration value exceeds the value\n produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be\n overridden by that value.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n ANEURALNETWORKS_BAD_STATE if execution has started.\n ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.\n\n Available since API level 30."]
pub fn ANeuralNetworksExecution_setLoopTimeout(
execution: *mut ANeuralNetworksExecution,
duration: u64,
) -> ::std::os::raw::c_int;
#[doc = " Get the default timeout value for WHILE loops.\n\n @return The default timeout value in nanoseconds.\n\n Available since API level 30."]
pub fn ANeuralNetworks_getDefaultLoopTimeout() -> u64;
#[doc = " Get the maximum timeout value for WHILE loops.\n\n @return The maximum timeout value in nanoseconds.\n\n Available since API level 30."]
pub fn ANeuralNetworks_getMaximumLoopTimeout() -> u64;
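    // WHILE-loop timeout sketch (comment only). Clamps a caller-chosen budget to
    // the runtime's maximum before applying it to an in-scope `execution` (an
    // assumption of this sketch); values above the maximum would be clamped by
    // the runtime anyway.
    //
    // unsafe {
    //     let wanted: u64 = 2 * ANeuralNetworks_getDefaultLoopTimeout();
    //     let duration = wanted.min(ANeuralNetworks_getMaximumLoopTimeout());
    //     assert_eq!(ANeuralNetworksExecution_setLoopTimeout(execution, duration), 0);
    // }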
#[doc = " Waits until the execution completes.\n\n More than one thread can wait on an event. When the execution completes,\n all threads will be released.\n\n If {@link ANeuralNetworksExecution_setTimeout} was called on the execution\n corresponding to this event, and the execution is not able to complete\n before the duration is exceeded, the execution may be aborted, in which case\n {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned here.\n\n If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and\n the condition model does not output false within the loop timeout duration,\n the execution will be aborted, and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}\n will be returned here.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param event The event that will be signaled on completion.\n @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.\n ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot\n be properly mapped."]
pub fn ANeuralNetworksEvent_wait(event: *mut ANeuralNetworksEvent) -> ::std::os::raw::c_int;
#[doc = " Destroys the event.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n Available since API level 27.\n\n @param event The event object to be destroyed. Passing NULL is acceptable and\n results in no operation."]
pub fn ANeuralNetworksEvent_free(event: *mut ANeuralNetworksEvent);
#[doc = " Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.\n\n The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd,\n it will instead dup the provided sync_fence_fd and own the duplicate.\n\n @param sync_fence_fd The sync_fence file descriptor.\n @param event The newly created object or NULL if unsuccessful.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 30."]
pub fn ANeuralNetworksEvent_createFromSyncFenceFd(
sync_fence_fd: ::std::os::raw::c_int,
event: *mut *mut ANeuralNetworksEvent,
) -> ::std::os::raw::c_int;
#[doc = " Get sync_fence file descriptor from the event.\n\n If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd\n will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.\n\n See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and\n {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create\n an event backed by a sync fence.\n\n The user takes ownership of the returned fd, and must close the returned file descriptor when\n it is no longer needed.\n\n @param event An event that is backed by a sync fence.\n @param sync_fence_fd The sync_fence file descriptor. The file descriptor will\n be set to -1 if there is an error.\n\n @return ANEURALNETWORKS_NO_ERROR if successful.\n\n Available since API level 30."]
pub fn ANeuralNetworksEvent_getSyncFenceFd(
event: *const ANeuralNetworksEvent,
sync_fence_fd: *mut ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
#[doc = " Schedule asynchronous evaluation of the execution with dependencies.\n\n The execution will wait for all the depending events to be signaled before\n starting the evaluation. Once the execution has completed and the outputs\n are ready to be consumed, the returned event will be signaled. Depending on which\n devices are handling the execution, the event could be backed by a sync fence.\n Use {@link ANeuralNetworksEvent_wait} to wait for that event.\n\n ANeuralNetworksEvent_wait must be called to recurperate the resources used\n by the execution.\n\n If parts of the execution are scheduled on devices that do not support fenced execution,\n the function call may wait for such parts to finish before returning.\n\n The function will return an error if any of the events in dependencies is already in a bad\n state. After the execution is scheduled, if any of the events in dependencies does not complete\n normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned\n event will return an error.\n\n The function will return an error if any of the execution outputs has a tensor operand type\n that is not fully specified.\n\n The function can be passed a timeout duration in nanoseconds. This timeout\n duration acts as a hint to drivers in the same way that the timeout durations\n in {@link ANeuralNetworksCompilation_setTimeout} and {@link\n ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration\n begins when all waitFor sync fences have been signaled, and can be used\n together with {@link ANeuralNetworksExecution_setTimeout} which specifies the\n maximum timeout duration beginning at the call to\n {@link ANeuralNetworksExecution_startComputeWithDependencies}.\n If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created\n from an {@link ANeuralNetworksCompilation} which in turn was created from\n {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,\n otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either\n the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the\n timeout duration passed to this call is exceeded, the execution may be\n aborted, in which case {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be\n returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}\n or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a\n feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that\n is lower than 30, then the timeout duration hints will be ignored.\n\n If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and\n the condition model does not output false within the loop timeout duration,\n then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}\n will be returned through {@link ANeuralNetworksEvent_wait} on the event\n object.\n\n See {@link ANeuralNetworksExecution} for information on multithreaded usage.\n\n See {@link ANeuralNetworksExecution_compute} for synchronous execution.\n See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.\n See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.\n\n @param execution The execution to be scheduled and executed.\n @param dependencies A set of depending events. 
The actual evaluation will not start\n until all the events are signaled.\n @param num_dependencies The number of events in the dependencies set.\n @param duration The maximum amount of time in nanoseconds that is expected to\n be spent executing the model after all dependencies are\n signaled. If set to 0, the timeout duration is considered\n infinite.\n @param event The event that will be signaled on completion. event is set to\n NULL if there's an error.\n\n @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.\n\n Available since API level 30."]
pub fn ANeuralNetworksExecution_startComputeWithDependencies(
execution: *mut ANeuralNetworksExecution,
dependencies: *const *const ANeuralNetworksEvent,
num_dependencies: u32,
duration: u64,
event: *mut *mut ANeuralNetworksEvent,
) -> ::std::os::raw::c_int;
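    // Fenced-execution sketch (comment only). Starts an execution that waits on
    // one previously obtained event; `dep_event` is a hypothetical
    // *const ANeuralNetworksEvent, for example one created with
    // ANeuralNetworksEvent_createFromSyncFenceFd. A duration of 0 means no
    // timeout hint; 0 is ANEURALNETWORKS_NO_ERROR.
    //
    // unsafe {
    //     let deps: [*const ANeuralNetworksEvent; 1] = [dep_event];
    //     let mut done: *mut ANeuralNetworksEvent = ::std::ptr::null_mut();
    //     assert_eq!(ANeuralNetworksExecution_startComputeWithDependencies(
    //         execution, deps.as_ptr(), deps.len() as u32, 0, &mut done), 0);
    //     assert_eq!(ANeuralNetworksEvent_wait(done), 0);
    //     ANeuralNetworksEvent_free(done);
    // }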
    #[doc = " Get the NNAPI runtime feature level.\n\n Unlike the device feature level reported by\n {@link ANeuralNetworksDevice_getFeatureLevel}, this describes the feature\n level of the NNAPI runtime as a whole.\n\n @return The runtime feature level.\n\n Available since API level 31."]
    pub fn ANeuralNetworks_getRuntimeFeatureLevel() -> i64;
}