pub const _STDINT_H: u32 = 1;
pub const _FEATURES_H: u32 = 1;
pub const _ISOC95_SOURCE: u32 = 1;
pub const _ISOC99_SOURCE: u32 = 1;
pub const _ISOC11_SOURCE: u32 = 1;
pub const _POSIX_SOURCE: u32 = 1;
pub const _POSIX_C_SOURCE: u32 = 200809;
pub const _XOPEN_SOURCE: u32 = 700;
pub const _XOPEN_SOURCE_EXTENDED: u32 = 1;
pub const _LARGEFILE64_SOURCE: u32 = 1;
pub const _DEFAULT_SOURCE: u32 = 1;
pub const _ATFILE_SOURCE: u32 = 1;
pub const __USE_ISOC11: u32 = 1;
pub const __USE_ISOC99: u32 = 1;
pub const __USE_ISOC95: u32 = 1;
pub const __USE_ISOCXX11: u32 = 1;
pub const __USE_POSIX: u32 = 1;
pub const __USE_POSIX2: u32 = 1;
pub const __USE_POSIX199309: u32 = 1;
pub const __USE_POSIX199506: u32 = 1;
pub const __USE_XOPEN2K: u32 = 1;
pub const __USE_XOPEN2K8: u32 = 1;
pub const __USE_XOPEN: u32 = 1;
pub const __USE_XOPEN_EXTENDED: u32 = 1;
pub const __USE_UNIX98: u32 = 1;
pub const _LARGEFILE_SOURCE: u32 = 1;
pub const __USE_XOPEN2K8XSI: u32 = 1;
pub const __USE_XOPEN2KXSI: u32 = 1;
pub const __USE_LARGEFILE: u32 = 1;
pub const __USE_LARGEFILE64: u32 = 1;
pub const __USE_MISC: u32 = 1;
pub const __USE_ATFILE: u32 = 1;
pub const __USE_GNU: u32 = 1;
pub const __USE_FORTIFY_LEVEL: u32 = 0;
pub const __GLIBC_USE_DEPRECATED_GETS: u32 = 0;
pub const _STDC_PREDEF_H: u32 = 1;
pub const __STDC_IEC_559__: u32 = 1;
pub const __STDC_IEC_559_COMPLEX__: u32 = 1;
pub const __STDC_ISO_10646__: u32 = 201706;
pub const __STDC_NO_THREADS__: u32 = 1;
pub const __GNU_LIBRARY__: u32 = 6;
pub const __GLIBC__: u32 = 2;
pub const __GLIBC_MINOR__: u32 = 27;
pub const _SYS_CDEFS_H: u32 = 1;
pub const __glibc_c99_flexarr_available: u32 = 1;
pub const __WORDSIZE: u32 = 64;
pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1;
pub const __SYSCALL_WORDSIZE: u32 = 64;
pub const __HAVE_GENERIC_SELECTION: u32 = 0;
pub const __GLIBC_USE_LIB_EXT2: u32 = 1;
pub const __GLIBC_USE_IEC_60559_BFP_EXT: u32 = 1;
pub const __GLIBC_USE_IEC_60559_FUNCS_EXT: u32 = 1;
pub const __GLIBC_USE_IEC_60559_TYPES_EXT: u32 = 1;
pub const _BITS_TYPES_H: u32 = 1;
pub const _BITS_TYPESIZES_H: u32 = 1;
pub const __OFF_T_MATCHES_OFF64_T: u32 = 1;
pub const __INO_T_MATCHES_INO64_T: u32 = 1;
pub const __RLIM_T_MATCHES_RLIM64_T: u32 = 1;
pub const __FD_SETSIZE: u32 = 1024;
pub const _BITS_WCHAR_H: u32 = 1;
pub const _BITS_STDINT_INTN_H: u32 = 1;
pub const _BITS_STDINT_UINTN_H: u32 = 1;
// Integer limit and width macros from <stdint.h>, as evaluated by bindgen.
// bindgen assigns each constant the narrowest Rust type that represents the
// evaluated literal, which is why signed MIN values are i32/i64 while MAX
// values are u32/u64 — the types here do not necessarily match the C types.
pub const INT8_MIN: i32 = -128;
pub const INT16_MIN: i32 = -32768;
pub const INT32_MIN: i32 = -2147483648;
pub const INT8_MAX: u32 = 127;
pub const INT16_MAX: u32 = 32767;
pub const INT32_MAX: u32 = 2147483647;
pub const UINT8_MAX: u32 = 255;
pub const UINT16_MAX: u32 = 65535;
pub const UINT32_MAX: u32 = 4294967295;
pub const INT_LEAST8_MIN: i32 = -128;
pub const INT_LEAST16_MIN: i32 = -32768;
pub const INT_LEAST32_MIN: i32 = -2147483648;
pub const INT_LEAST8_MAX: u32 = 127;
pub const INT_LEAST16_MAX: u32 = 32767;
pub const INT_LEAST32_MAX: u32 = 2147483647;
pub const UINT_LEAST8_MAX: u32 = 255;
pub const UINT_LEAST16_MAX: u32 = 65535;
pub const UINT_LEAST32_MAX: u32 = 4294967295;
// int_fast16/32_t are 64-bit on this target (glibc x86-64), hence i64/u64.
pub const INT_FAST8_MIN: i32 = -128;
pub const INT_FAST16_MIN: i64 = -9223372036854775808;
pub const INT_FAST32_MIN: i64 = -9223372036854775808;
pub const INT_FAST8_MAX: u32 = 127;
pub const INT_FAST16_MAX: u64 = 9223372036854775807;
pub const INT_FAST32_MAX: u64 = 9223372036854775807;
pub const UINT_FAST8_MAX: u32 = 255;
// NOTE(review): the C values of UINT_FAST16_MAX / UINT_FAST32_MAX /
// UINTPTR_MAX / SIZE_MAX are 2^64-1 on this target; bindgen has folded the
// all-ones bit pattern into `i32 = -1`. Casting with `as u64` / `as usize`
// recovers the intended value, but using these as i32 directly is wrong.
// Consider regenerating with a newer bindgen — TODO confirm downstream usage
// before changing the declared types.
pub const UINT_FAST16_MAX: i32 = -1;
pub const UINT_FAST32_MAX: i32 = -1;
pub const INTPTR_MIN: i64 = -9223372036854775808;
pub const INTPTR_MAX: u64 = 9223372036854775807;
pub const UINTPTR_MAX: i32 = -1;
pub const PTRDIFF_MIN: i64 = -9223372036854775808;
pub const PTRDIFF_MAX: u64 = 9223372036854775807;
pub const SIG_ATOMIC_MIN: i32 = -2147483648;
pub const SIG_ATOMIC_MAX: u32 = 2147483647;
pub const SIZE_MAX: i32 = -1;
pub const WINT_MIN: u32 = 0;
pub const WINT_MAX: u32 = 4294967295;
// C2x-style *_WIDTH macros: bit widths of the corresponding integer types.
pub const INT8_WIDTH: u32 = 8;
pub const UINT8_WIDTH: u32 = 8;
pub const INT16_WIDTH: u32 = 16;
pub const UINT16_WIDTH: u32 = 16;
pub const INT32_WIDTH: u32 = 32;
pub const UINT32_WIDTH: u32 = 32;
pub const INT64_WIDTH: u32 = 64;
pub const UINT64_WIDTH: u32 = 64;
pub const INT_LEAST8_WIDTH: u32 = 8;
pub const UINT_LEAST8_WIDTH: u32 = 8;
pub const INT_LEAST16_WIDTH: u32 = 16;
pub const UINT_LEAST16_WIDTH: u32 = 16;
pub const INT_LEAST32_WIDTH: u32 = 32;
pub const UINT_LEAST32_WIDTH: u32 = 32;
pub const INT_LEAST64_WIDTH: u32 = 64;
pub const UINT_LEAST64_WIDTH: u32 = 64;
pub const INT_FAST8_WIDTH: u32 = 8;
pub const UINT_FAST8_WIDTH: u32 = 8;
pub const INT_FAST16_WIDTH: u32 = 64;
pub const UINT_FAST16_WIDTH: u32 = 64;
pub const INT_FAST32_WIDTH: u32 = 64;
pub const UINT_FAST32_WIDTH: u32 = 64;
pub const INT_FAST64_WIDTH: u32 = 64;
pub const UINT_FAST64_WIDTH: u32 = 64;
pub const INTPTR_WIDTH: u32 = 64;
pub const UINTPTR_WIDTH: u32 = 64;
pub const INTMAX_WIDTH: u32 = 64;
pub const UINTMAX_WIDTH: u32 = 64;
pub const PTRDIFF_WIDTH: u32 = 64;
pub const SIG_ATOMIC_WIDTH: u32 = 32;
pub const SIZE_WIDTH: u32 = 64;
pub const WCHAR_WIDTH: u32 = 32;
pub const WINT_WIDTH: u32 = 32;
// DNNL (oneDNN) public API constants, mirrored from dnnl_types.h / dnnl_config.h.
pub const DNNL_MAX_NDIMS: u32 = 12;
pub const DNNL_RNN_MAX_N_PARTS: u32 = 4;
// Execution-argument indices (DNNL_ARG_*). Several names deliberately share
// one value: they are context-dependent aliases for the same argument slot
// (e.g. DNNL_ARG_SRC == DNNL_ARG_SRC_LAYER == DNNL_ARG_FROM == 1).
pub const DNNL_ARG_SRC_0: u32 = 1;
pub const DNNL_ARG_SRC: u32 = 1;
pub const DNNL_ARG_SRC_LAYER: u32 = 1;
pub const DNNL_ARG_FROM: u32 = 1;
pub const DNNL_ARG_SRC_1: u32 = 2;
pub const DNNL_ARG_SRC_ITER: u32 = 2;
pub const DNNL_ARG_SRC_2: u32 = 3;
pub const DNNL_ARG_SRC_ITER_C: u32 = 3;
pub const DNNL_ARG_DST_0: u32 = 17;
pub const DNNL_ARG_DST: u32 = 17;
pub const DNNL_ARG_TO: u32 = 17;
pub const DNNL_ARG_DST_LAYER: u32 = 17;
pub const DNNL_ARG_DST_1: u32 = 18;
pub const DNNL_ARG_DST_ITER: u32 = 18;
pub const DNNL_ARG_DST_2: u32 = 19;
pub const DNNL_ARG_DST_ITER_C: u32 = 19;
pub const DNNL_ARG_WEIGHTS_0: u32 = 33;
pub const DNNL_ARG_WEIGHTS: u32 = 33;
pub const DNNL_ARG_SCALE_SHIFT: u32 = 33;
pub const DNNL_ARG_WEIGHTS_LAYER: u32 = 33;
pub const DNNL_ARG_WEIGHTS_1: u32 = 34;
pub const DNNL_ARG_WEIGHTS_ITER: u32 = 34;
pub const DNNL_ARG_WEIGHTS_2: u32 = 35;
pub const DNNL_ARG_WEIGHTS_PEEPHOLE: u32 = 35;
pub const DNNL_ARG_WEIGHTS_3: u32 = 36;
pub const DNNL_ARG_WEIGHTS_PROJECTION: u32 = 36;
pub const DNNL_ARG_BIAS: u32 = 41;
pub const DNNL_ARG_MEAN: u32 = 49;
pub const DNNL_ARG_VARIANCE: u32 = 50;
pub const DNNL_ARG_WORKSPACE: u32 = 64;
pub const DNNL_ARG_SCRATCHPAD: u32 = 80;
// Gradient (backward-pass) argument slots: DIFF_* mirrors the forward slots.
pub const DNNL_ARG_DIFF_SRC_0: u32 = 129;
pub const DNNL_ARG_DIFF_SRC: u32 = 129;
pub const DNNL_ARG_DIFF_SRC_LAYER: u32 = 129;
pub const DNNL_ARG_DIFF_SRC_1: u32 = 130;
pub const DNNL_ARG_DIFF_SRC_ITER: u32 = 130;
pub const DNNL_ARG_DIFF_SRC_2: u32 = 131;
pub const DNNL_ARG_DIFF_SRC_ITER_C: u32 = 131;
pub const DNNL_ARG_DIFF_DST_0: u32 = 145;
pub const DNNL_ARG_DIFF_DST: u32 = 145;
pub const DNNL_ARG_DIFF_DST_LAYER: u32 = 145;
pub const DNNL_ARG_DIFF_DST_1: u32 = 146;
pub const DNNL_ARG_DIFF_DST_ITER: u32 = 146;
pub const DNNL_ARG_DIFF_DST_2: u32 = 147;
pub const DNNL_ARG_DIFF_DST_ITER_C: u32 = 147;
pub const DNNL_ARG_DIFF_WEIGHTS_0: u32 = 161;
pub const DNNL_ARG_DIFF_WEIGHTS: u32 = 161;
pub const DNNL_ARG_DIFF_SCALE_SHIFT: u32 = 161;
pub const DNNL_ARG_DIFF_WEIGHTS_LAYER: u32 = 161;
pub const DNNL_ARG_DIFF_WEIGHTS_1: u32 = 162;
pub const DNNL_ARG_DIFF_WEIGHTS_ITER: u32 = 162;
pub const DNNL_ARG_DIFF_WEIGHTS_2: u32 = 163;
pub const DNNL_ARG_DIFF_WEIGHTS_PEEPHOLE: u32 = 163;
pub const DNNL_ARG_DIFF_WEIGHTS_3: u32 = 164;
pub const DNNL_ARG_DIFF_WEIGHTS_PROJECTION: u32 = 164;
pub const DNNL_ARG_DIFF_BIAS: u32 = 169;
// Attribute-related and multi-argument base indices; MULTIPLE_* are bases
// to which a caller-chosen index is added.
pub const DNNL_ARG_ATTR_OUTPUT_SCALES: u32 = 513;
pub const DNNL_ARG_MULTIPLE_SRC: u32 = 1024;
pub const DNNL_ARG_MULTIPLE_DST: u32 = 2048;
pub const DNNL_ARG_ATTR_ZERO_POINTS: u32 = 4096;
pub const DNNL_ARG_ATTR_POST_OP_DW: u32 = 8192;
// Threading/GPU runtime identifiers (bit-flag style values).
pub const DNNL_RUNTIME_NONE: u32 = 0;
pub const DNNL_RUNTIME_SEQ: u32 = 1;
pub const DNNL_RUNTIME_OMP: u32 = 2;
pub const DNNL_RUNTIME_TBB: u32 = 4;
pub const DNNL_RUNTIME_THREADPOOL: u32 = 8;
pub const DNNL_RUNTIME_OCL: u32 = 256;
// JIT profiler integration flags; LINUX_PERF = PERFMAP | JITDUMP (2 | 4).
pub const DNNL_JIT_PROFILE_NONE: u32 = 0;
pub const DNNL_JIT_PROFILE_VTUNE: u32 = 1;
pub const DNNL_JIT_PROFILE_LINUX_PERFMAP: u32 = 2;
pub const DNNL_JIT_PROFILE_LINUX_JITDUMP: u32 = 4;
pub const DNNL_JIT_PROFILE_LINUX_JITDUMP_USE_TSC: u32 = 8;
pub const DNNL_JIT_PROFILE_LINUX_PERF: u32 = 6;
// Build configuration this binding was generated against:
// CPU runtime = OMP (2), no GPU runtime (0).
pub const DNNL_CPU_THREADING_RUNTIME: u32 = 2;
pub const DNNL_CPU_RUNTIME: u32 = 2;
pub const DNNL_GPU_RUNTIME: u32 = 0;
// Library version (1.5.0) and the git commit hash of the build,
// as a NUL-terminated byte string.
pub const DNNL_VERSION_MAJOR: u32 = 1;
pub const DNNL_VERSION_MINOR: u32 = 5;
pub const DNNL_VERSION_PATCH: u32 = 0;
pub const DNNL_VERSION_HASH: &'static [u8; 41usize] = b"f5997b5e6726de82d19ae9b86b08d80aea4af82e\0";
// Rust mirror of C's `max_align_t`: a type whose alignment (16) is at least
// as large as that of every scalar type. Layout is ABI-critical — the field
// order, the explicit bindgen padding field, and the repr attributes must
// match the C definition exactly; do not edit by hand.
#[repr(C)]
#[repr(align(16))]
#[derive(Debug, Copy, Clone)]
pub struct max_align_t {
pub __clang_max_align_nonce1: ::libc::c_longlong,
pub __bindgen_padding_0: u64,
pub __clang_max_align_nonce2: u128,
}
/// Layout test generated by bindgen: verifies at test time that the Rust
/// `max_align_t` has the size (32), alignment (16), and field offsets (0, 16)
/// expected by the C ABI.
///
/// Fix over the generated code: the original computed field offsets via
/// `&(*(::std::ptr::null::<T>())).field`, which dereferences a null pointer —
/// undefined behavior (flagged by Miri; later bindgen versions replaced it).
/// This version probes offsets through a `MaybeUninit` value and
/// `ptr::addr_of!`, which creates raw pointers without ever materializing a
/// reference to uninitialized or null memory.
#[test]
fn bindgen_test_layout_max_align_t() {
    // Uninitialized storage used purely as an address base; never read.
    const UNINIT: ::std::mem::MaybeUninit<max_align_t> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<max_align_t>(),
        32usize,
        concat!("Size of: ", stringify!(max_align_t))
    );
    assert_eq!(
        ::std::mem::align_of::<max_align_t>(),
        16usize,
        concat!("Alignment of ", stringify!(max_align_t))
    );
    assert_eq!(
        // addr_of! computes the field address without dereferencing `ptr`.
        unsafe { ::std::ptr::addr_of!((*ptr).__clang_max_align_nonce1) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(max_align_t),
            "::",
            stringify!(__clang_max_align_nonce1)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).__clang_max_align_nonce2) as usize - ptr as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(max_align_t),
            "::",
            stringify!(__clang_max_align_nonce2)
        )
    );
}
// Rust mirrors of the glibc internal typedefs from <bits/types.h>
// (x86-64 LP64 layout: long/pointers are 64-bit). Fixed-width __intN_t /
// __uintN_t map onto the libc C scalar aliases of matching width.
pub type __u_char = ::libc::c_uchar;
pub type __u_short = ::libc::c_ushort;
pub type __u_int = ::libc::c_uint;
pub type __u_long = ::libc::c_ulong;
pub type __int8_t = ::libc::c_schar;
pub type __uint8_t = ::libc::c_uchar;
pub type __int16_t = ::libc::c_short;
pub type __uint16_t = ::libc::c_ushort;
pub type __int32_t = ::libc::c_int;
pub type __uint32_t = ::libc::c_uint;
pub type __int64_t = ::libc::c_long;
pub type __uint64_t = ::libc::c_ulong;
// "quad" and max-width integer types: 64-bit on this target.
pub type __quad_t = ::libc::c_long;
pub type __u_quad_t = ::libc::c_ulong;
pub type __intmax_t = ::libc::c_long;
pub type __uintmax_t = ::libc::c_ulong;
// POSIX identity/file-system scalar types.
pub type __dev_t = ::libc::c_ulong;
pub type __uid_t = ::libc::c_uint;
pub type __gid_t = ::libc::c_uint;
pub type __ino_t = ::libc::c_ulong;
pub type __ino64_t = ::libc::c_ulong;
pub type __mode_t = ::libc::c_uint;
pub type __nlink_t = ::libc::c_ulong;
pub type __off_t = ::libc::c_long;
pub type __off64_t = ::libc::c_long;
pub type __pid_t = ::libc::c_int;
// Rust mirror of glibc's `__fsid_t` (filesystem ID): two C ints,
// 8 bytes total, 4-byte aligned. Layout must match the C struct exactly.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __fsid_t {
pub __val: [::libc::c_int; 2usize],
}
/// Layout test generated by bindgen: verifies that the Rust `__fsid_t` has
/// the size (8), alignment (4), and `__val` field offset (0) expected by the
/// C ABI.
///
/// Fix over the generated code: the original computed the field offset by
/// dereferencing `::std::ptr::null::<__fsid_t>()`, which is undefined
/// behavior (flagged by Miri; later bindgen versions replaced the pattern).
/// This version probes the offset through a `MaybeUninit` value and
/// `ptr::addr_of!`, never creating a reference to null or uninitialized data.
#[test]
fn bindgen_test_layout___fsid_t() {
    // Uninitialized storage used purely as an address base; never read.
    const UNINIT: ::std::mem::MaybeUninit<__fsid_t> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<__fsid_t>(),
        8usize,
        concat!("Size of: ", stringify!(__fsid_t))
    );
    assert_eq!(
        ::std::mem::align_of::<__fsid_t>(),
        4usize,
        concat!("Alignment of ", stringify!(__fsid_t))
    );
    assert_eq!(
        // addr_of! computes the field address without dereferencing `ptr`.
        unsafe { ::std::ptr::addr_of!((*ptr).__val) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(__fsid_t),
            "::",
            stringify!(__val)
        )
    );
}
// More glibc scalar typedefs: time, resource limits, block/inode counts,
// and syscall word types (all 64-bit `long`-based on this LP64 target).
pub type __clock_t = ::libc::c_long;
pub type __rlim_t = ::libc::c_ulong;
pub type __rlim64_t = ::libc::c_ulong;
pub type __id_t = ::libc::c_uint;
pub type __time_t = ::libc::c_long;
pub type __useconds_t = ::libc::c_uint;
pub type __suseconds_t = ::libc::c_long;
pub type __daddr_t = ::libc::c_int;
pub type __key_t = ::libc::c_int;
// POSIX clock/timer handles; __timer_t is an opaque pointer in glibc.
pub type __clockid_t = ::libc::c_int;
pub type __timer_t = *mut ::libc::c_void;
pub type __blksize_t = ::libc::c_long;
pub type __blkcnt_t = ::libc::c_long;
pub type __blkcnt64_t = ::libc::c_long;
pub type __fsblkcnt_t = ::libc::c_ulong;
pub type __fsblkcnt64_t = ::libc::c_ulong;
pub type __fsfilcnt_t = ::libc::c_ulong;
pub type __fsfilcnt64_t = ::libc::c_ulong;
pub type __fsword_t = ::libc::c_long;
pub type __ssize_t = ::libc::c_long;
pub type __syscall_slong_t = ::libc::c_long;
pub type __syscall_ulong_t = ::libc::c_ulong;
pub type __loff_t = __off64_t;
pub type __caddr_t = *mut ::libc::c_char;
pub type __intptr_t = ::libc::c_long;
pub type __socklen_t = ::libc::c_uint;
pub type __sig_atomic_t = ::libc::c_int;
// <stdint.h> least/fast integer typedefs. Note the fast 16/32-bit variants
// are 64-bit on this target, consistent with the *_FAST*_WIDTH constants above.
pub type int_least8_t = ::libc::c_schar;
pub type int_least16_t = ::libc::c_short;
pub type int_least32_t = ::libc::c_int;
pub type int_least64_t = ::libc::c_long;
pub type uint_least8_t = ::libc::c_uchar;
pub type uint_least16_t = ::libc::c_ushort;
pub type uint_least32_t = ::libc::c_uint;
pub type uint_least64_t = ::libc::c_ulong;
pub type int_fast8_t = ::libc::c_schar;
pub type int_fast16_t = ::libc::c_long;
pub type int_fast32_t = ::libc::c_long;
pub type int_fast64_t = ::libc::c_long;
pub type uint_fast8_t = ::libc::c_uchar;
pub type uint_fast16_t = ::libc::c_ulong;
pub type uint_fast32_t = ::libc::c_ulong;
pub type uint_fast64_t = ::libc::c_ulong;
pub type intmax_t = __intmax_t;
pub type uintmax_t = __uintmax_t;
// C enum `dnnl_status_t` from dnnl_types.h. repr(u32) matches the C ABI;
// non_exhaustive because the C library may add status codes in later versions.
// Discriminant values are part of the ABI — do not renumber.
#[repr(u32)]
#[non_exhaustive]
#[doc = " Status values returned by the library functions."]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_status_t {
#[doc = " The operation was successful"]
dnnl_success = 0,
#[doc = " The operation failed due to an out-of-memory condition"]
dnnl_out_of_memory = 1,
#[doc = " The operation failed because of incorrect function arguments"]
dnnl_invalid_arguments = 2,
#[doc = " The operation failed because requested functionality is not implemented"]
dnnl_unimplemented = 3,
#[doc = " Primitive iterator passed over last primitive descriptor"]
dnnl_iterator_ends = 4,
#[doc = " Primitive or engine failed on execution"]
dnnl_runtime_error = 5,
#[doc = " Queried element is not required for given primitive"]
dnnl_not_required = 6,
}
// C enum `dnnl_data_type_t` from dnnl_types.h: element data types for memory
// descriptors. repr(u32) matches the C ABI; discriminants are ABI — do not
// renumber.
#[repr(u32)]
#[non_exhaustive]
#[doc = " Data type specification"]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_data_type_t {
#[doc = " Undefined data type, used for empty memory descriptors."]
dnnl_data_type_undef = 0,
#[doc = " 16-bit/half-precision floating point."]
dnnl_f16 = 1,
#[doc = " non-standard 16-bit (bfloat16 w/ 7 bit mantissa) floating point."]
dnnl_bf16 = 2,
#[doc = " 32-bit/single-precision floating point."]
dnnl_f32 = 3,
#[doc = " 32-bit signed integer."]
dnnl_s32 = 4,
#[doc = " 8-bit signed integer."]
dnnl_s8 = 5,
#[doc = " 8-bit unsigned integer."]
dnnl_u8 = 6,
}
// C enum `dnnl_format_kind_t` from dnnl_types.h: broad classification of how
// a memory descriptor's layout is expressed. repr(u32) matches the C ABI;
// discriminants are ABI — do not renumber.
#[repr(u32)]
#[non_exhaustive]
#[doc = " Memory format kind"]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_format_kind_t {
#[doc = " Undefined memory format kind, used for empty memory descriptors."]
dnnl_format_kind_undef = 0,
#[doc = " Unspecified format kind."]
#[doc = " The primitive selects a format automatically."]
dnnl_format_kind_any = 1,
#[doc = " A tensor in a generic format described by the stride and blocking"]
#[doc = " values in each dimension. See @ref dnnl_blocking_desc_t for more"]
#[doc = " information."]
dnnl_blocked = 2,
#[doc = " Weights format used in 8bit Winograd convolution"]
dnnl_format_kind_wino = 3,
#[doc = " Packed weights format used in RNN"]
dnnl_format_kind_rnn_packed = 4,
}
// Semantic aliases for `dnnl_format_tag_t` variants. The C enum assigns the
// same value to several names (e.g. dnnl_nchw == dnnl_abcd); a Rust enum
// cannot have duplicate discriminants, so bindgen emits each duplicate as an
// associated constant in its own `impl` block, pointing at the canonical
// letter-named variant. The canonical variants (dnnl_a, dnnl_ab, ...) are
// declared in the `dnnl_format_tag_t` enum definition, outside this section.
// Naming convention (from dnnl_types.h): lowercase letters name logical
// dimensions (n=batch, c=channels, w/h/d=spatial, o/i=output/input channels,
// g=groups, t=time, l=layers); an uppercase letter plus a number denotes a
// blocked dimension (e.g. nChw16c = NCHW with channels blocked by 16).
// Generated code — do not edit by hand.
impl dnnl_format_tag_t {
pub const dnnl_x: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_a;
}
impl dnnl_format_tag_t {
pub const dnnl_nc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ab;
}
impl dnnl_format_tag_t {
pub const dnnl_cn: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ba;
}
impl dnnl_format_tag_t {
pub const dnnl_tn: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ab;
}
impl dnnl_format_tag_t {
pub const dnnl_nt: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ba;
}
impl dnnl_format_tag_t {
pub const dnnl_ncw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abc;
}
impl dnnl_format_tag_t {
pub const dnnl_nwc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acb;
}
impl dnnl_format_tag_t {
pub const dnnl_nchw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
impl dnnl_format_tag_t {
pub const dnnl_nhwc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acdb;
}
impl dnnl_format_tag_t {
pub const dnnl_chwn: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bcda;
}
impl dnnl_format_tag_t {
pub const dnnl_ncdhw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcde;
}
impl dnnl_format_tag_t {
pub const dnnl_ndhwc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acdeb;
}
// Plain (non-grouped) weights layouts.
impl dnnl_format_tag_t {
pub const dnnl_oi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ab;
}
impl dnnl_format_tag_t {
pub const dnnl_io: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ba;
}
impl dnnl_format_tag_t {
pub const dnnl_oiw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abc;
}
impl dnnl_format_tag_t {
pub const dnnl_owi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acb;
}
impl dnnl_format_tag_t {
pub const dnnl_wio: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_cba;
}
impl dnnl_format_tag_t {
pub const dnnl_iwo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bca;
}
impl dnnl_format_tag_t {
pub const dnnl_oihw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
impl dnnl_format_tag_t {
pub const dnnl_hwio: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_cdba;
}
impl dnnl_format_tag_t {
pub const dnnl_ohwi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acdb;
}
impl dnnl_format_tag_t {
pub const dnnl_ihwo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bcda;
}
impl dnnl_format_tag_t {
pub const dnnl_iohw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bacd;
}
impl dnnl_format_tag_t {
pub const dnnl_oidhw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcde;
}
impl dnnl_format_tag_t {
pub const dnnl_iodhw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bacde;
}
impl dnnl_format_tag_t {
pub const dnnl_dhwio: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_cdeba;
}
impl dnnl_format_tag_t {
pub const dnnl_odhwi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acdeb;
}
impl dnnl_format_tag_t {
pub const dnnl_idhwo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bcdea;
}
// Grouped (g-prefixed) weights layouts.
impl dnnl_format_tag_t {
pub const dnnl_goiw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
impl dnnl_format_tag_t {
pub const dnnl_wigo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_dcab;
}
impl dnnl_format_tag_t {
pub const dnnl_goihw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcde;
}
impl dnnl_format_tag_t {
pub const dnnl_hwigo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_decab;
}
impl dnnl_format_tag_t {
pub const dnnl_giohw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acbde;
}
impl dnnl_format_tag_t {
pub const dnnl_goidhw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcdef;
}
impl dnnl_format_tag_t {
pub const dnnl_giodhw: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_acbdef;
}
impl dnnl_format_tag_t {
pub const dnnl_dhwigo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_defcab;
}
// RNN data/weights layouts (t=time, n=batch, l=layers, d=directions, ...).
impl dnnl_format_tag_t {
pub const dnnl_tnc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abc;
}
impl dnnl_format_tag_t {
pub const dnnl_ntc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_bac;
}
impl dnnl_format_tag_t {
pub const dnnl_ldnc: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
impl dnnl_format_tag_t {
pub const dnnl_ldigo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcde;
}
impl dnnl_format_tag_t {
pub const dnnl_ldgoi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abdec;
}
impl dnnl_format_tag_t {
pub const dnnl_ldio: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
impl dnnl_format_tag_t {
pub const dnnl_ldoi: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abdc;
}
impl dnnl_format_tag_t {
pub const dnnl_ldgo: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_abcd;
}
// Blocked activation layouts (uppercase letter + block size).
impl dnnl_format_tag_t {
pub const dnnl_nCdhw32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde32b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCdhw16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde16b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCdhw4c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde4b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCdhw8c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde8b;
}
impl dnnl_format_tag_t {
pub const dnnl_nChw32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd32b;
}
impl dnnl_format_tag_t {
pub const dnnl_nChw16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd16b;
}
impl dnnl_format_tag_t {
pub const dnnl_nChw4c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd4b;
}
impl dnnl_format_tag_t {
pub const dnnl_nChw8c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd8b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCw32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBc32b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCw16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBc16b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCw4c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBc4b;
}
impl dnnl_format_tag_t {
pub const dnnl_nCw8c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBc8b;
}
impl dnnl_format_tag_t {
pub const dnnl_NCw16n16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_NCdhw16n16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_NChw16n16c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_NCw32n32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc32a32b;
}
impl dnnl_format_tag_t {
pub const dnnl_NChw32n32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd32a32b;
}
impl dnnl_format_tag_t {
pub const dnnl_NCdhw32n32c: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde32a32b;
}
// Blocked weights layouts (non-grouped).
impl dnnl_format_tag_t {
pub const dnnl_IOw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAc16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_IOw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAc16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oiw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abc16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc4b16a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc2b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc4b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc4a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oiw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abc4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc8b16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc8b8a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAc8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc8a8b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Owi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acb16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_AcB16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_Owi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acb4a;
}
impl dnnl_format_tag_t {
pub const dnnl_Owi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acb8a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcd16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcd16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_Ohwi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdb16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OhwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_AcdB16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_Ohwi32o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdb32a;
}
impl dnnl_format_tag_t {
pub const dnnl_Ohwi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdb4a;
}
impl dnnl_format_tag_t {
pub const dnnl_Ohwi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdb8a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oihw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcd16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd4b16a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd4b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd4a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oihw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcd4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd8b16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd8b8a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd2b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_IOhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcd8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd8a8b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Odhwi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdeb16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OdhwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_AcdeB16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_Odhwi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdeb4a;
}
impl dnnl_format_tag_t {
pub const dnnl_Odhwi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Acdeb8a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde16a16b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oidhw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcde16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde4b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde4a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Oidhw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcde4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde8b16a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde8b8a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOdhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcde8a16b2a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde4b16a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde2b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde8a8b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_IOdhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcde16b16a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIdhw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcde4a8b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_IOdhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcde16a16b;
}
// Blocked grouped weights layouts (g- / G-prefixed).
impl dnnl_format_tag_t {
pub const dnnl_Goiw16g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcd16a;
}
impl dnnl_format_tag_t {
pub const dnnl_Goiw8g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcd8a;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBd16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBd16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOiw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4c16b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd2c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOiw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcd4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd8c16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd8c8b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBd8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd8b8c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOwi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdc16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdC16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOwi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdc4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOwi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdc8b;
}
impl dnnl_format_tag_t {
pub const dnnl_Goiw32g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcd32a;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw2i4o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd2c4b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw2o4i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd2b4c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4i8o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4c8b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4o8i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4b8c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBde16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBde16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOhwi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdec16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOhwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdeC16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOhwi32o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdec32b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOhwi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdec4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOhwi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdec8b;
}
impl dnnl_format_tag_t {
pub const dnnl_Goihw16g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcde16a;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOihw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde2c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4c16b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOihw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcde4b;
}
impl dnnl_format_tag_t {
pub const dnnl_Goihw8g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcde8a;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde8c16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde8c8b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBde8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde8b8c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_Goihw32g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcde32a;
}
// Double-blocked layouts used by int8/VNNI-style kernels.
impl dnnl_format_tag_t {
pub const dnnl_OIw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABc4a8b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd4a8b8a4b;
}
impl dnnl_format_tag_t {
pub const dnnl_IOw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAc4b8a8b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOhw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcd4b8a8b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_IOdhw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_BAcde4b8a8b4a;
}
impl dnnl_format_tag_t {
pub const dnnl_OIhw2o8i8o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_ABcd2a8b8a2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCd4b8c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4b8c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4o8i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4b8c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBd4c8b8c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOhw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBde4c8b8c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOdhw4i8o8i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBdef4c8b8c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw2o8i8o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde2b8c8b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw2i4o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde2c4b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw2o4i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde2b4c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4i8o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4c8b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIhw4o8i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCde4b8c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOdhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBdef16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOdhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBdef16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOdhwi16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdefc16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOdhwI16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdefC16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOdhwi4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdefc4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOdhwi8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBdefc8b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw16i16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef16c16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4i16o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4c16b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw2i8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef2c8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw16o16i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef16b16c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOidhw16o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcdef16b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4i4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4c4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOidhw4o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBcdef4b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw8i16o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef8c16b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw8i8o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef8c8b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gIOdhw8o16i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aCBdef8b16c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw8o8i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef8b8c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw8o4i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef8b4c;
}
impl dnnl_format_tag_t {
pub const dnnl_Goidhw16g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcdef16a;
}
impl dnnl_format_tag_t {
pub const dnnl_Goidhw32g: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_Abcdef32a;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw2i4o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef2c4b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4i8o2i: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4c8b2c;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw2o4i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef2b4c2b;
}
impl dnnl_format_tag_t {
pub const dnnl_gOIdhw4o8i2o: dnnl_format_tag_t = dnnl_format_tag_t::dnnl_aBCdef4b8c2b;
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Memory format tag specification."]
#[doc = ""]
#[doc = " oneDNN formats describe physical data layout. The physical layout"]
#[doc = " is described as a sequence of the dimensions as they are laid out in the"]
#[doc = " memory (from the outer-most to the inner-most). Note that this order"]
#[doc = " doesn't affect the logical order of the dimensions that is kept in the"]
#[doc = " `dims` field of the dnnl_memory_desc_t structure. The logical order of the"]
#[doc = " dimensions is specified by the primitive that uses the tensor."]
#[doc = ""]
#[doc = " For example, CNN 5D tensor always has its logical dimensions in the order"]
#[doc = " `(batch, channels, depth, height, width)`, while the physical layout might be"]
#[doc = " `NCDHW` (corresponds to #dnnl_ncdhw format tag) or"]
#[doc = " `NDHWC` (corresponds to #dnnl_ndhwc format tag)."]
#[doc = ""]
#[doc = " ~~~cpp"]
#[doc = " int batch = 2, channels = 16, depth = 13, height = 13, width = 13;"]
#[doc = ""]
#[doc = " int ndims = 5; // 5D tensor"]
#[doc = " dnnl_dims_t dims = {batch, channels, depth, height, width};"]
#[doc = " dnnl_memory_desc_t data_in_ncdhw;"]
#[doc = " dnnl_memory_desc_init_by_tag("]
#[doc = " &data_in_ncdhw, 5, dims, dnnl_f32, dnnl_ncdhw);"]
#[doc = ""]
#[doc = " // note that in both cases dims passed are the same"]
#[doc = " dnnl_memory_desc_t data_in_ndhwc;"]
#[doc = " dnnl_memory_desc_init_by_tag("]
#[doc = " &data_in_ndhwc, 5, dims, dnnl_f32, dnnl_ndhwc);"]
#[doc = " ~~~"]
#[doc = ""]
#[doc = " Memory format tags can be further divided into two categories:"]
#[doc = " - Domain-agnostic names, i.e. names the do not depend on the tensor usage"]
#[doc = " in the specific primitive. These names use letters from `a` to `l` to"]
#[doc = " denote logical dimension from 1 to 12, and form the order in which the"]
#[doc = " dimensions are laid in memory. For instance, #dnnl_ab is used to denote"]
#[doc = " 2D tensor where the second logical dimension (aka `b`) is the innermost,"]
#[doc = " i.e. has stride = 1, and the first logical dimension (`a`) laid out in"]
#[doc = " memory with stride equal to the size of second dimension. On the other"]
#[doc = " hand, #dnnl_ba is just transposed version of the same tensor: the"]
#[doc = " first dimension (`a`) becomes the innermost one."]
#[doc = " - Domain-specific names, i.e. names that make sense only in the context of"]
#[doc = " a certain domain, such as CNN. This names are just aliases to the"]
#[doc = " corresponding domain-agnostic tags and used mostly for the convenience."]
#[doc = " For example, #dnnl_nc is used to denote 2D CNN activations tensor"]
#[doc = " memory format, where channels are the innermost dimension and batch is an"]
#[doc = " outermost one. Moreover, #dnnl_nc is just an alias to #dnnl_ab,"]
#[doc = " since for oneDNN CNN primitives the logical dimensions of"]
#[doc = " activations tensors come in order: batch, channels, spatial."]
#[doc = " In other words, batch corresponds to the first logical dimension (`a`),"]
#[doc = " channels correspond to the second one (`b`)."]
#[doc = ""]
#[doc = " The following domain-specific notation applies to memory format tags:"]
#[doc = " - @c 'n' denotes the mini-batch dimension"]
#[doc = " - @c 'c' denotes a channels dimension"]
#[doc = " - When there are multiple channel dimensions (for example, in convolution"]
#[doc = " weights tensor), @c 'i' and @c 'o' denote dimensions of input and output"]
#[doc = " channels"]
#[doc = " - @c 'd', @c 'h', and @c 'w' denote spatial depth, height, and width"]
#[doc = " respectively"]
#[doc = ""]
#[doc = " Upper-case letters indicate that the data is laid out in blocks for a"]
#[doc = " particular dimension. In such cases, the format name contains both upper-"]
#[doc = " and lower-case letters for that dimension with a lower-case letter preceded"]
#[doc = " by the block size. For example: #dnnl_nChw8c describes a format where the"]
#[doc = " outermost dimension is mini-batch, followed by the channel block number,"]
#[doc = " followed by the spatial height and width, and finally followed by 8-element"]
#[doc = " channel blocks."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_understanding_memory_formats"]
// NOTE(review): many of the per-variant `#[doc]` strings below were
// misattached during binding generation -- the same description (e.g.
// "6D tensor blocked by 2nd dimension with block size 4") is duplicated
// verbatim across long runs of unrelated variants, and frequently
// contradicts the variant name (e.g. `dnnl_Abc4a` is a 3D tensor blocked
// by its *1st* dimension with block size 4, and `dnnl_aBdc16b` is a *4D*
// tensor). Trust the tag name, decoded per the naming scheme documented
// above, rather than the duplicated doc strings. TODO: regenerate these
// bindings or re-sync the docs against the upstream dnnl_types.h header.
//
// The discriminant values are part of the C ABI -- do not renumber.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_format_tag_t {
#[doc = " Undefined memory format tag"]
dnnl_format_tag_undef = 0,
#[doc = " Undefined memory format tag."]
#[doc = " The primitive selects a format automatically."]
dnnl_format_tag_any = 1,
#[doc = "< plain 1D tensor"]
dnnl_a = 2,
#[doc = "< plain 2D tensor"]
dnnl_ab = 3,
#[doc = "< plain 3D tensor"]
dnnl_abc = 4,
#[doc = "< plain 4D tensor"]
dnnl_abcd = 5,
#[doc = "< plain 5D tensor"]
dnnl_abcde = 6,
#[doc = "< plain 6D tensor"]
dnnl_abcdef = 7,
#[doc = "< permuted 4D tensor"]
dnnl_abdc = 8,
#[doc = "< permuted 5D tensor"]
dnnl_abdec = 9,
#[doc = "< permuted 3D tensor"]
dnnl_acb = 10,
#[doc = "< permuted 5D tensor"]
dnnl_acbde = 11,
#[doc = "< permuted 6D tensor"]
dnnl_acbdef = 12,
#[doc = "< permuted 4D tensor"]
dnnl_acdb = 13,
#[doc = "< permuted 5D tensor"]
dnnl_acdeb = 14,
#[doc = "< permuted 2D tensor"]
dnnl_ba = 15,
#[doc = "< permuted 3D tensor"]
dnnl_bac = 16,
#[doc = "< permuted 4D tensor"]
dnnl_bacd = 17,
#[doc = "< permuted 5D tensor"]
dnnl_bacde = 18,
#[doc = "< permuted 3D tensor"]
dnnl_bca = 19,
#[doc = "< permuted 4D tensor"]
dnnl_bcda = 20,
#[doc = "< permuted 5D tensor"]
dnnl_bcdea = 21,
#[doc = "< permuted 3D tensor"]
dnnl_cba = 22,
#[doc = "< permuted 4D tensor"]
dnnl_cdba = 23,
#[doc = "< permuted 4D tensor"]
dnnl_dcab = 24,
#[doc = "< permuted 5D tensor"]
dnnl_cdeba = 25,
#[doc = "< permuted 5D tensor"]
dnnl_decab = 26,
#[doc = "< permuted 6D tensor"]
dnnl_defcab = 27,
// Blocked tags start here; see the NOTE(review) above about the
// reliability of the `#[doc]` strings attached to them.
dnnl_Abc16a = 28,
dnnl_ABc16a16b = 29,
dnnl_ABc32a32b = 30,
dnnl_ABc4a4b = 31,
#[doc = " 3D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBc16b = 32,
#[doc = " 3D tensor blocked by 2nd dimension with block size 16"]
dnnl_ABc16b16a = 33,
#[doc = " 3D tensor blocked by 2nd dimension with block size 16"]
dnnl_Abc4a = 34,
#[doc = " 3D tensor blocked by 2nd dimension with block size 32"]
dnnl_aBc32b = 35,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBc4b = 36,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc4b16a4b = 37,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc2b8a4b = 38,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc4b4a = 39,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc8a16b2a = 40,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc8a8b = 41,
#[doc = " 3D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABc8a4b = 42,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBc8b = 43,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABc8b16a2b = 44,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_BAc8a16b2a = 45,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABc8b8a = 46,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_Abcd16a = 47,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_Abcd8a = 48,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcd16a16b = 49,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_Abcd32a = 50,
#[doc = " 3D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcd32a32b = 51,
#[doc = " 4D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBcd16b = 52,
#[doc = " 4D tensor blocked by 2nd dimension with block size 16"]
dnnl_ABcd16b16a = 53,
#[doc = " 4D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCd16b16c = 54,
#[doc = " 4D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCd16c16b = 55,
#[doc = " 4D tensor blocked by 2nd dimension with block size 16"]
dnnl_Abcd4a = 56,
#[doc = " 4D tensor blocked by 2nd dimension with block size 32"]
dnnl_aBcd32b = 57,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBcd4b = 58,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd4b16a4b = 59,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd4b4a = 60,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd4a4b = 61,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd2c4b2c = 62,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd4b8c2b = 63,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd4c16b4c = 64,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd2c8b4c = 65,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd4c4b = 66,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCd4b4c = 67,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd8a16b2a = 68,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd2b8a4b = 69,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd8a8b = 70,
#[doc = " 4D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcd8a4b = 71,
#[doc = " 4D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBcd8b = 72,
#[doc = " 4D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCd4c8b2c = 73,
#[doc = " 4D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcd8b16a2b = 74,
#[doc = " 4D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCd8b16c2b = 75,
#[doc = " 4D tensor blocked by 2nd dimension with block size 8"]
dnnl_BAcd8a16b2a = 76,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_ABcd8b8a = 77,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_aBCd8b8c = 78,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_aBCd8b4c = 79,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_aBCd8c16b2c = 80,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_ABcde8a16b2a = 81,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_aCBd8b16c2b = 82,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_aBCd8c8b = 83,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_Abcde16a = 84,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_Abcde32a = 85,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_ABcde16a16b = 86,
#[doc = " 4D tensor blocked by 1st and 2nd dimension with block size 8"]
dnnl_BAcde8a16b2a = 87,
#[doc = " 4D tensor blocked by 3rd dimension with block size 4"]
dnnl_aBCd2b4c2b = 88,
#[doc = " 5D tensor blocked by 1st dimension with block size 16"]
dnnl_ABcde4b16a4b = 89,
#[doc = " 5D tensor blocked by 1st dimension with block size 8"]
dnnl_ABcde2b8a4b = 90,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBcde16b = 91,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_ABcde16b16a = 92,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCde16b16c = 93,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCde16c16b = 94,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCde2c8b4c = 95,
#[doc = " 5D tensor blocked by 2nd dimension with block size 16"]
dnnl_Abcde4a = 96,
#[doc = " 5D tensor blocked by 2nd dimension with block size 32"]
dnnl_aBcde32b = 97,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBcde4b = 98,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcde4b4a = 99,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcde4a4b = 100,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCde4b4c = 101,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCde2c4b2c = 102,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCde4b8c2b = 103,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCde4c16b4c = 104,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCde4c4b = 105,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_Abcde8a = 106,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcde8a8b = 107,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_ABcde8a4b = 108,
#[doc = " 5D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAcde16b16a = 109,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBcde8b = 110,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcde8b16a2b = 111,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde8b16c2b = 112,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde4c8b2c = 113,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aCBde8b16c2b = 114,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcde8b8a = 115,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcde32a32b = 116,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde8b8c = 117,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde8b4c = 118,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABc4a8b8a4b = 119,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcd4a8b8a4b = 120,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcde4a8b8a4b = 121,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_BAc4b8a8b4a = 122,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_BAcd4b8a8b4a = 123,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_BAcde4b8a8b4a = 124,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_ABcd2a8b8a2b = 125,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCd4b8c8b4c = 126,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde4b8c8b4c = 127,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde2b8c8b2c = 128,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde8c16b2c = 129,
#[doc = " 5D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCde8c8b = 130,
#[doc = " 5D tensor blocked by 3rd dimension with block size 4"]
dnnl_aBCde2b4c2b = 131,
#[doc = " 6D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBcdef16b = 132,
#[doc = " 6D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCdef16b16c = 133,
#[doc = " 6D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCdef16c16b = 134,
#[doc = " 6D tensor blocked by 2nd dimension with block size 16"]
dnnl_aBCdef4c16b4c = 135,
#[doc = " 6D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCdef2c8b4c = 136,
#[doc = " 6D tensor blocked by 2nd dimension with block size 8"]
dnnl_aBCdef4c8b2c = 137,
#[doc = " 6D tensor blocked by 3rd dimension with block size 4"]
dnnl_aBCdef2b4c2b = 138,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBcdef4b = 139,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef4c4b = 140,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef4b4c = 141,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef2c4b2c = 142,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef4b8c2b = 143,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef8b8c = 144,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef8b4c = 145,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef8c16b2c = 146,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef4b8c8b4c = 147,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef8b16c2b = 148,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBdef8b16c2b = 149,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBCdef8c8b = 150,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdc16b = 151,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdC16b2c = 152,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdc4b = 153,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdc8b = 154,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdec16b = 155,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdeC16b2c = 156,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdec32b = 157,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdec4b = 158,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdec8b = 159,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdefc16b = 160,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdefC16b2c = 161,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBdef16c16b = 162,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdefc4b = 163,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aBdefc8b = 164,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Abcdef16a = 165,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Abcdef32a = 166,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acb16a = 167,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_AcB16a2b = 168,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acb4a = 169,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acb8a = 170,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBd16b16c = 171,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBd16c16b = 172,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBde16b16c = 173,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBde16c16b = 174,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdb16a = 175,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_AcdB16a2b = 176,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdb32a = 177,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdb4a = 178,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdb8a = 179,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdeb16a = 180,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_AcdeB16a2b = 181,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdeb4a = 182,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_Acdeb8a = 183,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAc16a16b = 184,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAc16b16a = 185,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAcd16a16b = 186,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAcd16b16a = 187,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBd4c8b8c4b = 188,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBde4c8b8c4b = 189,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBdef4c8b8c4b = 190,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_BAcde16a16b = 191,
#[doc = " 6D tensor blocked by 2nd dimension with block size 4"]
dnnl_aCBdef16b16c = 192,
#[doc = " Just a sentinel, not real memory format tag. Must be changed after new"]
#[doc = " format tag is added."]
dnnl_format_tag_last = 193,
}
/// Convenience aliases on `dnnl_prop_kind_t`, merged into one `impl` block:
/// `dnnl_forward` is the usual shorthand for training-mode forward
/// propagation, and `dnnl_forward_scoring` is an alias for inference-mode
/// forward propagation.
impl dnnl_prop_kind_t {
    pub const dnnl_forward_scoring: dnnl_prop_kind_t =
        dnnl_prop_kind_t::dnnl_forward_inference;
    pub const dnnl_forward: dnnl_prop_kind_t = dnnl_prop_kind_t::dnnl_forward_training;
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Kinds of propagation."]
// NOTE(review): the discriminant values (0, 64, 96, 128, 160, 192, 193)
// presumably mirror the C `dnnl_prop_kind_t` enum so the type crosses the
// FFI boundary unchanged -- do not renumber; confirm against the generating
// header before editing.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_prop_kind_t {
#[doc = " Undefined propagation type."]
dnnl_prop_kind_undef = 0,
#[doc = " Forward data propagation (training mode). In this mode primitives"]
#[doc = " perform computations necessary for subsequent backward propagation."]
dnnl_forward_training = 64,
#[doc = " Forward data propagation (inference mode). In this mode primitives"]
#[doc = " perform only computations that are necessary for inference and omit"]
#[doc = " computations that are necessary only for backward propagation."]
dnnl_forward_inference = 96,
#[doc = " Backward propagation (with respect to all parameters)."]
dnnl_backward = 128,
#[doc = " Backward data propagation."]
dnnl_backward_data = 160,
#[doc = " Backward weights propagation."]
dnnl_backward_weights = 192,
#[doc = " Backward bias propagation."]
dnnl_backward_bias = 193,
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Kinds of primitives. Used to implement a way to extend the library with new"]
#[doc = " primitives without changing the ABI."]
// NOTE(review): `#[non_exhaustive]` matches the "extend without changing the
// ABI" contract above -- newer library versions may return kinds not listed
// here, so downstream matches should keep a wildcard arm.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_primitive_kind_t {
#[doc = " Undefined primitive"]
dnnl_undefined_primitive = 0,
#[doc = " A reorder primitive."]
dnnl_reorder = 1,
#[doc = " A shuffle primitive."]
dnnl_shuffle = 2,
#[doc = " A (out-of-place) concat primitive."]
dnnl_concat = 3,
#[doc = " A sum primitive."]
dnnl_sum = 4,
#[doc = " A convolution primitive."]
dnnl_convolution = 5,
#[doc = " A deconvolution primitive."]
dnnl_deconvolution = 6,
#[doc = " An element-wise primitive."]
dnnl_eltwise = 7,
#[doc = " A softmax primitive."]
dnnl_softmax = 8,
#[doc = " A pooling primitive."]
dnnl_pooling = 9,
#[doc = " An LRN primitive."]
dnnl_lrn = 10,
#[doc = " A batch normalization primitive."]
dnnl_batch_normalization = 11,
#[doc = " A layer normalization primitive."]
dnnl_layer_normalization = 12,
#[doc = " An inner product primitive."]
dnnl_inner_product = 13,
#[doc = " A rnn primitive."]
dnnl_rnn = 14,
#[doc = " A matrix multiplication primitive (internal)."]
dnnl_gemm = 15,
#[doc = " A binary primitive."]
dnnl_binary = 16,
#[doc = " A logsoftmax primitive."]
dnnl_logsoftmax = 17,
#[doc = " A matrix multiplication primitive."]
dnnl_matmul = 18,
#[doc = " A resampling primitive."]
dnnl_resampling = 19,
}
/// Convenience aliases on `dnnl_alg_kind_t`, merged into one `impl` block:
/// `dnnl_eltwise_gelu` resolves to the tanh-approximation GELU variant, and
/// `dnnl_pooling_avg` resolves to average pooling that excludes padding.
impl dnnl_alg_kind_t {
    pub const dnnl_eltwise_gelu: dnnl_alg_kind_t = dnnl_alg_kind_t::dnnl_eltwise_gelu_tanh;
    pub const dnnl_pooling_avg: dnnl_alg_kind_t =
        dnnl_alg_kind_t::dnnl_pooling_avg_exclude_padding;
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Kinds of algorithms."]
// NOTE(review): the discriminants are sparse and non-monotonic (31, 47, ...,
// 255, then 32, 48, 64, then 256...). They presumably reproduce the values
// of the C `dnnl_alg_kind_t` enum exactly -- do not renumber or "clean up";
// confirm against the generating header before editing.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_alg_kind_t {
dnnl_alg_kind_undef = 0,
#[doc = " Direct convolution"]
dnnl_convolution_direct = 1,
#[doc = " Winograd convolution"]
dnnl_convolution_winograd = 2,
#[doc = " Convolution algorithm(either direct or Winograd) is chosen just in time"]
dnnl_convolution_auto = 3,
#[doc = " Direct deconvolution"]
dnnl_deconvolution_direct = 10,
#[doc = " Winograd deconvolution"]
dnnl_deconvolution_winograd = 11,
#[doc = " Eltwise: ReLU"]
dnnl_eltwise_relu = 31,
#[doc = " Eltwise: hyperbolic tangent non-linearity (tanh)"]
dnnl_eltwise_tanh = 47,
#[doc = " Eltwise: exponential linear unit (elu)"]
dnnl_eltwise_elu = 63,
#[doc = " Eltwise: square"]
dnnl_eltwise_square = 79,
#[doc = " Eltwise: abs"]
dnnl_eltwise_abs = 95,
#[doc = " Eltwise: square root"]
dnnl_eltwise_sqrt = 111,
#[doc = " Eltwise: linear"]
dnnl_eltwise_linear = 127,
#[doc = " Eltwise: bounded_relu"]
dnnl_eltwise_bounded_relu = 143,
#[doc = " Eltwise: soft_relu"]
dnnl_eltwise_soft_relu = 159,
#[doc = " Eltwise: logistic"]
dnnl_eltwise_logistic = 175,
#[doc = " Eltwise: exponent"]
dnnl_eltwise_exp = 191,
#[doc = " Eltwise: gelu"]
#[doc = ""]
#[doc = " @note Tanh approximation formula is used to approximate"]
#[doc = " the cumulative distribution function of a Gaussian here"]
dnnl_eltwise_gelu_tanh = 207,
#[doc = " Eltwise: swish"]
dnnl_eltwise_swish = 223,
#[doc = " Eltwise: natural logarithm"]
dnnl_eltwise_log = 239,
#[doc = " Eltwise: clip"]
dnnl_eltwise_clip = 255,
#[doc = " Eltwise: pow"]
dnnl_eltwise_pow = 32,
#[doc = " Eltwise: erf-based gelu"]
dnnl_eltwise_gelu_erf = 48,
#[doc = " Eltwise: round"]
dnnl_eltwise_round = 64,
#[doc = " Eltwise: ReLU (dst for backward)"]
dnnl_eltwise_relu_use_dst_for_bwd = 256,
#[doc = " Eltwise: hyperbolic tangent non-linearity (tanh) (dst for backward)"]
dnnl_eltwise_tanh_use_dst_for_bwd = 257,
#[doc = " Eltwise: exponential linear unit (elu) (dst for backward)"]
dnnl_eltwise_elu_use_dst_for_bwd = 258,
#[doc = " Eltwise: square root (dst for backward)"]
dnnl_eltwise_sqrt_use_dst_for_bwd = 259,
#[doc = " Eltwise: logistic (dst for backward)"]
dnnl_eltwise_logistic_use_dst_for_bwd = 260,
#[doc = " Eltwise: exp (dst for backward)"]
dnnl_eltwise_exp_use_dst_for_bwd = 261,
#[doc = " Max pooling"]
dnnl_pooling_max = 511,
#[doc = " Average pooling include padding"]
dnnl_pooling_avg_include_padding = 767,
#[doc = " Average pooling exclude padding"]
dnnl_pooling_avg_exclude_padding = 1023,
#[doc = " Local response normalization (LRN) across multiple channels"]
dnnl_lrn_across_channels = 2815,
#[doc = " LRN within a single channel"]
dnnl_lrn_within_channel = 3071,
#[doc = " RNN cell"]
dnnl_vanilla_rnn = 8191,
#[doc = " LSTM cell"]
dnnl_vanilla_lstm = 12287,
#[doc = " GRU cell"]
dnnl_vanilla_gru = 16383,
#[doc = " GRU cell with linear before reset"]
#[doc = ""]
#[doc = " Modification of original GRU cell. Differs from #dnnl_vanilla_gru"]
#[doc = " in how the new memory gate is calculated:"]
#[doc = " \\f[ c_t = tanh(W_c*x_t + b_{c_x} + r_t*(U_c*h_{t-1}+b_{c_h})) \\f]"]
#[doc = " Primitive expects 4 biases on input:"]
#[doc = " \\f$[b_{u}, b_{r}, b_{c_x}, b_{c_h}]\\f$"]
dnnl_lbr_gru = 20479,
#[doc = " Binary add"]
dnnl_binary_add = 131056,
#[doc = " Binary mul"]
dnnl_binary_mul = 131057,
#[doc = " Binary max"]
dnnl_binary_max = 131058,
#[doc = " Binary min"]
dnnl_binary_min = 131059,
#[doc = " Nearest Neighbor Resampling Method"]
dnnl_resampling_nearest = 196592,
#[doc = " Linear Resampling Method"]
dnnl_resampling_linear = 196593,
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Flags for normalization primitives."]
// NOTE(review): the values (0, 1, 2, 4) are single-bit flags, and the C API
// presumably allows OR-combinations (e.g. use_global_stats | use_scaleshift
// == 3). This Rust enum cannot represent such combined values -- callers
// that need combinations must cast through the underlying u32. Verify how
// call sites pass these flags across the FFI boundary.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_normalization_flags_t {
#[doc = " Use no normalization flags"]
#[doc = ""]
#[doc = " If specified"]
#[doc = " - on forward training propagation mean and variance are computed and"]
#[doc = " stored as output"]
#[doc = " - on backward propagation compute full derivative wrt data"]
#[doc = " - on backward propagation prop_kind == #dnnl_backward_data has the same"]
#[doc = " behavior as prop_kind == #dnnl_backward"]
dnnl_normalization_flags_none = 0,
#[doc = " Use global statistics"]
#[doc = ""]
#[doc = " If specified"]
#[doc = " - on forward propagation use mean and variance provided by user (input)"]
#[doc = " - on backward propagation reduces the amount of computations, since"]
#[doc = " mean and variance are considered as constants"]
#[doc = ""]
#[doc = " If not specified:"]
#[doc = " - on forward propagation mean and variance are computed and stored as"]
#[doc = " output"]
#[doc = " - on backward propagation compute full derivative wrt data"]
dnnl_use_global_stats = 1,
#[doc = " Use scale and shift parameters"]
#[doc = ""]
#[doc = " If specified:"]
#[doc = " - on forward propagation use scale and shift (aka scale and bias) for"]
#[doc = " the batch normalization results"]
#[doc = " - on backward propagation (for prop_kind == #dnnl_backward) compute"]
#[doc = " diff wrt scale and shift (hence one extra output used)"]
#[doc = ""]
#[doc = " If no specified:"]
#[doc = " - on backward propagation prop_kind == #dnnl_backward_data has the"]
#[doc = " same behavior as prop_kind == #dnnl_backward"]
dnnl_use_scaleshift = 2,
#[doc = " Fuse with ReLU"]
#[doc = ""]
#[doc = " The flag implies negative slope being 0. On training this is the only"]
#[doc = " configuration supported. For inference, to use non-zero negative slope"]
#[doc = " consider using @ref dev_guide_attributes_post_ops."]
#[doc = ""]
#[doc = " If specified:"]
#[doc = " - on inference this option behaves the same as if the primitive were"]
#[doc = " fused with ReLU using post ops API with zero negative slope."]
#[doc = " - on training primitive requires workspace (required to be able to"]
#[doc = " perform backward pass)"]
dnnl_fuse_norm_relu = 4,
}
#[doc = " @cond DO_NOT_DOCUMENT_THIS"]
#[doc = " Hex representation for a **special** quiet NAN (!= NAN from math.h)"]
#[repr(C)]
#[derive(Copy, Clone)]
// Type-punning union mirroring the C-side representation of the special
// runtime f32 value: `u` views the 4 bytes as a raw unsigned integer,
// `f` views the same bytes as an f32. `_bindgen_union_align` exists only
// to pin the union's size/alignment to 4 bytes; it is never read.
pub union _bindgen_ty_1 {
pub u: ::libc::c_uint,
pub f: f32,
_bindgen_union_align: u32,
}
#[test]
fn bindgen_test_layout__bindgen_ty_1() {
    // Layout check for `_bindgen_ty_1`: size, alignment, and field offsets
    // must match the C definition.
    //
    // Field offsets are probed via `ptr::addr_of!` on a pointer derived from
    // an uninitialized value. The previous pattern,
    // `&(*(::std::ptr::null::<T>())).field as *const _ as usize`,
    // dereferences a null pointer and creates a reference from it, which is
    // undefined behavior in Rust; this is the replacement that current
    // bindgen generates for the same check.
    const UNINIT: ::std::mem::MaybeUninit<_bindgen_ty_1> =
        ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<_bindgen_ty_1>(),
        4usize,
        concat!("Size of: ", stringify!(_bindgen_ty_1))
    );
    assert_eq!(
        ::std::mem::align_of::<_bindgen_ty_1>(),
        4usize,
        concat!("Alignment of ", stringify!(_bindgen_ty_1))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).u) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(_bindgen_ty_1),
            "::",
            stringify!(u)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).f) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(_bindgen_ty_1),
            "::",
            stringify!(f)
        )
    );
}
extern "C" {
    // The "\u{1}" prefix tells the linker to use the following mangled name
    // verbatim; `_ZL...` is the Itanium-mangled form of an internal-linkage
    // C++ variable holding the runtime f32 placeholder representation.
    #[link_name = "\u{1}_ZL24DNNL_RUNTIME_F32_VAL_REP"]
    pub static DNNL_RUNTIME_F32_VAL_REP: _bindgen_ty_1;
}
// Sentinel marking an s32 value as "specified at runtime" (equals i32::MIN).
pub const DNNL_RUNTIME_S32_VAL_REP: ::libc::c_int = -2147483648;
#[doc = " A type to describe tensor dimension."]
pub type dnnl_dim_t = i64;
#[doc = " A type to describe tensor dimensions."]
// Fixed-capacity array: DNNL supports at most 12 dimensions per tensor.
pub type dnnl_dims_t = [dnnl_dim_t; 12usize];
#[doc = " Generic description of blocked data layout for most memory formats."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_understanding_memory_formats"]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
// Layout mirrors the C `dnnl_blocking_desc_t`; field order and types must not
// change (verified by the layout test below).
pub struct dnnl_blocking_desc_t {
    #[doc = " The strides between the outermost blocks."]
    #[doc = " In case of plain (non-blocked) formats the strides between dimensions."]
    pub strides: dnnl_dims_t,
    #[doc = " The number of innermost blocks, e.g. 3 in case of `OIhw_4i16o4i_`"]
    pub inner_nblks: ::libc::c_int,
    #[doc = " The size of the blocks, e.g. `{4, 16, 4}` in case of `OIhw_4i16o4i`"]
    pub inner_blks: dnnl_dims_t,
    #[doc = " The logical indices of the blocks, e.g. `{1, 0, 1}` in case of"]
    #[doc = " `4i16o4i`, because `i` is the 1st dim and `o` is the 0th dim"]
    pub inner_idxs: dnnl_dims_t,
}
#[test]
fn bindgen_test_layout_dnnl_blocking_desc_t() {
    // Three dnnl_dims_t arrays (96 bytes each) plus one c_int padded to the
    // 8-byte alignment of dnnl_dim_t: 96 + 8 + 96 + 96 = 296 bytes.
    assert_eq!(::std::mem::size_of::<dnnl_blocking_desc_t>(), 296usize, "Size of: dnnl_blocking_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_blocking_desc_t>(), 8usize, "Alignment of dnnl_blocking_desc_t");
    let base = ::std::ptr::null::<dnnl_blocking_desc_t>();
    assert_eq!(unsafe { &(*base).strides as *const _ as usize }, 0usize, "Offset of field: dnnl_blocking_desc_t::strides");
    assert_eq!(unsafe { &(*base).inner_nblks as *const _ as usize }, 96usize, "Offset of field: dnnl_blocking_desc_t::inner_nblks");
    assert_eq!(unsafe { &(*base).inner_blks as *const _ as usize }, 104usize, "Offset of field: dnnl_blocking_desc_t::inner_blks");
    assert_eq!(unsafe { &(*base).inner_idxs as *const _ as usize }, 200usize, "Offset of field: dnnl_blocking_desc_t::inner_idxs");
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Winograd-specific formats"]
// Opaque, implementation-defined weight layouts produced by DNNL's Winograd
// convolution kernels; user code normally only passes these tags through.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_wino_memory_format_t {
    #[doc = " Undefined memory format, used for empty memory descriptors."]
    dnnl_wino_undef = 0,
    #[doc = "< Internal weights format for 2x3 Winograd"]
    dnnl_wino_wei_aaOIoi = 1,
    #[doc = "< Internal weights format for 2x3 Winograd"]
    dnnl_wino_wei_aaOio = 2,
    #[doc = "< Internal weights format for 2x3 Winograd"]
    dnnl_wino_wei_aaOBiOo = 3,
    #[doc = "< Internal weights format for 4x3 Winograd"]
    dnnl_wino_wei_OBaaIBOIio = 4,
}
#[doc = " Description of tensor of weights for winograd 2x3 convolution."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_wino_desc_t {
    // Which internal Winograd weights layout is in use (see enum above).
    pub wino_format: dnnl_wino_memory_format_t,
    // NOTE(review): the per-field meanings below are inferred from oneDNN
    // naming conventions (r/alpha = Winograd filter/tile parameters, ic/oc =
    // input/output channels, *_block = blocking factors) -- confirm against
    // the upstream dnnl_types.h before relying on them.
    pub r: ::libc::c_int,
    pub alpha: ::libc::c_int,
    pub ic: ::libc::c_int,
    pub oc: ::libc::c_int,
    pub ic_block: ::libc::c_int,
    pub oc_block: ::libc::c_int,
    pub ic2_block: ::libc::c_int,
    pub oc2_block: ::libc::c_int,
    pub adj_scale: f32,
    // Presumably the total size in bytes of the packed weights buffer; verify.
    pub size: usize,
}
#[test]
fn bindgen_test_layout_dnnl_wino_desc_t() {
    // Nine 4-byte ints + one f32 + one usize, padded to 8-byte alignment.
    assert_eq!(::std::mem::size_of::<dnnl_wino_desc_t>(), 48usize, "Size of: dnnl_wino_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_wino_desc_t>(), 8usize, "Alignment of dnnl_wino_desc_t");
    let base = ::std::ptr::null::<dnnl_wino_desc_t>();
    assert_eq!(unsafe { &(*base).wino_format as *const _ as usize }, 0usize, "Offset of field: dnnl_wino_desc_t::wino_format");
    assert_eq!(unsafe { &(*base).r as *const _ as usize }, 4usize, "Offset of field: dnnl_wino_desc_t::r");
    assert_eq!(unsafe { &(*base).alpha as *const _ as usize }, 8usize, "Offset of field: dnnl_wino_desc_t::alpha");
    assert_eq!(unsafe { &(*base).ic as *const _ as usize }, 12usize, "Offset of field: dnnl_wino_desc_t::ic");
    assert_eq!(unsafe { &(*base).oc as *const _ as usize }, 16usize, "Offset of field: dnnl_wino_desc_t::oc");
    assert_eq!(unsafe { &(*base).ic_block as *const _ as usize }, 20usize, "Offset of field: dnnl_wino_desc_t::ic_block");
    assert_eq!(unsafe { &(*base).oc_block as *const _ as usize }, 24usize, "Offset of field: dnnl_wino_desc_t::oc_block");
    assert_eq!(unsafe { &(*base).ic2_block as *const _ as usize }, 28usize, "Offset of field: dnnl_wino_desc_t::ic2_block");
    assert_eq!(unsafe { &(*base).oc2_block as *const _ as usize }, 32usize, "Offset of field: dnnl_wino_desc_t::oc2_block");
    assert_eq!(unsafe { &(*base).adj_scale as *const _ as usize }, 36usize, "Offset of field: dnnl_wino_desc_t::adj_scale");
    assert_eq!(unsafe { &(*base).size as *const _ as usize }, 40usize, "Offset of field: dnnl_wino_desc_t::size");
}
#[repr(u32)]
#[non_exhaustive]
// Memory format tags for RNN weights that have been pre-packed by the
// implementation. NOTE(review): presumably `ldigo_p` / `ldgoi_p` are the
// packed counterparts of DNNL's plain `ldigo` / `ldgoi` RNN weight layouts
// (layers, directions, input, gates, output) -- confirm in dnnl_types.h.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_rnn_packed_memory_format_t {
    // No packed format / descriptor does not describe packed weights.
    dnnl_packed_format_undef = 0,
    dnnl_ldigo_p = 1,
    dnnl_ldgoi_p = 2,
}
#[doc = " Description of tensor of packed weights for rnn."]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_rnn_packed_desc_t {
    // Which packed layout the weights use (see enum above).
    pub format: dnnl_rnn_packed_memory_format_t,
    // NOTE(review): comments below are inferred from field names and the fixed
    // array sizes; confirm against the C header before relying on them.
    // Number of valid entries in `parts` / `part_pack_size` / `pack_part`
    // (the arrays have capacity for at most 4 parts).
    pub n_parts: ::libc::c_int,
    pub n: ::libc::c_int,
    pub ldb: ::libc::c_int,
    pub parts: [::libc::c_int; 4usize],
    pub part_pack_size: [usize; 4usize],
    pub pack_part: [::libc::c_uint; 4usize],
    pub offset_compensation: usize,
    // Presumably the total size in bytes of the packed buffer; verify.
    pub size: usize,
    // Reserved space keeping the struct ABI stable across library versions.
    pub reserved: [::libc::c_char; 200usize],
}
#[test]
fn bindgen_test_layout_dnnl_rnn_packed_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_rnn_packed_desc_t>(), 296usize, "Size of: dnnl_rnn_packed_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_rnn_packed_desc_t>(), 8usize, "Alignment of dnnl_rnn_packed_desc_t");
    let base = ::std::ptr::null::<dnnl_rnn_packed_desc_t>();
    assert_eq!(unsafe { &(*base).format as *const _ as usize }, 0usize, "Offset of field: dnnl_rnn_packed_desc_t::format");
    assert_eq!(unsafe { &(*base).n_parts as *const _ as usize }, 4usize, "Offset of field: dnnl_rnn_packed_desc_t::n_parts");
    assert_eq!(unsafe { &(*base).n as *const _ as usize }, 8usize, "Offset of field: dnnl_rnn_packed_desc_t::n");
    assert_eq!(unsafe { &(*base).ldb as *const _ as usize }, 12usize, "Offset of field: dnnl_rnn_packed_desc_t::ldb");
    assert_eq!(unsafe { &(*base).parts as *const _ as usize }, 16usize, "Offset of field: dnnl_rnn_packed_desc_t::parts");
    assert_eq!(unsafe { &(*base).part_pack_size as *const _ as usize }, 32usize, "Offset of field: dnnl_rnn_packed_desc_t::part_pack_size");
    assert_eq!(unsafe { &(*base).pack_part as *const _ as usize }, 64usize, "Offset of field: dnnl_rnn_packed_desc_t::pack_part");
    assert_eq!(unsafe { &(*base).offset_compensation as *const _ as usize }, 80usize, "Offset of field: dnnl_rnn_packed_desc_t::offset_compensation");
    assert_eq!(unsafe { &(*base).size as *const _ as usize }, 88usize, "Offset of field: dnnl_rnn_packed_desc_t::size");
    assert_eq!(unsafe { &(*base).reserved as *const _ as usize }, 96usize, "Offset of field: dnnl_rnn_packed_desc_t::reserved");
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Flags for memory special features"]
// Bit-flag values (1, 2, 4): combined into the u64 `flags` field of
// dnnl_memory_extra_desc_t rather than used as a plain enum.
// NOTE(review): the doc comments on `scale_adjust` and
// `gpu_rnn_u8s8_compensation` below are byte-for-byte copies of the
// `compensation_conv_s8s8` description; this looks like a bindgen comment
// mis-association -- check dnnl_types.h for the real per-flag documentation.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_memory_extra_flags_t {
    dnnl_memory_extra_flag_none = 0,
    #[doc = " Indicates the weights have an additional buffer, that depends on the"]
    #[doc = " @p compensation_mask."]
    #[doc = ""]
    #[doc = " For instance, in 4D case with the compensation mask equals (1 << 0)"]
    #[doc = " the additional buffer would consist of OC values:"]
    #[doc = " O[oc : 0,OC] ="]
    #[doc = "  -128 * SUM(ic : 0,IC; kh : 0,KH; kw : 0,KW){ weights(oc, ic, kh, kw) }"]
    dnnl_memory_extra_flag_compensation_conv_s8s8 = 1,
    #[doc = " Indicates the weights have an additional buffer, that depends on the"]
    #[doc = " @p compensation_mask."]
    #[doc = ""]
    #[doc = " For instance, in 4D case with the compensation mask equals (1 << 0)"]
    #[doc = " the additional buffer would consist of OC values:"]
    #[doc = " O[oc : 0,OC] ="]
    #[doc = "  -128 * SUM(ic : 0,IC; kh : 0,KH; kw : 0,KW){ weights(oc, ic, kh, kw) }"]
    dnnl_memory_extra_flag_scale_adjust = 2,
    #[doc = " Indicates the weights have an additional buffer, that depends on the"]
    #[doc = " @p compensation_mask."]
    #[doc = ""]
    #[doc = " For instance, in 4D case with the compensation mask equals (1 << 0)"]
    #[doc = " the additional buffer would consist of OC values:"]
    #[doc = " O[oc : 0,OC] ="]
    #[doc = "  -128 * SUM(ic : 0,IC; kh : 0,KH; kw : 0,KW){ weights(oc, ic, kh, kw) }"]
    dnnl_memory_extra_flag_gpu_rnn_u8s8_compensation = 4,
}
#[doc = " Description of extra information stored in memory"]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_memory_extra_desc_t {
    #[doc = " The flags contain arbitrary extra information, such as compensation."]
    #[doc = " @sa dnnl_memory_extra_flags_t"]
    // Bitmask built from dnnl_memory_extra_flags_t values, widened to u64.
    pub flags: u64,
    #[doc = " Compensation mask"]
    pub compensation_mask: ::libc::c_int,
    #[doc = " Scale applied to the data"]
    pub scale_adjust: f32,
    #[doc = " For future backwards compatibility"]
    // 64 reserved bytes keep the struct at a stable 80-byte ABI footprint.
    pub reserved: [::libc::c_char; 64usize],
}
#[test]
fn bindgen_test_layout_dnnl_memory_extra_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_memory_extra_desc_t>(), 80usize, "Size of: dnnl_memory_extra_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_memory_extra_desc_t>(), 8usize, "Alignment of dnnl_memory_extra_desc_t");
    let base = ::std::ptr::null::<dnnl_memory_extra_desc_t>();
    assert_eq!(unsafe { &(*base).flags as *const _ as usize }, 0usize, "Offset of field: dnnl_memory_extra_desc_t::flags");
    assert_eq!(unsafe { &(*base).compensation_mask as *const _ as usize }, 8usize, "Offset of field: dnnl_memory_extra_desc_t::compensation_mask");
    assert_eq!(unsafe { &(*base).scale_adjust as *const _ as usize }, 12usize, "Offset of field: dnnl_memory_extra_desc_t::scale_adjust");
    assert_eq!(unsafe { &(*base).reserved as *const _ as usize }, 16usize, "Offset of field: dnnl_memory_extra_desc_t::reserved");
}
#[doc = " Memory descriptor. The description is based on a number of dimensions,"]
#[doc = " dimensions themselves, plus information about elements type and memory"]
#[doc = " format. Additionally, contains format-specific descriptions of the data"]
#[doc = " layout."]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_memory_desc_t {
    #[doc = " Number of dimensions"]
    pub ndims: ::libc::c_int,
    #[doc = " Dimensions in the following order:"]
    #[doc = " - CNN data tensors: mini-batch, channel, spatial"]
    #[doc = "   (<code>{N, C, [[D,] H,] W}</code>)"]
    #[doc = " - CNN weight tensors: group (optional), output channel, input channel,"]
    #[doc = "   spatial (<code>{[G,] O, I, [[D,] H,] W}</code>)"]
    #[doc = " - RNN data tensors: time, mini-batch, channels (<code>{T, N, C}</code>)"]
    #[doc = "   or layers, directions, states, mini-batch, channels (<code>{L, D, S, N, C}</code>)"]
    #[doc = " - RNN weight tensor: layers, directions, input channel, gates, output channels"]
    #[doc = "   (<code>{L, D, I, G, O}</code>)."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = "    The order of dimensions does not depend on the memory format, so"]
    #[doc = "    whether the data is laid out in #dnnl_nchw or #dnnl_nhwc"]
    #[doc = "    the dims for 4D CNN data tensor would be <code>{N, C, H, W}</code>."]
    pub dims: dnnl_dims_t,
    #[doc = " Data type of the tensor elements."]
    pub data_type: dnnl_data_type_t,
    #[doc = " Size of the data including padding in each dimension."]
    pub padded_dims: dnnl_dims_t,
    #[doc = " Per-dimension offset from the padding to actual data, the top-level"]
    #[doc = " tensor with offsets applied must lie within the padding area."]
    pub padded_offsets: dnnl_dims_t,
    #[doc = " Offset from memory origin to the current block, non-zero only in"]
    #[doc = " a description of a memory sub-block."]
    pub offset0: dnnl_dim_t,
    #[doc = " Memory format kind."]
    pub format_kind: dnnl_format_kind_t,
    // Format-specific layout details; the active union member is presumably
    // selected by `format_kind` -- confirm against the C header.
    pub format_desc: dnnl_memory_desc_t__bindgen_ty_1,
    // Extra data such as compensation buffers; see dnnl_memory_extra_desc_t.
    pub extra: dnnl_memory_extra_desc_t,
}
#[repr(C)]
#[derive(Copy, Clone)]
// Untagged union of the three possible format-specific layout descriptions;
// mirrors the anonymous union inside the C `dnnl_memory_desc_t`.
pub union dnnl_memory_desc_t__bindgen_ty_1 {
    #[doc = " Description of the data layout for memory formats that use"]
    #[doc = " blocking."]
    pub blocking: dnnl_blocking_desc_t,
    #[doc = " Tensor of weights for integer 8bit winograd convolution."]
    pub wino_desc: dnnl_wino_desc_t,
    #[doc = " Tensor of packed weights for RNN."]
    pub rnn_packed_desc: dnnl_rnn_packed_desc_t,
    // bindgen-emitted marker pinning size/alignment: 37 * 8 = 296 bytes.
    _bindgen_union_align: [u64; 37usize],
}
#[test]
fn bindgen_test_layout_dnnl_memory_desc_t__bindgen_ty_1() {
    // Union size/alignment are dictated by the largest members
    // (dnnl_blocking_desc_t and dnnl_rnn_packed_desc_t, both 296 bytes).
    assert_eq!(::std::mem::size_of::<dnnl_memory_desc_t__bindgen_ty_1>(), 296usize, "Size of: dnnl_memory_desc_t__bindgen_ty_1");
    assert_eq!(::std::mem::align_of::<dnnl_memory_desc_t__bindgen_ty_1>(), 8usize, "Alignment of dnnl_memory_desc_t__bindgen_ty_1");
    // Every union member aliases the same storage at offset 0.
    let base = ::std::ptr::null::<dnnl_memory_desc_t__bindgen_ty_1>();
    assert_eq!(unsafe { &(*base).blocking as *const _ as usize }, 0usize, "Offset of field: dnnl_memory_desc_t__bindgen_ty_1::blocking");
    assert_eq!(unsafe { &(*base).wino_desc as *const _ as usize }, 0usize, "Offset of field: dnnl_memory_desc_t__bindgen_ty_1::wino_desc");
    assert_eq!(unsafe { &(*base).rnn_packed_desc as *const _ as usize }, 0usize, "Offset of field: dnnl_memory_desc_t__bindgen_ty_1::rnn_packed_desc");
}
#[test]
fn bindgen_test_layout_dnnl_memory_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_memory_desc_t>(), 696usize, "Size of: dnnl_memory_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_memory_desc_t>(), 8usize, "Alignment of dnnl_memory_desc_t");
    let base = ::std::ptr::null::<dnnl_memory_desc_t>();
    assert_eq!(unsafe { &(*base).ndims as *const _ as usize }, 0usize, "Offset of field: dnnl_memory_desc_t::ndims");
    assert_eq!(unsafe { &(*base).dims as *const _ as usize }, 8usize, "Offset of field: dnnl_memory_desc_t::dims");
    assert_eq!(unsafe { &(*base).data_type as *const _ as usize }, 104usize, "Offset of field: dnnl_memory_desc_t::data_type");
    assert_eq!(unsafe { &(*base).padded_dims as *const _ as usize }, 112usize, "Offset of field: dnnl_memory_desc_t::padded_dims");
    assert_eq!(unsafe { &(*base).padded_offsets as *const _ as usize }, 208usize, "Offset of field: dnnl_memory_desc_t::padded_offsets");
    assert_eq!(unsafe { &(*base).offset0 as *const _ as usize }, 304usize, "Offset of field: dnnl_memory_desc_t::offset0");
    assert_eq!(unsafe { &(*base).format_kind as *const _ as usize }, 312usize, "Offset of field: dnnl_memory_desc_t::format_kind");
    assert_eq!(unsafe { &(*base).format_desc as *const _ as usize }, 320usize, "Offset of field: dnnl_memory_desc_t::format_desc");
    assert_eq!(unsafe { &(*base).extra as *const _ as usize }, 616usize, "Offset of field: dnnl_memory_desc_t::extra");
}
#[doc = " @struct dnnl_memory"]
#[doc = " An opaque structure to describe a memory."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_memory {
    // Zero-sized marker: the real layout lives on the C side; Rust code only
    // ever handles this type behind a raw pointer (see handle aliases below).
    _unused: [u8; 0],
}
#[doc = " A memory handle."]
pub type dnnl_memory_t = *mut dnnl_memory;
#[doc = " A constant memory handle."]
pub type const_dnnl_memory_t = *const dnnl_memory;
#[doc = " A pointer to any of the operation descriptors."]
// Untyped (void*) handle: the pointee's concrete descriptor type is
// determined by its leading primitive_kind field on the C side.
pub type dnnl_op_desc_t = *mut ::libc::c_void;
#[doc = " A pointer to any of the operation descriptors (constant variant)."]
pub type const_dnnl_op_desc_t = *const ::libc::c_void;
#[doc = " A descriptor of a convolution operation."]
#[repr(C)]
#[derive(Copy, Clone)]
// Layout must stay in sync with the C `dnnl_convolution_desc_t`; each of the
// eight dnnl_memory_desc_t members is 696 bytes, so the struct is large and
// should generally be passed by reference.
pub struct dnnl_convolution_desc_t {
    #[doc = " The kind of primitive. Used for self-identifying the primitive"]
    #[doc = " descriptor. Must be #dnnl_convolution."]
    pub primitive_kind: dnnl_primitive_kind_t,
    #[doc = " The kind of propagation. Possible values: #dnnl_forward_training,"]
    #[doc = " #dnnl_forward_inference, #dnnl_backward_data,"]
    #[doc = " #dnnl_backward_weights, and #dnnl_backward_bias."]
    pub prop_kind: dnnl_prop_kind_t,
    #[doc = " The kind of the convolution algorithm. Possible values:"]
    #[doc = " #dnnl_convolution_direct."]
    pub alg_kind: dnnl_alg_kind_t,
    #[doc = " Source memory descriptor."]
    pub src_desc: dnnl_memory_desc_t,
    #[doc = " Source gradient memory descriptor."]
    pub diff_src_desc: dnnl_memory_desc_t,
    #[doc = " Weights memory descriptor."]
    pub weights_desc: dnnl_memory_desc_t,
    #[doc = " Weights gradient memory descriptor."]
    pub diff_weights_desc: dnnl_memory_desc_t,
    #[doc = " Bias memory descriptor."]
    pub bias_desc: dnnl_memory_desc_t,
    #[doc = " Bias gradient memory descriptor."]
    pub diff_bias_desc: dnnl_memory_desc_t,
    #[doc = " Destination memory descriptor."]
    pub dst_desc: dnnl_memory_desc_t,
    #[doc = " Destination gradient memory descriptor."]
    pub diff_dst_desc: dnnl_memory_desc_t,
    #[doc = " Convolution strides in each spatial dimension."]
    pub strides: dnnl_dims_t,
    #[doc = " Convolution dilates in each spatial dimension."]
    pub dilates: dnnl_dims_t,
    #[doc = " Padding in each spatial dimension. padding[0] is a padding in the"]
    #[doc = " beginning (@p padding_l), padding[1] is a padding in the end (@p"]
    #[doc = " padding_r)."]
    pub padding: [dnnl_dims_t; 2usize],
    #[doc = " The accumulator data type. Initialized automatically."]
    pub accum_data_type: dnnl_data_type_t,
}
#[test]
fn bindgen_test_layout_dnnl_convolution_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_convolution_desc_t>(), 5976usize, "Size of: dnnl_convolution_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_convolution_desc_t>(), 8usize, "Alignment of dnnl_convolution_desc_t");
    let base = ::std::ptr::null::<dnnl_convolution_desc_t>();
    assert_eq!(unsafe { &(*base).primitive_kind as *const _ as usize }, 0usize, "Offset of field: dnnl_convolution_desc_t::primitive_kind");
    assert_eq!(unsafe { &(*base).prop_kind as *const _ as usize }, 4usize, "Offset of field: dnnl_convolution_desc_t::prop_kind");
    assert_eq!(unsafe { &(*base).alg_kind as *const _ as usize }, 8usize, "Offset of field: dnnl_convolution_desc_t::alg_kind");
    // The eight memory descriptors are each 696 bytes and 8-byte aligned,
    // hence the 712/696 stride pattern below (the first starts after padding).
    assert_eq!(unsafe { &(*base).src_desc as *const _ as usize }, 16usize, "Offset of field: dnnl_convolution_desc_t::src_desc");
    assert_eq!(unsafe { &(*base).diff_src_desc as *const _ as usize }, 712usize, "Offset of field: dnnl_convolution_desc_t::diff_src_desc");
    assert_eq!(unsafe { &(*base).weights_desc as *const _ as usize }, 1408usize, "Offset of field: dnnl_convolution_desc_t::weights_desc");
    assert_eq!(unsafe { &(*base).diff_weights_desc as *const _ as usize }, 2104usize, "Offset of field: dnnl_convolution_desc_t::diff_weights_desc");
    assert_eq!(unsafe { &(*base).bias_desc as *const _ as usize }, 2800usize, "Offset of field: dnnl_convolution_desc_t::bias_desc");
    assert_eq!(unsafe { &(*base).diff_bias_desc as *const _ as usize }, 3496usize, "Offset of field: dnnl_convolution_desc_t::diff_bias_desc");
    assert_eq!(unsafe { &(*base).dst_desc as *const _ as usize }, 4192usize, "Offset of field: dnnl_convolution_desc_t::dst_desc");
    assert_eq!(unsafe { &(*base).diff_dst_desc as *const _ as usize }, 4888usize, "Offset of field: dnnl_convolution_desc_t::diff_dst_desc");
    assert_eq!(unsafe { &(*base).strides as *const _ as usize }, 5584usize, "Offset of field: dnnl_convolution_desc_t::strides");
    assert_eq!(unsafe { &(*base).dilates as *const _ as usize }, 5680usize, "Offset of field: dnnl_convolution_desc_t::dilates");
    assert_eq!(unsafe { &(*base).padding as *const _ as usize }, 5776usize, "Offset of field: dnnl_convolution_desc_t::padding");
    assert_eq!(unsafe { &(*base).accum_data_type as *const _ as usize }, 5968usize, "Offset of field: dnnl_convolution_desc_t::accum_data_type");
}
#[doc = " A descriptor of a deconvolution operation."]
// Deconvolution reuses the convolution descriptor layout verbatim; only the
// primitive_kind value distinguishes the two on the C side.
pub type dnnl_deconvolution_desc_t = dnnl_convolution_desc_t;
#[doc = " A descriptor of a shuffle operation."]
#[repr(C)]
#[derive(Copy, Clone)]
// Layout mirrors the C `dnnl_shuffle_desc_t` (verified by the test below).
pub struct dnnl_shuffle_desc_t {
    #[doc = " The kind of primitive. Used for self-identifying the primitive"]
    #[doc = " descriptor. Must be #dnnl_shuffle."]
    pub primitive_kind: dnnl_primitive_kind_t,
    #[doc = " The kind of propagation. Possible values: #dnnl_forward_training,"]
    #[doc = " #dnnl_forward_inference, and #dnnl_backward_data."]
    pub prop_kind: dnnl_prop_kind_t,
    #[doc = " Source and destination memory descriptor,"]
    #[doc = " and source and destination gradient memory descriptor."]
    pub data_desc: dnnl_memory_desc_t,
    #[doc = " Axis for shuffling."]
    pub axis: ::libc::c_int,
    #[doc = " Number of groups."]
    pub group_size: dnnl_dim_t,
}
#[test]
fn bindgen_test_layout_dnnl_shuffle_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_shuffle_desc_t>(), 720usize, "Size of: dnnl_shuffle_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_shuffle_desc_t>(), 8usize, "Alignment of dnnl_shuffle_desc_t");
    let base = ::std::ptr::null::<dnnl_shuffle_desc_t>();
    assert_eq!(unsafe { &(*base).primitive_kind as *const _ as usize }, 0usize, "Offset of field: dnnl_shuffle_desc_t::primitive_kind");
    assert_eq!(unsafe { &(*base).prop_kind as *const _ as usize }, 4usize, "Offset of field: dnnl_shuffle_desc_t::prop_kind");
    assert_eq!(unsafe { &(*base).data_desc as *const _ as usize }, 8usize, "Offset of field: dnnl_shuffle_desc_t::data_desc");
    assert_eq!(unsafe { &(*base).axis as *const _ as usize }, 704usize, "Offset of field: dnnl_shuffle_desc_t::axis");
    assert_eq!(unsafe { &(*base).group_size as *const _ as usize }, 712usize, "Offset of field: dnnl_shuffle_desc_t::group_size");
}
#[doc = " A descriptor of a element-wise operation."]
#[repr(C)]
#[derive(Copy, Clone)]
// Layout mirrors the C `dnnl_eltwise_desc_t`. The alpha/beta accordance
// tables below are intentionally identical: both parameters are interpreted
// jointly per algorithm, so each table documents the full (alpha, beta) pair.
pub struct dnnl_eltwise_desc_t {
    #[doc = " The kind of primitive. Used for self-identifying the primitive"]
    #[doc = " descriptor. Must be #dnnl_eltwise."]
    pub primitive_kind: dnnl_primitive_kind_t,
    #[doc = " The kind of propagation. Possible values: #dnnl_forward_training,"]
    #[doc = " #dnnl_forward_inference, #dnnl_backward, and #dnnl_backward_data."]
    pub prop_kind: dnnl_prop_kind_t,
    #[doc = " The kind of eltwise algorithm. Possible values: #dnnl_eltwise_relu,"]
    #[doc = " #dnnl_eltwise_tanh, #dnnl_eltwise_elu, #dnnl_eltwise_square,"]
    #[doc = " #dnnl_eltwise_abs, #dnnl_eltwise_sqrt, #dnnl_eltwise_linear,"]
    #[doc = " #dnnl_eltwise_bounded_relu, #dnnl_eltwise_soft_relu,"]
    #[doc = " #dnnl_eltwise_logistic, #dnnl_eltwise_exp, #dnnl_eltwise_gelu_tanh,"]
    #[doc = " #dnnl_eltwise_swish, #dnnl_eltwise_log, #dnnl_eltwise_clip,"]
    #[doc = " #dnnl_eltwise_pow, #dnnl_eltwise_gelu_erf, #dnnl_eltwise_round."]
    #[doc = " Possible values for passing destination memory on backward:"]
    #[doc = " #dnnl_eltwise_relu_use_dst_for_bwd, #dnnl_eltwise_tanh_use_dst_for_bwd,"]
    #[doc = " #dnnl_eltwise_elu_use_dst_for_bwd, #dnnl_eltwise_sqrt_use_dst_for_bwd,"]
    #[doc = " #dnnl_eltwise_logistic_use_dst_for_bwd,"]
    #[doc = " #dnnl_eltwise_exp_use_dst_for_bwd."]
    pub alg_kind: dnnl_alg_kind_t,
    #[doc = " Source and destination memory descriptor."]
    pub data_desc: dnnl_memory_desc_t,
    #[doc = " Source and destination gradient memory descriptor."]
    pub diff_data_desc: dnnl_memory_desc_t,
    #[doc = " Algorithm specific parameter."]
    #[doc = " Accordance table:"]
    #[doc = " - #dnnl_eltwise_relu: @p alpha -- negative slope, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_tanh: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_elu: @p alpha -- negative slope, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_square: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_abs: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_sqrt: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_linear: @p alpha -- scale, @p beta -- shift"]
    #[doc = " - #dnnl_eltwise_bounded_relu: @p alpha -- upper bound, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_soft_relu: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_logistic: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_exp: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_gelu_tanh: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_swish: @p alpha -- sigmoid arg scaling, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_log: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_clip: @p alpha -- lower bound, @p beta -- upper bound"]
    #[doc = " - #dnnl_eltwise_pow: @p alpha -- scale, @p beta -- exponent"]
    #[doc = " - #dnnl_eltwise_gelu_erf: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_round: @p alpha and @p beta ignored"]
    pub alpha: f32,
    #[doc = " Algorithm specific parameter."]
    #[doc = " Accordance table:"]
    #[doc = " - #dnnl_eltwise_relu: @p alpha -- negative slope, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_tanh: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_elu: @p alpha -- negative slope, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_square: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_abs: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_sqrt: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_linear: @p alpha -- scale, @p beta -- shift"]
    #[doc = " - #dnnl_eltwise_bounded_relu: @p alpha -- upper bound, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_soft_relu: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_logistic: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_exp: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_gelu_tanh: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_swish: @p alpha -- sigmoid arg scaling, @p beta ignored"]
    #[doc = " - #dnnl_eltwise_log: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_clip: @p alpha -- lower bound, @p beta -- upper bound"]
    #[doc = " - #dnnl_eltwise_pow: @p alpha -- scale, @p beta -- exponent"]
    #[doc = " - #dnnl_eltwise_gelu_erf: @p alpha and @p beta ignored"]
    #[doc = " - #dnnl_eltwise_round: @p alpha and @p beta ignored"]
    pub beta: f32,
}
#[test]
fn bindgen_test_layout_dnnl_eltwise_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_eltwise_desc_t>(), 1416usize, "Size of: dnnl_eltwise_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_eltwise_desc_t>(), 8usize, "Alignment of dnnl_eltwise_desc_t");
    let base = ::std::ptr::null::<dnnl_eltwise_desc_t>();
    assert_eq!(unsafe { &(*base).primitive_kind as *const _ as usize }, 0usize, "Offset of field: dnnl_eltwise_desc_t::primitive_kind");
    assert_eq!(unsafe { &(*base).prop_kind as *const _ as usize }, 4usize, "Offset of field: dnnl_eltwise_desc_t::prop_kind");
    assert_eq!(unsafe { &(*base).alg_kind as *const _ as usize }, 8usize, "Offset of field: dnnl_eltwise_desc_t::alg_kind");
    assert_eq!(unsafe { &(*base).data_desc as *const _ as usize }, 16usize, "Offset of field: dnnl_eltwise_desc_t::data_desc");
    assert_eq!(unsafe { &(*base).diff_data_desc as *const _ as usize }, 712usize, "Offset of field: dnnl_eltwise_desc_t::diff_data_desc");
    assert_eq!(unsafe { &(*base).alpha as *const _ as usize }, 1408usize, "Offset of field: dnnl_eltwise_desc_t::alpha");
    assert_eq!(unsafe { &(*base).beta as *const _ as usize }, 1412usize, "Offset of field: dnnl_eltwise_desc_t::beta");
}
#[doc = " A descriptor of a Softmax operation."]
#[repr(C)]
#[derive(Copy, Clone)]
// Layout mirrors the C `dnnl_softmax_desc_t` (verified by the test below).
pub struct dnnl_softmax_desc_t {
    #[doc = " The kind of primitive. Used for self-identifying the primitive"]
    #[doc = " descriptor. Must be #dnnl_softmax."]
    pub primitive_kind: dnnl_primitive_kind_t,
    #[doc = " The kind of propagation. Possible values: #dnnl_forward_training and"]
    #[doc = " #dnnl_forward_inference."]
    pub prop_kind: dnnl_prop_kind_t,
    #[doc = " Source and destination memory descriptor."]
    pub data_desc: dnnl_memory_desc_t,
    #[doc = " Source and Destination of gradient memory descriptor."]
    pub diff_desc: dnnl_memory_desc_t,
    #[doc = " The axis along which to perform the softmax."]
    pub softmax_axis: ::libc::c_int,
}
#[test]
fn bindgen_test_layout_dnnl_softmax_desc_t() {
    assert_eq!(::std::mem::size_of::<dnnl_softmax_desc_t>(), 1408usize, "Size of: dnnl_softmax_desc_t");
    assert_eq!(::std::mem::align_of::<dnnl_softmax_desc_t>(), 8usize, "Alignment of dnnl_softmax_desc_t");
    let base = ::std::ptr::null::<dnnl_softmax_desc_t>();
    assert_eq!(unsafe { &(*base).primitive_kind as *const _ as usize }, 0usize, "Offset of field: dnnl_softmax_desc_t::primitive_kind");
    assert_eq!(unsafe { &(*base).prop_kind as *const _ as usize }, 4usize, "Offset of field: dnnl_softmax_desc_t::prop_kind");
    assert_eq!(unsafe { &(*base).data_desc as *const _ as usize }, 8usize, "Offset of field: dnnl_softmax_desc_t::data_desc");
    assert_eq!(unsafe { &(*base).diff_desc as *const _ as usize }, 704usize, "Offset of field: dnnl_softmax_desc_t::diff_desc");
    assert_eq!(unsafe { &(*base).softmax_axis as *const _ as usize }, 1400usize, "Offset of field: dnnl_softmax_desc_t::softmax_axis");
}
/// A descriptor of a LogSoftmax operation. An alias of Softmax structure, but
/// primitive_kind must be #dnnl_logsoftmax.
pub type dnnl_logsoftmax_desc_t = dnnl_softmax_desc_t;
/// A descriptor of a pooling operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_pooling_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_pooling.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, #dnnl_backward, and #dnnl_backward_data.
    pub prop_kind: dnnl_prop_kind_t,
    /// The kind of pooling algorithm.
    /// Possible values: #dnnl_pooling_max,
    /// #dnnl_pooling_avg_include_padding, and
    /// #dnnl_pooling_avg_exclude_padding.
    pub alg_kind: dnnl_alg_kind_t,
    /// Source memory descriptor.
    pub src_desc: dnnl_memory_desc_t,
    /// Source gradient memory descriptor.
    pub diff_src_desc: dnnl_memory_desc_t,
    /// Destination memory descriptor.
    pub dst_desc: dnnl_memory_desc_t,
    /// Destination gradient memory descriptor.
    pub diff_dst_desc: dnnl_memory_desc_t,
    /// Pooling kernel strides for spatial dimensions.
    pub strides: dnnl_dims_t,
    /// Pooling kernel spatial dimensions.
    pub kernel: dnnl_dims_t,
    /// Padding in each spatial dimension. padding[0] is a padding in the
    /// beginning (@p padding_l), padding[1] is a padding in the end (@p
    /// padding_r).
    pub padding: [dnnl_dims_t; 2usize],
    /// The accumulator data type. Initialized automatically.
    pub accum_data_type: dnnl_data_type_t,
}
#[test]
fn bindgen_test_layout_dnnl_pooling_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_pooling_desc_t`: total size, alignment, and every field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_pooling_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_pooling_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_pooling_desc_t>(),
        3192usize,
        concat!("Size of: ", stringify!(dnnl_pooling_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_pooling_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_pooling_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(alg_kind, 8usize);
    check_offset!(src_desc, 16usize);
    check_offset!(diff_src_desc, 712usize);
    check_offset!(dst_desc, 1408usize);
    check_offset!(diff_dst_desc, 2104usize);
    check_offset!(strides, 2800usize);
    check_offset!(kernel, 2896usize);
    check_offset!(padding, 2992usize);
    check_offset!(accum_data_type, 3184usize);
}
/// A descriptor of a Local Response Normalization (LRN) operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_lrn_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_lrn.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, #dnnl_backward, and #dnnl_backward_data.
    pub prop_kind: dnnl_prop_kind_t,
    /// LRN algorithm. Possible values: #dnnl_lrn_within_channel and
    /// #dnnl_lrn_across_channels.
    pub alg_kind: dnnl_alg_kind_t,
    /// Source and destination memory descriptor.
    pub data_desc: dnnl_memory_desc_t,
    /// Source and destination gradient memory descriptor.
    pub diff_data_desc: dnnl_memory_desc_t,
    /// The number of channels to sum over (for cross-channel LRN) or the side
    /// length of the square region to sum over (for within-channel LRN).
    pub local_size: dnnl_dim_t,
    /// LRN alpha parameter.
    pub lrn_alpha: f32,
    /// LRN beta parameter.
    pub lrn_beta: f32,
    /// LRN k parameter.
    pub lrn_k: f32,
}
#[test]
fn bindgen_test_layout_dnnl_lrn_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_lrn_desc_t`: total size, alignment, and every field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_lrn_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_lrn_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_lrn_desc_t>(),
        1432usize,
        concat!("Size of: ", stringify!(dnnl_lrn_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_lrn_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_lrn_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(alg_kind, 8usize);
    check_offset!(data_desc, 16usize);
    check_offset!(diff_data_desc, 712usize);
    check_offset!(local_size, 1408usize);
    check_offset!(lrn_alpha, 1416usize);
    check_offset!(lrn_beta, 1420usize);
    check_offset!(lrn_k, 1424usize);
}
/// A descriptor of a Batch Normalization operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_batch_normalization_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_batch_normalization.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, #dnnl_backward, and #dnnl_backward_data.
    pub prop_kind: dnnl_prop_kind_t,
    /// Source and destination memory descriptor.
    pub data_desc: dnnl_memory_desc_t,
    /// Source and destination gradient memory descriptor.
    pub diff_data_desc: dnnl_memory_desc_t,
    /// Scale and shift data and gradient memory descriptors.
    ///
    /// Scaleshift memory descriptor uses 2D #dnnl_nc format[2,Channels]. 1-st
    /// dimension contains gamma parameter, 2-nd dimension contains beta
    /// parameter.
    pub data_scaleshift_desc: dnnl_memory_desc_t,
    /// Gradient counterpart of `data_scaleshift_desc`.
    pub diff_data_scaleshift_desc: dnnl_memory_desc_t,
    /// Statistics memory descriptor.
    ///
    /// Statistics (mean or variance) descriptor use 1D #dnnl_x format[Channels].
    pub stat_desc: dnnl_memory_desc_t,
    /// Batch normalization epsilon parameter.
    pub batch_norm_epsilon: f32,
    /// Batch normalization flags (C `unsigned` bit mask).
    pub flags: ::libc::c_uint,
}
#[test]
fn bindgen_test_layout_dnnl_batch_normalization_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_batch_normalization_desc_t`: total size, alignment, and every
    // field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_batch_normalization_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_batch_normalization_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_batch_normalization_desc_t>(),
        3496usize,
        concat!("Size of: ", stringify!(dnnl_batch_normalization_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_batch_normalization_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_batch_normalization_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(data_desc, 8usize);
    check_offset!(diff_data_desc, 704usize);
    check_offset!(data_scaleshift_desc, 1400usize);
    check_offset!(diff_data_scaleshift_desc, 2096usize);
    check_offset!(stat_desc, 2792usize);
    check_offset!(batch_norm_epsilon, 3488usize);
    check_offset!(flags, 3492usize);
}
/// A descriptor of a Layer Normalization operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_layer_normalization_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_layer_normalization.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, #dnnl_backward, and #dnnl_backward_data.
    pub prop_kind: dnnl_prop_kind_t,
    /// Source and destination memory descriptor.
    pub data_desc: dnnl_memory_desc_t,
    /// Source and destination gradient memory descriptor.
    pub diff_data_desc: dnnl_memory_desc_t,
    /// Scale and shift data and gradient memory descriptors.
    ///
    /// Scaleshift memory descriptor uses 2D #dnnl_ab
    /// format[2, normalized_dim] where 1-st dimension contains gamma parameter,
    /// 2-nd dimension contains beta parameter. Normalized_dim is equal to the
    /// last logical dimension of the data tensor across which normalization is
    /// performed.
    pub data_scaleshift_desc: dnnl_memory_desc_t,
    /// Gradient counterpart of `data_scaleshift_desc`.
    pub diff_data_scaleshift_desc: dnnl_memory_desc_t,
    /// Mean and variance data memory descriptors.
    ///
    /// Statistics (mean and variance) memory descriptor is the k-dimensional tensor
    /// where k is equal to data_tensor_ndims - 1 and may have any plain
    /// (stride[last_dim] == 1) user-provided format.
    pub stat_desc: dnnl_memory_desc_t,
    /// Layer normalization epsilon parameter.
    pub layer_norm_epsilon: f32,
    /// Layer normalization flags (C `unsigned` bit mask).
    pub flags: ::libc::c_uint,
}
#[test]
fn bindgen_test_layout_dnnl_layer_normalization_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_layer_normalization_desc_t`: total size, alignment, and every
    // field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_layer_normalization_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_layer_normalization_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_layer_normalization_desc_t>(),
        3496usize,
        concat!("Size of: ", stringify!(dnnl_layer_normalization_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_layer_normalization_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_layer_normalization_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(data_desc, 8usize);
    check_offset!(diff_data_desc, 704usize);
    check_offset!(data_scaleshift_desc, 1400usize);
    check_offset!(diff_data_scaleshift_desc, 2096usize);
    check_offset!(stat_desc, 2792usize);
    check_offset!(layer_norm_epsilon, 3488usize);
    check_offset!(flags, 3492usize);
}
/// A descriptor of an inner product operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_inner_product_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_inner_product.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, #dnnl_backward_data,
    /// #dnnl_backward_weights, and #dnnl_backward_bias.
    pub prop_kind: dnnl_prop_kind_t,
    /// Source memory descriptor.
    pub src_desc: dnnl_memory_desc_t,
    /// Source gradient memory descriptor.
    pub diff_src_desc: dnnl_memory_desc_t,
    /// Weights memory descriptor.
    pub weights_desc: dnnl_memory_desc_t,
    /// Weights gradient memory descriptor.
    pub diff_weights_desc: dnnl_memory_desc_t,
    /// Bias memory descriptor.
    pub bias_desc: dnnl_memory_desc_t,
    /// Bias gradient memory descriptor.
    pub diff_bias_desc: dnnl_memory_desc_t,
    /// Destination memory descriptor.
    pub dst_desc: dnnl_memory_desc_t,
    /// Destination gradient memory descriptor.
    pub diff_dst_desc: dnnl_memory_desc_t,
    /// The accumulator data type. Initialized automatically.
    pub accum_data_type: dnnl_data_type_t,
}
#[test]
fn bindgen_test_layout_dnnl_inner_product_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_inner_product_desc_t`: total size, alignment, and every field
    // offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_inner_product_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_inner_product_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_inner_product_desc_t>(),
        5584usize,
        concat!("Size of: ", stringify!(dnnl_inner_product_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_inner_product_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_inner_product_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(src_desc, 8usize);
    check_offset!(diff_src_desc, 704usize);
    check_offset!(weights_desc, 1400usize);
    check_offset!(diff_weights_desc, 2096usize);
    check_offset!(bias_desc, 2792usize);
    check_offset!(diff_bias_desc, 3488usize);
    check_offset!(dst_desc, 4184usize);
    check_offset!(diff_dst_desc, 4880usize);
    check_offset!(accum_data_type, 5576usize);
}
/// Flags for RNN cell.
#[repr(u32)]
#[non_exhaustive]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_rnn_flags_t {
    /// Undefined RNN flags
    dnnl_rnn_flags_undef = 0,
}
impl dnnl_rnn_direction_t {
pub const dnnl_unidirectional: dnnl_rnn_direction_t =
dnnl_rnn_direction_t::dnnl_unidirectional_left2right;
}
/// A direction of RNN primitive execution.
#[repr(u32)]
#[non_exhaustive]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_rnn_direction_t {
    /// Unidirectional execution of RNN primitive from left to right.
    dnnl_unidirectional_left2right = 0,
    /// Unidirectional execution of RNN primitive from right to left.
    dnnl_unidirectional_right2left = 1,
    /// Bidirectional execution of RNN primitive with concatenation of the
    /// results.
    dnnl_bidirectional_concat = 2,
    /// Bidirectional execution of RNN primitive with summation of the
    /// results.
    dnnl_bidirectional_sum = 3,
}
/// A descriptor for an RNN operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_rnn_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_rnn.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of propagation. Possible values: #dnnl_forward_training,
    /// #dnnl_forward_inference, and #dnnl_backward.
    pub prop_kind: dnnl_prop_kind_t,
    /// RNN cell kind. Must be one of #dnnl_vanilla_rnn,
    /// #dnnl_vanilla_lstm, #dnnl_vanilla_gru, or #dnnl_lbr_gru.
    pub cell_kind: dnnl_alg_kind_t,
    /// The direction of RNN primitive execution.
    pub direction: dnnl_rnn_direction_t,
    /// Source layer memory descriptor.
    pub src_layer_desc: dnnl_memory_desc_t,
    /// Source iteration memory descriptor for hidden state.
    pub src_iter_desc: dnnl_memory_desc_t,
    /// Source iteration memory descriptor for cell state.
    pub src_iter_c_desc: dnnl_memory_desc_t,
    /// Weights layer memory descriptor.
    pub weights_layer_desc: dnnl_memory_desc_t,
    /// Weights iteration memory descriptor.
    pub weights_iter_desc: dnnl_memory_desc_t,
    /// Bias memory descriptor.
    pub bias_desc: dnnl_memory_desc_t,
    /// Destination layer memory descriptor.
    pub dst_layer_desc: dnnl_memory_desc_t,
    /// Destination iter memory descriptor for hidden state.
    pub dst_iter_desc: dnnl_memory_desc_t,
    /// Destination iter memory descriptor for cell state.
    pub dst_iter_c_desc: dnnl_memory_desc_t,
    /// Weights peephole memory descriptor.
    /// This memory descriptor is equal to zero memory descriptor in case of
    /// non-peephole LSTMs and other non-LSTM RNNs.
    pub weights_peephole_desc: dnnl_memory_desc_t,
    /// Weights projection memory descriptor.
    /// This memory descriptor is equal to zero memory descriptor in case of
    /// non-projection LSTMs and other non-LSTM RNNs.
    pub weights_projection_desc: dnnl_memory_desc_t,
    /// Source gradient layer memory descriptor.
    pub diff_src_layer_desc: dnnl_memory_desc_t,
    /// Source gradient iter memory descriptor for hidden state.
    pub diff_src_iter_desc: dnnl_memory_desc_t,
    /// Source gradient iter memory descriptor for cell state.
    pub diff_src_iter_c_desc: dnnl_memory_desc_t,
    /// Weights gradient layer memory descriptor.
    pub diff_weights_layer_desc: dnnl_memory_desc_t,
    /// Weights gradient iter memory descriptor.
    pub diff_weights_iter_desc: dnnl_memory_desc_t,
    /// Bias gradient memory descriptor.
    pub diff_bias_desc: dnnl_memory_desc_t,
    /// Destination gradient layer memory descriptor.
    pub diff_dst_layer_desc: dnnl_memory_desc_t,
    /// Destination gradient iteration memory descriptor for hidden state.
    pub diff_dst_iter_desc: dnnl_memory_desc_t,
    /// Destination gradient iteration memory descriptor for cell state.
    pub diff_dst_iter_c_desc: dnnl_memory_desc_t,
    /// Weights gradient peephole memory descriptor.
    /// This memory descriptor is equal to zero memory descriptor in case of
    /// non-peephole LSTMs and other non-LSTM RNNs.
    pub diff_weights_peephole_desc: dnnl_memory_desc_t,
    /// Weights gradient projection memory descriptor.
    /// This memory descriptor is equal to zero memory descriptor in case of
    /// non-projection LSTMs and other non-LSTM RNNs.
    pub diff_weights_projection_desc: dnnl_memory_desc_t,
    /// RNN cell flags
    pub flags: ::libc::c_uint,
    /// Activation function used for vanilla_rnn cell kind.
    /// Must be either #dnnl_eltwise_relu or #dnnl_eltwise_tanh.
    pub activation_kind: dnnl_alg_kind_t,
    /// Activation alpha parameter.
    pub alpha: f32,
    /// Activation beta parameter.
    pub beta: f32,
}
#[test]
fn bindgen_test_layout_dnnl_rnn_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_rnn_desc_t`: total size, alignment, and every field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_rnn_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_rnn_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_rnn_desc_t>(),
        15344usize,
        concat!("Size of: ", stringify!(dnnl_rnn_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_rnn_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_rnn_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(prop_kind, 4usize);
    check_offset!(cell_kind, 8usize);
    check_offset!(direction, 12usize);
    check_offset!(src_layer_desc, 16usize);
    check_offset!(src_iter_desc, 712usize);
    check_offset!(src_iter_c_desc, 1408usize);
    check_offset!(weights_layer_desc, 2104usize);
    check_offset!(weights_iter_desc, 2800usize);
    check_offset!(bias_desc, 3496usize);
    check_offset!(dst_layer_desc, 4192usize);
    check_offset!(dst_iter_desc, 4888usize);
    check_offset!(dst_iter_c_desc, 5584usize);
    check_offset!(weights_peephole_desc, 6280usize);
    check_offset!(weights_projection_desc, 6976usize);
    check_offset!(diff_src_layer_desc, 7672usize);
    check_offset!(diff_src_iter_desc, 8368usize);
    check_offset!(diff_src_iter_c_desc, 9064usize);
    check_offset!(diff_weights_layer_desc, 9760usize);
    check_offset!(diff_weights_iter_desc, 10456usize);
    check_offset!(diff_bias_desc, 11152usize);
    check_offset!(diff_dst_layer_desc, 11848usize);
    check_offset!(diff_dst_iter_desc, 12544usize);
    check_offset!(diff_dst_iter_c_desc, 13240usize);
    check_offset!(diff_weights_peephole_desc, 13936usize);
    check_offset!(diff_weights_projection_desc, 14632usize);
    check_offset!(flags, 15328usize);
    check_offset!(activation_kind, 15332usize);
    check_offset!(alpha, 15336usize);
    check_offset!(beta, 15340usize);
}
/// A descriptor of a binary operation.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_binary_desc_t {
    /// The kind of primitive. Used for self-identifying the primitive
    /// descriptor. Must be #dnnl_binary.
    pub primitive_kind: dnnl_primitive_kind_t,
    /// The kind of the binary algorithm. Possible values:
    /// #dnnl_binary_add, #dnnl_binary_mul, #dnnl_binary_max and
    /// #dnnl_binary_min.
    pub alg_kind: dnnl_alg_kind_t,
    /// Source memory descriptors.
    pub src_desc: [dnnl_memory_desc_t; 2usize],
    /// Destination memory descriptor.
    pub dst_desc: dnnl_memory_desc_t,
}
#[test]
fn bindgen_test_layout_dnnl_binary_desc_t() {
    // Verify that the Rust binding matches the C ABI layout of
    // `dnnl_binary_desc_t`: total size, alignment, and every field offset.
    //
    // Offsets are derived from an uninitialized value via `addr_of!`; the
    // previous `&(*::std::ptr::null::<T>()).field` idiom dereferences a null
    // pointer, which is undefined behavior in Rust.
    let uninit = ::std::mem::MaybeUninit::<dnnl_binary_desc_t>::uninit();
    let ptr = uninit.as_ptr();
    // Asserts that `$field` lives `$off` bytes from the start of the struct.
    macro_rules! check_offset {
        ($field:ident, $off:expr) => {
            assert_eq!(
                unsafe { ::std::ptr::addr_of!((*ptr).$field) as usize - ptr as usize },
                $off,
                concat!(
                    "Offset of field: ",
                    stringify!(dnnl_binary_desc_t),
                    "::",
                    stringify!($field)
                )
            );
        };
    }
    assert_eq!(
        ::std::mem::size_of::<dnnl_binary_desc_t>(),
        2096usize,
        concat!("Size of: ", stringify!(dnnl_binary_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_binary_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_binary_desc_t))
    );
    check_offset!(primitive_kind, 0usize);
    check_offset!(alg_kind, 4usize);
    check_offset!(src_desc, 8usize);
    check_offset!(dst_desc, 1400usize);
}
#[doc = " A descriptor of a matrix multiplication operation."]
#[doc = ""]
#[doc = " 2D case:"]
#[doc = " dst[m, n] = src[m, k] * weights[k, n] + bias[m, n]"]
#[doc = ""]
#[doc = " 3D case:"]
#[doc = " dst[mb, m, n] = src[mb, m, k] * weights[mb, k, n] + bias[mb, m, n]"]
// Mirrors the C struct `dnnl_matmul_desc_t`; `#[repr(C)]` fixes the ABI and
// the companion `bindgen_test_layout_*` test checks the exact layout.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_matmul_desc_t {
#[doc = " The kind of primitive. Used for self-identifying the primitive"]
#[doc = " descriptor. Must be #dnnl_matmul."]
pub primitive_kind: dnnl_primitive_kind_t,
#[doc = " Source memory descriptor."]
pub src_desc: dnnl_memory_desc_t,
#[doc = " Weights memory descriptor."]
pub weights_desc: dnnl_memory_desc_t,
#[doc = " Bias memory descriptor."]
pub bias_desc: dnnl_memory_desc_t,
#[doc = " Destination memory descriptor."]
pub dst_desc: dnnl_memory_desc_t,
#[doc = " The accumulator data type. Initialized automatically."]
pub accum_data_type: dnnl_data_type_t,
}
#[test]
fn bindgen_test_layout_dnnl_matmul_desc_t() {
    // Verifies that the Rust definition of `dnnl_matmul_desc_t` matches the
    // C ABI layout (size, alignment, and per-field offsets).
    //
    // Offsets are taken from an uninitialized value via `addr_of!` rather
    // than by dereferencing a null pointer, which is undefined behavior.
    const UNINIT: ::std::mem::MaybeUninit<dnnl_matmul_desc_t> =
        ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<dnnl_matmul_desc_t>(),
        2800usize,
        concat!("Size of: ", stringify!(dnnl_matmul_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_matmul_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_matmul_desc_t))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).primitive_kind) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(primitive_kind)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).src_desc) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(src_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).weights_desc) as usize - ptr as usize },
        704usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(weights_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).bias_desc) as usize - ptr as usize },
        1400usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(bias_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).dst_desc) as usize - ptr as usize },
        2096usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(dst_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).accum_data_type) as usize - ptr as usize },
        2792usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_matmul_desc_t),
            "::",
            stringify!(accum_data_type)
        )
    );
}
#[doc = " A descriptor of resampling operation."]
// Mirrors the C struct `dnnl_resampling_desc_t`; `#[repr(C)]` fixes the ABI
// and the companion `bindgen_test_layout_*` test checks the exact layout.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct dnnl_resampling_desc_t {
#[doc = " The kind of primitive. Used for self-identifying the primitive"]
#[doc = " descriptor. Must be #dnnl_resampling."]
pub primitive_kind: dnnl_primitive_kind_t,
#[doc = " The kind of propagation. Possible values: #dnnl_forward_training,"]
#[doc = " #dnnl_forward_inference, #dnnl_backward_data,"]
pub prop_kind: dnnl_prop_kind_t,
#[doc = " The kind of the resampling algorithm. Possible values:"]
#[doc = " #dnnl_resampling_nearest, #dnnl_resampling_linear."]
pub alg_kind: dnnl_alg_kind_t,
#[doc = " Source memory descriptor."]
pub src_desc: dnnl_memory_desc_t,
#[doc = " Source gradient memory descriptor."]
pub diff_src_desc: dnnl_memory_desc_t,
#[doc = " Destination memory descriptor."]
pub dst_desc: dnnl_memory_desc_t,
#[doc = " Destination gradient memory descriptor."]
pub diff_dst_desc: dnnl_memory_desc_t,
#[doc = " Resampling factor in each spatial dimension."]
pub factors: [f32; 12usize],
}
#[test]
fn bindgen_test_layout_dnnl_resampling_desc_t() {
    // Verifies that the Rust definition of `dnnl_resampling_desc_t` matches
    // the C ABI layout (size, alignment, and per-field offsets).
    //
    // Offsets are taken from an uninitialized value via `addr_of!` rather
    // than by dereferencing a null pointer, which is undefined behavior.
    const UNINIT: ::std::mem::MaybeUninit<dnnl_resampling_desc_t> =
        ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<dnnl_resampling_desc_t>(),
        2848usize,
        concat!("Size of: ", stringify!(dnnl_resampling_desc_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_resampling_desc_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_resampling_desc_t))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).primitive_kind) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(primitive_kind)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).prop_kind) as usize - ptr as usize },
        4usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(prop_kind)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).alg_kind) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(alg_kind)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).src_desc) as usize - ptr as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(src_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).diff_src_desc) as usize - ptr as usize },
        712usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(diff_src_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).dst_desc) as usize - ptr as usize },
        1408usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(dst_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).diff_dst_desc) as usize - ptr as usize },
        2104usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(diff_dst_desc)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).factors) as usize - ptr as usize },
        2800usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_resampling_desc_t),
            "::",
            stringify!(factors)
        )
    );
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " @brief Kinds of engines."]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_engine_kind_t {
#[doc = " An unspecified engine."]
dnnl_any_engine = 0,
#[doc = " CPU engine."]
dnnl_cpu = 1,
#[doc = " GPU engine."]
dnnl_gpu = 2,
}
#[doc = " @struct dnnl_engine"]
#[doc = " @brief An opaque structure to describe an engine."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_engine {
// Zero-sized marker field: bindgen's pattern for an opaque C type. Values
// of this type are only ever handled behind raw pointers (see the handle
// type aliases below).
_unused: [u8; 0],
}
#[doc = " @brief An engine handle."]
pub type dnnl_engine_t = *mut dnnl_engine;
#[doc = " @struct dnnl_primitive_desc_iterator"]
#[doc = " @brief An opaque structure to describe a primitive descriptor iterator."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_primitive_desc_iterator {
// Opaque C type; used only behind pointers.
_unused: [u8; 0],
}
#[doc = " @brief A primitive descriptor iterator handle."]
pub type dnnl_primitive_desc_iterator_t = *mut dnnl_primitive_desc_iterator;
#[doc = " @brief A constant primitive descriptor iterator handle."]
pub type const_dnnl_primitive_desc_iterator_t = *const dnnl_primitive_desc_iterator;
#[doc = " @struct dnnl_primitive_desc"]
#[doc = " @brief An opaque structure to describe a primitive descriptor."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_primitive_desc {
// Opaque C type; used only behind pointers.
_unused: [u8; 0],
}
#[doc = " @brief A primitive descriptor handle."]
pub type dnnl_primitive_desc_t = *mut dnnl_primitive_desc;
#[doc = " @brief A constant primitive descriptor handle."]
pub type const_dnnl_primitive_desc_t = *const dnnl_primitive_desc;
#[repr(u32)]
#[non_exhaustive]
#[doc = " Scratchpad mode"]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_scratchpad_mode_t {
#[doc = " The library manages the scratchpad allocation according to the policy"]
#[doc = " specified by the `DNNL_ENABLE_CONCURRENT_EXEC`"]
#[doc = " [build option](@ref dev_guide_build_options) (default)."]
#[doc = ""]
#[doc = " When `DNNL_ENABLE_CONCURRENT_EXEC=OFF` (default), the library"]
#[doc = " scratchpad is common to all primitives to reduce the memory footprint."]
#[doc = " This configuration comes with limited thread-safety properties, namely"]
#[doc = " primitives can be created and executed in parallel but cannot migrate"]
#[doc = " between threads (in other words, each primitive should be executed in"]
#[doc = " the same thread it was created in)."]
#[doc = ""]
#[doc = " When `DNNL_ENABLE_CONCURRENT_EXEC=ON`, the library scratchpad is"]
#[doc = " private to each primitive. The memory footprint is larger than when"]
#[doc = " using `DNNL_ENABLE_CONCURRENT_EXEC=OFF` but different primitives can be"]
#[doc = " created and run concurrently (the same primitive cannot be run"]
#[doc = " concurrently from two different threads though)."]
dnnl_scratchpad_mode_library = 0,
#[doc = " The user manages the scratchpad allocation by querying and providing"]
#[doc = " the scratchpad memory to primitives. This mode is thread-safe as long"]
#[doc = " as the scratchpad buffers are not used concurrently by two primitive"]
#[doc = " executions."]
dnnl_scratchpad_mode_user = 1,
}
#[doc = " @struct dnnl_primitive_attr"]
#[doc = " @brief An opaque structure for primitive descriptor attributes."]
#[doc = ""]
#[doc = " Attributes may contain:"]
#[doc = " - output scales (to scale the result prior to storing it to the memory)"]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_primitive_attr {
// Zero-sized marker field: bindgen's pattern for an opaque C type; values
// are only ever handled behind the pointer aliases below.
_unused: [u8; 0],
}
#[doc = " @brief A primitive descriptor attributes handle that controls primitive"]
#[doc = " behavior."]
pub type dnnl_primitive_attr_t = *mut dnnl_primitive_attr;
#[doc = " @brief A constant primitive descriptor attributes handle."]
pub type const_dnnl_primitive_attr_t = *const dnnl_primitive_attr;
#[doc = " @struct dnnl_post_ops"]
#[doc = " @brief An opaque structure for a chain of post operations."]
#[doc = ""]
#[doc = " dnnl_post_ops can be used to perform some (trivial) operations like"]
#[doc = " accumulation or eltwise after certain primitives like convolution."]
#[doc = ""]
#[doc = " Post operations might be combined together, making a chain of post"]
#[doc = " operations. For instance one can configure convolution followed by"]
#[doc = " accumulation followed by eltwise. This might be especially beneficial"]
#[doc = " for residual learning blocks."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " Of course not all combinations are supported, so the user should handle"]
#[doc = " errors accordingly."]
#[doc = ""]
#[doc = " Supported post operations:"]
#[doc = " - accumulation (base primitive: convolution)"]
#[doc = " - eltwise (base primitive: convolution)"]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_post_ops {
// Opaque C type; used only behind pointers.
_unused: [u8; 0],
}
#[doc = " @brief A post operation chain handle."]
pub type dnnl_post_ops_t = *mut dnnl_post_ops;
#[doc = " @brief A constant post operation chain handle."]
pub type const_dnnl_post_ops_t = *const dnnl_post_ops;
#[doc = " @struct dnnl_primitive"]
#[doc = " An opaque structure to describe a primitive."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_primitive {
// Opaque C type; used only behind pointers.
_unused: [u8; 0],
}
#[doc = " A primitive handle."]
pub type dnnl_primitive_t = *mut dnnl_primitive;
#[doc = " A constant primitive handle."]
pub type const_dnnl_primitive_t = *const dnnl_primitive;
#[doc = " A structure that contains an index and a memory object, and is used to pass"]
#[doc = " arguments to dnnl_primitive_execute()."]
// Mirrors the C struct `dnnl_exec_arg_t`; `#[repr(C)]` fixes the ABI and the
// companion `bindgen_test_layout_*` test checks the exact layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_exec_arg_t {
#[doc = "< An argument index, e.g. DNNL_ARG_SRC"]
pub arg: ::libc::c_int,
#[doc = "< Input/output memory"]
pub memory: dnnl_memory_t,
}
#[test]
fn bindgen_test_layout_dnnl_exec_arg_t() {
    // Verifies that the Rust definition of `dnnl_exec_arg_t` matches the C
    // ABI layout (size, alignment, and per-field offsets).
    //
    // Offsets are taken from an uninitialized value via `addr_of!` rather
    // than by dereferencing a null pointer, which is undefined behavior.
    const UNINIT: ::std::mem::MaybeUninit<dnnl_exec_arg_t> =
        ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<dnnl_exec_arg_t>(),
        16usize,
        concat!("Size of: ", stringify!(dnnl_exec_arg_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_exec_arg_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_exec_arg_t))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).arg) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_exec_arg_t),
            "::",
            stringify!(arg)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).memory) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_exec_arg_t),
            "::",
            stringify!(memory)
        )
    );
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " Primitive descriptor query specification"]
#[doc = ""]
#[doc = " For generic function dnnl_primitive_desc_query(), the type of result must"]
#[doc = " agree with the queried argument. The correspondence table:"]
#[doc = ""]
#[doc = " Query kind | Type of query result"]
#[doc = " --------------------------------|-----------------------------"]
#[doc = " #dnnl_query_engine | #dnnl_engine_t *"]
#[doc = " #dnnl_query_scratchpad_engine | #dnnl_engine_t *"]
#[doc = " #dnnl_query_primitive_kind | #dnnl_primitive_kind_t *"]
#[doc = " dnnl_query_*_s32 | int *"]
#[doc = " dnnl_query_*_s64 | #dnnl_dim_t * (same as int64_t *)"]
#[doc = " dnnl_query_*_f64 | double *"]
#[doc = " dnnl_query_*_str | const char **"]
#[doc = " #dnnl_query_op_d | #const_dnnl_op_desc_t *"]
#[doc = " dnnl_query_*_md | const #dnnl_memory_desc_t **"]
#[doc = " dnnl_query_*_\\<op\\>_d | const dnnl_\\<op\\>_desc_t **"]
#[doc = " dnnl_query_*_pd | #const_dnnl_primitive_desc_t *"]
#[doc = ""]
#[doc = " @note"]
#[doc = " Rule of thumb: all opaque types and structures are returned by"]
#[doc = " reference. All numbers are returned by value."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " All returned references point to constant objects and are valid only"]
#[doc = " during the lifetime of the queried primitive descriptor. Returned objects"]
#[doc = " must not be destroyed by the user. If you need to keep the object longer"]
#[doc = " than the lifetime of the queried primitive descriptor, use"]
#[doc = " dnnl_primitive_desc_clone() to make a copy."]
// Discriminants must match the C enum exactly; the jumps to 64, 128 and 255
// reproduce the "stub"/section markers of the C header (see the *_some_*
// variants) and are intentional, not gaps to be filled.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_query_t {
#[doc = "< no query"]
dnnl_query_undef = 0,
#[doc = "< execution engine"]
dnnl_query_engine = 1,
#[doc = "< primitive kind"]
dnnl_query_primitive_kind = 2,
#[doc = "< number of inputs expected"]
dnnl_query_num_of_inputs_s32 = 3,
#[doc = "< number of outputs expected"]
dnnl_query_num_of_outputs_s32 = 4,
#[doc = "< runtime estimation (seconds)"]
dnnl_query_time_estimate_f64 = 5,
#[doc = "< memory consumption -- extra"]
dnnl_query_memory_consumption_s64 = 6,
#[doc = "< scratchpad engine -- engine to be used"]
dnnl_query_scratchpad_engine = 7,
#[doc = "< implementation name"]
dnnl_query_impl_info_str = 8,
#[doc = "< source engine"]
dnnl_query_reorder_src_engine = 9,
#[doc = "< destination engine"]
dnnl_query_reorder_dst_engine = 10,
#[doc = "< propagation kind"]
dnnl_query_prop_kind = 11,
#[doc = "< stub"]
dnnl_query_some_d = 64,
#[doc = "< op descriptor"]
dnnl_query_op_d = 65,
#[doc = "< convolution descriptor"]
dnnl_query_convolution_d = 66,
#[doc = "< deconvolution descriptor"]
dnnl_query_deconvolution_d = 67,
#[doc = "< shuffle descriptor"]
dnnl_query_shuffle_d = 68,
#[doc = "< eltwise descriptor"]
dnnl_query_eltwise_d = 69,
#[doc = "< softmax descriptor"]
dnnl_query_softmax_d = 70,
#[doc = "< pooling descriptor"]
dnnl_query_pooling_d = 71,
#[doc = "< lrn descriptor"]
dnnl_query_lrn_d = 72,
#[doc = "< batch normalization descriptor"]
dnnl_query_batch_normalization_d = 73,
#[doc = "< layer normalization descriptor"]
dnnl_query_layer_normalization_d = 74,
#[doc = "< inner product descriptor"]
dnnl_query_inner_product_d = 75,
#[doc = "< rnn descriptor"]
dnnl_query_rnn_d = 76,
#[doc = "< GEMM descriptor (internal)"]
dnnl_query_gemm_d = 77,
#[doc = "< binary descriptor"]
dnnl_query_binary_d = 78,
#[doc = "< logsoftmax descriptor"]
dnnl_query_logsoftmax_d = 79,
#[doc = "< matrix multiplication (matmul) descriptor"]
dnnl_query_matmul_d = 80,
#[doc = "< resampling descriptor"]
dnnl_query_resampling_d = 81,
#[doc = "< stub"]
dnnl_query_some_md = 128,
#[doc = "< source memory desc"]
dnnl_query_src_md = 129,
#[doc = "< source gradient memory desc"]
dnnl_query_diff_src_md = 130,
#[doc = "< weights memory descriptor desc"]
dnnl_query_weights_md = 131,
#[doc = "< weights grad. memory desc"]
dnnl_query_diff_weights_md = 132,
#[doc = "< destination memory desc"]
dnnl_query_dst_md = 133,
#[doc = "< destination grad. memory desc"]
dnnl_query_diff_dst_md = 134,
#[doc = "< workspace memory desc"]
dnnl_query_workspace_md = 135,
#[doc = "< scratchpad memory desc"]
dnnl_query_scratchpad_md = 136,
#[doc = "< memory desc of an execute argument"]
dnnl_query_exec_arg_md = 255,
}
// bindgen emits C enumerators that alias an existing value as associated
// constants, since Rust enums cannot have two variants with the same
// discriminant. `dnnl_stream_default_flags` is the C alias for
// `dnnl_stream_default_order`.
impl dnnl_stream_flags_t {
pub const dnnl_stream_default_flags: dnnl_stream_flags_t =
dnnl_stream_flags_t::dnnl_stream_default_order;
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " @brief Stream flags."]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_stream_flags_t {
#[doc = " Default order execution. Either in-order or out-of-order depending on"]
#[doc = " the runtime."]
dnnl_stream_default_order = 1,
#[doc = " In-order execution."]
dnnl_stream_in_order = 2,
#[doc = " Out-of-order execution."]
dnnl_stream_out_of_order = 4,
}
#[doc = " @struct dnnl_stream"]
#[doc = " An opaque structure to describe an execution stream."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_stream {
_unused: [u8; 0],
}
#[doc = " An execution stream handle."]
pub type dnnl_stream_t = *mut dnnl_stream;
#[doc = " A constant execution stream handle."]
pub type const_dnnl_stream_t = *const dnnl_stream;
#[doc = " An opaque structure to describe execution stream attrbutes."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_stream_attr {
_unused: [u8; 0],
}
#[doc = " An execution stream attributes handle."]
pub type dnnl_stream_attr_t = *mut dnnl_stream_attr;
#[doc = " A constant execution stream attributes handle."]
pub type const_dnnl_stream_attr_t = *const dnnl_stream_attr;
#[doc = " Structure containing version information as per [Semantic"]
#[doc = " Versioning](https://semver.org)"]
// Mirrors the C struct `dnnl_version_t`; `#[repr(C)]` fixes the ABI and the
// companion `bindgen_test_layout_*` test checks the exact layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct dnnl_version_t {
#[doc = "< Major version"]
pub major: ::libc::c_int,
#[doc = "< Minor version"]
pub minor: ::libc::c_int,
#[doc = "< Patch version"]
pub patch: ::libc::c_int,
#[doc = "< Git hash of the sources (may be absent)"]
pub hash: *const ::libc::c_char,
#[doc = "< CPU runtime"]
pub cpu_runtime: ::libc::c_uint,
#[doc = "< GPU runtime"]
pub gpu_runtime: ::libc::c_uint,
}
#[test]
fn bindgen_test_layout_dnnl_version_t() {
    // Verifies that the Rust definition of `dnnl_version_t` matches the C
    // ABI layout (size, alignment, and per-field offsets).
    //
    // Offsets are taken from an uninitialized value via `addr_of!` rather
    // than by dereferencing a null pointer, which is undefined behavior.
    const UNINIT: ::std::mem::MaybeUninit<dnnl_version_t> =
        ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<dnnl_version_t>(),
        32usize,
        concat!("Size of: ", stringify!(dnnl_version_t))
    );
    assert_eq!(
        ::std::mem::align_of::<dnnl_version_t>(),
        8usize,
        concat!("Alignment of ", stringify!(dnnl_version_t))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).major) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(major)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).minor) as usize - ptr as usize },
        4usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(minor)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).patch) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(patch)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).hash) as usize - ptr as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(hash)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).cpu_runtime) as usize - ptr as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(cpu_runtime)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).gpu_runtime) as usize - ptr as usize },
        28usize,
        concat!(
            "Offset of field: ",
            stringify!(dnnl_version_t),
            "::",
            stringify!(gpu_runtime)
        )
    );
}
#[repr(u32)]
#[non_exhaustive]
#[doc = " CPU instruction set flags"]
// NOTE(review): the discriminants look like cumulative bit masks (1, 3, 7,
// 15, 31, ...), i.e. each ISA appears to carry the bits of the ISAs it
// extends — taken from the C header as-is; verify against dnnl_types.h
// before relying on bit-level semantics.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum dnnl_cpu_isa_t {
#[doc = " Any ISA (no restrictions)"]
dnnl_cpu_isa_all = 0,
#[doc = " Intel Streaming SIMD Extensions 4.1 (Intel SSE4.1)"]
dnnl_cpu_isa_sse41 = 1,
#[doc = " Intel Advanced Vector Extensions (Intel AVX)"]
dnnl_cpu_isa_avx = 3,
#[doc = " Intel Advanced Vector Extensions 2 (Intel AVX2)"]
dnnl_cpu_isa_avx2 = 7,
#[doc = " Intel Advanced Vector Extensions 512 (Intel AVX-512) subset"]
#[doc = " for Intel Xeon Phi processors x200 Series."]
dnnl_cpu_isa_avx512_mic = 15,
#[doc = " Intel AVX-512 subset"]
#[doc = " for Intel Xeon Phi processors 7235, 7285, 7295 Series."]
dnnl_cpu_isa_avx512_mic_4ops = 31,
#[doc = " Intel AVX-512 subset for Intel Xeon Scalable processor family"]
#[doc = " and Intel Core processor family."]
dnnl_cpu_isa_avx512_core = 39,
#[doc = " Intel AVX-512 and Intel Deep Learning Boost (Intel DL Boost) support"]
#[doc = " for Intel Xeon Scalable processor family"]
#[doc = " and Intel Core processor family."]
dnnl_cpu_isa_avx512_core_vnni = 103,
#[doc = " Intel AVX-512, Intel DL Boost and bfloat16 support"]
#[doc = " for Intel Xeon Scalable processor family"]
#[doc = " and Intel Core processor family."]
dnnl_cpu_isa_avx512_core_bf16 = 231,
}
// Raw FFI declarations for the primitive-descriptor-iterator API. All calls
// are `unsafe` at the call site; the #[doc] comments are carried over from
// the C header and describe the contract of each function.
extern "C" {
#[doc = " Creates a primitive descriptor iterator."]
#[doc = ""]
#[doc = " @param iterator Output primitive descriptor iterator."]
#[doc = " @param op_desc Operation descriptor."]
#[doc = " @param attr Primitive attributes (can be NULL)."]
#[doc = " @param engine Engine to use."]
#[doc = " @param hint_forward_primitive_desc For backward propagation: primitive"]
#[doc = " descriptor for a respective forward propagation primitive. Pass NULL"]
#[doc = " for forward propagation."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_iterator_create(
iterator: *mut dnnl_primitive_desc_iterator_t,
op_desc: const_dnnl_op_desc_t,
attr: const_dnnl_primitive_attr_t,
engine: dnnl_engine_t,
hint_forward_primitive_desc: const_dnnl_primitive_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Advances the primitive descriptor iterator to point to the next available"]
#[doc = " implementation."]
#[doc = ""]
#[doc = " @param iterator A primitive descriptor iterator to advance."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
#[doc = " @returns #dnnl_iterator_ends if no more implementations available."]
pub fn dnnl_primitive_desc_iterator_next(
iterator: dnnl_primitive_desc_iterator_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Fetches the current primitive descriptor from a primitive descriptor"]
#[doc = " iterator."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The user is responsible for deleting the resulting primitive"]
#[doc = " descriptor using dnnl_primitive_desc_destroy()."]
#[doc = ""]
#[doc = " @param iterator A primitive descriptor iterator."]
#[doc = " @returns A primitive descriptor."]
pub fn dnnl_primitive_desc_iterator_fetch(
iterator: const_dnnl_primitive_desc_iterator_t,
) -> dnnl_primitive_desc_t;
}
extern "C" {
#[doc = " Destroys a primitive descriptor iterator."]
#[doc = ""]
#[doc = " @param iterator Primitive descriptor iterator to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_iterator_destroy(
iterator: dnnl_primitive_desc_iterator_t,
) -> dnnl_status_t;
}
// Raw FFI declarations for the primitive-descriptor lifecycle API
// (create / clone / get_attr / destroy).
extern "C" {
#[doc = " Creates a primitive descriptor. This function is equivalent to a sequence"]
#[doc = " of #dnnl_primitive_desc_iterator_create() and"]
#[doc = " #dnnl_primitive_desc_iterator_fetch(). In other words, the library will"]
#[doc = " pick the first suitable implementation."]
#[doc = ""]
#[doc = " @param primitive_desc Output primitive descriptor."]
#[doc = " @param op_desc Operation descriptor."]
#[doc = " @param attr Primitive attributes (can be NULL)."]
#[doc = " @param engine Engine to use."]
#[doc = " @param hint_forward_primitive_desc For backward propagation: primitive"]
#[doc = " descriptor for a respective forward propagation primitive. Pass NULL"]
#[doc = " for forward propagation."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_create(
primitive_desc: *mut dnnl_primitive_desc_t,
op_desc: const_dnnl_op_desc_t,
attr: const_dnnl_primitive_attr_t,
engine: dnnl_engine_t,
hint_forward_primitive_desc: const_dnnl_primitive_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Clones a primitive descriptor. The resulting primitive descriptor must be"]
#[doc = " destroyed separately."]
#[doc = ""]
#[doc = " @param primitive_desc Output primitive descriptor."]
#[doc = " @param existing_primitive_desc Primitive descriptor to clone."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_clone(
primitive_desc: *mut dnnl_primitive_desc_t,
existing_primitive_desc: const_dnnl_primitive_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns a constant reference to the attributes of a primitive descriptor."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " It is an error to destroy the resulting @p attr."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " The lifetime of an @p attr is the same as that of a @p"]
#[doc = " primitive_desc, so it is an error to use the @p attr once the @p"]
#[doc = " primitive_desc has been destroyed."]
#[doc = ""]
#[doc = " @param primitive_desc Primitive descriptor."]
#[doc = " @param attr Output primitive attributes."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_get_attr(
primitive_desc: const_dnnl_primitive_desc_t,
attr: *mut const_dnnl_primitive_attr_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys a primitive descriptor."]
#[doc = ""]
#[doc = " @param primitive_desc Primitive descriptor to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_destroy(primitive_desc: dnnl_primitive_desc_t) -> dnnl_status_t;
}
// Raw FFI declarations for the primitive-descriptor query API. The generic
// `dnnl_primitive_desc_query` writes through a type-erased `result` pointer
// whose pointee type must match the `what` query kind (see `dnnl_query_t`);
// the `_md`/`_s32` variants are typed convenience wrappers.
extern "C" {
#[doc = " Queries a primitive descriptor for various pieces of information."]
#[doc = ""]
#[doc = " The most common use case is to query a primitive descriptor, created with"]
#[doc = " source, weights, and destination memory descriptors with format tags set"]
#[doc = " to #dnnl_format_tag_any, for the corresponding memory descriptors (in this"]
#[doc = " case the @p what is set to #dnnl_query_src_md, #dnnl_query_weights_md, and"]
#[doc = " #dnnl_query_dst_md respectively) so that it is possible to create memory"]
#[doc = " objects and reorder primitives if necessary."]
#[doc = ""]
#[doc = " Another typical use case is to query a primitive descriptor for workspace"]
#[doc = " memory descriptor (with @p what set to #dnnl_query_workspace_md). If this"]
#[doc = " query returns #dnnl_not_required status, then workspace memory is not"]
#[doc = " required."]
#[doc = ""]
#[doc = " @note"]
#[doc = " When querying for a memory descriptor for a scratchpad, a workspace,"]
#[doc = " or an optional parameter, the query will return a pointer to a zero"]
#[doc = " memory descriptor if the parameter is not needed."]
#[doc = ""]
#[doc = " A few other use cases:"]
#[doc = " - query a primitive descriptor for the underlying operation descriptor"]
#[doc = " (#dnnl_query_convolution_d, #dnnl_query_eltwise_d, #dnnl_query_rnn_d,"]
#[doc = " etc.)"]
#[doc = " - query a primitive descriptor for the implementation information string"]
#[doc = " (#dnnl_query_impl_info_str)"]
#[doc = " - query a primitive descriptor for the number of inputs and outputs"]
#[doc = " (#dnnl_query_num_of_inputs_s32 and #dnnl_query_num_of_outputs_s32"]
#[doc = " respectively)"]
#[doc = ""]
#[doc = " @sa dnnl_query_t for more options"]
#[doc = ""]
#[doc = " @param primitive_desc Primitive descriptor."]
#[doc = " @param what Parameter to query."]
#[doc = " @param index Index of the parameter to query for."]
#[doc = " @param result Output result. The type depends on the query. For example,"]
#[doc = " it must be a @c dnnl_memory_desc_t* if querying for a memory"]
#[doc = " descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_desc_query(
primitive_desc: const_dnnl_primitive_desc_t,
what: dnnl_query_t,
index: ::libc::c_int,
result: *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Queries primitive descriptor for a memory descriptor."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This function is a convenience version of"]
#[doc = " #dnnl_primitive_desc_query()."]
#[doc = ""]
#[doc = " @param primitive_desc Primitive descriptor."]
#[doc = " @param what Kind of memory descriptor parameter to query for."]
#[doc = " @param index Index of the parameter to query."]
#[doc = " @returns A pointer to the requested memory descriptor."]
#[doc = " @returns A pointer to a zero memory descriptor if the parameter is not"]
#[doc = " needed."]
#[doc = " @returns NULL in case of any error."]
#[doc = ""]
pub fn dnnl_primitive_desc_query_md(
primitive_desc: const_dnnl_primitive_desc_t,
what: dnnl_query_t,
index: ::libc::c_int,
) -> *const dnnl_memory_desc_t;
}
extern "C" {
#[doc = " Queries primitive descriptor for a signed 32bit int."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This function is a convenience version of"]
#[doc = " #dnnl_primitive_desc_query()."]
#[doc = ""]
#[doc = " @param primitive_desc Primitive descriptor."]
#[doc = " @param what Kind of the value to query for."]
#[doc = " @param index Index of the parameter to query."]
#[doc = " @returns The requested value."]
#[doc = " @returns 0 in case of any error (in particular if the queried entity is"]
#[doc = " not of type int32_t). Note that 0 may also be the actual returned"]
#[doc = " value."]
pub fn dnnl_primitive_desc_query_s32(
primitive_desc: const_dnnl_primitive_desc_t,
what: dnnl_query_t,
index: ::libc::c_int,
) -> ::libc::c_int;
}
// Raw FFI declarations for primitive lifecycle/execution and for creating
// and cloning primitive attributes.
extern "C" {
#[doc = " Creates a primitive."]
#[doc = ""]
#[doc = " @param primitive Output primitive."]
#[doc = " @param primitive_desc Primitive descriptor used to create the primitive."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_create(
primitive: *mut dnnl_primitive_t,
primitive_desc: const_dnnl_primitive_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Executes a primitive."]
#[doc = ""]
#[doc = " @param primitive Primitive to execute."]
#[doc = " @param stream Stream to use."]
#[doc = " @param nargs Number of arguments."]
#[doc = " @param args Array of arguments. Each argument is an"]
#[doc = " <index, #dnnl_memory_t> pair. The index is one of the `DNNL_ARG_*`"]
#[doc = " values such as `DNNL_ARG_SRC`. Unless runtime shapes are used (see"]
#[doc = " #DNNL_RUNTIME_DIM_VAL), the memory object must have the same memory"]
#[doc = " descriptor as that returned by"]
#[doc = " #dnnl_primitive_desc_query_md(#dnnl_query_exec_arg_md, index)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Callers must pass `args` pointing to at least `nargs` valid
// `dnnl_exec_arg_t` values.
pub fn dnnl_primitive_execute(
primitive: const_dnnl_primitive_t,
stream: dnnl_stream_t,
nargs: ::libc::c_int,
args: *const dnnl_exec_arg_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Retrieves a constant reference to the primitive descriptor of a given"]
#[doc = " primitive."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " It is an error to destroy the returned object. It is owned by the"]
#[doc = " primitive. The @c const qualifier of the returned object prevents"]
#[doc = " such attempts."]
#[doc = ""]
#[doc = " @param primitive Primitive to query for the primitive descriptor."]
#[doc = " @param primitive_desc Output primitive descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_get_primitive_desc(
primitive: const_dnnl_primitive_t,
primitive_desc: *mut const_dnnl_primitive_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys a primitive."]
#[doc = ""]
#[doc = " @param primitive The primitive to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_destroy(primitive: dnnl_primitive_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates an empty (default) primitive attributes with all the parameters"]
#[doc = " set to their default values."]
#[doc = ""]
#[doc = " Empty attributes are implied whenever the respective argument is NULL."]
#[doc = ""]
#[doc = " @param attr Output primitive attributes."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_create(attr: *mut dnnl_primitive_attr_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Clones primitive attributes."]
#[doc = ""]
#[doc = " @param attr Output primitive attributes."]
#[doc = " @param existing_attr Primitive attributes to clone."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_clone(
attr: *mut dnnl_primitive_attr_t,
existing_attr: const_dnnl_primitive_attr_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys primitive attributes."]
#[doc = ""]
#[doc = " @param attr Primitive attributes to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_destroy(attr: dnnl_primitive_attr_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the primitive attributes scratchpad mode."]
#[doc = ""]
#[doc = " @param attr Primitive attributes."]
#[doc = " @param mode Output scratchpad mode."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_get_scratchpad_mode(
attr: const_dnnl_primitive_attr_t,
mode: *mut dnnl_scratchpad_mode_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets primitive attributes scratchpad mode."]
#[doc = ""]
#[doc = " @param attr Primitive attributes."]
#[doc = " @param mode Scratchpad mode. The possible values are:"]
#[doc = " #dnnl_scratchpad_mode_library (default) and"]
#[doc = " #dnnl_scratchpad_mode_user."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_set_scratchpad_mode(
attr: dnnl_primitive_attr_t,
mode: dnnl_scratchpad_mode_t,
) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns primitive attributes output scaling factors correspondence mask"]
    #[doc = " and values."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The @p scales array is an internal part of the primitive attributes"]
    #[doc = " @p attr, so it is an error to modify or destroy the @p scales array."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The lifetime of @p scales array is the same as that of the primitive"]
    #[doc = " attributes @p attr to which it belongs, so it is an error to use"]
    #[doc = " @p scales after @p attr is destroyed."]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param count Output length of the array of scaling factors @p scales."]
    #[doc = " @param mask Output scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p scales"]
    #[doc = " vector. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of"]
    #[doc = " 0 implies a common output scaling factor for the whole output tensor."]
    #[doc = " @param scales Output pointer to a constant array of scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_get_output_scales(
        attr: const_dnnl_primitive_attr_t,
        count: *mut dnnl_dim_t,
        mask: *mut ::libc::c_int,
        scales: *mut *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Sets output scaling factors correspondence mask and values."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " The order of dimensions does not depend on how elements are laid"]
    #[doc = " out in memory. For example:"]
    #[doc = " - for a 2D CNN activations tensor the order is always (n, c)"]
    #[doc = " - for a 4D CNN activations tensor the order is always (n, c, h, w)"]
    #[doc = " - for a 5D CNN weights tensor the order is always"]
    #[doc = " (g, oc, ic, kh, kw)"]
    #[doc = ""]
    #[doc = " Example usage:"]
    #[doc = " @code"]
    #[doc = " int mb = 32, oc = 32, oh = 14, ow = 14; // convolution output params"]
    #[doc = " float scales[oc] = { ... }; // unique output scales per output channel"]
    #[doc = " int oc_dim = 1; // mb_dim = 0, channel_dim = 1, height_dim = 2, ..."]
    #[doc = ""]
    #[doc = " dnnl_convolution_desc_t conv_d; // create a convolution descriptor"]
    #[doc = ""]
    #[doc = " dnnl_primitive_attr_t attr;"]
    #[doc = " dnnl_primitive_attr_create(&attr); // create primitive attributes"]
    #[doc = " dnnl_primitive_attr_set_output_scales(attr, oc, 1 << oc_dim, scales);"]
    #[doc = ""]
    #[doc = " dnnl_primitive_desc_t conv_pd;"]
    #[doc = " dnnl_primitive_desc_create(&conv_pd, &conv_d, attr, engine, NULL);"]
    #[doc = " @endcode"]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param count Length of the array of scaling factors @p scales."]
    #[doc = " @param mask Scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p scales"]
    #[doc = " array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of"]
    #[doc = " 0 implies a common output scaling factor for the whole output tensor."]
    #[doc = " @param scales Array of output scaling factors. If the output scaling"]
    #[doc = " factors are known at the time of this call, this array must contain @p"]
    #[doc = " count values and the following equality must hold:"]
    #[doc = " \\f[count = \\prod\\limits_{d \\in mask} output.dims[d].\\f]"]
    #[doc = " Violations can only be detected when the attributes are used to create"]
    #[doc = " a primitive descriptor."]
    #[doc = " If the output scaling factors are not known at the time of the call,"]
    #[doc = " this array must contain a single #DNNL_RUNTIME_F32_VAL value and the"]
    #[doc = " output scaling factors must be passed at execution time as an argument"]
    #[doc = " with index #DNNL_ARG_ATTR_OUTPUT_SCALES."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_set_output_scales(
        attr: dnnl_primitive_attr_t,
        count: dnnl_dim_t,
        mask: ::libc::c_int,
        scales: *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns primitive attributes scaling factors correspondence mask and values"]
    #[doc = " for a given memory argument."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The output @p scales array is an internal part of the primitive"]
    #[doc = " attributes @p attr, so it is an error to modify or destroy the @p"]
    #[doc = " scales array."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The lifetime of the @p scales array is the same as that of the primitive"]
    #[doc = " attributes @p attr to which it belongs, so it is an error to use @p"]
    #[doc = " scales after @p attr is destroyed."]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param arg Parameter argument index as passed to the"]
    #[doc = " dnnl_primitive_execute() call."]
    #[doc = " @param count Output length of the array of scaling factors @p scales."]
    #[doc = " @param mask Output scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " scales array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of 0"]
    #[doc = " implies a common scaling factor for the whole output tensor."]
    #[doc = " @param scales Output pointer to a constant array of float scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_get_scales(
        attr: dnnl_primitive_attr_t,
        arg: ::libc::c_int,
        count: *mut dnnl_dim_t,
        mask: *mut ::libc::c_int,
        scales: *mut *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Sets primitive attributes scaling factors for primitive operations for a"]
    #[doc = " given memory argument."]
    #[doc = ""]
    #[doc = " @sa dnnl_primitive_attr_set_output_scales"]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param arg Parameter argument index as passed to the"]
    #[doc = " dnnl_primitive_execute() call."]
    #[doc = " @param count Length of the array of scaling factors @p scales."]
    #[doc = " @param mask Scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the tensor dimensions and the @p scales array."]
    #[doc = " The set i-th bit indicates that a dedicated scaling factor is used for"]
    #[doc = " each index along that dimension. Set the mask to 0 to use a common"]
    #[doc = " scaling factor for the whole output tensor."]
    #[doc = " @param scales Constant array of float scaling factors. This array must"]
    #[doc = " contain @p count scales and the following equality must hold:"]
    #[doc = " \\f[count = \\prod\\limits_{d \\in mask} output.dims[d].\\f]"]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_set_scales(
        attr: dnnl_primitive_attr_t,
        arg: ::libc::c_int,
        count: dnnl_dim_t,
        mask: ::libc::c_int,
        scales: *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns @p count, correspondence zero point @p mask, and a pointer to a"]
    #[doc = " constant int32_t array of @p zero_points for given @p attr and memory"]
    #[doc = " argument (index), previously set by dnnl_primitive_attr_set_zero_points."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The output @p zero_points array is an internal part of the primitive"]
    #[doc = " attributes @p attr, so it is an error to modify or destroy the @p"]
    #[doc = " zero_points array."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The lifetime of @p zero_points array is the same as that of the"]
    #[doc = " primitive attributes @p attr to which it belongs, so it is an error"]
    #[doc = " to use @p zero_points after @p attr is destroyed."]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param arg Parameter argument index as passed to the"]
    #[doc = " dnnl_primitive_execute() call."]
    #[doc = " @param count Output length of the array of zero points @p zero_points."]
    #[doc = " @param mask Output zero points correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " zero_points array. The set i-th bit indicates that a dedicated output"]
    #[doc = " zero point is used for each index along that dimension. The mask"]
    #[doc = " value of 0 implies a common zero point for the whole output tensor."]
    #[doc = " @param zero_points Output pointer to a constant array of int32_t zero"]
    #[doc = " points."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_get_zero_points(
        attr: const_dnnl_primitive_attr_t,
        arg: ::libc::c_int,
        count: *mut dnnl_dim_t,
        mask: *mut ::libc::c_int,
        zero_points: *mut *const i32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Sets primitive attributes zero points for primitive operations for a given"]
    #[doc = " memory argument."]
    #[doc = ""]
    #[doc = " @sa dnnl_primitive_attr_set_output_scales"]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param arg Parameter argument index as passed to the"]
    #[doc = " dnnl_primitive_execute() call."]
    #[doc = " @param count Length of the array of zero points @p zero_points."]
    #[doc = " @param mask Zero point correspondence mask that defines the"]
    #[doc = " correspondence between the tensor dimensions and the @p"]
    #[doc = " zero_points array. The set i-th bit indicates that a dedicated"]
    #[doc = " zero point is used for each index along that dimension. Set the"]
    #[doc = " mask to 0 to use a common zero point for the whole output tensor."]
    #[doc = " @param zero_points Constant array of int32_t zero points. If the zero"]
    #[doc = " points are known at the time of this call, this array must contain @p"]
    #[doc = " count zero points and the following equality must hold:"]
    #[doc = " \\f[count = \\prod\\limits_{d \\in mask} output.dims[d].\\f]"]
    #[doc = " If the zero points are not known at the time of the call, this array"]
    #[doc = " must contain a single #DNNL_RUNTIME_S32_VAL and the zero points must"]
    #[doc = " be passed at execution time as an argument with index"]
    #[doc = " #DNNL_ARG_ATTR_ZERO_POINTS."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_set_zero_points(
        attr: dnnl_primitive_attr_t,
        arg: ::libc::c_int,
        count: dnnl_dim_t,
        mask: ::libc::c_int,
        zero_points: *const i32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns primitive attributes post-ops."]
    #[doc = ""]
    #[doc = " @warning"]
    #[doc = " The output @p post_ops points to the internal @p attr field, so it is"]
    #[doc = " an error to modify or destroy them. The lifetime of @p post_ops is"]
    #[doc = " the same as that of the @p attr it belongs to, so it is an error to"]
    #[doc = " use @p post_ops after @p attr has been destroyed."]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param post_ops Output post-ops."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_get_post_ops(
        attr: const_dnnl_primitive_attr_t,
        post_ops: *mut const_dnnl_post_ops_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Sets primitive attributes post-ops."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " There is no way to check whether the post-ops would be supported by"]
    #[doc = " the target primitive. Any error will be reported by the"]
    #[doc = " dnnl_primitive_desc_create() function call."]
    #[doc = ""]
    #[doc = " @param attr Primitive attributes."]
    #[doc = " @param post_ops Post-ops to set."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_primitive_attr_set_post_ops(
        attr: dnnl_primitive_attr_t,
        post_ops: const_dnnl_post_ops_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Creates empty post-ops sequence."]
    #[doc = ""]
    #[doc = " @param post_ops Output post-ops."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_create(post_ops: *mut dnnl_post_ops_t) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Destroys post-ops."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops to destroy."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_destroy(post_ops: dnnl_post_ops_t) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the length of post-ops."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @returns The number of post-ops entries."]
    pub fn dnnl_post_ops_len(post_ops: const_dnnl_post_ops_t) -> ::libc::c_int;
}
extern "C" {
    #[doc = " Returns the kind of a post-op entry."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Post-op entry index."]
    #[doc = " @returns The kind of the post-op with the specified index."]
    #[doc = " @returns #dnnl_undefined_primitive if there is no post-op at the specified"]
    #[doc = " index."]
    pub fn dnnl_post_ops_get_kind(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
    ) -> dnnl_primitive_kind_t;
}
extern "C" {
    #[doc = " Appends an accumulation (sum) to post-ops. Prior to accumulating the"]
    #[doc = " result, the previous value is multiplied by a scale."]
    #[doc = ""]
    #[doc = " The kind of this post-op is #dnnl_sum."]
    #[doc = ""]
    #[doc = " This feature may improve performance for cases like residual learning"]
    #[doc = " blocks, where the result of convolution is accumulated to the previously"]
    #[doc = " computed activations. The parameter @p scale may be used for the"]
    #[doc = " integer-based computations when the result and previous activations have"]
    #[doc = " different logical scaling factors."]
    #[doc = ""]
    #[doc = " In the simplest case when the accumulation is the only post-op, the"]
    #[doc = " computations would be:"]
    #[doc = ""]
    #[doc = " dst[:] <- scale * dst[:] + op(...) // instead of dst[:] <- op(...)"]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " This post-op executes in-place and does not change the"]
    #[doc = " destination layout."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param scale Accumulation scaling factor."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_append_sum(post_ops: dnnl_post_ops_t, scale: f32) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Appends an accumulation v2 (sum) to post-ops. Prior to accumulating the"]
    #[doc = " result, the previous value is multiplied by a scale."]
    #[doc = ""]
    #[doc = " The kind of this post-op is #dnnl_sum."]
    #[doc = ""]
    #[doc = " This feature may improve performance for cases like residual learning"]
    #[doc = " blocks, where the result of convolution is accumulated to the previously"]
    #[doc = " computed activations. The parameter @p scale may be used for the"]
    #[doc = " integer-based computations when the result and previous activations have"]
    #[doc = " different logical scaling factors."]
    #[doc = ""]
    #[doc = " In the simplest case when the accumulation is the only post-op, the"]
    #[doc = " computations would be:"]
    #[doc = ""]
    #[doc = " dst[:] <- scale * dst[:] + op(...) // instead of dst[:] <- op(...)"]
    #[doc = ""]
    #[doc = " If @p data_type is specified, original dst tensor will be reinterpreted"]
    #[doc = " as a tensor with provided data type. Since it is reinterpretation,"]
    #[doc = " @p data_type and the dst data type must have the same size."]
    #[doc = " As a result, computations would be:"]
    #[doc = ""]
    #[doc = " dst[:] <- scale * as_data_type(dst[:]) + op(...)"]
    #[doc = " // instead of dst[:] <- op(...)"]
    #[doc = " @note"]
    #[doc = " This post-op executes in-place and does not change the"]
    #[doc = " destination layout."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param scale Accumulation scaling factor."]
    #[doc = " @param data_type Accumulation data_type."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_append_sum_v2(
        post_ops: dnnl_post_ops_t,
        scale: f32,
        data_type: dnnl_data_type_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the parameters of an accumulation (sum) post-op."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Index of the sum post-op."]
    #[doc = " @param scale Output accumulation scaling factor."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    #[doc = " @returns #dnnl_invalid_arguments if @p index does not refer to a sum"]
    #[doc = " post-op."]
    pub fn dnnl_post_ops_get_params_sum(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
        scale: *mut f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the parameters of an accumulation (sum) post-op with"]
    #[doc = " a data type parameter."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Index of the sum post-op."]
    #[doc = " @param scale Output accumulation scaling factor."]
    #[doc = " @param data_type Output data type for accumulation."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_get_params_sum_v2(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
        scale: *mut f32,
        data_type: *mut dnnl_data_type_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Appends an elementwise post-op."]
    #[doc = ""]
    #[doc = " The kind of this post operation is #dnnl_eltwise."]
    #[doc = ""]
    #[doc = " In the simplest case when the elementwise is the only post operation, the"]
    #[doc = " computations would be:"]
    #[doc = ""]
    #[doc = " dst[:] <- scale * eltwise_op (op(...)) // instead of dst[:] <- op(...)"]
    #[doc = ""]
    #[doc = " where eltwise_op is configured with the given parameters."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param scale Scaling factor."]
    #[doc = " @param alg_kind Elementwise algorithm for the post-op."]
    #[doc = " @param alpha Alpha parameter for the elementwise algorithm."]
    #[doc = " @param beta Beta parameter for the elementwise algorithm."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_append_eltwise(
        post_ops: dnnl_post_ops_t,
        scale: f32,
        alg_kind: dnnl_alg_kind_t,
        alpha: f32,
        beta: f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the parameters of an elementwise post-op."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Index of the elementwise post-op."]
    #[doc = " @param scale Output scaling factor."]
    #[doc = " @param alg_kind Output elementwise algorithm kind."]
    #[doc = " @param alpha Output alpha parameter for the elementwise algorithm."]
    #[doc = " @param beta Output beta parameter for the elementwise algorithm."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    #[doc = " @returns #dnnl_invalid_arguments if @p index does not refer to an"]
    #[doc = " elementwise post-op."]
    pub fn dnnl_post_ops_get_params_eltwise(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
        scale: *mut f32,
        alg_kind: *mut dnnl_alg_kind_t,
        alpha: *mut f32,
        beta: *mut f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Appends a depthwise post-op convolution with stride 1."]
    #[doc = ""]
    #[doc = " This post-op can only be fused with a 2D 1x1 convolution (convolution with"]
    #[doc = " weights spatial dimension equal to 1 i.e., kh=kw=1)."]
    #[doc = ""]
    #[doc = " The kind of this post-op is #dnnl_convolution."]
    #[doc = ""]
    #[doc = " The number of outputs for the primitive remains the same as before. The"]
    #[doc = " output size remains the same as for the original primitive due to stride=1."]
    #[doc = ""]
    #[doc = " The Post-op can be defined as:"]
    #[doc = ""]
    #[doc = " dst[:] <- scales * (conv_dw(conv_1x1))"]
    #[doc = ""]
    #[doc = " See @ref dev_guide_attributes_post_ops_depthwise and"]
    #[doc = " @ref dev_guide_attributes_post_ops_depthwise_fusion for more info."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param weights_data_type Weights data type of depthwise post-op"]
    #[doc = " @param bias_data_type Bias data type of depthwise post-op"]
    #[doc = " @param dst_data_type Output data type of depthwise post-op"]
    #[doc = " @param count Length of the array of scaling factors @p scales."]
    #[doc = " @param mask Scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " scales array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of 0"]
    #[doc = " implies a common scaling factor for the whole output tensor."]
    #[doc = " @param scales Constant array of float scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_append_dw_k3s1p1(
        post_ops: dnnl_post_ops_t,
        weights_data_type: dnnl_data_type_t,
        bias_data_type: dnnl_data_type_t,
        dst_data_type: dnnl_data_type_t,
        count: dnnl_dim_t,
        mask: ::libc::c_int,
        scales: *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the parameters of a depthwise post-op with stride 1."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Index of the depthwise post-op."]
    #[doc = " @param weights_data_type Weights data type of depthwise post-op"]
    #[doc = " @param bias_data_type Bias data type of depthwise post-op"]
    #[doc = " @param dst_data_type Output data type of depthwise post-op"]
    #[doc = " @param count Output length of the array of scaling factors @p scales."]
    #[doc = " @param mask Output scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " scales array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of 0"]
    #[doc = " implies a common scaling factor for the whole output tensor."]
    #[doc = " @param scales Output pointer to a constant array of float scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_get_params_dw_k3s1p1(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
        weights_data_type: *mut dnnl_data_type_t,
        bias_data_type: *mut dnnl_data_type_t,
        dst_data_type: *mut dnnl_data_type_t,
        count: *mut dnnl_dim_t,
        mask: *mut ::libc::c_int,
        scales: *mut *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Appends a depthwise post-op convolution with stride 2."]
    #[doc = ""]
    #[doc = " This post-op can only be fused with a 2D 1x1 convolution (convolution with"]
    #[doc = " weights spatial dimension equal to 1 i.e., kh=kw=1)."]
    #[doc = ""]
    #[doc = " The kind of this post-op is #dnnl_convolution."]
    #[doc = ""]
    #[doc = " The number of outputs for the primitive remains the same as before. The"]
    #[doc = " output spatial size can be derived as below:"]
    #[doc = ""]
    #[doc = " output_height = ceil(output_height_1x1_convolution, stride)"]
    #[doc = " output_width = ceil(output_width_1x1_convolution, stride)"]
    #[doc = ""]
    #[doc = " The Post-op can be defined as:"]
    #[doc = ""]
    #[doc = " dst[:] <- scales * (conv_dw(conv_1x1))"]
    #[doc = ""]
    #[doc = " See @ref dev_guide_attributes_post_ops_depthwise and"]
    #[doc = " @ref dev_guide_attributes_post_ops_depthwise_fusion for more info."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param weights_data_type Weights data type of depthwise post-op"]
    #[doc = " @param bias_data_type Bias data type of depthwise post-op"]
    #[doc = " @param dst_data_type Output data type of depthwise post-op"]
    #[doc = " @param count Length of the array of scaling factors @p scales."]
    #[doc = " @param mask Scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " scales array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of 0"]
    #[doc = " implies a common scaling factor for the whole output tensor."]
    #[doc = " @param scales Constant array of float scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_append_dw_k3s2p1(
        post_ops: dnnl_post_ops_t,
        weights_data_type: dnnl_data_type_t,
        bias_data_type: dnnl_data_type_t,
        dst_data_type: dnnl_data_type_t,
        count: dnnl_dim_t,
        mask: ::libc::c_int,
        scales: *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Returns the parameters of a depthwise post-op with stride 2."]
    #[doc = ""]
    #[doc = " @param post_ops Post-ops."]
    #[doc = " @param index Index of the depthwise post-op."]
    #[doc = " @param weights_data_type Weights data type of depthwise post-op"]
    #[doc = " @param bias_data_type Bias data type of depthwise post-op"]
    #[doc = " @param dst_data_type Output data type of depthwise post-op"]
    #[doc = " @param count Output length of the array of scaling factors @p scales."]
    #[doc = " @param mask Output scaling factors correspondence mask that defines the"]
    #[doc = " correspondence between the output tensor dimensions and the @p"]
    #[doc = " scales array. The set i-th bit indicates that a dedicated output scaling"]
    #[doc = " factor is used for each index along that dimension. The mask value of 0"]
    #[doc = " implies a common scaling factor for the whole output tensor."]
    #[doc = " @param scales Output pointer to a constant array of float scaling factors."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_post_ops_get_params_dw_k3s2p1(
        post_ops: const_dnnl_post_ops_t,
        index: ::libc::c_int,
        weights_data_type: *mut dnnl_data_type_t,
        bias_data_type: *mut dnnl_data_type_t,
        dst_data_type: *mut dnnl_data_type_t,
        count: *mut dnnl_dim_t,
        mask: *mut ::libc::c_int,
        scales: *mut *const f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a memory descriptor using dimensions and strides."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " As always, the logical order of dimensions corresponds to the `abc...`"]
    #[doc = " format tag, and the physical meaning of the dimensions depends on both"]
    #[doc = " the primitive that consumes the memory and the context of that"]
    #[doc = " consumption."]
    #[doc = ""]
    #[doc = " @param memory_desc Output memory descriptor."]
    #[doc = " @param ndims Number of dimensions"]
    #[doc = " @param dims Array of dimensions."]
    #[doc = " @param data_type Elements data type."]
    #[doc = " @param strides Strides in each dimension."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_memory_desc_init_by_strides(
        memory_desc: *mut dnnl_memory_desc_t,
        ndims: ::libc::c_int,
        dims: *mut dnnl_dim_t,
        data_type: dnnl_data_type_t,
        strides: *mut dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a memory descriptor using dimensions and memory format tag."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " As always, the logical order of dimensions corresponds to the `abc...`"]
    #[doc = " format tag, and the physical meaning of the dimensions depends on both"]
    #[doc = " the primitive that consumes the memory and the context of that"]
    #[doc = " consumption."]
    #[doc = ""]
    #[doc = " @param memory_desc Output memory descriptor."]
    #[doc = " @param ndims Number of dimensions"]
    #[doc = " @param dims Array of dimensions."]
    #[doc = " @param data_type Elements data type."]
    #[doc = " @param tag Memory format tag. Can be #dnnl_format_tag_any which would"]
    #[doc = " allow a primitive to choose the final memory format. In this case the"]
    #[doc = " format_kind field of the memory descriptor would be set to"]
    #[doc = " #dnnl_format_kind_any."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_memory_desc_init_by_tag(
        memory_desc: *mut dnnl_memory_desc_t,
        ndims: ::libc::c_int,
        dims: *mut dnnl_dim_t,
        data_type: dnnl_data_type_t,
        tag: dnnl_format_tag_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a memory descriptor for a region (sub-memory) of an existing"]
    #[doc = " parent memory descriptor."]
    #[doc = ""]
    #[doc = " @param memory_desc Output memory descriptor."]
    #[doc = " @param parent_memory_desc An existing memory descriptor."]
    #[doc = " @param dims Sizes of the region."]
    #[doc = " @param offsets Offsets to the region from the encompassing"]
    #[doc = " memory object in each dimension."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    pub fn dnnl_memory_desc_init_submemory(
        memory_desc: *mut dnnl_memory_desc_t,
        parent_memory_desc: *const dnnl_memory_desc_t,
        dims: *mut dnnl_dim_t,
        offsets: *mut dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a memory descriptor by reshaping an existing one. The new"]
#[doc = " memory descriptor inherits the data type. This operation is valid only for"]
#[doc = " memory descriptors that have format_kind set to #dnnl_blocked or"]
#[doc = " #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " The operation ensures the transformation of the physical memory format"]
#[doc = " corresponds to the transformation of the logical dimensions. If such"]
#[doc = " transformation is impossible, the function returns #dnnl_invalid_arguments."]
#[doc = ""]
#[doc = " The reshape operation can be described as a combination of the following"]
#[doc = " basic operations:"]
#[doc = " 1. Add a dimension of size `1`. This is always possible."]
#[doc = " 2. Remove a dimension of size `1`. This is possible only if the dimension"]
#[doc = " has no padding (i.e. `padded_dims[dim] == dims[dim] && dims[dim] == 1`)."]
#[doc = " 3. Split a dimension into multiple ones. This is possible only if the size"]
#[doc = " of the dimension is exactly equal to the product of the split ones and"]
#[doc = " the dimension does not have padding (i.e."]
#[doc = " `padded_dims[dim] == dims[dim]`)."]
#[doc = " 4. Joining multiple consecutive dimensions into a single one. As in the"]
#[doc = " cases above, this requires that the dimensions do not have padding and"]
#[doc = " that the memory format is such that in physical memory these dimensions"]
#[doc = " are dense and have the same order as their logical counterparts. This"]
#[doc = " also assumes that these dimensions are not blocked."]
#[doc = " - Here, dense means:"]
#[doc = " `stride for dim[i] == (stride for dim[i + 1]) * dim[i + 1]`;"]
#[doc = " - And same order means:"]
#[doc = " `i < j` if and only if `stride for dim[j] <= stride for dim[i]`."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " Some combinations of physical memory layout and/or offsets or"]
#[doc = " dimensions may result in a failure to make a reshape."]
#[doc = ""]
#[doc = " @param out_memory_desc Output memory descriptor."]
#[doc = " @param in_memory_desc An existing memory descriptor. Must have format_kind"]
#[doc = " set to #dnnl_blocked or #dnnl_format_kind_any."]
#[doc = " @param ndims Number of dimensions for the output memory descriptor."]
#[doc = " @param dims Dimensions for the output memory descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_desc_reshape(
out_memory_desc: *mut dnnl_memory_desc_t,
in_memory_desc: *const dnnl_memory_desc_t,
ndims: ::libc::c_int,
dims: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a memory descriptor by permuting axes in an existing one."]
#[doc = ""]
#[doc = " The physical memory layout representation is adjusted accordingly, in order"]
#[doc = " to maintain the consistency between the logical and physical parts of the"]
#[doc = " memory descriptor."]
#[doc = ""]
#[doc = " The new memory descriptor inherits the data type. This operation is valid"]
#[doc = " only for memory descriptors that have format_kind set to #dnnl_blocked or"]
#[doc = " #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " The logical axes will be permuted in the following manner:"]
#[doc = " ```"]
#[doc = " for (i: 0 .. in_memory_desc->ndims)"]
#[doc = " out_memory_desc->dims[permutation[i]] = in_memory_desc->dims[i];"]
#[doc = " ```"]
#[doc = ""]
#[doc = " Example:"]
#[doc = " @code"]
#[doc = " dnnl_memory_desc_t in_md, out_md, expect_out_md;"]
#[doc = ""]
#[doc = " const int permutation[] = {1, 0}; // swap the first and the second axes"]
#[doc = ""]
#[doc = " dnnl_dims_t in_dims = {2, 3}, out_dims = {3, 2};"]
#[doc = " dnnl_format_tag_t in_tag = dnnl_ab, out_tag = dnnl_ba;"]
#[doc = ""]
#[doc = " dnnl_memory_desc_init_by_tag("]
#[doc = " &in_md, 2, in_dims, data_type, in_tag);"]
#[doc = " dnnl_memory_desc_init_by_tag("]
#[doc = " &expect_out_md, 2, out_dims, data_type, out_tag);"]
#[doc = ""]
#[doc = " dnnl_memory_desc_permute_axes(&out_md, in_md, permutation);"]
#[doc = " assert(dnnl_memory_desc_equal(&out_md, &expect_out_md));"]
#[doc = " @endcode"]
#[doc = ""]
#[doc = " @param out_memory_desc Output memory descriptor."]
#[doc = " @param in_memory_desc An existing memory descriptor. Must have format_kind"]
#[doc = " set to #dnnl_blocked or #dnnl_format_kind_any."]
#[doc = " @param permutation Axes permutation (of size `in_memory_desc->ndims`)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_desc_permute_axes(
out_memory_desc: *mut dnnl_memory_desc_t,
in_memory_desc: *const dnnl_memory_desc_t,
permutation: *const ::libc::c_int,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Compares two memory descriptors."]
#[doc = ""]
#[doc = " Use this function to identify whether a reorder is required between the"]
#[doc = " two memories."]
#[doc = ""]
#[doc = " @param lhs Left-hand side of the comparison."]
#[doc = " @param rhs Right-hand side of the comparison."]
#[doc = " @returns 1 if the descriptors are the same."]
#[doc = " @returns 0 if the descriptors are different."]
pub fn dnnl_memory_desc_equal(
lhs: *const dnnl_memory_desc_t,
rhs: *const dnnl_memory_desc_t,
) -> ::libc::c_int;
}
extern "C" {
#[doc = " Returns the size of a memory descriptor."]
#[doc = ""]
#[doc = " @param memory_desc Memory descriptor."]
#[doc = " @returns The number of bytes required for memory described by a memory"]
#[doc = " descriptor."]
#[doc = ""]
#[doc = " @sa dnnl_memory_create()"]
pub fn dnnl_memory_desc_get_size(memory_desc: *const dnnl_memory_desc_t) -> usize;
}
extern "C" {
#[doc = " Creates a memory object."]
#[doc = ""]
#[doc = " Unless @p handle is equal to DNNL_MEMORY_NONE, the constructed memory"]
#[doc = " object will have the underlying buffer set. In this case, the buffer will"]
#[doc = " be initialized as if dnnl_memory_set_data_handle() had been called."]
#[doc = ""]
#[doc = " The created memory object should be released with dnnl_memory_destroy()"]
#[doc = " when it is no longer needed."]
#[doc = ""]
#[doc = " @sa dnnl_memory_set_data_handle()"]
#[doc = ""]
#[doc = " @param memory Output memory object."]
#[doc = " @param memory_desc Memory descriptor."]
#[doc = " @param engine Engine to use."]
#[doc = " @param handle Handle of the memory buffer to use as an underlying storage."]
#[doc = " - A pointer to the user-allocated buffer. In this case the library"]
#[doc = " doesn't own the buffer."]
#[doc = " - The DNNL_MEMORY_ALLOCATE special value. Instructs the library to"]
#[doc = " allocate the buffer for the memory object. In this case the library"]
#[doc = " owns the buffer."]
#[doc = " - DNNL_MEMORY_NONE to create dnnl_memory without an underlying buffer."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_create(
memory: *mut dnnl_memory_t,
memory_desc: *const dnnl_memory_desc_t,
engine: dnnl_engine_t,
handle: *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the memory descriptor for a memory object."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param memory_desc Output memory descriptor (a copy)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// NOTE(review): the "(a copy)" wording above looks stale -- the signature
// writes a pointer to the descriptor (`*mut *const dnnl_memory_desc_t`),
// not a by-value copy. Confirm against the dnnl.h version these bindings
// were generated from before relying on the returned pointer's lifetime.
pub fn dnnl_memory_get_memory_desc(
memory: const_dnnl_memory_t,
memory_desc: *mut *const dnnl_memory_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the engine of a memory object."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param engine Output engine on which the memory is located."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_get_engine(
memory: const_dnnl_memory_t,
engine: *mut dnnl_engine_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Maps a memory object and returns a host-side pointer to a memory buffer"]
#[doc = " with a copy of its contents."]
#[doc = ""]
#[doc = " Mapping enables explicit direct access to memory contents for the engines"]
#[doc = " that do not support it implicitly."]
#[doc = ""]
#[doc = " Mapping is an exclusive operation - a memory object cannot be used in"]
#[doc = " other operations until this memory object is unmapped. The mapped pointer"]
#[doc = " must be released with dnnl_memory_unmap_data()."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Any primitives working with @p memory should be completed before"]
#[doc = " the memory is mapped. Use dnnl_stream_wait to synchronize the"]
#[doc = " corresponding execution stream."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The dnnl_memory_map_data() and dnnl_memory_unmap_data() functions are"]
#[doc = " mainly provided for debug and testing purposes, and their performance"]
#[doc = " may be suboptimal."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param mapped_ptr Output pointer to the mapped buffer."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_map_data(
memory: const_dnnl_memory_t,
mapped_ptr: *mut *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Unmaps a memory object and writes back any changes made to the previously"]
#[doc = " mapped memory buffer. The pointer to the mapped buffer must be obtained"]
#[doc = " via the dnnl_memory_map_data() call. After unmapping, the memory object"]
#[doc = " may again be used in other operations."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The dnnl_memory_map_data() and dnnl_memory_unmap_data() functions are"]
#[doc = " mainly provided for debug and testing purposes, and their performance"]
#[doc = " may be suboptimal."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param mapped_ptr Pointer to the mapped buffer that must have been"]
#[doc = " obtained using the dnnl_memory_map_data() function."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_unmap_data(
memory: const_dnnl_memory_t,
mapped_ptr: *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns memory object's data handle."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param handle Output data handle. For the CPU engine, the data handle is a"]
#[doc = " pointer to the actual data. For OpenCL it is a cl_mem."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
#[doc = ""]
#[doc = " @sa dnnl_memory_set_data_handle()"]
pub fn dnnl_memory_get_data_handle(
memory: const_dnnl_memory_t,
handle: *mut *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets the underlying memory buffer."]
#[doc = ""]
#[doc = " See the description of dnnl_memory_set_data_handle_v2() for more details."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param handle Data handle. For the CPU engine, the data handle is a"]
#[doc = " pointer to the actual data. For OpenCL it is a `cl_mem`."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
#[doc = ""]
#[doc = " @sa dnnl_memory_get_data_handle()"]
pub fn dnnl_memory_set_data_handle(
memory: dnnl_memory_t,
handle: *mut ::libc::c_void,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets the underlying memory buffer."]
#[doc = ""]
#[doc = " This function may write zero values to the memory specified by the @p"]
#[doc = " handle if the memory object has a zero padding area. This may be"]
#[doc = " time-consuming and happens each time this function is called. The"]
#[doc = " operation is always blocking and the stream parameter is a hint."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The zero padding is required by memory objects created with blocked"]
#[doc = " memory format tags like #dnnl_aBcd8b when any of the dimensions is not"]
#[doc = " a multiple of the corresponding block size. For \"plain\" formats like"]
#[doc = " #dnnl_nchw or #dnnl_nhwc zero padding area needs to be set up"]
#[doc = " explicitly when creating the corresponding memory descriptors. See"]
#[doc = " @ref dev_guide_understanding_memory_formats for more details."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Even when the memory object is used to hold values that stay constant"]
#[doc = " during the execution of the program (pre-packed weights during"]
#[doc = " inference, for example), the function will still write zeroes to the"]
#[doc = " padding area if it exists. Hence, the @p handle parameter cannot and"]
#[doc = " does not have a const qualifier."]
#[doc = ""]
#[doc = " @param memory Memory object."]
#[doc = " @param handle Data handle. For the CPU engine, the data handle is a"]
#[doc = " pointer to the actual data. For OpenCL it is a `cl_mem`."]
#[doc = " @param stream Stream to use to execute padding in."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_set_data_handle_v2(
memory: dnnl_memory_t,
handle: *mut ::libc::c_void,
stream: dnnl_stream_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys a memory object. After this call the memory object handle is"]
#[doc = " invalid and must not be used."]
#[doc = ""]
#[doc = " @param memory Memory object to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_memory_destroy(memory: dnnl_memory_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates a primitive descriptor for a reorder primitive."]
#[doc = ""]
#[doc = " @param reorder_primitive_desc Output primitive descriptor."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param src_engine Engine on which the source memory object will be"]
#[doc = " located."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param dst_engine Engine on which the destination memory object"]
#[doc = " will be located."]
#[doc = " @param attr Primitive attributes to use (can be NULL)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// NOTE(review): the caller presumably owns the output primitive descriptor
// and must release it via the corresponding destroy call -- confirm against
// the primitive-descriptor lifetime rules in the DNNL API documentation.
pub fn dnnl_reorder_primitive_desc_create(
reorder_primitive_desc: *mut dnnl_primitive_desc_t,
src_desc: *const dnnl_memory_desc_t,
src_engine: dnnl_engine_t,
dst_desc: *const dnnl_memory_desc_t,
dst_engine: dnnl_engine_t,
attr: const_dnnl_primitive_attr_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates a primitive descriptor for an out-of-place concatenation"]
#[doc = " primitive."]
#[doc = ""]
#[doc = " @param concat_primitive_desc Output primitive descriptor."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param n Number of source parameters."]
#[doc = " @param concat_dimension Source tensors will be concatenated over the"]
#[doc = " dimension with this index. Note that the order of dimensions does"]
#[doc = " not depend on memory format."]
#[doc = " @param src_descs Array of source memory descriptors with @p n elements."]
#[doc = " @param attr Primitive attributes to use (can be NULL)."]
#[doc = " @param engine Engine to use."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_concat_primitive_desc_create(
concat_primitive_desc: *mut dnnl_primitive_desc_t,
dst_desc: *const dnnl_memory_desc_t,
n: ::libc::c_int,
concat_dimension: ::libc::c_int,
src_descs: *const dnnl_memory_desc_t,
attr: const_dnnl_primitive_attr_t,
engine: dnnl_engine_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates a primitive descriptor for an (out-of-place) sum primitive."]
#[doc = ""]
#[doc = " @param sum_primitive_desc Output primitive descriptor."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param n Number of source parameters."]
#[doc = " @param scales Array of scales to multiply data in each source"]
#[doc = " memory by."]
#[doc = " @param src_descs Array of source memory descriptors having @p n elements."]
#[doc = " @param attr Primitive attributes to use (can be NULL)."]
#[doc = " @param engine Engine to use."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_sum_primitive_desc_create(
sum_primitive_desc: *mut dnnl_primitive_desc_t,
dst_desc: *const dnnl_memory_desc_t,
n: ::libc::c_int,
scales: *const f32,
src_descs: *const dnnl_memory_desc_t,
attr: const_dnnl_primitive_attr_t,
engine: dnnl_engine_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a binary primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptor @p dst_desc is allowed to be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Both memory descriptors must have the same number of dimensions."]
#[doc = " Element broadcasting is supported for memory descriptor @p src1_desc"]
#[doc = " and is applied to @p src1_desc dimensions that have size equal to 1."]
#[doc = ""]
#[doc = " @param binary_desc Output descriptor for a binary primitive."]
#[doc = " @param alg_kind Algorithm kind. Valid values are #dnnl_binary_add,"]
#[doc = " #dnnl_binary_mul, #dnnl_binary_max and #dnnl_binary_min."]
#[doc = " @param src0_desc Source 0 memory descriptor."]
#[doc = " @param src1_desc Source 1 memory descriptor."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_binary_desc_init(
binary_desc: *mut dnnl_binary_desc_t,
alg_kind: dnnl_alg_kind_t,
src0_desc: *const dnnl_memory_desc_t,
src1_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a convolution forward propagation primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
#[doc = " spatial dimensions only and hence must have the same number of elements as"]
#[doc = " there are spatial dimensions. The order of values is the same as in the"]
#[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param weights_desc Weights memory descriptor."]
#[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
#[doc = " descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is assumed to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_convolution_forward_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
prop_kind: dnnl_prop_kind_t,
alg_kind: dnnl_alg_kind_t,
src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a dilated convolution forward propagation"]
#[doc = " primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
#[doc = " values for spatial dimensions only and hence must have the same number of"]
#[doc = " elements as there are spatial dimensions. The order of values is the same"]
#[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
#[doc = " and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param weights_desc Weights memory descriptor."]
#[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
#[doc = " descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param dilates Array of dilations for spatial dimensions. A zero value"]
#[doc = " means no dilation in the corresponding dimension."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is considered to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_dilated_convolution_forward_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
prop_kind: dnnl_prop_kind_t,
alg_kind: dnnl_alg_kind_t,
src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
dilates: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a convolution backward propagation primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
#[doc = " spatial dimensions only and hence must have the same number of elements as"]
#[doc = " there are spatial dimensions. The order of values is the same as in the"]
#[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param diff_src_desc Diff source memory descriptor."]
#[doc = " @param weights_desc Weights memory descriptor."]
#[doc = " @param diff_dst_desc Diff destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is assumed to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_convolution_backward_data_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
alg_kind: dnnl_alg_kind_t,
diff_src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
diff_dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a dilated convolution backward propagation"]
#[doc = " primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
#[doc = " values for spatial dimensions only and hence must have the same number of"]
#[doc = " elements as there are spatial dimensions. The order of values is the same"]
#[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
#[doc = " and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param diff_src_desc Diff source memory descriptor."]
#[doc = " @param weights_desc Weights memory descriptor."]
#[doc = " @param diff_dst_desc Diff destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param dilates Array of dilations for spatial dimensions. A zero value"]
#[doc = " means no dilation in the corresponding dimension."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is considered to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_dilated_convolution_backward_data_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
alg_kind: dnnl_alg_kind_t,
diff_src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
diff_dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
dilates: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a convolution weights gradient primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
#[doc = " spatial dimensions only and hence must have the same number of elements as"]
#[doc = " there are spatial dimensions. The order of values is the same as in the"]
#[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param diff_weights_desc Diff weights memory descriptor."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor. Passing NULL, a zero"]
#[doc = " memory descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param diff_dst_desc Diff destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is considered to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_convolution_backward_weights_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
alg_kind: dnnl_alg_kind_t,
src_desc: *const dnnl_memory_desc_t,
diff_weights_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a dilated convolution weights gradient"]
#[doc = " primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
#[doc = " values for spatial dimensions only and hence must have the same number of"]
#[doc = " elements as there are spatial dimensions. The order of values is the same"]
#[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
#[doc = " and width."]
#[doc = ""]
#[doc = " @param conv_desc Output descriptor for a convolution primitive."]
#[doc = " @param alg_kind Convolution algorithm. Possible values are"]
#[doc = " #dnnl_convolution_direct, #dnnl_convolution_winograd,"]
#[doc = " #dnnl_convolution_auto."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param diff_weights_desc Diff weights memory descriptor."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor. Passing NULL, a zero"]
#[doc = " memory descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param diff_dst_desc Diff destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param dilates Array of dilations for spatial dimensions. A zero value"]
#[doc = " means no dilation in the corresponding dimension."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is considered to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_dilated_convolution_backward_weights_desc_init(
conv_desc: *mut dnnl_convolution_desc_t,
alg_kind: dnnl_alg_kind_t,
src_desc: *const dnnl_memory_desc_t,
diff_weights_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
dilates: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a deconvolution forward propagation primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
#[doc = " spatial dimensions only and hence must have the same number of elements as"]
#[doc = " there are spatial dimensions. The order of values is the same as in the"]
#[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
#[doc = ""]
#[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
#[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param weights_desc Weights memory descriptor."]
#[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
#[doc = " descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @param strides Array of strides for spatial dimensions."]
#[doc = " @param padding_l Array of padding values for low indices for each spatial"]
#[doc = " dimension `([[front,] top,] left)`."]
#[doc = " @param padding_r Array of padding values for high indices for each spatial"]
#[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
#[doc = " padding is considered to be symmetrical."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_deconvolution_forward_desc_init(
deconv_desc: *mut dnnl_deconvolution_desc_t,
prop_kind: dnnl_prop_kind_t,
alg_kind: dnnl_alg_kind_t,
src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
strides: *mut dnnl_dim_t,
padding_l: *mut dnnl_dim_t,
padding_r: *mut dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a dilated deconvolution forward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
    #[doc = " values for spatial dimensions only and hence must have the same number of"]
    #[doc = " elements as there are spatial dimensions. The order of values is the same"]
    #[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
    #[doc = " and width."]
    #[doc = ""]
    #[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
    #[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param weights_desc Weights memory descriptor."]
    #[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
    #[doc = " descriptor, or a memory descriptor with format_kind set to"]
    #[doc = " #dnnl_format_kind_undef disables the bias term."]
    #[doc = " @param dst_desc Destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param dilates Array of dilations for spatial dimension. A zero value"]
    #[doc = " means no dilation in the corresponding dimension."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `dilates`, `padding_l`, and
    // `padding_r` as `const dnnl_dims_t` (read-only input arrays); bindgen
    // drops constness on array-typed C parameters. Declared `*const` to match
    // the header; `*mut T` coerces to `*const T`, so callers are unaffected.
    pub fn dnnl_dilated_deconvolution_forward_desc_init(
        deconv_desc: *mut dnnl_deconvolution_desc_t,
        prop_kind: dnnl_prop_kind_t,
        alg_kind: dnnl_alg_kind_t,
        src_desc: *const dnnl_memory_desc_t,
        weights_desc: *const dnnl_memory_desc_t,
        bias_desc: *const dnnl_memory_desc_t,
        dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        dilates: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a deconvolution backward propagation primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
    #[doc = " spatial dimensions only and hence must have the same number of elements as"]
    #[doc = " there are spatial dimensions. The order of values is the same as in the"]
    #[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
    #[doc = ""]
    #[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
    #[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
    #[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
    #[doc = " @param diff_src_desc Diff source memory descriptor."]
    #[doc = " @param weights_desc Weights memory descriptor."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `padding_l`, and `padding_r` as
    // `const dnnl_dims_t`; bindgen drops constness on array-typed C
    // parameters. Declared `*const` to match the header; `*mut T` coerces to
    // `*const T`, so existing callers compile unchanged.
    pub fn dnnl_deconvolution_backward_data_desc_init(
        deconv_desc: *mut dnnl_deconvolution_desc_t,
        alg_kind: dnnl_alg_kind_t,
        diff_src_desc: *const dnnl_memory_desc_t,
        weights_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a dilated deconvolution backward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
    #[doc = " values for spatial dimensions only and hence must have the same number of"]
    #[doc = " elements as there are spatial dimensions. The order of values is the same"]
    #[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
    #[doc = " and width."]
    #[doc = ""]
    #[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
    #[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
    #[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
    #[doc = " @param diff_src_desc Diff source memory descriptor."]
    #[doc = " @param weights_desc Weights memory descriptor."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param dilates Array of dilations for spatial dimension. A zero value"]
    #[doc = " means no dilation in the corresponding dimension."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `dilates`, `padding_l`, and
    // `padding_r` as `const dnnl_dims_t`; bindgen drops constness on
    // array-typed C parameters. Declared `*const` to match the header;
    // `*mut T` coerces to `*const T`, so existing callers compile unchanged.
    pub fn dnnl_dilated_deconvolution_backward_data_desc_init(
        deconv_desc: *mut dnnl_deconvolution_desc_t,
        alg_kind: dnnl_alg_kind_t,
        diff_src_desc: *const dnnl_memory_desc_t,
        weights_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        dilates: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a deconvolution weights gradient primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p padding_l, and @p padding_r contain values for"]
    #[doc = " spatial dimensions only and hence must have the same number of elements as"]
    #[doc = " there are spatial dimensions. The order of values is the same as in the"]
    #[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
    #[doc = ""]
    #[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
    #[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
    #[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param diff_weights_desc Diff weights memory descriptor."]
    #[doc = " @param diff_bias_desc Diff bias memory descriptor. Passing NULL, a zero"]
    #[doc = " memory descriptor, or a memory descriptor with format_kind set to"]
    #[doc = " #dnnl_format_kind_undef disables the bias term."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `padding_l`, and `padding_r` as
    // `const dnnl_dims_t`; bindgen drops constness on array-typed C
    // parameters. Declared `*const` to match the header; `*mut T` coerces to
    // `*const T`, so existing callers compile unchanged.
    pub fn dnnl_deconvolution_backward_weights_desc_init(
        deconv_desc: *mut dnnl_deconvolution_desc_t,
        alg_kind: dnnl_alg_kind_t,
        src_desc: *const dnnl_memory_desc_t,
        diff_weights_desc: *const dnnl_memory_desc_t,
        diff_bias_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a dilated deconvolution weights gradient"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p dilates, @p padding_l, and @p padding_r contain"]
    #[doc = " values for spatial dimensions only and hence must have the same number of"]
    #[doc = " elements as there are spatial dimensions. The order of values is the same"]
    #[doc = " as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors),"]
    #[doc = " and width."]
    #[doc = ""]
    #[doc = " @param deconv_desc Output descriptor for a deconvolution primitive."]
    #[doc = " @param alg_kind Deconvolution algorithm. Possible values are"]
    #[doc = " #dnnl_deconvolution_direct, #dnnl_deconvolution_winograd."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param diff_weights_desc Diff weights memory descriptor."]
    #[doc = " @param diff_bias_desc Diff bias memory descriptor. Passing NULL, a zero"]
    #[doc = " memory descriptor, or a memory descriptor with format_kind set to"]
    #[doc = " #dnnl_format_kind_undef disables the bias term."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param dilates Array of dilations for spatial dimension. A zero value"]
    #[doc = " means no dilation in the corresponding dimension."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `dilates`, `padding_l`, and
    // `padding_r` as `const dnnl_dims_t`; bindgen drops constness on
    // array-typed C parameters. Declared `*const` to match the header;
    // `*mut T` coerces to `*const T`, so existing callers compile unchanged.
    pub fn dnnl_dilated_deconvolution_backward_weights_desc_init(
        deconv_desc: *mut dnnl_deconvolution_desc_t,
        alg_kind: dnnl_alg_kind_t,
        src_desc: *const dnnl_memory_desc_t,
        diff_weights_desc: *const dnnl_memory_desc_t,
        diff_bias_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        dilates: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for shuffle forward propagation primitive."]
    #[doc = ""]
    #[doc = " @param shuffle_desc Output descriptor for a shuffle primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param axis The axis along which the data is shuffled."]
    #[doc = " @param group_size Shuffle group size."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `shuffle_desc` is an out-parameter written by the
    // library; `data_desc` is read-only input (hence `*const`).
    pub fn dnnl_shuffle_forward_desc_init(
        shuffle_desc: *mut dnnl_shuffle_desc_t,
        prop_kind: dnnl_prop_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        axis: ::libc::c_int,
        group_size: dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for shuffle backward propagation primitive."]
    #[doc = ""]
    #[doc = " @param shuffle_desc Output descriptor for a shuffle primitive."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptor."]
    #[doc = " @param axis The axis along which the data is shuffled."]
    #[doc = " @param group_size Shuffle group size."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: same shape as the forward variant above but without a
    // propagation kind; `shuffle_desc` is the out-parameter.
    pub fn dnnl_shuffle_backward_desc_init(
        shuffle_desc: *mut dnnl_shuffle_desc_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        axis: ::libc::c_int,
        group_size: dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for eltwise forward propagation primitive."]
    #[doc = ""]
    #[doc = " @param eltwise_desc Output descriptor for an eltwise primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param alg_kind Elementwise algorithm kind."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param alpha The alpha parameter for the elementwise operation. Specific"]
    #[doc = " meaning depends on the algorithm."]
    #[doc = " @param beta The beta parameter for the elementwise operation. Specific"]
    #[doc = " meaning depends on the algorithm."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `eltwise_desc` is written by the library; `alpha` and
    // `beta` are algorithm-specific scalars (see doc above).
    pub fn dnnl_eltwise_forward_desc_init(
        eltwise_desc: *mut dnnl_eltwise_desc_t,
        prop_kind: dnnl_prop_kind_t,
        alg_kind: dnnl_alg_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        alpha: f32,
        beta: f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for eltwise backward propagation primitive."]
    #[doc = ""]
    #[doc = " @param eltwise_desc Output descriptor for an eltwise primitive."]
    #[doc = " @param alg_kind Elementwise algorithm kind."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptors."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param alpha The alpha parameter for the elementwise operation. Specific"]
    #[doc = " meaning depends on the algorithm."]
    #[doc = " @param beta The beta parameter for the elementwise operation. Specific"]
    #[doc = " meaning depends on the algorithm."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: backward variant takes both the diff descriptor and the
    // original data descriptor; only `eltwise_desc` is written.
    pub fn dnnl_eltwise_backward_desc_init(
        eltwise_desc: *mut dnnl_eltwise_desc_t,
        alg_kind: dnnl_alg_kind_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        alpha: f32,
        beta: f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for softmax forward propagation primitive."]
    #[doc = ""]
    #[doc = " @param softmax_desc Output descriptor for a softmax primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param softmax_axis Axis over which softmax is computed."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `softmax_desc` is the out-parameter; `softmax_axis`
    // selects the reduction axis within `data_desc`.
    pub fn dnnl_softmax_forward_desc_init(
        softmax_desc: *mut dnnl_softmax_desc_t,
        prop_kind: dnnl_prop_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        softmax_axis: ::libc::c_int,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for softmax backward propagation primitive."]
    #[doc = ""]
    #[doc = " @param softmax_desc Output descriptor for a softmax primitive."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptors."]
    #[doc = " @param data_desc Destination memory descriptor."]
    #[doc = " @param softmax_axis Axis over which softmax is computed."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: note that `data_desc` here is the *destination* (forward
    // output) descriptor, unlike most other backward initializers in this file.
    pub fn dnnl_softmax_backward_desc_init(
        softmax_desc: *mut dnnl_softmax_desc_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        softmax_axis: ::libc::c_int,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for logsoftmax forward propagation primitive."]
    #[doc = ""]
    #[doc = " @param logsoftmax_desc Output descriptor for a logsoftmax primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param logsoftmax_axis Axis over which logsoftmax is computed."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: mirrors the softmax forward initializer above, with
    // `logsoftmax_axis` selecting the reduction axis.
    pub fn dnnl_logsoftmax_forward_desc_init(
        logsoftmax_desc: *mut dnnl_logsoftmax_desc_t,
        prop_kind: dnnl_prop_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        logsoftmax_axis: ::libc::c_int,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for logsoftmax backward propagation primitive."]
    #[doc = ""]
    #[doc = " @param logsoftmax_desc Output descriptor for a logsoftmax primitive."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptors."]
    #[doc = " @param data_desc Destination memory descriptor."]
    #[doc = " @param logsoftmax_axis Axis over which logsoftmax is computed."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: as with softmax backward, `data_desc` is the
    // *destination* (forward output) descriptor.
    pub fn dnnl_logsoftmax_backward_desc_init(
        logsoftmax_desc: *mut dnnl_logsoftmax_desc_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        logsoftmax_axis: ::libc::c_int,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for pooling forward propagation primitive."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p kernel, @p padding_l, and @p padding_r contain values"]
    #[doc = " for spatial dimensions only and hence must have the same number of elements"]
    #[doc = " as there are spatial dimensions. The order of values is the same as in the"]
    #[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
    #[doc = ""]
    #[doc = " @param pool_desc Output descriptor for a pooling primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param alg_kind Pooling algorithm kind: either #dnnl_pooling_max,"]
    #[doc = " #dnnl_pooling_avg_include_padding, or #dnnl_pooling_avg (same as"]
    #[doc = " #dnnl_pooling_avg_exclude_padding)."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param dst_desc Destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param kernel Array of kernel spatial dimensions."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `kernel`, `padding_l`, and
    // `padding_r` as `const dnnl_dims_t` (read-only input arrays); bindgen
    // drops constness on array-typed C parameters. Declared `*const` to match
    // the header; `*mut T` coerces to `*const T`, so callers are unaffected.
    pub fn dnnl_pooling_forward_desc_init(
        pool_desc: *mut dnnl_pooling_desc_t,
        prop_kind: dnnl_prop_kind_t,
        alg_kind: dnnl_alg_kind_t,
        src_desc: *const dnnl_memory_desc_t,
        dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        kernel: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for pooling backward propagation primitive."]
    #[doc = ""]
    #[doc = " Arrays @p strides, @p kernel, @p padding_l, and @p padding_r contain values"]
    #[doc = " for spatial dimensions only and hence must have the same number of elements"]
    #[doc = " as there are spatial dimensions. The order of values is the same as in the"]
    #[doc = " tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width."]
    #[doc = ""]
    #[doc = " @param pool_desc Output descriptor for a pooling primitive."]
    #[doc = " @param alg_kind Pooling algorithm kind: either #dnnl_pooling_max,"]
    #[doc = " #dnnl_pooling_avg_include_padding, or #dnnl_pooling_avg (same as"]
    #[doc = " #dnnl_pooling_avg_exclude_padding)."]
    #[doc = " @param diff_src_desc Diff source memory descriptor."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @param strides Array of strides for spatial dimension."]
    #[doc = " @param kernel Array of kernel spatial dimensions."]
    #[doc = " @param padding_l Array of padding values for low indices for each spatial"]
    #[doc = " dimension `([[front,] top,] left)`."]
    #[doc = " @param padding_r Array of padding values for high indices for each spatial"]
    #[doc = " dimension `([[back,] bottom,] right)`. Can be NULL in which case"]
    #[doc = " padding is considered to be symmetrical."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // NOTE(review): dnnl.h declares `strides`, `kernel`, `padding_l`, and
    // `padding_r` as `const dnnl_dims_t` (read-only input arrays); bindgen
    // drops constness on array-typed C parameters. Declared `*const` to match
    // the header; `*mut T` coerces to `*const T`, so callers are unaffected.
    pub fn dnnl_pooling_backward_desc_init(
        pool_desc: *mut dnnl_pooling_desc_t,
        alg_kind: dnnl_alg_kind_t,
        diff_src_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
        strides: *const dnnl_dim_t,
        kernel: *const dnnl_dim_t,
        padding_l: *const dnnl_dim_t,
        padding_r: *const dnnl_dim_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for LRN forward propagation primitive."]
    #[doc = ""]
    #[doc = " @param lrn_desc Output descriptor for a LRN primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param alg_kind LRN algorithm kind: either #dnnl_lrn_across_channels or"]
    #[doc = " #dnnl_lrn_within_channel."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param local_size Regularization local size."]
    #[doc = " @param alpha The alpha regularization parameter."]
    #[doc = " @param beta The beta regularization parameter."]
    #[doc = " @param k The k regularization parameter."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `lrn_desc` is written by the library; the scalar
    // regularization parameters are passed by value.
    pub fn dnnl_lrn_forward_desc_init(
        lrn_desc: *mut dnnl_lrn_desc_t,
        prop_kind: dnnl_prop_kind_t,
        alg_kind: dnnl_alg_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        local_size: dnnl_dim_t,
        alpha: f32,
        beta: f32,
        k: f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for LRN backward propagation primitive."]
    #[doc = ""]
    #[doc = " @param lrn_desc Output descriptor for a LRN primitive."]
    #[doc = " @param alg_kind LRN algorithm kind: either #dnnl_lrn_across_channels or"]
    #[doc = " #dnnl_lrn_within_channel."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptor."]
    #[doc = " @param data_desc Source memory descriptor."]
    #[doc = " @param local_size Regularization local size."]
    #[doc = " @param alpha The alpha regularization parameter."]
    #[doc = " @param beta The beta regularization parameter."]
    #[doc = " @param k The k regularization parameter."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: backward variant takes the forward source descriptor
    // (`data_desc`) in addition to the diff descriptor; no propagation kind.
    pub fn dnnl_lrn_backward_desc_init(
        lrn_desc: *mut dnnl_lrn_desc_t,
        alg_kind: dnnl_alg_kind_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        local_size: dnnl_dim_t,
        alpha: f32,
        beta: f32,
        k: f32,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a batch normalization forward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " In-place operation is supported: the dst can refer to the same memory"]
    #[doc = " as the src."]
    #[doc = ""]
    #[doc = " @param bnrm_desc Output descriptor for batch normalization primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param epsilon Batch normalization epsilon parameter."]
    #[doc = " @param flags Batch normalization flags (@ref dnnl_normalization_flags_t)."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `flags` is a bitmask of dnnl_normalization_flags_t
    // values, passed as a plain C unsigned int.
    pub fn dnnl_batch_normalization_forward_desc_init(
        bnrm_desc: *mut dnnl_batch_normalization_desc_t,
        prop_kind: dnnl_prop_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        epsilon: f32,
        flags: ::libc::c_uint,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a batch normalization backward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " In-place operation is supported: the diff_dst can refer to the same"]
    #[doc = " memory as the diff_src."]
    #[doc = ""]
    #[doc = " @param bnrm_desc Output descriptor for batch normalization primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_backward_data and #dnnl_backward (diffs for all parameters are"]
    #[doc = " computed in this case)."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptor."]
    #[doc = " @param data_desc Source memory descriptor."]
    #[doc = " @param epsilon Batch normalization epsilon parameter."]
    #[doc = " @param flags Batch normalization flags (@ref dnnl_normalization_flags_t)."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: unlike most backward initializers here, this one keeps
    // a `prop_kind` parameter (#dnnl_backward vs #dnnl_backward_data).
    pub fn dnnl_batch_normalization_backward_desc_init(
        bnrm_desc: *mut dnnl_batch_normalization_desc_t,
        prop_kind: dnnl_prop_kind_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        epsilon: f32,
        flags: ::libc::c_uint,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for layer normalization forward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " In-place operation is supported: the dst can refer to the same memory"]
    #[doc = " as the src."]
    #[doc = ""]
    #[doc = " @param lnrm_desc Output descriptor for layer normalization primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param data_desc Source and destination memory descriptor."]
    #[doc = " @param stat_desc Memory descriptor for mean and variance. If this"]
    #[doc = " parameter is NULL, a zero memory descriptor, or a memory descriptor"]
    #[doc = " with format_kind set to #dnnl_format_kind_undef, then the memory"]
    #[doc = " descriptor for stats is derived from @p data_desc by removing the last"]
    #[doc = " dimension."]
    #[doc = " @param epsilon Layer normalization epsilon parameter."]
    #[doc = " @param flags Layer normalization flags (@ref dnnl_normalization_flags_t)."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `stat_desc` may be NULL (see doc above), so callers can
    // pass `std::ptr::null()` to let the library derive the stats descriptor.
    pub fn dnnl_layer_normalization_forward_desc_init(
        lnrm_desc: *mut dnnl_layer_normalization_desc_t,
        prop_kind: dnnl_prop_kind_t,
        data_desc: *const dnnl_memory_desc_t,
        stat_desc: *const dnnl_memory_desc_t,
        epsilon: f32,
        flags: ::libc::c_uint,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes a descriptor for a layer normalization backward propagation"]
    #[doc = " primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " In-place operation is supported: the diff_dst can refer to the same"]
    #[doc = " memory as the diff_src."]
    #[doc = ""]
    #[doc = " @param lnrm_desc Output descriptor for layer normalization primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_backward_data and #dnnl_backward (diffs for all parameters are"]
    #[doc = " computed in this case)."]
    #[doc = " @param diff_data_desc Diff source and diff destination memory descriptor."]
    #[doc = " @param data_desc Source memory descriptor."]
    #[doc = " @param stat_desc Memory descriptor for mean and variance. If this"]
    #[doc = " parameter is NULL, a zero memory descriptor, or a memory descriptor"]
    #[doc = " with format_kind set to #dnnl_format_kind_undef, then the memory"]
    #[doc = " descriptor for stats is derived from @p data_desc by removing the last"]
    #[doc = " dimension."]
    #[doc = " @param epsilon Layer normalization epsilon parameter."]
    #[doc = " @param flags Layer normalization flags (@ref dnnl_normalization_flags_t)."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: like the batch-norm backward variant, this keeps a
    // `prop_kind`; `stat_desc` may be NULL (see doc above).
    pub fn dnnl_layer_normalization_backward_desc_init(
        lnrm_desc: *mut dnnl_layer_normalization_desc_t,
        prop_kind: dnnl_prop_kind_t,
        diff_data_desc: *const dnnl_memory_desc_t,
        data_desc: *const dnnl_memory_desc_t,
        stat_desc: *const dnnl_memory_desc_t,
        epsilon: f32,
        flags: ::libc::c_uint,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes descriptor for inner product forward propagation."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " @param ip_desc Output descriptor for inner product primitive."]
    #[doc = " @param prop_kind Propagation kind. Possible values are"]
    #[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param weights_desc Weights memory descriptor."]
    #[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
    #[doc = " descriptor, or a memory descriptor with format_kind set to"]
    #[doc = " #dnnl_format_kind_undef disables the bias term."]
    #[doc = " @param dst_desc Destination memory descriptor."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: `bias_desc` may be NULL to disable the bias term (see
    // doc above); only `ip_desc` is written.
    pub fn dnnl_inner_product_forward_desc_init(
        ip_desc: *mut dnnl_inner_product_desc_t,
        prop_kind: dnnl_prop_kind_t,
        src_desc: *const dnnl_memory_desc_t,
        weights_desc: *const dnnl_memory_desc_t,
        bias_desc: *const dnnl_memory_desc_t,
        dst_desc: *const dnnl_memory_desc_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes descriptor for inner product backward propagation."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " @param ip_desc Output descriptor for inner product primitive."]
    #[doc = " @param diff_src_desc Diff source memory descriptor."]
    #[doc = " @param weights_desc Weights memory descriptor."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: data-gradient variant — no prop_kind and no bias; only
    // `ip_desc` is written.
    pub fn dnnl_inner_product_backward_data_desc_init(
        ip_desc: *mut dnnl_inner_product_desc_t,
        diff_src_desc: *const dnnl_memory_desc_t,
        weights_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
    ) -> dnnl_status_t;
}
extern "C" {
    #[doc = " Initializes descriptor for inner product weights gradient primitive."]
    #[doc = ""]
    #[doc = " @note"]
    #[doc = " Memory descriptors can be initialized with"]
    #[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
    #[doc = ""]
    #[doc = " @param ip_desc Output descriptor for inner product primitive."]
    #[doc = " @param src_desc Source memory descriptor."]
    #[doc = " @param diff_weights_desc Diff weights memory descriptor."]
    #[doc = " @param diff_bias_desc Diff bias memory descriptor. Passing NULL, a zero"]
    #[doc = " memory descriptor, or a memory descriptor with format_kind set to"]
    #[doc = " #dnnl_format_kind_undef disables the bias term."]
    #[doc = " @param diff_dst_desc Diff destination memory descriptor."]
    #[doc = " @returns #dnnl_success on success and a status describing the error"]
    #[doc = " otherwise."]
    // FFI declaration: weights-gradient variant — `diff_bias_desc` may be NULL
    // to disable the bias term (see doc above).
    pub fn dnnl_inner_product_backward_weights_desc_init(
        ip_desc: *mut dnnl_inner_product_desc_t,
        src_desc: *const dnnl_memory_desc_t,
        diff_weights_desc: *const dnnl_memory_desc_t,
        diff_bias_desc: *const dnnl_memory_desc_t,
        diff_dst_desc: *const dnnl_memory_desc_t,
    ) -> dnnl_status_t;
}
extern "C" {
#[doc = " Set quantization scale and shift parameters for RNN data tensors."]
#[doc = ""]
#[doc = " For performance reasons, the low-precision configuration of the RNN"]
#[doc = " primitives expects input activations to have the unsigned 8-bit integer"]
#[doc = " data type. The scale and shift parameters are used to quantize"]
#[doc = " floating-point data to unsigned integer and must be passed to the RNN"]
#[doc = " primitive using attributes."]
#[doc = ""]
#[doc = " The quantization formula is `scale * (data + shift)`."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Quantization scale and shift are common for src_layer, src_iter,"]
#[doc = " dst_iter, and dst_layer."]
#[doc = ""]
#[doc = " Example usage:"]
#[doc = " @code"]
#[doc = " // RNN parameters"]
#[doc = " int l = 2, t = 2, mb = 32, sic = 32, slc = 32, dic = 32, dlc = 32;"]
#[doc = " // Activations quantization parameters"]
#[doc = " float scale = ..., shift = ...;"]
#[doc = ""]
#[doc = " dnnl_primitive_attr_t rnn_attr;"]
#[doc = " // Create default attributes"]
#[doc = " dnnl_primitive_attr_create(&rnn_attr);"]
#[doc = ""]
#[doc = " // Set scale and shift for int8 quantization of activation"]
#[doc = " dnnl_primitive_attr_set_rnn_data_qparams(rnn_attr, scale, shift);"]
#[doc = ""]
#[doc = " // Create and configure rnn op_desc"]
#[doc = " dnnl_rnn_desc_t rnn_d;"]
#[doc = " dnnl_primitive_desc_t rnn_pd;"]
#[doc = " dnnl_primitive_desc_create(&rnn_pd, &rnn_d, rnn_attr, engine, NULL);"]
#[doc = " @endcode"]
#[doc = ""]
#[doc = " @param attr Primitive attributes."]
#[doc = " @param scale The value to scale the data by."]
#[doc = " @param shift The value to shift the data by."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_primitive_attr_set_rnn_data_qparams(
attr: dnnl_primitive_attr_t,
scale: f32,
shift: f32,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets quantization scaling factors for RNN weights tensors. The"]
#[doc = " low-precision configuration of the RNN primitives expects input weights to"]
#[doc = " use the signed 8-bit integer data type. The scaling factors are used to"]
#[doc = " quantize floating-point data to signed integer and must be passed to RNN"]
#[doc = " primitives using attributes."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The dimension order is always native and does not depend on the actual"]
#[doc = " layout used. For example, five-dimensional weights always have (l, d,"]
#[doc = " i, g, o) logical dimension ordering."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Quantization scales are common for weights_layer and weights_iteration."]
#[doc = ""]
#[doc = " @param attr Primitive attributes."]
#[doc = " @param count Number of elements in the @p scales array."]
#[doc = " @param mask Scaling factors correspondence mask that defines the"]
#[doc = " correspondence between the output tensor dimensions and the @p"]
#[doc = " scales vector. The set i-th bit indicates that a dedicated scaling"]
#[doc = " factor should be used for each index along that dimension. Set the"]
#[doc = " mask to 0 to use a common scaling factor for the whole output"]
#[doc = " tensor."]
#[doc = " @param scales Array of output scaling factors that must contain @p count"]
#[doc = " values and the following equality must hold:"]
#[doc = " \\f[count = \\prod\\limits_{d \\in mask} weights.dims[d].\\f]"]
#[doc = " Violations can only be detected when the attributes are used to create"]
#[doc = " a primitive descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Sets `count` per-dimension (per `mask`) scaling factors for int8 RNN weights.
pub fn dnnl_primitive_attr_set_rnn_weights_qparams(
attr: dnnl_primitive_attr_t,
count: dnnl_dim_t,
mask: ::libc::c_int,
scales: *const f32,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for vanilla RNN forward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the RNN forward propagation primitive should"]
#[doc = " not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for vanilla RNN primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param activation Activation kind. Possible values are #dnnl_eltwise_relu,"]
#[doc = " #dnnl_eltwise_tanh or #dnnl_eltwise_logistic."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @param alpha Negative slope if activation is #dnnl_eltwise_relu."]
#[doc = " @param beta Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Optional descriptors (src_iter, bias, dst_iter) may be NULL or zero; `alpha`
// is the ReLU negative slope, while `beta` and `flags` are documented as unused.
pub fn dnnl_vanilla_rnn_forward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
activation: dnnl_alg_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
alpha: f32,
beta: f32,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for vanilla RNN backward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p diff_src_iter_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p diff_dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the RNN backward propagation primitive should"]
#[doc = " not use the respective data and should use zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for vanilla RNN primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param activation Activation kind. Possible values are #dnnl_eltwise_relu,"]
#[doc = " #dnnl_eltwise_tanh or #dnnl_eltwise_logistic."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param flags Unused."]
#[doc = " @param alpha Negative slope if activation is #dnnl_eltwise_relu."]
#[doc = " @param beta Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Backward variant of dnnl_vanilla_rnn_forward_desc_init: requires
// prop_kind == dnnl_backward and adds a diff_* descriptor for each
// forward-side descriptor.
pub fn dnnl_vanilla_rnn_backward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
activation: dnnl_alg_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
alpha: f32,
beta: f32,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for LSTM forward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM forward propagation primitive should"]
#[doc = " not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @sa dnnl_lstm_forward_desc_init_v2 to initialize forward LSTM with and"]
#[doc = " without peephole"]
#[doc = " @sa dnnl_lstm_forward_desc_init_v3 to initialize forward LSTM with and"]
#[doc = " without peephole / recurrent projection layer"]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Base LSTM forward initializer (no peephole/projection weights); see the
// _v2/_v3 variants referenced above for those extensions.
pub fn dnnl_lstm_forward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for an LSTM (with or without peephole) forward"]
#[doc = " propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc,"]
#[doc = " - @p weights_peephole_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM forward propagation primitive should"]
#[doc = " not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with #dnnl_format_tag_any or"]
#[doc = " with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @sa dnnl_lstm_forward_desc_init_v3 to initialize forward LSTM with and"]
#[doc = " without peephole / recurrent projection layer"]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param weights_peephole_desc Memory descriptor for the weights applied to"]
#[doc = " the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Same as dnnl_lstm_forward_desc_init plus weights_peephole_desc (NULL or a
// zero descriptor disables the peephole term).
pub fn dnnl_lstm_forward_desc_init_v2(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
weights_peephole_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for an LSTM (with or without peephole and with"]
#[doc = " or without recurrent projection layer) forward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc,"]
#[doc = " - @p weights_peephole_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM forward propagation primitive should"]
#[doc = " not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " The @p weights_projection_desc could either be @c NULL or point to a zero"]
#[doc = " memory descriptor. This would then indicate that the LSTM doesn't have"]
#[doc = " recurrent projection layer."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with #dnnl_format_tag_any or"]
#[doc = " with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param weights_peephole_desc Memory descriptor for the weights applied to"]
#[doc = " the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param weights_projection_desc Memory descriptor for the weights applied to"]
#[doc = " the hidden states to get the recurrent projection (according to the"]
#[doc = " Projection LSTM formula)."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Same as dnnl_lstm_forward_desc_init_v2 plus weights_projection_desc (NULL or
// a zero descriptor disables the recurrent projection layer).
pub fn dnnl_lstm_forward_desc_init_v3(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
weights_peephole_desc: *const dnnl_memory_desc_t,
weights_projection_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for an LSTM backward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc, @p diff_src_iter_desc,"]
#[doc = " and @p diff_src_iter_c_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc, @p diff_dst_iter_desc,"]
#[doc = " and @p diff_dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM backward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @sa dnnl_lstm_backward_desc_init_v2 to initialize backward LSTM with and"]
#[doc = " without peephole"]
#[doc = " @sa dnnl_lstm_backward_desc_init_v3 to initialize backward LSTM with and"]
#[doc = " without peephole / recurrent projection layer"]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_src_iter_c_desc Memory descriptor for the diff of input"]
#[doc = " recurrent cell state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param diff_dst_iter_c_desc Memory descriptor for the diff of output"]
#[doc = " recurrent cell state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Backward counterpart of dnnl_lstm_forward_desc_init (no peephole/projection);
// see the _v2/_v3 variants referenced above for those extensions.
pub fn dnnl_lstm_backward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_src_iter_c_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
diff_dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for an LSTM (with or without peephole) backward"]
#[doc = " propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc, @p diff_src_iter_desc,"]
#[doc = " and @p diff_src_iter_c_desc,"]
#[doc = " - @p weights_peephole_desc together with @p diff_weights_peephole_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc, @p diff_dst_iter_desc,"]
#[doc = " and @p diff_dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM backward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with #dnnl_format_tag_any or"]
#[doc = " with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @sa dnnl_lstm_backward_desc_init_v3 to initialize backward LSTM with and"]
#[doc = " without peephole / recurrent projection layer"]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param weights_peephole_desc Memory descriptor for the weights applied to"]
#[doc = " the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_src_iter_c_desc Memory descriptor for the diff of input"]
#[doc = " recurrent cell state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_weights_peephole_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param diff_dst_iter_c_desc Memory descriptor for the diff of output"]
#[doc = " recurrent cell state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
// Backward LSTM with optional peephole weights (and their diffs); see the _v3
// variant referenced above for the recurrent projection layer as well.
pub fn dnnl_lstm_backward_desc_init_v2(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
weights_peephole_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_src_iter_c_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_weights_peephole_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
diff_dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for an LSTM (with or without peephole and with or"]
#[doc = " with out recurrent projection layer) backward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p src_iter_c_desc, diff_src_iter_desc,"]
#[doc = " and @p diff_src_iter_c_desc,"]
#[doc = " - @p weights_peephole_desc together with @p diff_weights_peephole_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p dst_iter_c_desc, diff_dst_iter_desc,"]
#[doc = " and @p diff_dst_iter_c_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LSTM backward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " The @p weights_projection_desc together with @p"]
#[doc = " diff_weights_projection_desc could either be @c NULL or point to a zero"]
#[doc = " memory descriptor. This would then indicate that the LSTM doesn't have"]
#[doc = " recurrent projection layer."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with #dnnl_format_tag_any or"]
#[doc = " with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LSTM primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param src_iter_c_desc Memory descriptor for the input recurrent cell"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param weights_peephole_desc Memory descriptor for the weights applied to"]
#[doc = " the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param weights_projection_desc Memory descriptor for the weights applied to"]
#[doc = " the hidden states to get the recurrent projection (according to the"]
#[doc = " Projection LSTM formula)."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param dst_iter_c_desc Memory descriptor for the output recurrent cell"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_src_iter_c_desc Memory descriptor for the diff of input"]
#[doc = " recurrent cell state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_weights_peephole_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the cell states (according to the Peephole LSTM formula)."]
#[doc = " @param diff_weights_projection_desc Memory descriptor for the diff of"]
#[doc = " weights applied to the hidden states to get the recurrent projection"]
#[doc = " (according to the Projection LSTM formula)."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param diff_dst_iter_c_desc Memory descriptor for the diff of output"]
#[doc = " recurrent cell state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_lstm_backward_desc_init_v3(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
src_iter_c_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
weights_peephole_desc: *const dnnl_memory_desc_t,
weights_projection_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
dst_iter_c_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_src_iter_c_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_weights_peephole_desc: *const dnnl_memory_desc_t,
diff_weights_projection_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
diff_dst_iter_c_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for GRU forward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the GRU forward propagation primitive should"]
#[doc = " not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for GRU primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_gru_forward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for GRU backward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p diff_src_iter_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p diff_dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the GRU backward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for GRU primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_gru_backward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for LBR GRU forward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc,"]
#[doc = " - @p bias_desc,"]
#[doc = " - @p dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LBR GRU forward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LBR GRU primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_lbr_gru_forward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for LBR GRU backward propagation primitive."]
#[doc = ""]
#[doc = " The following arguments may either be @c NULL or point to a zero memory"]
#[doc = " descriptor:"]
#[doc = " - @p src_iter_desc together with @p diff_src_iter_desc,"]
#[doc = " - @p bias_desc together with @p diff_bias_desc,"]
#[doc = " - @p dst_iter_desc together with @p diff_dst_iter_desc."]
#[doc = ""]
#[doc = " This would then indicate that the LBR GRU backward propagation primitive"]
#[doc = " should not use them and should default to zero values instead."]
#[doc = ""]
#[doc = " @note"]
#[doc = " All memory descriptors can be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = " @param rnn_desc Output descriptor for LBR GRU primitive."]
#[doc = " @param prop_kind Propagation kind. Must be #dnnl_backward."]
#[doc = " @param direction RNN direction. See @ref dnnl_rnn_direction_t for more"]
#[doc = " info."]
#[doc = " @param src_layer_desc Memory descriptor for the input vector."]
#[doc = " @param src_iter_desc Memory descriptor for the input recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param weights_layer_desc Memory descriptor for the weights applied to the"]
#[doc = " layer input."]
#[doc = " @param weights_iter_desc Memory descriptor for the weights applied to the"]
#[doc = " recurrent input."]
#[doc = " @param bias_desc Bias memory descriptor."]
#[doc = " @param dst_layer_desc Memory descriptor for the output vector."]
#[doc = " @param dst_iter_desc Memory descriptor for the output recurrent hidden"]
#[doc = " state vector."]
#[doc = " @param diff_src_layer_desc Memory descriptor for the diff of input vector."]
#[doc = " @param diff_src_iter_desc Memory descriptor for the diff of input recurrent"]
#[doc = " hidden state vector."]
#[doc = " @param diff_weights_layer_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the layer input."]
#[doc = " @param diff_weights_iter_desc Memory descriptor for the diff of weights"]
#[doc = " applied to the recurrent input."]
#[doc = " @param diff_bias_desc Diff bias memory descriptor."]
#[doc = " @param diff_dst_layer_desc Memory descriptor for the diff of output"]
#[doc = " vector."]
#[doc = " @param diff_dst_iter_desc Memory descriptor for the diff of output"]
#[doc = " recurrent hidden state vector."]
#[doc = " @param flags Unused."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_lbr_gru_backward_desc_init(
rnn_desc: *mut dnnl_rnn_desc_t,
prop_kind: dnnl_prop_kind_t,
direction: dnnl_rnn_direction_t,
src_layer_desc: *const dnnl_memory_desc_t,
src_iter_desc: *const dnnl_memory_desc_t,
weights_layer_desc: *const dnnl_memory_desc_t,
weights_iter_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_layer_desc: *const dnnl_memory_desc_t,
dst_iter_desc: *const dnnl_memory_desc_t,
diff_src_layer_desc: *const dnnl_memory_desc_t,
diff_src_iter_desc: *const dnnl_memory_desc_t,
diff_weights_layer_desc: *const dnnl_memory_desc_t,
diff_weights_iter_desc: *const dnnl_memory_desc_t,
diff_bias_desc: *const dnnl_memory_desc_t,
diff_dst_layer_desc: *const dnnl_memory_desc_t,
diff_dst_iter_desc: *const dnnl_memory_desc_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a matrix multiplication descriptor."]
#[doc = ""]
#[doc = " @param matmul_desc Output descriptor for matmul primitive."]
#[doc = " @param src_desc Source memory descriptor (matrix A)"]
#[doc = " @param weights_desc Weights memory descriptor (matrix B)"]
#[doc = " @param bias_desc Bias memory descriptor. Passing NULL, a zero memory"]
#[doc = " descriptor, or a memory descriptor with format_kind set to"]
#[doc = " #dnnl_format_kind_undef disables the bias term."]
#[doc = " @param dst_desc Destination memory descriptor (matrix C)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_matmul_desc_init(
matmul_desc: *mut dnnl_matmul_desc_t,
src_desc: *const dnnl_memory_desc_t,
weights_desc: *const dnnl_memory_desc_t,
bias_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for a resampling forward propagation primitive."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Destination memory descriptor is allowed to be initialized with"]
#[doc = " #dnnl_format_tag_any or with format_kind set to #dnnl_format_kind_any."]
#[doc = ""]
#[doc = ""]
#[doc = " @param resampling_desc Output descriptor for a resampling primitive."]
#[doc = " @param prop_kind Propagation kind. Possible values are"]
#[doc = " #dnnl_forward_training and #dnnl_forward_inference."]
#[doc = " @param alg_kind resampling algorithm kind: either #dnnl_resampling_nearest,"]
#[doc = " or #dnnl_resampling_linear."]
#[doc = " @param factors Array of scaling factors for spatial dimension."]
#[doc = " @param src_desc Source memory descriptor."]
#[doc = " @param dst_desc Destination memory descriptor."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_resampling_forward_desc_init(
resampling_desc: *mut dnnl_resampling_desc_t,
prop_kind: dnnl_prop_kind_t,
alg_kind: dnnl_alg_kind_t,
factors: *const f32,
src_desc: *const dnnl_memory_desc_t,
dst_desc: *const dnnl_memory_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Initializes a descriptor for resampling backward propagation primitive."]
#[doc = ""]
#[doc = " @param resampling_desc Output descriptor for a resampling primitive."]
#[doc = " @param alg_kind resamplinging algorithm kind: either"]
#[doc = " #dnnl_resampling_nearest, or #dnnl_resampling_linear."]
#[doc = " @param diff_src_desc Diff source memory descriptor."]
#[doc = " @param diff_dst_desc Diff destination memory descriptor."]
#[doc = " @param factors Array of scaling factors for spatial dimension."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
#[doc = ""]
pub fn dnnl_resampling_backward_desc_init(
resampling_desc: *mut dnnl_resampling_desc_t,
alg_kind: dnnl_alg_kind_t,
factors: *const f32,
diff_src_desc: *const dnnl_memory_desc_t,
diff_dst_desc: *const dnnl_memory_desc_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the number of engines of a particular kind."]
#[doc = ""]
#[doc = " @param kind Kind of engines to count."]
#[doc = " @returns Count of the engines."]
pub fn dnnl_engine_get_count(kind: dnnl_engine_kind_t) -> usize;
}
extern "C" {
#[doc = " Creates an engine."]
#[doc = ""]
#[doc = " @param engine Output engine."]
#[doc = " @param kind Engine kind."]
#[doc = " @param index Engine index that should be between 0 and the count of"]
#[doc = " engines of the requested kind."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_engine_create(
engine: *mut dnnl_engine_t,
kind: dnnl_engine_kind_t,
index: usize,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the kind of an engine."]
#[doc = ""]
#[doc = " @param engine Engine to query."]
#[doc = " @param kind Output engine kind."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_engine_get_kind(
engine: dnnl_engine_t,
kind: *mut dnnl_engine_kind_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys an engine."]
#[doc = ""]
#[doc = " @param engine Engine to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_engine_destroy(engine: dnnl_engine_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates execution stream attributes for a stream that runs on an engine of"]
#[doc = " a particular kind."]
#[doc = ""]
#[doc = " @param attr Output execution stream attributes."]
#[doc = " @param kind Target engine kind."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_attr_create(
attr: *mut dnnl_stream_attr_t,
kind: dnnl_engine_kind_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys execution stream attributes."]
#[doc = ""]
#[doc = " @param attr Execution stream attributes to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_attr_destroy(attr: dnnl_stream_attr_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates an execution stream."]
#[doc = ""]
#[doc = " @param stream Output execution stream."]
#[doc = " @param engine Engine to create the execution stream on."]
#[doc = " @param flags Stream behavior flags (@sa dnnl_stream_flags_t)."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_create(
stream: *mut dnnl_stream_t,
engine: dnnl_engine_t,
flags: ::libc::c_uint,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Creates an execution stream."]
#[doc = ""]
#[doc = " @param stream Output execution stream."]
#[doc = " @param engine Engine to create the execution stream on."]
#[doc = " @param flags Stream behavior flags (@sa dnnl_stream_flags_t)."]
#[doc = " @param attr Stream attributes."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_create_v2(
stream: *mut dnnl_stream_t,
engine: dnnl_engine_t,
flags: ::libc::c_uint,
attr: const_dnnl_stream_attr_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Waits for all primitives in the execution stream to finish computations."]
#[doc = ""]
#[doc = " @param stream Execution stream."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_wait(stream: dnnl_stream_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Destroys an execution stream."]
#[doc = ""]
#[doc = " @param stream Execution stream to destroy."]
#[doc = " @returns #dnnl_success on success and a status describing the error"]
#[doc = " otherwise."]
pub fn dnnl_stream_destroy(stream: dnnl_stream_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns the number of primitives that can be held in the primitive cache"]
#[doc = " at the same time."]
#[doc = ""]
#[doc = " @param capacity Primitive cache capacity to query. Concurrently"]
#[doc = " accessing @p capacity is safe."]
#[doc = " @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the"]
#[doc = " @p capacity value is invalid, and #dnnl_success/#dnnl::status::success on"]
#[doc = " success."]
pub fn dnnl_get_primitive_cache_capacity(capacity: *mut ::libc::c_int) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets a number of primitives that can be held in the primitive cache"]
#[doc = " at a time."]
#[doc = ""]
#[doc = " @param capacity Primitive cache capacity to set. If a new @p capacity is"]
#[doc = " less than a number of primitives that the primitive cache already has"]
#[doc = " then the excess entries will be evicted. Setting the @p capacity to 0"]
#[doc = " clears the primitive cache and disables it. Concurrently modifying"]
#[doc = " @p capacity is safe."]
#[doc = " @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the"]
#[doc = " @p capacity value is invalid, and #dnnl_success/#dnnl::status::success on"]
#[doc = " success."]
pub fn dnnl_set_primitive_cache_capacity(capacity: ::libc::c_int) -> dnnl_status_t;
}
extern "C" {
#[doc = " Configures verbose output to stdout."]
#[doc = ""]
#[doc = " @note"]
#[doc = " Enabling verbose output affects performance."]
#[doc = " This setting overrides the DNNL_VERBOSE environment variable."]
#[doc = ""]
#[doc = " @param level Verbosity level:"]
#[doc = " - 0: no verbose output (default),"]
#[doc = " - 1: primitive information at execution,"]
#[doc = " - 2: primitive information at creation and execution."]
#[doc = " @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the"]
#[doc = " @p level value is invalid, and #dnnl_success/#dnnl::status::success on"]
#[doc = " success."]
pub fn dnnl_set_verbose(level: ::libc::c_int) -> dnnl_status_t;
}
extern "C" {
#[doc = " Configures dumping of JIT-generated code."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This setting overrides the DNNL_JIT_DUMP environment variable."]
#[doc = ""]
#[doc = " @param enable Flag value. Set to 0 to disable and set to 1 to enable."]
#[doc = " @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the"]
#[doc = " @p flag value is invalid, and #dnnl_success/#dnnl::status::success on"]
#[doc = " success."]
pub fn dnnl_set_jit_dump(enable: ::libc::c_int) -> dnnl_status_t;
}
extern "C" {
#[doc = " Returns library version information."]
#[doc = " @returns Pointer to a constant structure containing"]
#[doc = " - major: major version number,"]
#[doc = " - minor: minor version number,"]
#[doc = " - patch: patch release number,"]
#[doc = " - hash: git commit hash."]
pub fn dnnl_version() -> *const dnnl_version_t;
}
extern "C" {
#[doc = " Sets library profiling flags. The flags define which profilers are"]
#[doc = " supported."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This setting overrides DNNL_JIT_PROFILE environment variable."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_profilers"]
#[doc = ""]
#[doc = " @param flags Profiling flags that can contain the following bits:"]
#[doc = " - @ref DNNL_JIT_PROFILE_VTUNE -- integration with VTune Amplifier"]
#[doc = " (on by default)"]
#[doc = " - @ref DNNL_JIT_PROFILE_LINUX_JITDUMP -- produce Linux-specific"]
#[doc = " jit-pid.dump output (off by default). The location of the output"]
#[doc = " is controlled via JITDUMPDIR environment variable or via"]
#[doc = " dnnl_set_jit_profiling_jitdumpdir() function."]
#[doc = " - @ref DNNL_JIT_PROFILE_LINUX_PERFMAP -- produce Linux-specific"]
#[doc = " perf-pid.map output (off by default). The output is always placed"]
#[doc = " into /tmp."]
#[doc = ""]
#[doc = " Passing @ref DNNL_JIT_PROFILE_NONE disables profiling completely."]
#[doc = ""]
#[doc = " @returns #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the"]
#[doc = " @p flags value is invalid, and #dnnl_success/#dnnl::status::success on"]
#[doc = " success."]
pub fn dnnl_set_jit_profiling_flags(flags: ::libc::c_uint) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets JIT dump output path. Only applicable to Linux and is only"]
#[doc = " used when profiling flags have DNNL_JIT_PROFILE_LINUX_PERF bit set."]
#[doc = ""]
#[doc = " After the first JIT kernel is generated, the jitdump output will be placed"]
#[doc = " into temporary directory created using the mkdtemp template"]
#[doc = " 'dir/.debug/jit/dnnl.XXXXXX'."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_profilers"]
#[doc = ""]
#[doc = " @note"]
#[doc = " This setting overrides JITDUMPDIR environment variable. If"]
#[doc = " JITDUMPDIR is not set, and this function is never called, the path"]
#[doc = " defaults to HOME. Passing NULL reverts the value to default."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The directory is accessed only when the first JIT kernel is being"]
#[doc = " created. JIT profiling will be disabled in case of any errors"]
#[doc = " accessing or creating this directory."]
#[doc = ""]
#[doc = " @param dir JIT dump output path."]
#[doc = " @returns #dnnl_success/#dnnl::status::success if the"]
#[doc = " output directory was set correctly and an error status otherwise."]
#[doc = " @returns #dnnl_unimplemented/#dnnl::status::unimplemented on Windows."]
pub fn dnnl_set_jit_profiling_jitdumpdir(dir: *const ::libc::c_char) -> dnnl_status_t;
}
extern "C" {
#[doc = " Sets the maximal ISA the library can dispatch to on the CPU. See"]
#[doc = " #dnnl_cpu_isa_t and #dnnl::cpu_isa for the list of the values accepted by"]
#[doc = " the C and C++ API functions respectively."]
#[doc = ""]
#[doc = " This function has effect only before the first JIT kernel is generated and"]
#[doc = " will return an error afterwards."]
#[doc = ""]
#[doc = " This function overrides the DNNL_MAX_CPU_ISA environment variable. The"]
#[doc = " environment variable can be set to the desired maximal ISA name in upper"]
#[doc = " case and with dnnl_cpu_isa prefix removed. For example:"]
#[doc = " `DNNL_MAX_CPU_ISA=AVX2`."]
#[doc = ""]
#[doc = " @note"]
#[doc = " The ISAs are only partially ordered:"]
#[doc = " - SSE41 < AVX < AVX2,"]
#[doc = " - AVX2 < AVX512_MIC < AVX512_MIC_4OPS,"]
#[doc = " - AVX2 < AVX512_CORE < AVX512_CORE_VNNI < AVX512_CORE_BF16."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_cpu_dispatcher_control for more details"]
#[doc = ""]
#[doc = " @param isa Maximal ISA the library should dispatch to. Pass"]
#[doc = " #dnnl_cpu_isa_all/#dnnl::cpu_isa::all to remove ISA restrictions."]
#[doc = " @returns #dnnl_success/#dnnl::status::success on success and a"]
#[doc = " #dnnl_invalid_arguments/#dnnl::status::invalid_arguments if the @p isa"]
#[doc = " parameter is invalid or the ISA cannot be changed at this time."]
#[doc = " @returns #dnnl_unimplemented/#dnnl::status::unimplemented if the feature"]
#[doc = " was disabled at build time (see @ref dev_guide_build_options for more"]
#[doc = " details)."]
pub fn dnnl_set_max_cpu_isa(isa: dnnl_cpu_isa_t) -> dnnl_status_t;
}
extern "C" {
#[doc = " Gets the maximal ISA the library can dispatch to on the CPU. See"]
#[doc = " #dnnl_cpu_isa_t and #dnnl::cpu_isa for the list of the values returned by"]
#[doc = " the C and C++ API functions respectively."]
#[doc = ""]
#[doc = " @sa @ref dev_guide_cpu_dispatcher_control for more details"]
#[doc = ""]
#[doc = " @returns #dnnl_cpu_isa_t value reflecting the maximal ISA the library may"]
#[doc = " dispatch to."]
pub fn dnnl_get_effective_cpu_isa() -> dnnl_cpu_isa_t;
}
extern "C" {
#[doc = " Performs single-precision matrix-matrix multiply."]
#[doc = ""]
#[doc = " The operation is defined as:"]
#[doc = ""]
#[doc = " `C := alpha * op( A ) * op( B ) + beta * C`"]
#[doc = ""]
#[doc = " where"]
#[doc = " - `op( X ) = X` or `op( X ) = X**T`,"]
#[doc = " - `alpha` and `beta` are scalars, and"]
#[doc = " - `A`, `B`, and `C` are matrices:"]
#[doc = " - `op( A )` is an `MxK` matrix,"]
#[doc = " - `op( B )` is an `KxN` matrix,"]
#[doc = " - `C` is an `MxN` matrix."]
#[doc = ""]
#[doc = " The matrices are assumed to be stored in row-major order (the elements in"]
#[doc = " each of the matrix rows are contiguous in memory)."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This API does not support XERBLA. Instead, unlike the standard BLAS"]
#[doc = " functions, this one returns a dnnl_status_t value to allow error"]
#[doc = " handling."]
#[doc = ""]
#[doc = " @param transa Transposition flag for matrix A: 'N' or 'n' means A is not"]
#[doc = " transposed, and 'T' or 't' means that A is transposed."]
#[doc = " @param transb Transposition flag for matrix B: 'N' or 'n' means B is not"]
#[doc = " transposed, and 'T' or 't' means that B is transposed."]
#[doc = " @param M The M dimension."]
#[doc = " @param N The N dimension."]
#[doc = " @param K The K dimension."]
#[doc = " @param alpha The alpha parameter that is used to scale the product of"]
#[doc = " matrices A and B."]
#[doc = " @param A A pointer to the A matrix data."]
#[doc = " @param lda The leading dimension for the matrix A."]
#[doc = " @param B A pointer to the B matrix data."]
#[doc = " @param ldb The leading dimension for the matrix B."]
#[doc = " @param beta The beta parameter that is used to scale the matrix C."]
#[doc = " @param C A pointer to the C matrix data."]
#[doc = " @param ldc The leading dimension for the matrix C."]
#[doc = " @returns #dnnl_success/#dnnl::status::success on success and a status"]
#[doc = " describing the error otherwise."]
pub fn dnnl_sgemm(
transa: ::libc::c_char,
transb: ::libc::c_char,
M: dnnl_dim_t,
N: dnnl_dim_t,
K: dnnl_dim_t,
alpha: f32,
A: *const f32,
lda: dnnl_dim_t,
B: *const f32,
ldb: dnnl_dim_t,
beta: f32,
C: *mut f32,
ldc: dnnl_dim_t,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Performs integer matrix-matrix multiply on 8-bit unsigned matrix A, 8-bit"]
#[doc = " signed matrix B, and 32-bit signed resulting matrix C."]
#[doc = ""]
#[doc = " The operation is defined as:"]
#[doc = ""]
#[doc = " `C := alpha * (op(A) - A_offset) * (op(B) - B_offset) + beta * C + C_offset`"]
#[doc = ""]
#[doc = " where"]
#[doc = " - `op( X ) = X` or `op( X ) = X**T`,"]
#[doc = " - `alpha` and `beta` are scalars, and"]
#[doc = " - `A`, `B`, and `C` are matrices:"]
#[doc = " - `op( A )` is an `MxK` matrix,"]
#[doc = " - `op( B )` is a `KxN` matrix,"]
#[doc = " - `C` is an `MxN` matrix."]
#[doc = " - `A_offset` is an `MxK` matrix with every element equal to the `ao` value,"]
#[doc = " - `B_offset` is a `KxN` matrix with every element equal to the `bo` value,"]
#[doc = " - `C_offset` is an `MxN` matrix which is defined by the `co` array of size `len`:"]
#[doc = " - if `offsetc = F`: the `len` must be at least `1`,"]
#[doc = " - if `offsetc = C`: the `len` must be at least `max(1, m)`,"]
#[doc = " - if `offsetc = R`: the `len` must be at least `max(1, n)`."]
#[doc = ""]
#[doc = " The matrices are assumed to be stored in row-major order (the elements in"]
#[doc = " each of the matrix rows are contiguous in memory)."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This API does not support XERBLA. Instead, unlike the standard BLAS"]
#[doc = " functions, this one returns a dnnl_status_t value to allow error"]
#[doc = " handling."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " On some architectures saturation may happen during intermediate"]
#[doc = " computations, which would lead to unexpected results. For more"]
#[doc = " details, refer to @ref dev_guide_int8_computations."]
#[doc = ""]
#[doc = " @param transa Transposition flag for matrix A: 'N' or 'n' means A is not"]
#[doc = " transposed, and 'T' or 't' means that A is transposed."]
#[doc = " @param transb Transposition flag for matrix B: 'N' or 'n' means B is not"]
#[doc = " transposed, and 'T' or 't' means that B is transposed."]
#[doc = " @param offsetc Flag specifying how offsets should be applied to matrix C:"]
#[doc = " - 'F' means that the same offset will be applied to each element of"]
#[doc = " the matrix C,"]
#[doc = " - 'C' means that an individual offset will be applied to each element"]
#[doc = " within each column,"]
#[doc = " - 'R' means that an individual offset will be applied to each element"]
#[doc = " within each row."]
#[doc = " @param M The M dimension."]
#[doc = " @param N The N dimension."]
#[doc = " @param K The K dimension."]
#[doc = " @param alpha The alpha parameter that is used to scale the product of"]
#[doc = " matrices A and B."]
#[doc = " @param A A pointer to the A matrix data."]
#[doc = " @param lda The leading dimension for the matrix A."]
#[doc = " @param ao The offset value for the matrix A."]
#[doc = " @param B A pointer to the B matrix data."]
#[doc = " @param ldb The leading dimension for the matrix B."]
#[doc = " @param bo The offset value for the matrix B."]
#[doc = " @param beta The beta parameter that is used to scale the matrix C."]
#[doc = " @param C A pointer to the C matrix data."]
#[doc = " @param ldc The leading dimension for the matrix C."]
#[doc = " @param co An array of offset values for the matrix C. The number of"]
#[doc = " elements in the array depends on the value of @p offsetc."]
#[doc = " @returns #dnnl_success/#dnnl::status::success on success and a status"]
#[doc = " describing the error otherwise."]
pub fn dnnl_gemm_u8s8s32(
transa: ::libc::c_char,
transb: ::libc::c_char,
offsetc: ::libc::c_char,
M: dnnl_dim_t,
N: dnnl_dim_t,
K: dnnl_dim_t,
alpha: f32,
A: *const u8,
lda: dnnl_dim_t,
ao: u8,
B: *const i8,
ldb: dnnl_dim_t,
bo: i8,
beta: f32,
C: *mut i32,
ldc: dnnl_dim_t,
co: *const i32,
) -> dnnl_status_t;
}
extern "C" {
#[doc = " Performs integer matrix-matrix multiply on 8-bit signed matrix A, 8-bit"]
#[doc = " signed matrix B, and 32-bit signed resulting matrix C."]
#[doc = ""]
#[doc = " The operation is defined as:"]
#[doc = ""]
#[doc = " `C := alpha * (op(A) - A_offset) * (op(B) - B_offset) + beta * C + C_offset`"]
#[doc = ""]
#[doc = " where"]
#[doc = " - `op( X ) = X` or `op( X ) = X**T`,"]
#[doc = " - `alpha` and `beta` are scalars, and"]
#[doc = " - `A`, `B`, and `C` are matrices:"]
#[doc = " - `op( A )` is an `MxK` matrix,"]
#[doc = " - `op( B )` is a `KxN` matrix,"]
#[doc = " - `C` is an `MxN` matrix."]
#[doc = " - `A_offset` is an `MxK` matrix with every element equal to the `ao` value,"]
#[doc = " - `B_offset` is a `KxN` matrix with every element equal to the `bo` value,"]
#[doc = " - `C_offset` is an `MxN` matrix which is defined by the `co` array of size `len`:"]
#[doc = " - if `offsetc = F`: the `len` must be at least `1`,"]
#[doc = " - if `offsetc = C`: the `len` must be at least `max(1, m)`,"]
#[doc = " - if `offsetc = R`: the `len` must be at least `max(1, n)`."]
#[doc = ""]
#[doc = " The matrices are assumed to be stored in row-major order (the elements in"]
#[doc = " each of the matrix rows are contiguous in memory)."]
#[doc = ""]
#[doc = " @note"]
#[doc = " This API does not support XERBLA. Instead, unlike the standard BLAS"]
#[doc = " functions, this one returns a dnnl_status_t value to allow error"]
#[doc = " handling."]
#[doc = ""]
#[doc = " @warning"]
#[doc = " On some architectures saturation may happen during intermediate"]
#[doc = " computations, which would lead to unexpected results. For more"]
#[doc = " details, refer to @ref dev_guide_int8_computations."]
#[doc = ""]
#[doc = " @param transa Transposition flag for matrix A: 'N' or 'n' means A is not"]
#[doc = " transposed, and 'T' or 't' means that A is transposed."]
#[doc = " @param transb Transposition flag for matrix B: 'N' or 'n' means B is not"]
#[doc = " transposed, and 'T' or 't' means that B is transposed."]
#[doc = " @param offsetc Flag specifying how offsets should be applied to matrix C:"]
#[doc = " - 'F' means that the same offset will be applied to each element of"]
#[doc = " the matrix C,"]
#[doc = " - 'C' means that an individual offset will be applied to each element"]
#[doc = " within each column,"]
#[doc = " - 'R' means that an individual offset will be applied to each element"]
#[doc = " within each row."]
#[doc = " @param M The M dimension."]
#[doc = " @param N The N dimension."]
#[doc = " @param K The K dimension."]
#[doc = " @param alpha The alpha parameter that is used to scale the product of"]
#[doc = " matrices A and B."]
#[doc = " @param A A pointer to the A matrix data."]
#[doc = " @param lda The leading dimension for the matrix A."]
#[doc = " @param ao The offset value for the matrix A."]
#[doc = " @param B A pointer to the B matrix data."]
#[doc = " @param ldb The leading dimension for the matrix B."]
#[doc = " @param bo The offset value for the matrix B."]
#[doc = " @param beta The beta parameter that is used to scale the matrix C."]
#[doc = " @param C A pointer to the C matrix data."]
#[doc = " @param ldc The leading dimension for the matrix C."]
#[doc = " @param co An array of offset values for the matrix C. The number of"]
#[doc = " elements in the array depends on the value of @p offsetc."]
#[doc = " @returns #dnnl_success/#dnnl::status::success on success and a status"]
#[doc = " describing the error otherwise."]
pub fn dnnl_gemm_s8s8s32(
transa: ::libc::c_char,
transb: ::libc::c_char,
offsetc: ::libc::c_char,
M: dnnl_dim_t,
N: dnnl_dim_t,
K: dnnl_dim_t,
alpha: f32,
A: *const i8,
lda: dnnl_dim_t,
ao: i8,
B: *const i8,
ldb: dnnl_dim_t,
bo: i8,
beta: f32,
C: *mut i32,
ldc: dnnl_dim_t,
co: *const i32,
) -> dnnl_status_t;
}