pub struct NeonToken { /* private fields */ }
Expand description
Proof that NEON is available.
NEON is available on virtually all AArch64 processors, but requires
runtime detection via summon() unless compiled with -Ctarget-feature=+neon.
Implementations§
Source§impl NeonToken
impl NeonToken
Source§pub fn dangerously_disable_token_process_wide(
disabled: bool,
) -> Result<(), CompileTimeGuaranteedError>
pub fn dangerously_disable_token_process_wide( disabled: bool, ) -> Result<(), CompileTimeGuaranteedError>
Disable this token process-wide for testing and benchmarking.
When disabled, summon() will return None even if the CPU supports
the required features.
Returns Err when all required features are compile-time enabled
(e.g., via -Ctarget-cpu=native), since the compiler has already
elided the runtime checks.
Cascading: Also affects descendants:
NeonAesToken, NeonSha3Token, NeonCrcToken
Source§pub fn manually_disabled() -> Result<bool, CompileTimeGuaranteedError>
pub fn manually_disabled() -> Result<bool, CompileTimeGuaranteedError>
Check if this token has been manually disabled process-wide.
Returns Err when all required features are compile-time enabled.
Trait Implementations§
Source§impl IntoConcreteToken for NeonToken
impl IntoConcreteToken for NeonToken
Source§fn as_x64v1(self) -> Option<X64V1Token>
fn as_x64v1(self) -> Option<X64V1Token>
Try to cast to X64V1Token.
Source§fn as_x64v2(self) -> Option<X64V2Token>
fn as_x64v2(self) -> Option<X64V2Token>
Try to cast to X64V2Token.
Source§fn as_x64v3(self) -> Option<X64V3Token>
fn as_x64v3(self) -> Option<X64V3Token>
Try to cast to X64V3Token.
Source§fn as_x64v4(self) -> Option<X64V4Token>
fn as_x64v4(self) -> Option<X64V4Token>
Try to cast to X64V4Token.
Source§fn as_avx512_modern(self) -> Option<Avx512ModernToken>
fn as_avx512_modern(self) -> Option<Avx512ModernToken>
Try to cast to Avx512ModernToken.
Source§fn as_avx512_fp16(self) -> Option<Avx512Fp16Token>
fn as_avx512_fp16(self) -> Option<Avx512Fp16Token>
Try to cast to Avx512Fp16Token.
Source§fn as_neon_aes(self) -> Option<NeonAesToken>
fn as_neon_aes(self) -> Option<NeonAesToken>
Try to cast to NeonAesToken.
Source§fn as_neon_sha3(self) -> Option<NeonSha3Token>
fn as_neon_sha3(self) -> Option<NeonSha3Token>
Try to cast to NeonSha3Token.
Source§fn as_neon_crc(self) -> Option<NeonCrcToken>
fn as_neon_crc(self) -> Option<NeonCrcToken>
Try to cast to NeonCrcToken.
Source§fn as_wasm128(self) -> Option<Wasm128Token>
fn as_wasm128(self) -> Option<Wasm128Token>
Try to cast to Wasm128Token.
Source§fn as_scalar(self) -> Option<ScalarToken>
fn as_scalar(self) -> Option<ScalarToken>
Try to cast to ScalarToken.
Source§impl SimdToken for NeonToken
impl SimdToken for NeonToken
Source§const TARGET_FEATURES: &'static str = "neon"
const TARGET_FEATURES: &'static str = "neon"
Comma-delimited target features (e.g., "sse,sse2,avx2,fma,bmi1,bmi2,f16c,lzcnt"). Read more
Source§const ENABLE_TARGET_FEATURES: &'static str = "-Ctarget-feature=+neon"
const ENABLE_TARGET_FEATURES: &'static str = "-Ctarget-feature=+neon"
RUSTFLAGS to enable these features at compile time. Read more
Source§const DISABLE_TARGET_FEATURES: &'static str = "-Ctarget-feature=-neon"
const DISABLE_TARGET_FEATURES: &'static str = "-Ctarget-feature=-neon"
RUSTFLAGS to disable these features at compile time. Read more
Source§fn compiled_with() -> Option<bool>
fn compiled_with() -> Option<bool>
Check if this binary was compiled with the required target features enabled. Read more
Source§fn summon() -> Option<NeonToken>
fn summon() -> Option<NeonToken>
Attempt to create a token with runtime feature detection. Read more
Source§unsafe fn forge_token_dangerously() -> NeonToken
unsafe fn forge_token_dangerously() -> NeonToken
👎Deprecated since 0.5.0: Pass tokens through from summon() instead of forging
Create a token without any checks. Read more
Source§fn guaranteed() -> Option<bool>
fn guaranteed() -> Option<bool>
👎Deprecated since 0.6.0: Use compiled_with() instead
Deprecated alias for compiled_with().
Source§impl WidthDispatch for NeonToken
impl WidthDispatch for NeonToken
type F32x4 = f32x4
type F64x2 = f64x2
type I8x16 = i8x16
type U8x16 = u8x16
type I16x8 = i16x8
type U16x8 = u16x8
type I32x4 = i32x4
type U32x4 = u32x4
type I64x2 = i64x2
type U64x2 = u64x2
type F32x8 = f32x8
type F64x4 = f64x4
type I8x32 = i8x32
type U8x32 = u8x32
type I16x16 = i16x16
type U16x16 = u16x16
type I32x8 = i32x8
type U32x8 = u32x8
type I64x4 = i64x4
type U64x4 = u64x4
type F32x16 = [f32x4; 4]
type F64x8 = [f64x2; 4]
type I8x64 = [i8x16; 4]
type U8x64 = [u8x16; 4]
type I16x32 = [i16x8; 4]
type U16x32 = [u16x8; 4]
type I32x16 = [i32x4; 4]
type U32x16 = [u32x4; 4]
type I64x8 = [i64x2; 4]
type U64x8 = [u64x2; 4]
fn f32x4_splat(self, v: f32) -> <NeonToken as WidthDispatch>::F32x4
fn f32x4_zero(self) -> <NeonToken as WidthDispatch>::F32x4
fn f32x4_load(self, data: &[f32; 4]) -> <NeonToken as WidthDispatch>::F32x4
fn f64x2_splat(self, v: f64) -> <NeonToken as WidthDispatch>::F64x2
fn f64x2_zero(self) -> <NeonToken as WidthDispatch>::F64x2
fn f64x2_load(self, data: &[f64; 2]) -> <NeonToken as WidthDispatch>::F64x2
fn i8x16_splat(self, v: i8) -> <NeonToken as WidthDispatch>::I8x16
fn i8x16_zero(self) -> <NeonToken as WidthDispatch>::I8x16
fn i8x16_load(self, data: &[i8; 16]) -> <NeonToken as WidthDispatch>::I8x16
fn u8x16_splat(self, v: u8) -> <NeonToken as WidthDispatch>::U8x16
fn u8x16_zero(self) -> <NeonToken as WidthDispatch>::U8x16
fn u8x16_load(self, data: &[u8; 16]) -> <NeonToken as WidthDispatch>::U8x16
fn i16x8_splat(self, v: i16) -> <NeonToken as WidthDispatch>::I16x8
fn i16x8_zero(self) -> <NeonToken as WidthDispatch>::I16x8
fn i16x8_load(self, data: &[i16; 8]) -> <NeonToken as WidthDispatch>::I16x8
fn u16x8_splat(self, v: u16) -> <NeonToken as WidthDispatch>::U16x8
fn u16x8_zero(self) -> <NeonToken as WidthDispatch>::U16x8
fn u16x8_load(self, data: &[u16; 8]) -> <NeonToken as WidthDispatch>::U16x8
fn i32x4_splat(self, v: i32) -> <NeonToken as WidthDispatch>::I32x4
fn i32x4_zero(self) -> <NeonToken as WidthDispatch>::I32x4
fn i32x4_load(self, data: &[i32; 4]) -> <NeonToken as WidthDispatch>::I32x4
fn u32x4_splat(self, v: u32) -> <NeonToken as WidthDispatch>::U32x4
fn u32x4_zero(self) -> <NeonToken as WidthDispatch>::U32x4
fn u32x4_load(self, data: &[u32; 4]) -> <NeonToken as WidthDispatch>::U32x4
fn i64x2_splat(self, v: i64) -> <NeonToken as WidthDispatch>::I64x2
fn i64x2_zero(self) -> <NeonToken as WidthDispatch>::I64x2
fn i64x2_load(self, data: &[i64; 2]) -> <NeonToken as WidthDispatch>::I64x2
fn u64x2_splat(self, v: u64) -> <NeonToken as WidthDispatch>::U64x2
fn u64x2_zero(self) -> <NeonToken as WidthDispatch>::U64x2
fn u64x2_load(self, data: &[u64; 2]) -> <NeonToken as WidthDispatch>::U64x2
fn f32x8_splat(self, v: f32) -> <NeonToken as WidthDispatch>::F32x8
fn f32x8_zero(self) -> <NeonToken as WidthDispatch>::F32x8
fn f32x8_load(self, data: &[f32; 8]) -> <NeonToken as WidthDispatch>::F32x8
fn f64x4_splat(self, v: f64) -> <NeonToken as WidthDispatch>::F64x4
fn f64x4_zero(self) -> <NeonToken as WidthDispatch>::F64x4
fn f64x4_load(self, data: &[f64; 4]) -> <NeonToken as WidthDispatch>::F64x4
fn i8x32_splat(self, v: i8) -> <NeonToken as WidthDispatch>::I8x32
fn i8x32_zero(self) -> <NeonToken as WidthDispatch>::I8x32
fn i8x32_load(self, data: &[i8; 32]) -> <NeonToken as WidthDispatch>::I8x32
fn u8x32_splat(self, v: u8) -> <NeonToken as WidthDispatch>::U8x32
fn u8x32_zero(self) -> <NeonToken as WidthDispatch>::U8x32
fn u8x32_load(self, data: &[u8; 32]) -> <NeonToken as WidthDispatch>::U8x32
fn i16x16_splat(self, v: i16) -> <NeonToken as WidthDispatch>::I16x16
fn i16x16_zero(self) -> <NeonToken as WidthDispatch>::I16x16
fn i16x16_load(self, data: &[i16; 16]) -> <NeonToken as WidthDispatch>::I16x16
fn u16x16_splat(self, v: u16) -> <NeonToken as WidthDispatch>::U16x16
fn u16x16_zero(self) -> <NeonToken as WidthDispatch>::U16x16
fn u16x16_load(self, data: &[u16; 16]) -> <NeonToken as WidthDispatch>::U16x16
fn i32x8_splat(self, v: i32) -> <NeonToken as WidthDispatch>::I32x8
fn i32x8_zero(self) -> <NeonToken as WidthDispatch>::I32x8
fn i32x8_load(self, data: &[i32; 8]) -> <NeonToken as WidthDispatch>::I32x8
fn u32x8_splat(self, v: u32) -> <NeonToken as WidthDispatch>::U32x8
fn u32x8_zero(self) -> <NeonToken as WidthDispatch>::U32x8
fn u32x8_load(self, data: &[u32; 8]) -> <NeonToken as WidthDispatch>::U32x8
fn i64x4_splat(self, v: i64) -> <NeonToken as WidthDispatch>::I64x4
fn i64x4_zero(self) -> <NeonToken as WidthDispatch>::I64x4
fn i64x4_load(self, data: &[i64; 4]) -> <NeonToken as WidthDispatch>::I64x4
fn u64x4_splat(self, v: u64) -> <NeonToken as WidthDispatch>::U64x4
fn u64x4_zero(self) -> <NeonToken as WidthDispatch>::U64x4
fn u64x4_load(self, data: &[u64; 4]) -> <NeonToken as WidthDispatch>::U64x4
fn f32x16_splat(self, v: f32) -> <NeonToken as WidthDispatch>::F32x16
fn f32x16_zero(self) -> <NeonToken as WidthDispatch>::F32x16
fn f32x16_load(self, data: &[f32; 16]) -> <NeonToken as WidthDispatch>::F32x16
fn f64x8_splat(self, v: f64) -> <NeonToken as WidthDispatch>::F64x8
fn f64x8_zero(self) -> <NeonToken as WidthDispatch>::F64x8
fn f64x8_load(self, data: &[f64; 8]) -> <NeonToken as WidthDispatch>::F64x8
fn i8x64_splat(self, v: i8) -> <NeonToken as WidthDispatch>::I8x64
fn i8x64_zero(self) -> <NeonToken as WidthDispatch>::I8x64
fn i8x64_load(self, data: &[i8; 64]) -> <NeonToken as WidthDispatch>::I8x64
fn u8x64_splat(self, v: u8) -> <NeonToken as WidthDispatch>::U8x64
fn u8x64_zero(self) -> <NeonToken as WidthDispatch>::U8x64
fn u8x64_load(self, data: &[u8; 64]) -> <NeonToken as WidthDispatch>::U8x64
fn i16x32_splat(self, v: i16) -> <NeonToken as WidthDispatch>::I16x32
fn i16x32_zero(self) -> <NeonToken as WidthDispatch>::I16x32
fn i16x32_load(self, data: &[i16; 32]) -> <NeonToken as WidthDispatch>::I16x32
fn u16x32_splat(self, v: u16) -> <NeonToken as WidthDispatch>::U16x32
fn u16x32_zero(self) -> <NeonToken as WidthDispatch>::U16x32
fn u16x32_load(self, data: &[u16; 32]) -> <NeonToken as WidthDispatch>::U16x32
fn i32x16_splat(self, v: i32) -> <NeonToken as WidthDispatch>::I32x16
fn i32x16_zero(self) -> <NeonToken as WidthDispatch>::I32x16
fn i32x16_load(self, data: &[i32; 16]) -> <NeonToken as WidthDispatch>::I32x16
fn u32x16_splat(self, v: u32) -> <NeonToken as WidthDispatch>::U32x16
fn u32x16_zero(self) -> <NeonToken as WidthDispatch>::U32x16
fn u32x16_load(self, data: &[u32; 16]) -> <NeonToken as WidthDispatch>::U32x16
fn i64x8_splat(self, v: i64) -> <NeonToken as WidthDispatch>::I64x8
fn i64x8_zero(self) -> <NeonToken as WidthDispatch>::I64x8
fn i64x8_load(self, data: &[i64; 8]) -> <NeonToken as WidthDispatch>::I64x8
fn u64x8_splat(self, v: u64) -> <NeonToken as WidthDispatch>::U64x8
fn u64x8_zero(self) -> <NeonToken as WidthDispatch>::U64x8
fn u64x8_load(self, data: &[u64; 8]) -> <NeonToken as WidthDispatch>::U64x8
impl Copy for NeonToken
impl Has128BitSimd for NeonToken
impl HasNeon for NeonToken
impl Sealed for NeonToken
Auto Trait Implementations§
impl Freeze for NeonToken
impl RefUnwindSafe for NeonToken
impl Send for NeonToken
impl Sync for NeonToken
impl Unpin for NeonToken
impl UnwindSafe for NeonToken
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more