#ifndef SIMD_H
#define SIMD_H

/*
 * Platform-specific vector operations.
 *
 * Vector8 is a vector of one-byte lanes: 16 bytes on the SIMD
 * implementations, or 8 bytes packed into a uint64 on the fallback.
 * Vector32 is a vector of four 32-bit lanes and exists only on SIMD builds
 * (it has no fallback definition; all uses are guarded by USE_NO_SIMD).
 */
#if (defined(__x86_64__) || defined(_M_AMD64))
/* x86-64 (GCC/Clang or MSVC): SSE2 is part of the architecture baseline */
#include <emmintrin.h>
#define USE_SSE2
typedef __m128i Vector8;
typedef __m128i Vector32;
#elif defined(__aarch64__) && defined(__ARM_NEON)
/* 64-bit ARM with Advanced SIMD (NEON) */
#include <arm_neon.h>
#define USE_NEON
typedef uint8x16_t Vector8;
typedef uint32x4_t Vector32;
#else
/* no SIMD available: emulate a byte vector with ordinary uint64 arithmetic */
#define USE_NO_SIMD
typedef uint64 Vector8;
#endif

/* load/store operations */
static inline void vector8_load(Vector8 *v, const uint8 *s);
#ifndef USE_NO_SIMD
static inline void vector32_load(Vector32 *v, const uint32 *s);
#endif

/* assignment operations */
static inline Vector8 vector8_broadcast(const uint8 c);
#ifndef USE_NO_SIMD
static inline Vector32 vector32_broadcast(const uint32 c);
#endif

/* element-wise comparisons to a scalar */
static inline bool vector8_has(const Vector8 v, const uint8 c);
static inline bool vector8_has_zero(const Vector8 v);
static inline bool vector8_has_le(const Vector8 v, const uint8 c);
static inline bool vector8_is_highbit_set(const Vector8 v);
#ifndef USE_NO_SIMD
static inline bool vector32_is_highbit_set(const Vector32 v);
static inline uint32 vector8_highbit_mask(const Vector8 v);
#endif

/* arithmetic operations */
static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
#ifndef USE_NO_SIMD
static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
#endif

/* comparisons between vectors (no fallback implementations) */
#ifndef USE_NO_SIMD
static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
static inline Vector8 vector8_min(const Vector8 v1, const Vector8 v2);
static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
#endif
/*
 * Load a chunk of memory into the given vector.  The SIMD variants use
 * unaligned loads (_mm_loadu_si128 / vld1q_u8), so s need not be aligned.
 */
static inline void
vector8_load(Vector8 *v, const uint8 *s)
{
#if defined(USE_SSE2)
	*v = _mm_loadu_si128((const __m128i *) s);
#elif defined(USE_NEON)
	*v = vld1q_u8(s);
#else
	/* fallback: copy sizeof(Vector8) bytes into the uint64 representation */
	memcpy(v, s, sizeof(Vector8));
#endif
}
#ifndef USE_NO_SIMD
/*
 * Load a chunk of memory into the given 32-bit-lane vector (SIMD builds
 * only).  As with vector8_load, the load is unaligned-safe.
 */
static inline void
vector32_load(Vector32 *v, const uint32 *s)
{
#ifdef USE_SSE2
	*v = _mm_loadu_si128((const __m128i *) s);
#elif defined(USE_NEON)
	*v = vld1q_u32(s);
#endif
}
#endif
/*
 * Return a vector with all byte lanes set to c.
 */
static inline Vector8
vector8_broadcast(const uint8 c)
{
#if defined(USE_SSE2)
	return _mm_set1_epi8(c);
#elif defined(USE_NEON)
	return vdupq_n_u8(c);
#else
	/* ~0/0xFF is the 0x0101...01 pattern; multiplying replicates c per byte */
	return ~UINT64CONST(0) / 0xFF * c;
#endif
}
#ifndef USE_NO_SIMD
/*
 * Return a vector with all 32-bit lanes set to c (SIMD builds only).
 */
static inline Vector32
vector32_broadcast(const uint32 c)
{
#ifdef USE_SSE2
	return _mm_set1_epi32(c);
#elif defined(USE_NEON)
	return vdupq_n_u32(c);
#endif
}
#endif
/*
 * Return true if any byte lane in the vector equals c.
 */
static inline bool
vector8_has(const Vector8 v, const uint8 c)
{
	bool		result;

	/* pre-compute the answer byte-by-byte so the Assert below can check it */
#ifdef USE_ASSERT_CHECKING
	bool		assert_result = false;

	for (Size i = 0; i < sizeof(Vector8); i++)
	{
		if (((const uint8 *) &v)[i] == c)
		{
			assert_result = true;
			break;
		}
	}
#endif

#if defined(USE_NO_SIMD)
	/* XOR zeroes exactly the bytes equal to c; then look for a zero byte */
	result = vector8_has_zero(v ^ vector8_broadcast(c));
#else
	/* lanes equal to c become all-ones; any set high bit means a match */
	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
#endif

	Assert(assert_result == result);
	return result;
}
/*
 * Return true if any byte lane in the vector is zero.
 *
 * Each build delegates to whichever sibling does the real work without
 * creating a call cycle: the SIMD builds reuse the generic equality search,
 * while the fallback treats zero as the only byte <= 0.
 */
static inline bool
vector8_has_zero(const Vector8 v)
{
#ifndef USE_NO_SIMD
	return vector8_has(v, 0);
#else
	return vector8_has_le(v, 0);
#endif
}
/*
 * Return true if any byte lane in the vector is less than or equal to c.
 */
static inline bool
vector8_has_le(const Vector8 v, const uint8 c)
{
	bool		result = false;

	/* pre-compute the answer byte-by-byte so the Assert below can check it */
#ifdef USE_ASSERT_CHECKING
	bool		assert_result = false;

	for (Size i = 0; i < sizeof(Vector8); i++)
	{
		if (((const uint8 *) &v)[i] <= c)
		{
			assert_result = true;
			break;
		}
	}
#endif

#if defined(USE_NO_SIMD)
	/*
	 * To find bytes <= c we search for bytes < c + 1 with the "HasLessInWord"
	 * bit trick (Stanford Bit Twiddling Hacks): subtracting c + 1 from each
	 * byte borrows into bit 7 exactly when the byte was smaller, and "& ~v"
	 * discards bytes whose own high bit was already set.  The trick is only
	 * valid when c + 1 <= 0x80 and the top bit of the whole word is clear
	 * (no borrow can escape past the most significant byte), hence the guard;
	 * otherwise fall back to checking one byte at a time.
	 */
	if ((int64) v >= 0 && c < 0x80)
		result = (v - vector8_broadcast(c + 1)) & ~v & vector8_broadcast(0x80);
	else
	{
		/* one byte at a time */
		for (Size i = 0; i < sizeof(Vector8); i++)
		{
			if (((const uint8 *) &v)[i] <= c)
			{
				result = true;
				break;
			}
		}
	}
#else
	/*
	 * Saturating subtraction clamps at zero, so a lane of v - c is zero
	 * exactly when the original lane was <= c.
	 */
	result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
#endif

	Assert(assert_result == result);
	return result;
}
/*
 * Return true if the high bit of any byte lane is set.
 */
static inline bool
vector8_is_highbit_set(const Vector8 v)
{
#ifdef USE_SSE2
	/* movemask gathers the 16 per-byte high bits into an int */
	return _mm_movemask_epi8(v) != 0;
#elif defined(USE_NEON)
	/* the unsigned max across lanes exceeds 0x7F iff some high bit is set */
	return vmaxvq_u8(v) > 0x7F;
#else
	return v & vector8_broadcast(0x80);
#endif
}
#ifndef USE_NO_SIMD
/*
 * Like vector8_is_highbit_set, but takes a 32-bit-lane vector.  Both
 * implementations still test the high bit of every *byte*, not of each
 * 32-bit element; that coincides with a per-element test only when each
 * element's bytes are uniform (e.g. vector32_eq results, which are
 * all-ones or all-zeros per lane).
 */
static inline bool
vector32_is_highbit_set(const Vector32 v)
{
#if defined(USE_NEON)
	/* uint32x4_t and uint8x16_t are distinct types; reinterpret the lanes */
	return vector8_is_highbit_set((Vector8) v);
#else
	/* on SSE2, Vector8 and Vector32 are both __m128i; no cast needed */
	return vector8_is_highbit_set(v);
#endif
}
#endif
#ifndef USE_NO_SIMD
/*
 * Return a bitmask formed from the high bit of each byte lane: bit i of the
 * result is the high bit of lane i (16 significant bits on both platforms).
 */
static inline uint32
vector8_highbit_mask(const Vector8 v)
{
#ifdef USE_SSE2
	return (uint32) _mm_movemask_epi8(v);
#elif defined(USE_NEON)
	/*
	 * NEON has no movemask instruction, so emulate it.  Each lane is given a
	 * distinct single-bit value; the pattern repeats after 8 lanes so each
	 * half of the vector produces one byte of the final mask.
	 */
	static const uint8 mask[16] = {
		1 << 0, 1 << 1, 1 << 2, 1 << 3,
		1 << 4, 1 << 5, 1 << 6, 1 << 7,
		1 << 0, 1 << 1, 1 << 2, 1 << 3,
		1 << 4, 1 << 5, 1 << 6, 1 << 7,
	};

	/* arithmetic shift by 7 smears each byte's high bit across the byte */
	uint8x16_t	masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8((int8x16_t) v, 7));

	/*
	 * Interleave the low and high halves so each 16-bit element pairs the
	 * low-half byte with the corresponding high-half byte, then horizontally
	 * add as uint16 to OR-combine (the bit positions are disjoint) into the
	 * 16-bit mask.
	 */
	uint8x16_t	maskedhi = vextq_u8(masked, masked, 8);

	return (uint32) vaddvq_u16((uint16x8_t) vzip1q_u8(masked, maskedhi));
#endif
}
#endif
/*
 * Return the bitwise OR of the two input vectors.
 */
static inline Vector8
vector8_or(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
	return _mm_or_si128(v1, v2);
#elif defined(USE_NEON)
	return vorrq_u8(v1, v2);
#else
	return v1 | v2;
#endif
}
#ifndef USE_NO_SIMD
/*
 * Return the bitwise OR of the two 32-bit-lane vectors (SIMD builds only).
 */
static inline Vector32
vector32_or(const Vector32 v1, const Vector32 v2)
{
#ifdef USE_SSE2
	return _mm_or_si128(v1, v2);
#elif defined(USE_NEON)
	return vorrq_u32(v1, v2);
#endif
}
#endif
#ifndef USE_NO_SIMD
/*
 * Return the per-byte result of subtracting v2 from v1 using saturating
 * (unsigned) arithmetic: lanes clamp at 0 instead of wrapping around.
 */
static inline Vector8
vector8_ssub(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
	return _mm_subs_epu8(v1, v2);
#elif defined(USE_NEON)
	return vqsubq_u8(v1, v2);
#endif
}
#endif
#ifndef USE_NO_SIMD
/*
 * Compare the given vectors byte-wise: lanes that are equal become all-ones
 * (0xFF) in the result, unequal lanes become all-zeros.
 */
static inline Vector8
vector8_eq(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
	return _mm_cmpeq_epi8(v1, v2);
#elif defined(USE_NEON)
	return vceqq_u8(v1, v2);
#endif
}
#endif
#ifndef USE_NO_SIMD
/*
 * Compare the given vectors per 32-bit lane: equal lanes become all-ones
 * (0xFFFFFFFF) in the result, unequal lanes become all-zeros.
 */
static inline Vector32
vector32_eq(const Vector32 v1, const Vector32 v2)
{
#ifdef USE_SSE2
	return _mm_cmpeq_epi32(v1, v2);
#elif defined(USE_NEON)
	return vceqq_u32(v1, v2);
#endif
}
#endif
#ifndef USE_NO_SIMD
/*
 * Return the per-byte unsigned minimum of the two input vectors.
 */
static inline Vector8
vector8_min(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
	return _mm_min_epu8(v1, v2);
#elif defined(USE_NEON)
	return vminq_u8(v1, v2);
#endif
}
#endif
#endif