#include "zbuild.h"
#include "arch_functions.h"
#include <vecintrin.h>

typedef unsigned char uv16qi __attribute__((vector_size(16)));
typedef unsigned int uv4si __attribute__((vector_size(16)));
typedef unsigned long long uv2di __attribute__((vector_size(16)));

static uint32_t crc32_le_vgfm_16(uint32_t crc, const uint8_t *buf, size_t len) {
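    /*
     * The constant block below contains reduction constants used to fold
     * and process chunks of the input stream in parallel. For bit-reflected
     * CRC-32, they are precomputed according to these definitions:
     *
     *     R1 = [(x^(4*128+32) mod P'(x) << 32)]' << 1
     *     R2 = [(x^(4*128-32) mod P'(x) << 32)]' << 1
     *     R3 = [(x^(128+32) mod P'(x) << 32)]' << 1
     *     R4 = [(x^(128-32) mod P'(x) << 32)]' << 1
     *     R5 = [(x^64 mod P'(x) << 32)]' << 1
     *
     * The bit-reflected Barrett reduction constant u' is the bit reversal
     * of floor(x^64 / P(x)), where P(x) is the CRC-32 polynomial 0x04C11DB7
     * and P'(x) is its bit-reflected form 0xEDB88320.
     */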
    const uv16qi perm_le2be = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};  /* BE->LE mask */
    const uv2di r2r1 = {0x1C6E41596, 0x154442BD4};  /* R2, R1 */
    const uv2di r4r3 = {0x0CCAA009E, 0x1751997D0};  /* R4, R3 */
    const uv2di r5 = {0, 0x163CD6124};              /* R5 */
    const uv2di ru_poly = {0, 0x1F7011641};         /* u' */
    const uv2di crc_poly = {0, 0x1DB710641};        /* P'(x) << 1 */
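
    /*
     * Load the initial CRC value into the rightmost word of v0; it is
     * XORed into the first data chunk below.
     */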
    uv2di v0 = {0, 0};
    v0 = (uv2di)vec_insert(crc, (uv4si)v0, 3);
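
    /* Load a 64-byte data chunk, byte-reflected, and XOR in the CRC. */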
    uv2di v1 = vec_perm(((uv2di *)buf)[0], ((uv2di *)buf)[0], perm_le2be);
    uv2di v2 = vec_perm(((uv2di *)buf)[1], ((uv2di *)buf)[1], perm_le2be);
    uv2di v3 = vec_perm(((uv2di *)buf)[2], ((uv2di *)buf)[2], perm_le2be);
    uv2di v4 = vec_perm(((uv2di *)buf)[3], ((uv2di *)buf)[3], perm_le2be);

    v1 ^= v0;
    buf += 64;
    len -= 64;

    while (len >= 64) {
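        /* Load the next 64-byte data chunk. */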
        uv16qi part1 = vec_perm(((uv16qi *)buf)[0], ((uv16qi *)buf)[0], perm_le2be);
        uv16qi part2 = vec_perm(((uv16qi *)buf)[1], ((uv16qi *)buf)[1], perm_le2be);
        uv16qi part3 = vec_perm(((uv16qi *)buf)[2], ((uv16qi *)buf)[2], perm_le2be);
        uv16qi part4 = vec_perm(((uv16qi *)buf)[3], ((uv16qi *)buf)[3], perm_le2be);
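
        /*
         * Perform a GF(2) multiplication of the doublewords in v1 with the
         * reduction constants R1 and R2, fold (accumulate) the result with
         * the next data chunk, and store it back in v1. Repeat for v2, v3,
         * and v4.
         */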
        v1 = (uv2di)vec_gfmsum_accum_128(r2r1, v1, part1);
        v2 = (uv2di)vec_gfmsum_accum_128(r2r1, v2, part2);
        v3 = (uv2di)vec_gfmsum_accum_128(r2r1, v3, part3);
        v4 = (uv2di)vec_gfmsum_accum_128(r2r1, v4, part4);

        buf += 64;
        len -= 64;
    }
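
    /*
     * Fold v1 to v4 into a single 128-bit value in v1: multiply v1 with
     * the constants R3 and R4 and accumulate the next 128-bit chunk until
     * only one value remains.
     */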
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v3);
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v4);

    while (len >= 16) {
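        /* Load and fold the next 16-byte data chunk. */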
        v2 = vec_perm(*(uv2di *)buf, *(uv2di *)buf, perm_le2be);
        v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);

        buf += 16;
        len -= 16;
    }
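
    /*
     * Set up a vector register for byte shifts. The shift amount must be
     * loaded in bits 1-4 of byte element 7 of the register: 0x40 selects
     * a shift of 8 bytes, 0x20 a shift of 4 bytes.
     */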
    uv16qi v9 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    v9 = vec_insert((unsigned char)0x40, v9, 7);
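
    /*
     * Shift r4r3 right by 8 bytes to move R4 into the rightmost doubleword
     * of v0, and set the leftmost doubleword to 0x1.
     */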
    v0 = vec_srb(r4r3, (uv2di)v9);
    v0[0] = 1;
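
    /*
     * Compute the GF(2) product of v1 and v0: the rightmost doubleword of
     * v1 is multiplied with R4, the leftmost doubleword is multiplied by
     * 0x1 to retain its value, and the two products are XORed.
     */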
    v1 = (uv2di)vec_gfmsum_128(v0, v1);
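
    /*
     * Now do the final 32-bit fold: multiply the rightmost word of v1 with
     * R5 and XOR the result with the remaining bits of v1. To achieve this
     * with a single vec_gfmsum_accum_128, shift v1 right by one word into
     * v2, then unpack v1 so its low word lands in the rightmost doubleword.
     * The leftmost doubleword of r5 is zero, so the leftmost product is
     * ignored.
     */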
    v9 = vec_insert((unsigned char)0x20, v9, 7);
    v2 = vec_srb(v1, (uv2di)v9);
    v1 = vec_unpackl((uv4si)v1);  /* Split the rightmost doubleword across both doubleword elements */
    v1 = (uv2di)vec_gfmsum_accum_128(r5, v1, (uv16qi)v2);
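
    /*
     * Apply a Barrett reduction to compute the final 32-bit CRC value of
     * R(x) mod P(x):
     *
     *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u'
     *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P'(x)
     *    3. C(x)  = R(x) XOR T2(x) mod x^32
     *
     * The leftmost doubleword of the registers holding u' and P'(x) is
     * zero, so the leftmost intermediate product is zero and does not
     * contribute to the result.
     */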
    /* T1(x) = floor( R(x) / x^32 ) GF2MUL u' */
    v2 = vec_unpackl((uv4si)v1);
    v2 = (uv2di)vec_gfmsum_128(ru_poly, v2);

    /* T2(x) = floor( T1(x) / x^32 ) GF2MUL P'(x), then XOR with R(x) */
    v2 = vec_unpackl((uv4si)v2);
    v2 = (uv2di)vec_gfmsum_accum_128(crc_poly, v2, (uv16qi)v1);

    /* The final CRC value is in word element 2 of v2. */
    return ((uv4si)v2)[2];
}
#define VX_MIN_LEN 64
#define VX_ALIGNMENT 16L
#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
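
/*
 * crc32_s390_vx: compute the CRC-32 of a buffer using the z/Architecture
 * vector extension facility. The misaligned head and the sub-16-byte tail
 * of the buffer are handled by the scalar braid implementation.
 */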
uint32_t Z_INTERNAL crc32_s390_vx(uint32_t crc, const unsigned char *buf, size_t len) {
    size_t prealign, aligned, remaining;

    /* Buffers too short to yield VX_MIN_LEN aligned bytes use the scalar code. */
    if (len < VX_MIN_LEN + VX_ALIGN_MASK)
        return crc32_braid(crc, buf, len);

    if ((uintptr_t)buf & VX_ALIGN_MASK) {
        /* Process the bytes up to the first 16-byte boundary with the scalar code. */
        prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);
        len -= prealign;
        crc = crc32_braid(crc, buf, prealign);
        buf += prealign;
    }
    aligned = len & ~VX_ALIGN_MASK;
    remaining = len & VX_ALIGN_MASK;

    /* The vector kernel operates on the one's complement of the CRC. */
    crc = crc32_le_vgfm_16(crc ^ 0xffffffff, buf, aligned) ^ 0xffffffff;

    if (remaining)
        crc = crc32_braid(crc, buf + aligned, remaining);

    return crc;
}