pub(crate) mod config;
pub(crate) mod kernels;
pub(crate) mod portable;
#[cfg(target_arch = "x86_64")]
mod x86_64;
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "powerpc64")]
mod power;
#[cfg(target_arch = "s390x")]
mod s390x;
#[cfg(target_arch = "riscv64")]
mod riscv64;
#[allow(unused_imports)]
pub use config::{Crc64Config, Crc64Force};
#[cfg(any(test, feature = "std"))]
use crate::checksum::common::reference::crc64_bitwise;
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
use crate::checksum::common::tables::generate_crc64_tables_8;
use crate::checksum::common::tables::{CRC64_NVME_POLY, CRC64_XZ_POLY, generate_crc64_tables_16};
#[cfg(feature = "diag")]
use crate::checksum::diag::{Crc64Polynomial, Crc64SelectionDiag};
#[allow(unused_imports)]
pub(super) use crate::traits::{Checksum, ChecksumCombine};
/// Returns the name of the CRC64-XZ kernel the dispatcher would select for an
/// input of `len` bytes.
///
/// Forced tiers resolve directly to their fixed names; the `Auto` path (and
/// any force variant not applicable to this architecture) defers to the
/// active kernel table.
#[inline]
#[must_use]
pub(crate) fn crc64_xz_selected_kernel_name(len: usize) -> &'static str {
    match config::get().effective_force {
        Crc64Force::Reference => kernels::REFERENCE,
        Crc64Force::Portable => kernels::PORTABLE,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => {
            // Short inputs get the dedicated small-buffer PMULL variant.
            if len < 64 {
                "aarch64/pmull-small"
            } else {
                "aarch64/pmull"
            }
        }
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => "aarch64/pmull-eor3",
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => {
            if len < 64 {
                "aarch64/sve2-pmull-small"
            } else {
                "aarch64/sve2-pmull"
            }
        }
        _ => {
            let table = crate::checksum::kernel_table::active_crc64_table();
            table.select_names(len).crc64_xz_name
        }
    }
}
/// Returns the name of the CRC64-NVME kernel the dispatcher would select for
/// an input of `len` bytes.
///
/// Mirrors [`crc64_xz_selected_kernel_name`]: forced tiers resolve to fixed
/// names, everything else defers to the active kernel table.
#[inline]
#[must_use]
pub(crate) fn crc64_nvme_selected_kernel_name(len: usize) -> &'static str {
    match config::get().effective_force {
        Crc64Force::Reference => kernels::REFERENCE,
        Crc64Force::Portable => kernels::PORTABLE,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => {
            // Short inputs get the dedicated small-buffer PMULL variant.
            if len < 64 {
                "aarch64/pmull-small"
            } else {
                "aarch64/pmull"
            }
        }
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => "aarch64/pmull-eor3",
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => {
            if len < 64 {
                "aarch64/sve2-pmull-small"
            } else {
                "aarch64/sve2-pmull"
            }
        }
        _ => {
            let table = crate::checksum::kernel_table::active_crc64_table();
            table.select_names(len).crc64_nvme_name
        }
    }
}
/// Builds a diagnostic snapshot of the CRC64-XZ kernel selection that would be
/// made for an input of `len` bytes.
#[cfg(feature = "diag")]
#[inline]
#[must_use]
pub(crate) fn diag_crc64_xz(len: usize) -> Crc64SelectionDiag {
    let cfg = config::get();
    let table = crate::checksum::kernel_table::active_crc64_table();
    let reason = if cfg.effective_force == Crc64Force::Auto {
        crate::checksum::diag::SelectionReason::Auto
    } else {
        crate::checksum::diag::SelectionReason::Forced
    };
    Crc64SelectionDiag {
        polynomial: Crc64Polynomial::Xz,
        len,
        arch: crate::platform::arch(),
        reason,
        effective_force: cfg.effective_force,
        policy_family: "dispatch",
        selected_kernel: crc64_xz_selected_kernel_name(len),
        selected_streams: 1,
        // Boundary layout: [0] portable→clmul, [1] small-kernel max, [2] pclmul→vpclmul.
        portable_to_clmul: table.boundaries[0],
        pclmul_to_vpclmul: table.boundaries[2],
        small_kernel_max_bytes: table.boundaries[1],
        use_4x512: false,
        min_bytes_per_lane: usize::MAX,
    }
}
/// Builds a diagnostic snapshot of the CRC64-NVME kernel selection that would
/// be made for an input of `len` bytes.
#[cfg(feature = "diag")]
#[inline]
#[must_use]
pub(crate) fn diag_crc64_nvme(len: usize) -> Crc64SelectionDiag {
    let cfg = config::get();
    let table = crate::checksum::kernel_table::active_crc64_table();
    let reason = if cfg.effective_force == Crc64Force::Auto {
        crate::checksum::diag::SelectionReason::Auto
    } else {
        crate::checksum::diag::SelectionReason::Forced
    };
    Crc64SelectionDiag {
        polynomial: Crc64Polynomial::Nvme,
        len,
        arch: crate::platform::arch(),
        reason,
        effective_force: cfg.effective_force,
        policy_family: "dispatch",
        selected_kernel: crc64_nvme_selected_kernel_name(len),
        selected_streams: 1,
        // Boundary layout: [0] portable→clmul, [1] small-kernel max, [2] pclmul→vpclmul.
        portable_to_clmul: table.boundaries[0],
        pclmul_to_vpclmul: table.boundaries[2],
        small_kernel_max_bytes: table.boundaries[1],
        use_4x512: false,
        min_bytes_per_lane: usize::MAX,
    }
}
/// Byte-slicing lookup tables for the table-driven CRC64 kernels, generated in
/// const context from the XZ and NVME polynomials.
mod kernel_tables {
    use super::*;
    // The 8-way tables are only consumed on architectures with SIMD kernels.
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub static XZ_TABLES_8: [[u64; 256]; 8] = generate_crc64_tables_8(CRC64_XZ_POLY);
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub static NVME_TABLES_8: [[u64; 256]; 8] = generate_crc64_tables_8(CRC64_NVME_POLY);
    // 16-way tables back the portable slice-by-16 kernels on all targets.
    pub static XZ_TABLES_16: [[u64; 256]; 16] = generate_crc64_tables_16(CRC64_XZ_POLY);
    pub static NVME_TABLES_16: [[u64; 256]; 16] = generate_crc64_tables_16(CRC64_NVME_POLY);
}
/// Portable slice-by-16 CRC64-XZ kernel; forced-tier target and test oracle.
#[cfg(any(test, feature = "std"))]
#[cfg_attr(all(test, not(feature = "std")), allow(dead_code))]
fn crc64_xz_portable(crc: u64, data: &[u8]) -> u64 {
    portable::crc64_slice16_xz(crc, data)
}
/// Portable slice-by-16 CRC64-NVME kernel; forced-tier target and test oracle.
#[cfg(any(test, feature = "std"))]
#[cfg_attr(all(test, not(feature = "std")), allow(dead_code))]
fn crc64_nvme_portable(crc: u64, data: &[u8]) -> u64 {
    portable::crc64_slice16_nvme(crc, data)
}
/// Bit-at-a-time reference CRC64-XZ implementation (slowest, simplest tier).
#[cfg(any(test, feature = "std"))]
fn crc64_xz_reference(crc: u64, data: &[u8]) -> u64 {
    crc64_bitwise(CRC64_XZ_POLY, crc, data)
}
/// Bit-at-a-time reference CRC64-NVME implementation.
#[cfg(any(test, feature = "std"))]
fn crc64_nvme_reference(crc: u64, data: &[u8]) -> u64 {
    crc64_bitwise(CRC64_NVME_POLY, crc, data)
}
/// Threshold handed to the buffered wrappers as `threshold_fn` (see
/// `define_buffered_crc!` below).
#[cfg(feature = "alloc")]
const CRC64_BUFFERED_THRESHOLD: usize = 64;
/// Scalar dispatch signature, shared with the crate-wide dispatcher table.
type Crc64DispatchFn = crate::checksum::dispatchers::Crc64Fn;
/// Vectored (multi-buffer) dispatch signature.
#[cfg(feature = "std")]
type Crc64DispatchVectoredFn = fn(u64, &[&[u8]]) -> u64;
/// Applies `kernel` to every non-empty buffer in order, threading the CRC
/// state from one buffer to the next.
#[cfg(feature = "std")]
#[inline]
fn crc64_apply_kernel_vectored(crc: u64, bufs: &[&[u8]], kernel: Crc64DispatchFn) -> u64 {
    bufs.iter()
        .copied()
        .filter(|chunk| !chunk.is_empty())
        .fold(crc, |state, chunk| kernel(state, chunk))
}
/// Auto tier for CRC64-XZ: pick a length-appropriate kernel from the active
/// kernel table and run it.
#[inline]
fn crc64_xz_dispatch_auto(crc: u64, data: &[u8]) -> u64 {
    let fns = crate::checksum::kernel_table::active_crc64_table().select_fns(data.len());
    (fns.crc64_xz)(crc, data)
}
/// Auto tier for vectored CRC64-XZ; the shared macro selects a kernel per
/// buffer from the active table.
#[inline]
fn crc64_xz_dispatch_auto_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc_vectored_dispatch!(crate::checksum::kernel_table::active_crc64_table(), crc, crc64_xz, bufs)
}
/// Auto tier for CRC64-NVME: pick a length-appropriate kernel from the active
/// kernel table and run it.
#[inline]
fn crc64_nvme_dispatch_auto(crc: u64, data: &[u8]) -> u64 {
    let fns = crate::checksum::kernel_table::active_crc64_table().select_fns(data.len());
    (fns.crc64_nvme)(crc, data)
}
/// Auto tier for vectored CRC64-NVME; the shared macro selects a kernel per
/// buffer from the active table.
#[inline]
fn crc64_nvme_dispatch_auto_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc_vectored_dispatch!(
        crate::checksum::kernel_table::active_crc64_table(),
        crc,
        crc64_nvme,
        bufs
    )
}
// Forced-tier adapters (std builds). Each wraps a kernel in the exact `fn`
// signature the dispatch caches store, so a forced tier resolves to a plain
// function pointer.

/// Forced `reference` tier, scalar (CRC64-XZ).
#[cfg(feature = "std")]
#[inline]
fn crc64_xz_dispatch_reference(crc: u64, data: &[u8]) -> u64 {
    crc64_xz_reference(crc, data)
}
/// Forced `portable` tier, scalar (CRC64-XZ).
#[cfg(feature = "std")]
#[inline]
fn crc64_xz_dispatch_portable(crc: u64, data: &[u8]) -> u64 {
    crc64_xz_portable(crc, data)
}
/// Forced `reference` tier, vectored (CRC64-XZ).
#[cfg(feature = "std")]
#[inline]
fn crc64_xz_dispatch_reference_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_xz_reference)
}
/// Forced `portable` tier, vectored (CRC64-XZ).
#[cfg(feature = "std")]
#[inline]
fn crc64_xz_dispatch_portable_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_xz_portable)
}
/// Forced `reference` tier, scalar (CRC64-NVME).
#[cfg(feature = "std")]
#[inline]
fn crc64_nvme_dispatch_reference(crc: u64, data: &[u8]) -> u64 {
    crc64_nvme_reference(crc, data)
}
/// Forced `portable` tier, scalar (CRC64-NVME).
#[cfg(feature = "std")]
#[inline]
fn crc64_nvme_dispatch_portable(crc: u64, data: &[u8]) -> u64 {
    crc64_nvme_portable(crc, data)
}
/// Forced `reference` tier, vectored (CRC64-NVME).
#[cfg(feature = "std")]
#[inline]
fn crc64_nvme_dispatch_reference_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_nvme_reference)
}
/// Forced `portable` tier, vectored (CRC64-NVME).
#[cfg(feature = "std")]
#[inline]
fn crc64_nvme_dispatch_portable_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_nvme_portable)
}
// aarch64 forced-tier kernel selectors: < 64 bytes uses the dedicated
// small-buffer variant, otherwise entry [0] of the kernel array
// (presumably the single-stream variant — TODO confirm against kernels::aarch64).

/// Selects the forced-PMULL CRC64-XZ kernel for a `len`-byte input.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_force_pmull_kernel(len: usize) -> Crc64DispatchFn {
    if len < 64 {
        kernels::aarch64::XZ_PMULL_SMALL
    } else {
        kernels::aarch64::XZ_PMULL[0]
    }
}
/// Selects the forced-PMULL CRC64-NVME kernel for a `len`-byte input.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_force_pmull_kernel(len: usize) -> Crc64DispatchFn {
    if len < 64 {
        kernels::aarch64::NVME_PMULL_SMALL
    } else {
        kernels::aarch64::NVME_PMULL[0]
    }
}
/// Selects the forced-SVE2-PMULL CRC64-XZ kernel for a `len`-byte input.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_force_sve2_pmull_kernel(len: usize) -> Crc64DispatchFn {
    if len < 64 {
        kernels::aarch64::XZ_SVE2_PMULL_SMALL
    } else {
        kernels::aarch64::XZ_SVE2_PMULL[0]
    }
}
/// Selects the forced-SVE2-PMULL CRC64-NVME kernel for a `len`-byte input.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_force_sve2_pmull_kernel(len: usize) -> Crc64DispatchFn {
    if len < 64 {
        kernels::aarch64::NVME_SVE2_PMULL_SMALL
    } else {
        kernels::aarch64::NVME_SVE2_PMULL[0]
    }
}
// aarch64 forced-tier dispatch adapters. The vectored variants pass `64` to
// the selector so every buffer uses the large-buffer kernel regardless of its
// individual length.

/// Forced PMULL tier, scalar (CRC64-XZ).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_pmull(crc: u64, data: &[u8]) -> u64 {
    crc64_xz_force_pmull_kernel(data.len())(crc, data)
}
/// Forced PMULL tier, vectored (CRC64-XZ); always the large-buffer kernel.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_pmull_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_xz_force_pmull_kernel(64))
}
/// Forced PMULL+EOR3 tier, scalar (CRC64-XZ).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_pmull_eor3(crc: u64, data: &[u8]) -> u64 {
    (kernels::aarch64::XZ_PMULL_EOR3[0])(crc, data)
}
/// Forced PMULL+EOR3 tier, vectored (CRC64-XZ).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_pmull_eor3_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, kernels::aarch64::XZ_PMULL_EOR3[0])
}
/// Forced SVE2-PMULL tier, scalar (CRC64-XZ).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_sve2_pmull(crc: u64, data: &[u8]) -> u64 {
    crc64_xz_force_sve2_pmull_kernel(data.len())(crc, data)
}
/// Forced SVE2-PMULL tier, vectored (CRC64-XZ).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_xz_dispatch_sve2_pmull_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_xz_force_sve2_pmull_kernel(64))
}
/// Forced PMULL tier, scalar (CRC64-NVME).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_pmull(crc: u64, data: &[u8]) -> u64 {
    crc64_nvme_force_pmull_kernel(data.len())(crc, data)
}
/// Forced PMULL tier, vectored (CRC64-NVME); always the large-buffer kernel.
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_pmull_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_nvme_force_pmull_kernel(64))
}
/// Forced PMULL+EOR3 tier, scalar (CRC64-NVME).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_pmull_eor3(crc: u64, data: &[u8]) -> u64 {
    (kernels::aarch64::NVME_PMULL_EOR3[0])(crc, data)
}
/// Forced PMULL+EOR3 tier, vectored (CRC64-NVME).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_pmull_eor3_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, kernels::aarch64::NVME_PMULL_EOR3[0])
}
/// Forced SVE2-PMULL tier, scalar (CRC64-NVME).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_sve2_pmull(crc: u64, data: &[u8]) -> u64 {
    crc64_nvme_force_sve2_pmull_kernel(data.len())(crc, data)
}
/// Forced SVE2-PMULL tier, vectored (CRC64-NVME).
#[cfg(all(feature = "std", target_arch = "aarch64"))]
#[inline]
fn crc64_nvme_dispatch_sve2_pmull_vectored(crc: u64, bufs: &[&[u8]]) -> u64 {
    crc64_apply_kernel_vectored(crc, bufs, crc64_nvme_force_sve2_pmull_kernel(64))
}
// Generates the cached CRC64-XZ dispatch machinery (resolvers, cached scalar
// and vectored entry points, and `crc64_xz_runtime_paths`) from the shared
// macro. Forced tiers map to the adapters above; anything else falls back to
// the auto (kernel-table) path.
define_crc_dispatch! {
    word_ty: u64,
    dispatch_fn_ty: Crc64DispatchFn,
    dispatch_vectored_fn_ty: Crc64DispatchVectoredFn,
    auto_force: Crc64Force::Auto,
    force_expr: config::get().effective_force,
    active_table: crate::checksum::kernel_table::active_crc64_table(),
    auto_dispatch: crc64_xz_dispatch_auto,
    auto_vectored_dispatch: crc64_xz_dispatch_auto_vectored,
    dispatch_cache: CRC64_XZ_DISPATCH,
    dispatch_vectored_cache: CRC64_XZ_DISPATCH_VECTORED,
    resolve_dispatch: resolve_crc64_xz_dispatch,
    resolve_dispatch_vectored: resolve_crc64_xz_dispatch_vectored,
    dispatch: crc64_xz_dispatch,
    dispatch_vectored: crc64_xz_dispatch_vectored,
    resolved_dispatch: crc64_xz_resolved_dispatch,
    runtime_paths: crc64_xz_runtime_paths,
    resolve_match: {
        Crc64Force::Reference => crc64_xz_dispatch_reference,
        Crc64Force::Portable => crc64_xz_dispatch_portable,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => crc64_xz_dispatch_pmull,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => crc64_xz_dispatch_pmull_eor3,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => crc64_xz_dispatch_sve2_pmull,
        _ => crc64_xz_dispatch_auto,
    },
    resolve_vectored_match: {
        Crc64Force::Reference => crc64_xz_dispatch_reference_vectored,
        Crc64Force::Portable => crc64_xz_dispatch_portable_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => crc64_xz_dispatch_pmull_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => crc64_xz_dispatch_pmull_eor3_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => crc64_xz_dispatch_sve2_pmull_vectored,
        _ => crc64_xz_dispatch_auto_vectored,
    }
}
// Generates the cached CRC64-NVME dispatch machinery; structurally identical
// to the CRC64-XZ instantiation above.
define_crc_dispatch! {
    word_ty: u64,
    dispatch_fn_ty: Crc64DispatchFn,
    dispatch_vectored_fn_ty: Crc64DispatchVectoredFn,
    auto_force: Crc64Force::Auto,
    force_expr: config::get().effective_force,
    active_table: crate::checksum::kernel_table::active_crc64_table(),
    auto_dispatch: crc64_nvme_dispatch_auto,
    auto_vectored_dispatch: crc64_nvme_dispatch_auto_vectored,
    dispatch_cache: CRC64_NVME_DISPATCH,
    dispatch_vectored_cache: CRC64_NVME_DISPATCH_VECTORED,
    resolve_dispatch: resolve_crc64_nvme_dispatch,
    resolve_dispatch_vectored: resolve_crc64_nvme_dispatch_vectored,
    dispatch: crc64_nvme_dispatch,
    dispatch_vectored: crc64_nvme_dispatch_vectored,
    resolved_dispatch: crc64_nvme_resolved_dispatch,
    runtime_paths: crc64_nvme_runtime_paths,
    resolve_match: {
        Crc64Force::Reference => crc64_nvme_dispatch_reference,
        Crc64Force::Portable => crc64_nvme_dispatch_portable,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => crc64_nvme_dispatch_pmull,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => crc64_nvme_dispatch_pmull_eor3,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => crc64_nvme_dispatch_sve2_pmull,
        _ => crc64_nvme_dispatch_auto,
    },
    resolve_vectored_match: {
        Crc64Force::Reference => crc64_nvme_dispatch_reference_vectored,
        Crc64Force::Portable => crc64_nvme_dispatch_portable_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Pmull => crc64_nvme_dispatch_pmull_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::PmullEor3 => crc64_nvme_dispatch_pmull_eor3_vectored,
        #[cfg(target_arch = "aarch64")]
        Crc64Force::Sve2Pmull => crc64_nvme_dispatch_sve2_pmull_vectored,
        _ => crc64_nvme_dispatch_auto_vectored,
    }
}
/// Streaming CRC-64 hasher for the XZ polynomial.
#[derive(Clone, Copy)]
pub struct Crc64 {
    /// Running CRC state, stored pre-inverted (`crc ^ !0`); `finalize` undoes
    /// the inversion.
    state: u64,
    /// Cached dispatch function used when no auto table is available.
    dispatch: Crc64DispatchFn,
    /// `Some(table)` when auto selection is active; `update` then picks a
    /// per-length kernel straight from the table.
    auto_table: Option<&'static crate::checksum::kernel_table::KernelTable>,
}
/// Alias making the polynomial explicit at call sites.
pub type Crc64Xz = Crc64;
impl Crc64 {
    // Precomputed GF(2) matrix for the XZ polynomial, consumed by
    // `ChecksumCombine::combine`.
    const SHIFT8_MATRIX: crate::checksum::common::combine::Gf2Matrix64 =
        crate::checksum::common::combine::generate_shift8_matrix_64(CRC64_XZ_POLY);
    /// Resumes hashing from a previously finalized CRC value.
    ///
    /// Undoes the output inversion (`crc ^ !0`) so subsequent `update` calls
    /// continue the original stream. Uses the cached dispatch path
    /// (`auto_table` is `None`).
    #[inline]
    #[must_use]
    pub const fn resume(crc: u64) -> Self {
        Self {
            state: crc ^ !0,
            dispatch: crc64_xz_dispatch,
            auto_table: None,
        }
    }
    /// Returns the current CRC64 configuration (requested/effective force).
    #[must_use]
    pub fn config() -> Crc64Config {
        config::get()
    }
    /// Name of the kernel the dispatcher would select for a `len`-byte input.
    #[must_use]
    pub fn kernel_name_for_len(len: usize) -> &'static str {
        crc64_xz_selected_kernel_name(len)
    }
}
impl crate::traits::Checksum for Crc64 {
    const OUTPUT_SIZE: usize = 8;
    type Output = u64;
    /// New hasher with the standard all-ones initial state.
    #[inline]
    fn new() -> Self {
        let (dispatch, auto_table) = crc64_xz_runtime_paths();
        Self {
            state: !0,
            dispatch,
            auto_table,
        }
    }
    /// New hasher seeded with `initial` (pre-inverted internally).
    #[inline]
    fn with_initial(initial: u64) -> Self {
        let (dispatch, auto_table) = crc64_xz_runtime_paths();
        Self {
            state: initial ^ !0,
            dispatch,
            auto_table,
        }
    }
    #[inline]
    fn update(&mut self, data: &[u8]) {
        if let Some(table) = self.auto_table {
            // Auto path: tiny inputs go bytewise, skipping the per-call
            // kernel-table selection; everything else selects by length.
            if data.len() <= 7 {
                self.state = portable::crc64_xz_bytewise(self.state, data);
                return;
            }
            let kernel = table.select_fns(data.len()).crc64_xz;
            self.state = kernel(self.state, data);
        } else {
            // Forced path: use the cached dispatch function.
            self.state = (self.dispatch)(self.state, data);
        }
    }
    #[inline]
    fn update_vectored(&mut self, bufs: &[&[u8]]) {
        self.state = crc64_xz_dispatch_vectored(self.state, bufs);
    }
    /// Undoes the internal inversion to produce the conventional CRC value.
    #[inline]
    fn finalize(&self) -> u64 {
        self.state ^ !0
    }
    #[inline]
    fn reset(&mut self) {
        self.state = !0;
    }
    /// One-shot CRC64-XZ of `data`.
    #[inline]
    fn checksum(data: &[u8]) -> u64 {
        crate::checksum::kernel_table::crc64_xz(data)
    }
}
impl core::fmt::Debug for Crc64 {
    // Intentionally hides the internal state and function pointers.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Crc64").finish_non_exhaustive()
    }
}
impl Default for Crc64 {
    fn default() -> Self {
        <Self as crate::traits::Checksum>::new()
    }
}
impl crate::traits::ChecksumCombine for Crc64 {
    /// Combines two independently computed CRCs as if `crc_b`'s `len_b` bytes
    /// had been appended to `crc_a`'s stream.
    fn combine(crc_a: u64, crc_b: u64, len_b: usize) -> u64 {
        crate::checksum::common::combine::combine_crc64(crc_a, crc_b, len_b, Self::SHIFT8_MATRIX)
    }
}
#[cfg(feature = "alloc")]
impl Crc64 {
    /// Creates a buffered wrapper that coalesces small updates.
    #[must_use]
    pub fn buffered() -> BufferedCrc64 {
        BufferedCrc64::new()
    }
}
/// Streaming CRC-64 hasher for the NVME polynomial.
#[derive(Clone, Copy)]
pub struct Crc64Nvme {
    /// Running CRC state, stored pre-inverted (`crc ^ !0`).
    state: u64,
    /// Cached dispatch function used when no auto table is available.
    dispatch: Crc64DispatchFn,
    /// `Some(table)` when auto selection is active (see `update`).
    auto_table: Option<&'static crate::checksum::kernel_table::KernelTable>,
}
impl Crc64Nvme {
    // Precomputed GF(2) matrix for the NVME polynomial, consumed by
    // `ChecksumCombine::combine`.
    const SHIFT8_MATRIX: crate::checksum::common::combine::Gf2Matrix64 =
        crate::checksum::common::combine::generate_shift8_matrix_64(CRC64_NVME_POLY);
    /// Resumes hashing from a previously finalized CRC value (undoes the
    /// output inversion; uses the cached dispatch path).
    #[inline]
    #[must_use]
    pub const fn resume(crc: u64) -> Self {
        Self {
            state: crc ^ !0,
            dispatch: crc64_nvme_dispatch,
            auto_table: None,
        }
    }
    /// Returns the current CRC64 configuration (requested/effective force).
    #[must_use]
    pub fn config() -> Crc64Config {
        config::get()
    }
    /// Name of the kernel the dispatcher would select for a `len`-byte input.
    #[must_use]
    pub fn kernel_name_for_len(len: usize) -> &'static str {
        crc64_nvme_selected_kernel_name(len)
    }
}
impl crate::traits::Checksum for Crc64Nvme {
    const OUTPUT_SIZE: usize = 8;
    type Output = u64;
    /// New hasher with the standard all-ones initial state.
    #[inline]
    fn new() -> Self {
        let (dispatch, auto_table) = crc64_nvme_runtime_paths();
        Self {
            state: !0,
            dispatch,
            auto_table,
        }
    }
    /// New hasher seeded with `initial` (pre-inverted internally).
    #[inline]
    fn with_initial(initial: u64) -> Self {
        let (dispatch, auto_table) = crc64_nvme_runtime_paths();
        Self {
            state: initial ^ !0,
            dispatch,
            auto_table,
        }
    }
    #[inline]
    fn update(&mut self, data: &[u8]) {
        if let Some(table) = self.auto_table {
            // Auto path: tiny inputs go bytewise, skipping per-call selection.
            if data.len() <= 7 {
                self.state = portable::crc64_nvme_bytewise(self.state, data);
                return;
            }
            let kernel = table.select_fns(data.len()).crc64_nvme;
            self.state = kernel(self.state, data);
        } else {
            // Forced path: use the cached dispatch function.
            self.state = (self.dispatch)(self.state, data);
        }
    }
    #[inline]
    fn update_vectored(&mut self, bufs: &[&[u8]]) {
        self.state = crc64_nvme_dispatch_vectored(self.state, bufs);
    }
    /// Undoes the internal inversion to produce the conventional CRC value.
    #[inline]
    fn finalize(&self) -> u64 {
        self.state ^ !0
    }
    #[inline]
    fn reset(&mut self) {
        self.state = !0;
    }
    /// One-shot CRC64-NVME of `data`.
    #[inline]
    fn checksum(data: &[u8]) -> u64 {
        crate::checksum::kernel_table::crc64_nvme(data)
    }
}
impl core::fmt::Debug for Crc64Nvme {
    // Intentionally hides the internal state and function pointers.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Crc64Nvme").finish_non_exhaustive()
    }
}
impl Default for Crc64Nvme {
    fn default() -> Self {
        <Self as crate::traits::Checksum>::new()
    }
}
impl crate::traits::ChecksumCombine for Crc64Nvme {
    /// Combines two independently computed CRCs as if `crc_b`'s `len_b` bytes
    /// had been appended to `crc_a`'s stream.
    fn combine(crc_a: u64, crc_b: u64, len_b: usize) -> u64 {
        crate::checksum::common::combine::combine_crc64(crc_a, crc_b, len_b, Self::SHIFT8_MATRIX)
    }
}
#[cfg(feature = "alloc")]
impl Crc64Nvme {
    /// Creates a buffered wrapper that coalesces small updates.
    #[must_use]
    pub fn buffered() -> BufferedCrc64Nvme {
        BufferedCrc64Nvme::new()
    }
}
/// Staging-buffer capacity for the buffered CRC wrappers below.
#[cfg(feature = "alloc")]
const BUFFERED_CRC_BUFFER_SIZE: usize = 512;
// Buffered wrapper over `Crc64` (XZ); generated by the shared macro with a
// 512-byte buffer and a 64-byte threshold.
#[cfg(feature = "alloc")]
define_buffered_crc! {
    pub struct BufferedCrc64<Crc64> {
        buffer_size: BUFFERED_CRC_BUFFER_SIZE,
        threshold_fn: || CRC64_BUFFERED_THRESHOLD,
    }
}
// Buffered wrapper over `Crc64Nvme`, identical parameters.
#[cfg(feature = "alloc")]
define_buffered_crc! {
    pub struct BufferedCrc64Nvme<Crc64Nvme> {
        buffer_size: BUFFERED_CRC_BUFFER_SIZE,
        threshold_fn: || CRC64_BUFFERED_THRESHOLD,
    }
}
// Diag-only introspection: exposes the kernel-name probe through the
// crate-wide `KernelIntrospect` trait.
#[cfg(feature = "diag")]
impl crate::checksum::introspect::KernelIntrospect for Crc64 {
    fn kernel_name_for_len(len: usize) -> &'static str {
        Self::kernel_name_for_len(len)
    }
}
#[cfg(feature = "diag")]
impl crate::checksum::introspect::KernelIntrospect for Crc64Nvme {
    fn kernel_name_for_len(len: usize) -> &'static str {
        Self::kernel_name_for_len(len)
    }
}
#[cfg(test)]
mod tests {
extern crate std;
use alloc::vec::Vec;
use super::*;
// Standard CRC "check" input; expected values below are the published check
// values for each polynomial.
const TEST_DATA: &[u8] = b"123456789";
/// CRC-64/XZ check value.
#[test]
fn test_crc64_xz_checksum() {
    let crc = Crc64::checksum(TEST_DATA);
    assert_eq!(crc, 0x995DC9BBDF1939FA);
}
/// CRC-64/NVME check value.
#[test]
fn test_crc64_nvme_checksum() {
    let crc = Crc64Nvme::checksum(TEST_DATA);
    assert_eq!(crc, 0xAE8B14860A799888);
}
/// Two-part streaming must equal the one-shot digest (XZ).
#[test]
fn test_crc64_xz_streaming() {
    let oneshot = Crc64::checksum(TEST_DATA);
    let mut hasher = Crc64::new();
    hasher.update(&TEST_DATA[..5]);
    hasher.update(&TEST_DATA[5..]);
    assert_eq!(hasher.finalize(), oneshot);
}
/// Chunked streaming must equal the one-shot digest (NVME).
#[test]
fn test_crc64_nvme_streaming() {
    let oneshot = Crc64Nvme::checksum(TEST_DATA);
    let mut hasher = Crc64Nvme::new();
    for chunk in TEST_DATA.chunks(3) {
        hasher.update(chunk);
    }
    assert_eq!(hasher.finalize(), oneshot);
}
/// `combine` of split halves must equal the whole-buffer digest (XZ).
#[test]
fn test_crc64_xz_combine() {
    let data = b"hello world";
    let (a, b) = data.split_at(6);
    let crc_a = Crc64::checksum(a);
    let crc_b = Crc64::checksum(b);
    let combined = Crc64::combine(crc_a, crc_b, b.len());
    assert_eq!(combined, Crc64::checksum(data));
}
/// `combine` of split halves must equal the whole-buffer digest (NVME).
#[test]
fn test_crc64_nvme_combine() {
    let data = b"hello world";
    let (a, b) = data.split_at(6);
    let crc_a = Crc64Nvme::checksum(a);
    let crc_b = Crc64Nvme::checksum(b);
    let combined = Crc64Nvme::combine(crc_a, crc_b, b.len());
    assert_eq!(combined, Crc64Nvme::checksum(data));
}
/// Empty input yields 0 for both polynomials.
#[test]
fn test_crc64_empty() {
    let crc = Crc64::checksum(&[]);
    assert_eq!(crc, 0);
    let crc = Crc64Nvme::checksum(&[]);
    assert_eq!(crc, 0);
}
/// `resume(finalized)` must continue the stream exactly where it left off (XZ).
#[test]
fn test_streaming_resume_xz() {
    let data = b"The quick brown fox jumps over the lazy dog";
    let oneshot = Crc64::checksum(data);
    for &split in &[1, data.len() / 4, data.len() / 2, data.len().strict_sub(1)] {
        let (a, b) = data.split_at(split);
        let crc_a = Crc64::checksum(a);
        let mut resumed = Crc64::resume(crc_a);
        resumed.update(b);
        assert_eq!(resumed.finalize(), oneshot, "Crc64Xz resume failed at split={split}");
    }
}
/// `resume(finalized)` must continue the stream exactly where it left off (NVME).
#[test]
fn test_streaming_resume_nvme() {
    let data = b"The quick brown fox jumps over the lazy dog";
    let oneshot = Crc64Nvme::checksum(data);
    for &split in &[1, data.len() / 4, data.len() / 2, data.len().strict_sub(1)] {
        let (a, b) = data.split_at(split);
        let crc_a = Crc64Nvme::checksum(a);
        let mut resumed = Crc64Nvme::resume(crc_a);
        resumed.update(b);
        assert_eq!(resumed.finalize(), oneshot, "Crc64Nvme resume failed at split={split}");
    }
}
/// `combine` holds at every possible split point of the check input.
#[test]
fn test_crc64_combine_all_splits() {
    for split in 0..=TEST_DATA.len() {
        let (a, b) = TEST_DATA.split_at(split);
        let crc_a = Crc64::checksum(a);
        let crc_b = Crc64::checksum(b);
        let combined = Crc64::combine(crc_a, crc_b, b.len());
        assert_eq!(combined, Crc64::checksum(TEST_DATA), "Failed at split {split}");
    }
}
/// The kernel-name probe never returns an empty string.
#[test]
fn test_kernel_probe_not_empty() {
    assert!(!Crc64::kernel_name_for_len(1024).is_empty());
    assert!(!Crc64Nvme::kernel_name_for_len(1024).is_empty());
}
/// Streaming equals one-shot across lengths that straddle kernel block sizes.
#[test]
fn test_crc64_various_lengths() {
    let mut data = [0u8; 512];
    for (i, byte) in data.iter_mut().enumerate() {
        *byte = (i as u8).wrapping_mul(17).wrapping_add(i as u8);
    }
    let test_lengths = [
        0, 1, 7, 8, 9, 15, 16, 17, 31, 32, 48, 63, 64, 65, 100, 127, 128, 200, 255, 256, 300, 400, 512,
    ];
    for &len in &test_lengths {
        let slice = &data[..len];
        let oneshot = Crc64::checksum(slice);
        let mut hasher = Crc64::new();
        hasher.update(slice);
        let streamed = hasher.finalize();
        assert_eq!(oneshot, streamed, "Streaming mismatch at length {len}");
        // Odd chunk size deliberately misaligns kernel-internal block boundaries.
        let mut chunked = Crc64::new();
        for chunk in slice.chunks(37) {
            chunked.update(chunk);
        }
        assert_eq!(oneshot, chunked.finalize(), "Chunked mismatch at length {len}");
    }
}
/// Streaming stays correct when updates cross the portable→SIMD boundary.
#[test]
fn test_crc64_streaming_across_threshold() {
    let table = crate::checksum::kernel_table::active_crc64_table();
    let threshold = table.boundaries[0];
    let size = threshold + 128;
    let data: Vec<u8> = (0..size).map(|i| (i as u8).wrapping_mul(31)).collect();
    let oneshot = Crc64::checksum(&data);
    // Small update followed by a large one.
    let mut hasher = Crc64::new();
    hasher.update(&data[..16]);
    hasher.update(&data[16..]);
    assert_eq!(hasher.finalize(), oneshot, "Small-then-large streaming failed");
    // Large update followed by a small one.
    let mut hasher = Crc64::new();
    hasher.update(&data[..threshold + 64]);
    hasher.update(&data[threshold + 64..]);
    assert_eq!(hasher.finalize(), oneshot, "Large-then-small streaming failed");
    // Many updates below the threshold.
    let mut hasher = Crc64::new();
    for chunk in data.chunks(17) {
        hasher.update(chunk);
    }
    assert_eq!(hasher.finalize(), oneshot, "Many small chunks failed");
}
/// The probed kernel name must be consistent with the platform capabilities
/// and the effective force setting, and never report the opaque "auto" name.
#[test]
fn test_kernel_probe_selection() {
    let name = Crc64::kernel_name_for_len(1024);
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    let _ = name;
    #[cfg(target_arch = "x86_64")]
    {
        let caps = crate::platform::caps();
        let cfg = Crc64::config();
        if cfg.effective_force == Crc64Force::Portable {
            assert_eq!(name, "portable/slice16");
        } else if caps.has(crate::platform::caps::x86::PCLMUL_READY)
            || caps.has(crate::platform::caps::x86::VPCLMUL_READY)
        {
            assert!(name.starts_with("x86_64/"), "expected x86_64 kernel name, got: {name}");
            assert!(
                !name.contains("auto"),
                "expected specific kernel name, not auto: {name}"
            );
        } else {
            assert_eq!(name, "portable/slice16");
        }
    }
    #[cfg(target_arch = "aarch64")]
    {
        let caps = crate::platform::caps();
        let cfg = Crc64::config();
        if cfg.effective_force == Crc64Force::Portable {
            assert_eq!(name, "portable/slice16");
        } else if caps.has(crate::platform::caps::aarch64::PMULL_READY) {
            assert!(
                name.starts_with("aarch64/"),
                "expected aarch64 kernel name, got: {name}"
            );
            assert!(
                !name.contains("auto"),
                "expected specific kernel name, not auto: {name}"
            );
        } else {
            assert_eq!(name, "portable/slice16");
        }
    }
    // Length 0 must also resolve to a concrete kernel name.
    let len0_name = Crc64::kernel_name_for_len(0);
    assert!(
        !len0_name.is_empty(),
        "kernel_name_for_len should return a non-empty name"
    );
    assert!(
        !len0_name.contains("auto"),
        "expected specific kernel name, not auto: {len0_name}"
    );
}
/// Smoke test driven by `RSCRYPTO_CRC64_FORCE`: whichever tier is forced must
/// still produce correct digests, and (when the force is honored) the probed
/// kernel name must match the forced tier. No-op when the variable is unset.
#[test]
fn test_crc64_forced_kernel_smoke_from_env() {
    let Ok(force) = std::env::var("RSCRYPTO_CRC64_FORCE") else {
        return;
    };
    let force = force.trim();
    if force.is_empty() {
        return;
    }
    let cfg = Crc64::config();
    let len = 4096usize;
    let data: Vec<u8> = (0..len).map(|i| (i as u8).wrapping_mul(13)).collect();
    let ours_xz = Crc64::checksum(&data);
    let ours_nvme = Crc64Nvme::checksum(&data);
    // The portable slice-by-16 kernels serve as the ground truth here.
    let portable_xz = portable::crc64_slice16_xz(!0, &data) ^ !0;
    let portable_nvme = portable::crc64_slice16_nvme(!0, &data) ^ !0;
    assert_eq!(ours_xz, portable_xz, "Forced tier produced incorrect CRC64-XZ");
    assert_eq!(ours_nvme, portable_nvme, "Forced tier produced incorrect CRC64-NVME");
    let kernel = Crc64::kernel_name_for_len(len);
    if force.eq_ignore_ascii_case("portable") {
        assert_eq!(cfg.requested_force, Crc64Force::Portable);
        assert_eq!(kernel, "portable/slice16");
    }
    #[cfg(target_arch = "x86_64")]
    {
        if force.eq_ignore_ascii_case("pclmul") {
            assert_eq!(cfg.requested_force, Crc64Force::Pclmul);
            // effective_force may differ if the CPU lacks the feature.
            if cfg.effective_force == Crc64Force::Pclmul {
                assert!(
                    kernel.starts_with("x86_64/pclmul"),
                    "Expected pclmul kernel, got {kernel}"
                );
            }
            return;
        }
        if force.eq_ignore_ascii_case("vpclmul") {
            assert_eq!(cfg.requested_force, Crc64Force::Vpclmul);
            if cfg.effective_force == Crc64Force::Vpclmul {
                assert!(
                    kernel.starts_with("x86_64/vpclmul"),
                    "Expected vpclmul kernel, got {kernel}"
                );
            }
        }
    }
    #[cfg(target_arch = "aarch64")]
    {
        if force.eq_ignore_ascii_case("pmull") {
            assert_eq!(cfg.requested_force, Crc64Force::Pmull);
            if cfg.effective_force == Crc64Force::Pmull {
                assert!(
                    kernel.starts_with("aarch64/pmull"),
                    "Expected pmull kernel, got {kernel}"
                );
            }
        }
        // Accept the spelling variants the config parser recognizes.
        if force.eq_ignore_ascii_case("sve2-pmull")
            || force.eq_ignore_ascii_case("sve2")
            || force.eq_ignore_ascii_case("pmull-sve2")
        {
            assert_eq!(cfg.requested_force, Crc64Force::Sve2Pmull);
            if cfg.effective_force == Crc64Force::Sve2Pmull {
                assert!(
                    kernel.starts_with("aarch64/sve2-pmull"),
                    "Expected sve2-pmull kernel, got {kernel}"
                );
            }
        }
    }
}
/// Buffered wrapper must match the unbuffered one-shot digest (XZ).
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_xz_matches_unbuffered() {
    let data = b"The quick brown fox jumps over the lazy dog";
    let expected = Crc64::checksum(data);
    let mut buffered = BufferedCrc64::new();
    buffered.update(data);
    assert_eq!(buffered.finalize(), expected);
}
/// One-byte-at-a-time updates exercise the internal staging buffer.
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_xz_single_byte_updates() {
    let data = b"123456789";
    let expected = Crc64::checksum(data);
    let mut buffered = BufferedCrc64::new();
    for byte in data.iter() {
        buffered.update(&[*byte]);
    }
    assert_eq!(buffered.finalize(), expected);
}
/// Mixed chunk sizes cross the buffer-size and flush-threshold boundaries.
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_xz_mixed_sizes() {
    let mut data = [0u8; 1024];
    for (i, byte) in data.iter_mut().enumerate() {
        *byte = (i as u8).wrapping_mul(13);
    }
    let expected = Crc64::checksum(&data);
    let mut buffered = BufferedCrc64::new();
    let mut offset = 0;
    let chunk_sizes = [1, 3, 7, 15, 31, 64, 128, 256, 300, 219];
    for &size in &chunk_sizes {
        let end = (offset + size).min(data.len());
        buffered.update(&data[offset..end]);
        offset = end;
        if offset >= data.len() {
            break;
        }
    }
    assert_eq!(buffered.finalize(), expected);
}
/// Buffered wrapper must match the unbuffered one-shot digest (NVME).
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_nvme_matches_unbuffered() {
    let data = b"The quick brown fox jumps over the lazy dog";
    let expected = Crc64Nvme::checksum(data);
    let mut buffered = BufferedCrc64Nvme::new();
    buffered.update(data);
    assert_eq!(buffered.finalize(), expected);
}
/// One-byte-at-a-time updates exercise the internal staging buffer (NVME).
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_nvme_single_byte_updates() {
    let data = b"123456789";
    let expected = Crc64Nvme::checksum(data);
    let mut buffered = BufferedCrc64Nvme::new();
    for byte in data.iter() {
        buffered.update(&[*byte]);
    }
    assert_eq!(buffered.finalize(), expected);
}
/// `reset` must discard both buffered bytes and accumulated state.
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_reset() {
    let data1 = b"hello";
    let data2 = b"world";
    let mut buffered = BufferedCrc64::new();
    buffered.update(data1);
    buffered.reset();
    buffered.update(data2);
    assert_eq!(buffered.finalize(), Crc64::checksum(data2));
}
/// Freshly constructed buffered hasher equals the empty-input digest.
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_empty() {
    let buffered = BufferedCrc64::new();
    assert_eq!(buffered.finalize(), Crc64::checksum(&[]));
}
/// `finalize` must not consume or mutate state.
#[cfg(feature = "alloc")]
#[test]
fn test_buffered_crc64_finalize_is_idempotent() {
    let data = b"test data";
    let mut buffered = BufferedCrc64::new();
    buffered.update(data);
    let crc1 = buffered.finalize();
    let crc2 = buffered.finalize();
    assert_eq!(crc1, crc2, "finalize should be idempotent");
}
mod cross_check {
use alloc::{vec, vec::Vec};
use super::*;
use crate::checksum::common::{
reference::crc64_bitwise,
tables::{CRC64_NVME_POLY, CRC64_XZ_POLY},
tests::{STREAMING_CHUNK_SIZES, TEST_LENGTHS},
};
/// Deterministic pseudo-random test data (Knuth/golden-ratio multiplicative mix).
fn generate_test_data(len: usize) -> Vec<u8> {
    (0..len)
        .map(|i| {
            let i = i as u64;
            ((i.wrapping_mul(2654435761) ^ i.wrapping_mul(0x9E3779B97F4A7C15)) & 0xFF) as u8
        })
        .collect()
}
/// Bitwise reference CRC-64/XZ with the standard init/output inversions applied.
fn reference_xz(data: &[u8]) -> u64 {
    crc64_bitwise(CRC64_XZ_POLY, !0u64, data) ^ !0u64
}
/// Bitwise reference CRC-64/NVME with the standard init/output inversions applied.
fn reference_nvme(data: &[u8]) -> u64 {
    crc64_bitwise(CRC64_NVME_POLY, !0u64, data) ^ !0u64
}
/// Dispatcher output must match the bitwise reference at every shared length (XZ).
#[test]
fn cross_check_xz_all_lengths() {
    for &len in TEST_LENGTHS {
        let data = generate_test_data(len);
        let reference = reference_xz(&data);
        let actual = Crc64::checksum(&data);
        assert_eq!(
            actual, reference,
            "CRC64-XZ mismatch at len={len}: actual={actual:#018X}, reference={reference:#018X}"
        );
    }
}
/// Dispatcher output must match the bitwise reference at every shared length (NVME).
#[test]
fn cross_check_nvme_all_lengths() {
    for &len in TEST_LENGTHS {
        let data = generate_test_data(len);
        let reference = reference_nvme(&data);
        let actual = Crc64Nvme::checksum(&data);
        assert_eq!(
            actual, reference,
            "CRC64-NVME mismatch at len={len}: actual={actual:#018X}, reference={reference:#018X}"
        );
    }
}
/// Exhaustive single-byte inputs (XZ).
#[test]
fn cross_check_xz_all_single_bytes() {
    for byte in 0u8..=255 {
        let data = [byte];
        let reference = reference_xz(&data);
        let actual = Crc64::checksum(&data);
        assert_eq!(actual, reference, "CRC64-XZ single-byte mismatch for byte={byte:#04X}");
    }
}
/// Exhaustive single-byte inputs (NVME).
#[test]
fn cross_check_nvme_all_single_bytes() {
    for byte in 0u8..=255 {
        let data = [byte];
        let reference = reference_nvme(&data);
        let actual = Crc64Nvme::checksum(&data);
        assert_eq!(
            actual, reference,
            "CRC64-NVME single-byte mismatch for byte={byte:#04X}"
        );
    }
}
/// Streaming over every shared chunk size must match the bitwise reference (XZ).
#[test]
fn cross_check_xz_streaming_all_chunk_sizes() {
    let data = generate_test_data(4096);
    let reference = reference_xz(&data);
    for &chunk_size in STREAMING_CHUNK_SIZES {
        let mut hasher = Crc64::new();
        for chunk in data.chunks(chunk_size) {
            hasher.update(chunk);
        }
        let actual = hasher.finalize();
        assert_eq!(
            actual, reference,
            "CRC64-XZ streaming mismatch with chunk_size={chunk_size}"
        );
    }
}
/// Streaming over every shared chunk size must match the bitwise reference (NVME).
#[test]
fn cross_check_nvme_streaming_all_chunk_sizes() {
    let data = generate_test_data(4096);
    let reference = reference_nvme(&data);
    for &chunk_size in STREAMING_CHUNK_SIZES {
        let mut hasher = Crc64Nvme::new();
        for chunk in data.chunks(chunk_size) {
            hasher.update(chunk);
        }
        let actual = hasher.finalize();
        assert_eq!(
            actual, reference,
            "CRC64-NVME streaming mismatch with chunk_size={chunk_size}"
        );
    }
}
/// `Crc64::combine` must reproduce the whole-message CRC from the CRCs of
/// the two halves: exhaustively over a 64-byte prefix, then at strategic
/// boundary offsets over the full 1 KiB buffer.
#[test]
fn cross_check_xz_combine_all_splits() {
    let data = generate_test_data(1024);
    let full_expected = reference_xz(&data);
    // Exhaustive split positions over a small prefix keep the test fast.
    let prefix = &data[..64];
    let prefix_expected = reference_xz(prefix);
    for split in 0..=prefix.len() {
        let (left, right) = prefix.split_at(split);
        let merged = Crc64::combine(Crc64::checksum(left), Crc64::checksum(right), right.len());
        assert_eq!(merged, prefix_expected, "CRC64-XZ combine mismatch at split={split}");
    }
    // Splits around power-of-two / kernel-boundary offsets over the full buffer.
    for &split in &[0usize, 1, 15, 16, 17, 63, 64, 65, 127, 128, 129, 255, 256, 512, 1024] {
        if split > data.len() {
            continue;
        }
        let (left, right) = data.split_at(split);
        let merged = Crc64::combine(Crc64::checksum(left), Crc64::checksum(right), right.len());
        assert_eq!(
            merged, full_expected,
            "CRC64-XZ combine mismatch at strategic split={split}"
        );
    }
}
/// `Crc64Nvme::combine` must reproduce the whole-message CRC from the CRCs
/// of the two halves: exhaustively over a 64-byte prefix, then at strategic
/// boundary offsets over the full 1 KiB buffer.
#[test]
fn cross_check_nvme_combine_all_splits() {
    let data = generate_test_data(1024);
    let full_expected = reference_nvme(&data);
    // Exhaustive split positions over a small prefix keep the test fast.
    let prefix = &data[..64];
    let prefix_expected = reference_nvme(prefix);
    for split in 0..=prefix.len() {
        let (left, right) = prefix.split_at(split);
        let merged =
            Crc64Nvme::combine(Crc64Nvme::checksum(left), Crc64Nvme::checksum(right), right.len());
        assert_eq!(merged, prefix_expected, "CRC64-NVME combine mismatch at split={split}");
    }
    // Splits around power-of-two / kernel-boundary offsets over the full buffer.
    for &split in &[0usize, 1, 15, 16, 17, 63, 64, 65, 127, 128, 129, 255, 256, 512, 1024] {
        if split > data.len() {
            continue;
        }
        let (left, right) = data.split_at(split);
        let merged =
            Crc64Nvme::combine(Crc64Nvme::checksum(left), Crc64Nvme::checksum(right), right.len());
        assert_eq!(
            merged, full_expected,
            "CRC64-NVME combine mismatch at strategic split={split}"
        );
    }
}
/// CRC64-XZ over 1 KiB windows starting at each of the first 16 byte
/// offsets, to exercise misaligned loads in SIMD kernels.
#[test]
fn cross_check_xz_unaligned_offsets() {
    // Deterministic pattern; `as u8` truncation equals the original `& 0xFF`.
    let backing: Vec<u8> = (0..4096 + 64)
        .map(|i| (i as u64).wrapping_mul(17) as u8)
        .collect();
    for offset in 0..16 {
        let window = &backing[offset..offset + 1024];
        assert_eq!(
            Crc64::checksum(window),
            reference_xz(window),
            "CRC64-XZ unaligned mismatch at offset={offset}"
        );
    }
}
/// CRC64-NVME over 1 KiB windows starting at each of the first 16 byte
/// offsets, to exercise misaligned loads in SIMD kernels.
#[test]
fn cross_check_nvme_unaligned_offsets() {
    // Deterministic pattern; `as u8` truncation equals the original `& 0xFF`.
    let backing: Vec<u8> = (0..4096 + 64)
        .map(|i| (i as u64).wrapping_mul(17) as u8)
        .collect();
    for offset in 0..16 {
        let window = &backing[offset..offset + 1024];
        assert_eq!(
            Crc64Nvme::checksum(window),
            reference_nvme(window),
            "CRC64-NVME unaligned mismatch at offset={offset}"
        );
    }
}
/// Degenerate streaming: one `update` call per byte must still produce the
/// reference CRC64-XZ value.
#[test]
fn cross_check_xz_byte_at_a_time_streaming() {
    let input = generate_test_data(256);
    let expected = reference_xz(&input);
    let mut hasher = Crc64::new();
    input.iter().for_each(|b| hasher.update(&[*b]));
    assert_eq!(hasher.finalize(), expected, "CRC64-XZ byte-at-a-time streaming mismatch");
}
/// Degenerate streaming: one `update` call per byte must still produce the
/// reference CRC64-NVME value.
#[test]
fn cross_check_nvme_byte_at_a_time_streaming() {
    let input = generate_test_data(256);
    let expected = reference_nvme(&input);
    let mut hasher = Crc64Nvme::new();
    input.iter().for_each(|b| hasher.update(&[*b]));
    assert_eq!(hasher.finalize(), expected, "CRC64-NVME byte-at-a-time streaming mismatch");
}
/// The standalone reference kernels, driven with the standard `!0` init and
/// final XOR, must agree with the local `reference_*` helpers.
#[test]
fn cross_check_reference_kernel_accessible() {
    let data = generate_test_data(1024);
    assert_eq!(
        crc64_xz_reference(!0u64, &data) ^ !0u64,
        reference_xz(&data),
        "XZ reference kernel mismatch"
    );
    assert_eq!(
        crc64_nvme_reference(!0u64, &data) ^ !0u64,
        reference_nvme(&data),
        "NVME reference kernel mismatch"
    );
}
/// The portable slice-by-16 kernels (with the standard `!0` init and final
/// XOR applied) must match the bitwise reference at every test length.
#[test]
fn cross_check_portable_matches_reference() {
    for &len in TEST_LENGTHS {
        let data = generate_test_data(len);
        assert_eq!(
            portable::crc64_slice16_xz(!0u64, &data) ^ !0u64,
            reference_xz(&data),
            "XZ portable mismatch at len={len}"
        );
        assert_eq!(
            portable::crc64_slice16_nvme(!0u64, &data) ^ !0u64,
            reference_nvme(&data),
            "NVME portable mismatch at len={len}"
        );
    }
}
}
}
// Instantiate the crate-wide CRC property-test macro once per CRC64 variant
// (XZ and NVME), generating a shared suite of tests under each module name.
#[cfg(test)]
crate::define_crc_property_tests!(crc64_xz_props, Crc64);
#[cfg(test)]
crate::define_crc_property_tests!(crc64_nvme_props, Crc64Nvme);