// rscrypto 0.1.1 — pure Rust cryptography, hardware-accelerated:
// BLAKE3, SHA-2/3, AES-GCM, ChaCha20-Poly1305, Ed25519, X25519, HMAC,
// HKDF, Argon2, CRC. Supports no_std, WASM, and ten CPU architectures.
// See the crate-level documentation for details.
//! Internal macros for CRC variant generation.
//!
//! These macros eliminate boilerplate when defining buffered CRC wrappers
//! and vectored (multi-buffer) dispatch.

/// Fold a CRC kernel over a sequence of buffers, re-resolving the kernel
/// whenever a buffer falls into a different size class than the previous one.
///
/// Shared core of every `*_vectored` / `*_io_slices` API and the
/// `*_dispatch_auto_vectored` helpers in the CRC modules.
///
/// `$field` names the [`KernelFnSet`] member to call (e.g. `crc64_xz`).
/// `$bufs` may be any iterable whose items deref to `&[u8]` (`&[&[u8]]`,
/// `&[IoSlice]`, and so on).
macro_rules! crc_vectored_dispatch {
  ($table:expr, $init:expr, $field:ident, $bufs:expr) => {{
    let lookup = $table;
    let mut state = $init;
    // Pointer identity of the most recently selected kernel set; null until
    // the first non-empty buffer forces a selection, so the seed below is
    // always replaced before it is called.
    let mut active: *const $crate::checksum::kernel_table::KernelFnSet = core::ptr::null();
    // Seeded only to give the binding a concrete fn-pointer type.
    let mut step = lookup.fns[0].$field;

    for chunk in $bufs {
      let chunk: &[u8] = &*chunk;
      if !chunk.is_empty() {
        let fns = lookup.select_fns(chunk.len());
        let fns_ptr: *const $crate::checksum::kernel_table::KernelFnSet = core::ptr::from_ref(fns);
        if fns_ptr != active {
          active = fns_ptr;
          step = fns.$field;
        }
        state = step(state, chunk);
      }
    }

    state
  }};
}

/// Generate the cached dispatch shell for a CRC polynomial.
///
/// This covers the repeated "resolve once under std, otherwise use auto"
/// pattern shared by CRC-16/32/64 variants while leaving the actual kernel
/// wrappers and force-specific architecture helpers local to each module.
///
/// Per invocation this expands to:
/// - `$dispatch_cache` / `$dispatch_vectored_cache` (std only): `OnceCache`
///   statics holding the resolved dispatch function pointers.
/// - `$resolve_dispatch` / `$resolve_dispatch_vectored` (std only): match
///   `$force_expr` against the caller-supplied arms to pick a concrete
///   function pointer.
/// - `$dispatch` / `$dispatch_vectored`: entry points that resolve once and
///   cache under std, and fall back to the auto dispatcher on no_std.
/// - `$resolved_dispatch`: returns the dispatch function pointer itself
///   (for callers that want to hold it rather than call through it).
/// - `$runtime_paths`: pairs a dispatch fn with the active kernel table when
///   auto-selection is in effect; a forced backend gets no table (`None`)
///   because per-size re-selection does not apply.
#[cfg_attr(
  not(any(feature = "crc16", feature = "crc32", feature = "crc64")),
  allow(unused_macros)
)]
macro_rules! define_crc_dispatch {
  (
    word_ty: $word_ty:ty,
    dispatch_fn_ty: $dispatch_fn_ty:ty,
    dispatch_vectored_fn_ty: $dispatch_vectored_fn_ty:ty,
    auto_force: $auto_force:path,
    force_expr: $force_expr:expr,
    active_table: $active_table:expr,
    auto_dispatch: $auto_dispatch:path,
    auto_vectored_dispatch: $auto_vectored_dispatch:path,
    dispatch_cache: $dispatch_cache:ident,
    dispatch_vectored_cache: $dispatch_vectored_cache:ident,
    resolve_dispatch: $resolve_dispatch:ident,
    resolve_dispatch_vectored: $resolve_dispatch_vectored:ident,
    dispatch: $dispatch:ident,
    dispatch_vectored: $dispatch_vectored:ident,
    resolved_dispatch: $resolved_dispatch:ident,
    runtime_paths: $runtime_paths:ident,
    resolve_match: { $($resolve_match:tt)* },
    resolve_vectored_match: { $($resolve_vectored_match:tt)* }
  ) => {
    // One-time caches for the resolved function pointers. Only meaningful
    // under std; no_std builds dispatch through the auto path directly.
    #[cfg(feature = "std")]
    static $dispatch_cache: $crate::backend::cache::OnceCache<$dispatch_fn_ty> =
      $crate::backend::cache::OnceCache::new();
    #[cfg(feature = "std")]
    static $dispatch_vectored_cache: $crate::backend::cache::OnceCache<$dispatch_vectored_fn_ty> =
      $crate::backend::cache::OnceCache::new();

    // Map the forced-backend selector to a concrete dispatch fn; the arms
    // are supplied by the invoking module via `resolve_match`.
    #[cfg(feature = "std")]
    #[inline]
    fn $resolve_dispatch() -> $dispatch_fn_ty {
      match $force_expr {
        $($resolve_match)*
      }
    }

    // Vectored (multi-buffer) counterpart of the resolver above.
    #[cfg(feature = "std")]
    #[inline]
    fn $resolve_dispatch_vectored() -> $dispatch_vectored_fn_ty {
      match $force_expr {
        $($resolve_vectored_match)*
      }
    }

    /// Single-buffer dispatch: resolve once and cache under std, otherwise
    /// call the auto dispatcher every time.
    #[inline]
    fn $dispatch(crc: $word_ty, data: &[u8]) -> $word_ty {
      #[cfg(feature = "std")]
      {
        let dispatch = $dispatch_cache.get_or_init($resolve_dispatch);
        dispatch(crc, data)
      }

      #[cfg(not(feature = "std"))]
      {
        $auto_dispatch(crc, data)
      }
    }

    /// Multi-buffer dispatch; same std/no_std split as the scalar version.
    #[inline]
    fn $dispatch_vectored(crc: $word_ty, bufs: &[&[u8]]) -> $word_ty {
      #[cfg(feature = "std")]
      {
        let dispatch = $dispatch_vectored_cache.get_or_init($resolve_dispatch_vectored);
        dispatch(crc, bufs)
      }

      #[cfg(not(feature = "std"))]
      {
        $auto_vectored_dispatch(crc, bufs)
      }
    }

    /// Return the dispatch function pointer itself rather than calling it.
    #[inline]
    fn $resolved_dispatch() -> $dispatch_fn_ty {
      #[cfg(feature = "std")]
      {
        $dispatch_cache.get_or_init($resolve_dispatch)
      }

      #[cfg(not(feature = "std"))]
      {
        $auto_dispatch
      }
    }

    /// Return `(dispatch fn, kernel table)` for runtime introspection.
    ///
    /// When the force selector is the auto variant, the caller also gets the
    /// active kernel table so it can re-select kernels by buffer size; a
    /// forced backend yields `None` because its kernel is fixed.
    #[inline]
    fn $runtime_paths() -> (
      $dispatch_fn_ty,
      Option<&'static $crate::checksum::kernel_table::KernelTable>,
    ) {
      let force = $force_expr;
      if force == $auto_force {
        ($auto_dispatch, Some($active_table))
      } else {
        ($resolved_dispatch(), None)
      }
    }
  };
}

/// Generate a buffered CRC wrapper for a given inner CRC type.
///
/// This macro creates:
/// - The struct definition with `inner`, `buffer`, and `len` fields
/// - `new()`, `update()`, `finalize()`, and `reset()` methods
/// - `Default` and `Debug` trait implementations
/// - `Drop` implementation that zeroizes the buffer via [`crate::traits::ct::zeroize`]
///
/// # Arguments
///
/// - `$name`: The wrapper type name (e.g., `BufferedCrc64`)
/// - `$inner`: The inner CRC type (e.g., `Crc64`)
/// - `$buffer_size`: The buffer size constant
/// - `$threshold_fn`: Function that returns the SIMD threshold
///   (assumed to be <= `$buffer_size` — not enforced here; TODO confirm at
///   the invocation sites)
#[cfg(feature = "alloc")]
macro_rules! define_buffered_crc {
  (
    $(#[$outer:meta])*
    $vis:vis struct $name:ident<$inner:ty> {
      buffer_size: $buffer_size:expr,
      threshold_fn: $threshold_fn:expr,
    }
  ) => {
    $(#[$outer])*
    $vis struct $name {
      // Running CRC state for all data flushed so far.
      inner: $inner,
      // Staging area for sub-threshold input; boxed so the wrapper stays
      // small and the buffer has a stable address for zeroization.
      buffer: alloc::boxed::Box<[u8; $buffer_size]>,
      // Number of valid bytes in `buffer`; < $buffer_size between calls.
      len: usize,
    }

    impl $name {
      /// Create a new buffered CRC hasher.
      #[must_use]
      pub fn new() -> Self {
        Self {
          inner: <$inner as $crate::Checksum>::new(),
          buffer: alloc::boxed::Box::new([0u8; $buffer_size]),
          len: 0,
        }
      }

      /// Update the CRC with more data.
      ///
      /// Data is buffered internally until enough accumulates for efficient
      /// SIMD processing.
      #[allow(clippy::indexing_slicing)]
      // Safety: All slice indices are bounds-checked by the algorithm:
      // - self.len < buffer_size (invariant maintained by this function)
      // - fill = min(input.len(), space), so input[..fill] and buffer[len..len+fill] are valid
      // - aligned <= input.len() by construction
      pub fn update(&mut self, data: &[u8]) {
        let threshold = $threshold_fn();
        let mut input = data;

        // If we have buffered data, try to fill and flush. After this branch
        // either `input` is empty or the buffer was flushed (self.len == 0):
        // `input` is only non-empty here when fill == space, i.e. the buffer
        // became exactly full, which triggers the flush below.
        if self.len > 0 {
          let space = $buffer_size.strict_sub(self.len);
          let fill = input.len().min(space);
          self.buffer[self.len..self.len.strict_add(fill)].copy_from_slice(&input[..fill]);
          self.len = self.len.strict_add(fill);
          input = &input[fill..];

          // Flush if buffer is full or we have enough for SIMD
          if self.len >= $buffer_size || (self.len >= threshold && input.is_empty()) {
            <$inner as $crate::Checksum>::update(&mut self.inner, &self.buffer[..self.len]);
            self.len = 0;
          }
        }

        // Process large chunks directly, bypassing the staging buffer.
        if input.len() >= threshold {
          // Find largest threshold-aligned prefix; the sub-threshold tail
          // falls through to the remainder buffering below.
          let aligned = input.len().strict_div(threshold).strict_mul(threshold);
          <$inner as $crate::Checksum>::update(&mut self.inner, &input[..aligned]);
          input = &input[aligned..];
        }

        // Buffer remainder. Reaching here with a non-empty `input` implies
        // self.len == 0 (see the fill-and-flush branch), so writing from
        // index 0 cannot clobber pending bytes. input.len() < threshold,
        // which is presumed <= buffer_size (see macro docs).
        if !input.is_empty() {
          self.buffer[..input.len()].copy_from_slice(input);
          self.len = input.len();
        }
      }

      /// Update the CRC with multiple non-contiguous buffers.
      #[inline]
      pub fn update_vectored(&mut self, bufs: &[&[u8]]) {
        for &buf in bufs {
          self.update(buf);
        }
      }

      /// Update the CRC with `std::io::IoSlice` buffers.
      #[cfg(feature = "std")]
      #[inline]
      pub fn update_io_slices(&mut self, bufs: &[std::io::IoSlice<'_>]) {
        for buf in bufs {
          self.update(buf);
        }
      }

      /// Finalize and return the CRC value.
      ///
      /// Any still-buffered bytes are folded in through a clone of the inner
      /// state, so `self` is left untouched and can keep accepting updates.
      #[must_use]
      #[allow(clippy::indexing_slicing)]
      // Safety: self.len < buffer_size (invariant)
      pub fn finalize(&self) -> <$inner as $crate::Checksum>::Output {
        if self.len > 0 {
          // Clone inner to avoid mutating self
          let mut inner = self.inner.clone();
          <$inner as $crate::Checksum>::update(&mut inner, &self.buffer[..self.len]);
          <$inner as $crate::Checksum>::finalize(&inner)
        } else {
          <$inner as $crate::Checksum>::finalize(&self.inner)
        }
      }

      /// Reset the hasher to initial state.
      ///
      /// Discards buffered bytes by resetting `len`; the buffer contents are
      /// stale but unreachable, and are zeroized on drop.
      pub fn reset(&mut self) {
        <$inner as $crate::Checksum>::reset(&mut self.inner);
        self.len = 0;
      }
    }

    // Opaque Debug: deliberately does not print buffer contents, which may
    // hold sensitive input bytes.
    impl core::fmt::Debug for $name {
      fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct(stringify!($name)).finish_non_exhaustive()
      }
    }

    impl Default for $name {
      fn default() -> Self {
        Self::new()
      }
    }

    // Best-effort scrubbing of buffered plaintext on drop; the volatile
    // write and compiler fence keep the zeroization from being optimized out.
    impl Drop for $name {
      fn drop(&mut self) {
        $crate::traits::ct::zeroize(&mut self.buffer[..]);
        // SAFETY: field is a valid, aligned, dereferenceable pointer to initialized memory.
        unsafe { core::ptr::write_volatile(&mut self.len, 0) };
        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
      }
    }
  };
}
}