/// A hash function used by git. This is a sealed trait implemented by
/// `Sha1` and `Sha256`. The fields and methods on this trait are subject
/// to change; consumers should treat the implementing types as opaque.
pub trait GitHashFn: Sealed + Debug + Send + Clone + Eq + 'static {
    /// The type of the output and intermediate state of this hash function.
    /// For sha1 and sha256, this is `[u32; N]` for some `N`. Ideally this
    /// trait would have an associated const for the state length and define
    /// `State` as `[u32; N]`, but that is blocked by rust-lang/rust#60551.
    type State: AsRef<[u32]> + AsMut<[u32]> + Clone + Copy + Debug + Default + Eq + Send;
    /// The datatype representing a block for this algorithm. This must be
    /// layout-equivalent to `[u8; 64]`, although the nominal type used may
    /// differ on a per-library basis due to const generic limitations.
    type Block: AsRef<[u8]> + AsMut<[u8]> + Copy + Debug;

    /// The initial value of the state vector for the given algorithm.
    const INITIAL_STATE: Self::State;
    /// Source code of an OpenCL shader kernel finding hash matches for the
    /// given algorithm. The kernel must define a function
    /// `scatter_padding_and_find_match` (see the trait-level docs for its
    /// expected parameter list).
    const KERNEL: &'static str;

    // Required method
    /// Processes a set of blocks using the given algorithm, updating
    /// `state` in place.
    fn compress(state: &mut Self::State, blocks: &[Self::Block]);
}
Expand description

A hash function used by git. This is a sealed trait implemented by Sha1 and Sha256. The fields and methods on this trait are subject to change. Consumers should pretend that the types implementing the trait are opaque.

Required Associated Types§

source

type State: AsRef<[u32]> + AsMut<[u32]> + Clone + Copy + Debug + Default + Eq + Send

The type of the output and intermediate state of this hash function. For sha1 and sha256, this is [u32; N] for some N. Ideally this trait would just have an associated const for the length of the state vector, and then State would be defined as [u32; N], but this isn’t currently possible due to rust-lang/rust#60551 (https://github.com/rust-lang/rust/issues/60551), which prevents using an associated const as an array length.

source

type Block: AsRef<[u8]> + AsMut<[u8]> + Copy + Debug

The datatype representing a block for this algorithm. This must be layout-equivalent to [u8; 64], although the nominal type that gets used might be different on a per-library basis due to const generic limitations.

Required Associated Constants§

source

const INITIAL_STATE: Self::State

The initial value of the state vector for the given algorithm

source

const KERNEL: &'static str

Source code of an OpenCL shader kernel finding hash matches for the given algorithm. The kernel should have a function scatter_padding_and_find_match, which accepts the following parameters:

  1. A pointer to the data in the desired hash spec (pointing to the appropriate number of bytes for the given hash algorithm)
  2. A pointer to the mask of the desired hash spec
  3. The “base padding specifier” for the current run, which determines which padding will be attempted. The padding specifier used by any given thread is equal to the base specifier plus that thread’s ID.
  4. A pointer to the intermediate state after all static blocks have been hashed
  5. A pointer to the dynamic blocks, encoded as big-endian 32-bit integers
  6. The number of dynamic blocks that are present
  7. A writable pointer to which the shader should write a thread ID if that thread finds an appropriate match.

Required Methods§

source

fn compress(state: &mut Self::State, blocks: &[Self::Block])

Processes a set of blocks using the given algorithm

Object Safety§

This trait is not object safe.

Implementors§

source§

impl GitHashFn for Sha1

§

type State = [u32; 5]

source§

const INITIAL_STATE: Self::State = _

§

type Block = GenericArray<u8, <Sha1Core as BlockSizeUser>::BlockSize>

source§

const KERNEL: &'static str = "// Note: A lot of code is duplicated between this file and sha256_matcher.cl.\nuint16 arrange_padding_block(ulong padding_specifier, uint4 padding_block_ending);\nvoid sha1_compress(__private uint* h, uint16 w);\n\n__constant uint PADDING_CHUNKS[16] = {\n 0x20202020, 0x20202009, 0x20200920, 0x20200909,\n 0x20092020, 0x20092009, 0x20090920, 0x20090909,\n 0x09202020, 0x09202009, 0x09200920, 0x09200909,\n 0x09092020, 0x09092009, 0x09090920, 0x09090909,\n};\n\n__kernel void scatter_padding_and_find_match(\n __global uint* hash_spec_data,\n __global uint* hash_spec_mask,\n __global uint* h,\n ulong base_padding_specifier,\n __global uint16* dynamic_blocks,\n ulong num_dynamic_blocks,\n __global uint* successful_match_receiver\n) {\n uint finalized_hash[5] = {h[0], h[1], h[2], h[3], h[4]};\n sha1_compress(\n finalized_hash,\n arrange_padding_block(\n base_padding_specifier + get_global_id(0),\n dynamic_blocks[0].sCDEF\n )\n );\n for (size_t i = 1; i < num_dynamic_blocks; i++) {\n sha1_compress(finalized_hash, dynamic_blocks[i]);\n }\n\n if (\n (finalized_hash[0] & hash_spec_mask[0]) == hash_spec_data[0] &&\n (finalized_hash[1] & hash_spec_mask[1]) == hash_spec_data[1] &&\n (finalized_hash[2] & hash_spec_mask[2]) == hash_spec_data[2] &&\n (finalized_hash[3] & hash_spec_mask[3]) == hash_spec_data[3] &&\n (finalized_hash[4] & hash_spec_mask[4]) == hash_spec_data[4]\n ) {\n atomic_cmpxchg(successful_match_receiver, UINT_MAX, get_global_id(0));\n }\n}\n\nuint16 arrange_padding_block(ulong padding_specifier, uint4 padding_block_ending) {\n return (uint16)(\n PADDING_CHUNKS[(padding_specifier >> 4) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 0) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 12) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 8) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 20) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 16) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 28) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 24) & 
0xf],\n PADDING_CHUNKS[(padding_specifier >> 36) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 32) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 44) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 40) & 0xf],\n padding_block_ending.s0,\n padding_block_ending.s1,\n padding_block_ending.s2,\n padding_block_ending.s3\n );\n}\n\n/*\nThe sha1 implementation below is mostly adapted from hashcat (https://hashcat.net/hashcat).\n\nThe MIT License (MIT)\n\nCopyright (c) 2015-2020 Jens Steube\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*/\n\n#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))\n#define SHA1_F0o(x,y,z) (bitselect((z), (y), (x)))\n#define SHA1_F2o(x,y,z) (bitselect((x), (y), ((x) ^ (z))))\n\n#define SHA1_STEP_S(f,a,b,c,d,e,x) \\\n{ \\\n e += x + f(b, c, d) + K + rotate(a, 5u); \\\n b = rotate(b, 30u); \\\n}\n\nvoid sha1_compress(__private uint* h, uint16 w) {\n uint a = h[0];\n uint b = h[1];\n uint c = h[2];\n uint d = h[3];\n uint e = h[4];\n\n uint w0_t = w.s0;\n uint w1_t = w.s1;\n uint w2_t = w.s2;\n uint w3_t = w.s3;\n uint w4_t = w.s4;\n uint w5_t = w.s5;\n uint w6_t = w.s6;\n uint w7_t = w.s7;\n uint w8_t = w.s8;\n uint w9_t = w.s9;\n uint wa_t = w.sA;\n uint wb_t = w.sB;\n uint wc_t = w.sC;\n uint wd_t = w.sD;\n uint we_t = w.sE;\n uint wf_t = w.sF;\n\n #define K 0x5a827999\n\n SHA1_STEP_S(SHA1_F0o, a, b, c, d, e, w0_t);\n SHA1_STEP_S(SHA1_F0o, e, a, b, c, d, w1_t);\n SHA1_STEP_S(SHA1_F0o, d, e, a, b, c, w2_t);\n SHA1_STEP_S(SHA1_F0o, c, d, e, a, b, w3_t);\n SHA1_STEP_S(SHA1_F0o, b, c, d, e, a, w4_t);\n SHA1_STEP_S(SHA1_F0o, a, b, c, d, e, w5_t);\n SHA1_STEP_S(SHA1_F0o, e, a, b, c, d, w6_t);\n SHA1_STEP_S(SHA1_F0o, d, e, a, b, c, w7_t);\n SHA1_STEP_S(SHA1_F0o, c, d, e, a, b, w8_t);\n SHA1_STEP_S(SHA1_F0o, b, c, d, e, a, w9_t);\n SHA1_STEP_S(SHA1_F0o, a, b, c, d, e, wa_t);\n SHA1_STEP_S(SHA1_F0o, e, a, b, c, d, wb_t);\n SHA1_STEP_S(SHA1_F0o, d, e, a, b, c, wc_t);\n SHA1_STEP_S(SHA1_F0o, c, d, e, a, b, wd_t);\n SHA1_STEP_S(SHA1_F0o, b, c, d, e, a, we_t);\n SHA1_STEP_S(SHA1_F0o, a, b, c, d, e, wf_t);\n w0_t = rotate((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP_S(SHA1_F0o, e, a, b, c, d, w0_t);\n w1_t = rotate((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP_S(SHA1_F0o, d, e, a, b, c, w1_t);\n w2_t = rotate((wf_t ^ wa_t ^ w4_t ^ w2_t), 
1u); SHA1_STEP_S(SHA1_F0o, c, d, e, a, b, w2_t);\n w3_t = rotate((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP_S(SHA1_F0o, b, c, d, e, a, w3_t);\n\n #undef K\n #define K 0x6ed9eba1\n\n w4_t = rotate((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, w4_t);\n w5_t = rotate((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, w5_t);\n w6_t = rotate((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, w6_t);\n w7_t = rotate((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, w7_t);\n w8_t = rotate((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, w8_t);\n w9_t = rotate((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, w9_t);\n wa_t = rotate((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, wa_t);\n wb_t = rotate((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, wb_t);\n wc_t = rotate((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, wc_t);\n wd_t = rotate((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, wd_t);\n we_t = rotate((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, we_t);\n wf_t = rotate((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, wf_t);\n w0_t = rotate((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, w0_t);\n w1_t = rotate((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, w1_t);\n w2_t = rotate((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, w2_t);\n w3_t = rotate((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, w3_t);\n w4_t = rotate((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, w4_t);\n w5_t = rotate((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, w5_t);\n w6_t = rotate((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, w6_t);\n w7_t = rotate((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); 
SHA1_STEP_S(SHA1_F1, b, c, d, e, a, w7_t);\n\n #undef K\n #define K 0x8f1bbcdc\n\n w8_t = rotate((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP_S(SHA1_F2o, a, b, c, d, e, w8_t);\n w9_t = rotate((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP_S(SHA1_F2o, e, a, b, c, d, w9_t);\n wa_t = rotate((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP_S(SHA1_F2o, d, e, a, b, c, wa_t);\n wb_t = rotate((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP_S(SHA1_F2o, c, d, e, a, b, wb_t);\n wc_t = rotate((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP_S(SHA1_F2o, b, c, d, e, a, wc_t);\n wd_t = rotate((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP_S(SHA1_F2o, a, b, c, d, e, wd_t);\n we_t = rotate((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP_S(SHA1_F2o, e, a, b, c, d, we_t);\n wf_t = rotate((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP_S(SHA1_F2o, d, e, a, b, c, wf_t);\n w0_t = rotate((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP_S(SHA1_F2o, c, d, e, a, b, w0_t);\n w1_t = rotate((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP_S(SHA1_F2o, b, c, d, e, a, w1_t);\n w2_t = rotate((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP_S(SHA1_F2o, a, b, c, d, e, w2_t);\n w3_t = rotate((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP_S(SHA1_F2o, e, a, b, c, d, w3_t);\n w4_t = rotate((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP_S(SHA1_F2o, d, e, a, b, c, w4_t);\n w5_t = rotate((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP_S(SHA1_F2o, c, d, e, a, b, w5_t);\n w6_t = rotate((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP_S(SHA1_F2o, b, c, d, e, a, w6_t);\n w7_t = rotate((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP_S(SHA1_F2o, a, b, c, d, e, w7_t);\n w8_t = rotate((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP_S(SHA1_F2o, e, a, b, c, d, w8_t);\n w9_t = rotate((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP_S(SHA1_F2o, d, e, a, b, c, w9_t);\n wa_t = rotate((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP_S(SHA1_F2o, c, d, e, a, b, wa_t);\n wb_t = rotate((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP_S(SHA1_F2o, b, c, d, e, a, wb_t);\n\n #undef K\n #define K 
0xca62c1d6\n\n wc_t = rotate((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, wc_t);\n wd_t = rotate((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, wd_t);\n we_t = rotate((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, we_t);\n wf_t = rotate((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, wf_t);\n w0_t = rotate((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, w0_t);\n w1_t = rotate((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, w1_t);\n w2_t = rotate((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, w2_t);\n w3_t = rotate((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, w3_t);\n w4_t = rotate((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, w4_t);\n w5_t = rotate((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, w5_t);\n w6_t = rotate((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, w6_t);\n w7_t = rotate((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, w7_t);\n w8_t = rotate((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, w8_t);\n w9_t = rotate((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, w9_t);\n wa_t = rotate((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, wa_t);\n wb_t = rotate((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP_S(SHA1_F1, a, b, c, d, e, wb_t);\n wc_t = rotate((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP_S(SHA1_F1, e, a, b, c, d, wc_t);\n wd_t = rotate((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP_S(SHA1_F1, d, e, a, b, c, wd_t);\n we_t = rotate((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP_S(SHA1_F1, c, d, e, a, b, we_t);\n wf_t = rotate((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP_S(SHA1_F1, b, c, d, e, a, wf_t);\n\n #undef K\n\n h[0] += a;\n h[1] += b;\n h[2] += c;\n h[3] += d;\n h[4] += e;\n}\n"

source§

impl GitHashFn for Sha256

§

type State = [u32; 8]

source§

const INITIAL_STATE: Self::State = _

§

type Block = GenericArray<u8, <CoreWrapper<CtVariableCoreWrapper<Sha256VarCore, UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B0>, B0>, B0>, OidSha256>> as BlockSizeUser>::BlockSize>

source§

const KERNEL: &'static str = "// Note: A lot of code is duplicated between this file and sha1_matcher.cl.\nuint16 arrange_padding_block(ulong padding_specifier, uint4 padding_block_ending);\nvoid sha256_compress(__private uint* h, uint16 w);\n\n__constant uint PADDING_CHUNKS[16] = {\n 0x20202020, 0x20202009, 0x20200920, 0x20200909,\n 0x20092020, 0x20092009, 0x20090920, 0x20090909,\n 0x09202020, 0x09202009, 0x09200920, 0x09200909,\n 0x09092020, 0x09092009, 0x09090920, 0x09090909,\n};\n\n__kernel void scatter_padding_and_find_match(\n __global uint* hash_spec_data,\n __global uint* hash_spec_mask,\n __global uint* h,\n ulong base_padding_specifier,\n __global uint16* dynamic_blocks,\n ulong num_dynamic_blocks,\n __global uint* successful_match_receiver\n) {\n uint finalized_hash[8] = {h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]};\n sha256_compress(\n finalized_hash,\n arrange_padding_block(\n base_padding_specifier + get_global_id(0),\n dynamic_blocks[0].sCDEF\n )\n );\n for (size_t i = 1; i < num_dynamic_blocks; i++) {\n sha256_compress(finalized_hash, dynamic_blocks[i]);\n }\n\n if (\n (finalized_hash[0] & hash_spec_mask[0]) == hash_spec_data[0] &&\n (finalized_hash[1] & hash_spec_mask[1]) == hash_spec_data[1] &&\n (finalized_hash[2] & hash_spec_mask[2]) == hash_spec_data[2] &&\n (finalized_hash[3] & hash_spec_mask[3]) == hash_spec_data[3] &&\n (finalized_hash[4] & hash_spec_mask[4]) == hash_spec_data[4] &&\n (finalized_hash[5] & hash_spec_mask[5]) == hash_spec_data[5] &&\n (finalized_hash[6] & hash_spec_mask[6]) == hash_spec_data[6] &&\n (finalized_hash[7] & hash_spec_mask[7]) == hash_spec_data[7]\n ) {\n atomic_cmpxchg(successful_match_receiver, UINT_MAX, get_global_id(0));\n }\n}\n\nuint16 arrange_padding_block(ulong padding_specifier, uint4 padding_block_ending) {\n return (uint16)(\n PADDING_CHUNKS[(padding_specifier >> 4) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 0) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 12) & 0xf],\n 
PADDING_CHUNKS[(padding_specifier >> 8) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 20) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 16) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 28) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 24) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 36) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 32) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 44) & 0xf],\n PADDING_CHUNKS[(padding_specifier >> 40) & 0xf],\n padding_block_ending.s0,\n padding_block_ending.s1,\n padding_block_ending.s2,\n padding_block_ending.s3\n );\n}\n\n/*\nThe sha256 implementation below is mostly adapted from hashcat (https://hashcat.net/hashcat).\n\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Jens Steube\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*/\n\n__constant uint k_sha256[64] = {\n 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,\n 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,\n 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,\n 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,\n\n 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,\n 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,\n 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,\n 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,\n\n 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,\n 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,\n 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,\n 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,\n\n 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,\n 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,\n 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,\n 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2\n};\n\n#define hc_rotl32_S rotate\n#define SHA256_S0_S(x) (rotate((x), 25u) ^ rotate((x), 14u) ^ ((x) >> 3u))\n#define SHA256_S1_S(x) (rotate((x), 15u) ^ rotate((x), 13u) ^ ((x) >> 10u))\n#define SHA256_S2_S(x) (rotate((x), 30u) ^ rotate((x), 19u) ^ rotate((x), 10u))\n#define SHA256_S3_S(x) (rotate((x), 26u) ^ rotate((x), 21u) ^ rotate((x), 7u))\n\n#define SHA256_F0o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))\n#define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))\n\n#define SHA256_STEP_S(F0,F1,a,b,c,d,e,f,g,h,x,K) \\\n{ \\\n h += K + x + SHA256_S3_S (e) + F1 (e,f,g); \\\n d += h; \\\n h += SHA256_S2_S (a) + F0 (a,b,c); \\\n}\n#define SHA256_EXPAND_S(x,y,z,w) (SHA256_S1_S (x) + y + SHA256_S0_S (z) + w)\n\nvoid sha256_compress(__private uint* hash, uint16 w) {\n uint a = hash[0];\n uint b = hash[1];\n uint c = hash[2];\n uint d = hash[3];\n uint e = hash[4];\n uint f = hash[5];\n 
uint g = hash[6];\n uint h = hash[7];\n\n uint w0_t = w.s0;\n uint w1_t = w.s1;\n uint w2_t = w.s2;\n uint w3_t = w.s3;\n uint w4_t = w.s4;\n uint w5_t = w.s5;\n uint w6_t = w.s6;\n uint w7_t = w.s7;\n uint w8_t = w.s8;\n uint w9_t = w.s9;\n uint wa_t = w.sA;\n uint wb_t = w.sB;\n uint wc_t = w.sC;\n uint wd_t = w.sD;\n uint we_t = w.sE;\n uint wf_t = w.sF;\n\n #define ROUND_EXPAND_S() \\\n { \\\n w0_t = SHA256_EXPAND_S (we_t, w9_t, w1_t, w0_t); \\\n w1_t = SHA256_EXPAND_S (wf_t, wa_t, w2_t, w1_t); \\\n w2_t = SHA256_EXPAND_S (w0_t, wb_t, w3_t, w2_t); \\\n w3_t = SHA256_EXPAND_S (w1_t, wc_t, w4_t, w3_t); \\\n w4_t = SHA256_EXPAND_S (w2_t, wd_t, w5_t, w4_t); \\\n w5_t = SHA256_EXPAND_S (w3_t, we_t, w6_t, w5_t); \\\n w6_t = SHA256_EXPAND_S (w4_t, wf_t, w7_t, w6_t); \\\n w7_t = SHA256_EXPAND_S (w5_t, w0_t, w8_t, w7_t); \\\n w8_t = SHA256_EXPAND_S (w6_t, w1_t, w9_t, w8_t); \\\n w9_t = SHA256_EXPAND_S (w7_t, w2_t, wa_t, w9_t); \\\n wa_t = SHA256_EXPAND_S (w8_t, w3_t, wb_t, wa_t); \\\n wb_t = SHA256_EXPAND_S (w9_t, w4_t, wc_t, wb_t); \\\n wc_t = SHA256_EXPAND_S (wa_t, w5_t, wd_t, wc_t); \\\n wd_t = SHA256_EXPAND_S (wb_t, w6_t, we_t, wd_t); \\\n we_t = SHA256_EXPAND_S (wc_t, w7_t, wf_t, we_t); \\\n wf_t = SHA256_EXPAND_S (wd_t, w8_t, w0_t, wf_t); \\\n }\n\n #define ROUND_STEP_S(i) \\\n { \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha256[i + 0]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha256[i + 1]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha256[i + 2]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha256[i + 3]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha256[i + 4]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha256[i + 5]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha256[i + 6]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, b, c, d, 
e, f, g, h, a, w7_t, k_sha256[i + 7]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha256[i + 8]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha256[i + 9]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha256[i + 10]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha256[i + 11]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha256[i + 12]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha256[i + 13]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, k_sha256[i + 14]); \\\n SHA256_STEP_S (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha256[i + 15]); \\\n }\n\n ROUND_STEP_S (0);\n\n for (int i = 16; i < 64; i += 16) {\n ROUND_EXPAND_S (); ROUND_STEP_S (i);\n }\n\n hash[0] += a;\n hash[1] += b;\n hash[2] += c;\n hash[3] += d;\n hash[4] += e;\n hash[5] += f;\n hash[6] += g;\n hash[7] += h;\n}\n"