1#[cfg(target_arch = "x86_64")]
17use std::arch::x86_64::*;
18use core::arch::x86_64::_mm_prefetch;
19use core::arch::x86_64::_MM_HINT_T0;
20
21use crate::avx2_inline::{ add, loadu, or, rota_lf, rota_rg, setone_i32, setzero, xor, horiz_add_avx2 };
22use std::hint::black_box;
23
/// One ARX-style mixing round, updating state register `$ss` in place:
///   1. add the message vector `$ymm_a1` lane-wise,
///   2. combine a left-rotate by `$lv` with a right-rotate by `$lr` using OR
///      (note: OR, not the XOR a classic rotate-pair would use),
///   3. XOR in the neighbouring state register `$ss1`.
/// `$lv` / `$lr` are passed as const-generic rotation amounts to the
/// project helpers `rota_lf` / `rota_rg`.
macro_rules! teq_direct {
 ($ss:ident, $ss1:expr, $lv:expr, $lr:expr, $ymm_a1:ident) => {
 $ss = add($ss, $ymm_a1);
 $ss = or(rota_lf::<$lv>($ss), rota_rg::<$lr>($ss));
 $ss = xor($ss1, $ss);
 };
}
31
32use zeroize::{Zeroize, ZeroizeOnDrop};
33
/// Custom AVX2-accelerated hash state. `Zeroize`/`ZeroizeOnDrop` wipe the
/// fields when the value is dropped, so derived key material does not
/// linger in memory.
#[derive(Debug, Zeroize, ZeroizeOnDrop, Clone, PartialEq, Eq)]
pub struct TequelHash {
    // 12 x 32-bit digest words; each hash call resets these to a fixed IV.
    pub states: [u32; 12],
    // Salt mixed in by `derive_key` (not absorbed by `tqlhash` itself).
    pub salt: String,
    // Stretching round count used by `derive_key` (default 30).
    pub iterations: u32
}
41
42impl TequelHash {
43
44 pub fn new() -> Self {
45 Self {
46 states: [
47 0x1A2B3C4D, 0x5E6F7A8B, 0x9C0D1E2F, 0x31415926,
48 0x27182818, 0xDEADBEEF, 0xCAFEBABE, 0x80808080,
49 0xABCDEF01, 0x456789AB, 0xFEDCBA98, 0x01234567
50 ],
51 salt: "".to_string(),
52 iterations: 30
53 }
54 }
55
56
57
58 pub fn with_salt(mut self, salt: &str) -> Self {
59 self.salt = salt.to_string();
60 self
61 }
62
63 pub fn with_iteration(mut self, value: u32) -> Self{
64 self.iterations = value;
65 self
66 }
67
68 pub fn tqlhash(&mut self, input: &[u8]) -> String {
102
103 self.states = [
104 0x107912FA, 0x220952EA, 0x3320212A, 0x4324312F,
105 0x5320212A, 0x9E3779B1, 0x85EBCA6B, 0xAD35744D,
106 0xCC2912FA, 0xEE0952EA, 0x1120212A, 0x2224312F,
107 ];
108
109 const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
110
111 let mut s0 = unsafe { setzero() };
112 let mut s1 = unsafe { setzero() };
113 let mut s2 = unsafe { setzero() };
114 let mut s3 = unsafe { setzero() };
115 let mut s4 = unsafe { setzero() };
116 let mut s5 = unsafe { setzero() };
117 let mut s6 = unsafe { setzero() };
118 let mut s7 = unsafe { setzero() };
119 let mut s8 = unsafe { setzero() };
120 let mut s9 = unsafe { setzero() };
121 let mut s10 = unsafe { setzero() };
122 let mut s11 = unsafe { setzero() };
123
124 let mut chunks = input.chunks_exact(256);
125
126 for chunk in chunks.by_ref() {
127
128 unsafe {
129
130 _mm_prefetch(chunk.as_ptr().add(256) as *const i8, _MM_HINT_T0);
131
132 let bl_a = &chunk[..64];
133 let bl_b = &chunk[64..128];
134 let bl_c = &chunk[128..];
135 let bl_d = &chunk[..256];
136
137 let ymm_a1 = loadu(bl_a.as_ptr() as *const __m256i);
138 let ymm_a2 = xor(loadu(bl_a.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
139
140 teq_direct!(s0, s1, 7, 25, ymm_a1);
141 teq_direct!(s1, s2, 31, 28, ymm_a2);
142 teq_direct!(s2, s3, 25, 7, ymm_a1);
143 teq_direct!(s3, s4, 23, 9, ymm_a2);
144 teq_direct!(s4, s5, 13, 19, ymm_a1);
145 teq_direct!(s5, s6, 29, 3, ymm_a2);
146 teq_direct!(s6, s7, 19, 13, ymm_a1);
147 teq_direct!(s7, s8, 17, 15, ymm_a2);
148 teq_direct!(s8, s9, 11, 21, ymm_a1);
149 teq_direct!(s9, s10, 5, 27, ymm_a2);
150 teq_direct!(s10, s11, 3, 29, ymm_a1);
151 teq_direct!(s11, s0, 2, 30, ymm_a2);
152
153 let ymm_b1 = loadu(bl_b.as_ptr() as *const __m256i);
154 let ymm_b2 = xor(loadu(bl_b.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
155
156 teq_direct!(s0, s1, 7, 25, ymm_b1);
157 teq_direct!(s1, s2, 31, 28, ymm_b2);
158 teq_direct!(s2, s3, 25, 7, ymm_b1);
159 teq_direct!(s3, s4, 23, 9, ymm_b2);
160 teq_direct!(s4, s5, 13, 19, ymm_b1);
161 teq_direct!(s5, s6, 29, 3, ymm_b2);
162 teq_direct!(s6, s7, 19, 13, ymm_b1);
163 teq_direct!(s7, s8, 17, 15, ymm_b2);
164 teq_direct!(s8, s9, 11, 21, ymm_b1);
165 teq_direct!(s9, s10, 5, 27, ymm_b2);
166 teq_direct!(s10, s11, 3, 29, ymm_b1);
167 teq_direct!(s11, s0, 2, 30, ymm_b2);
168
169 let ymm_c1 = loadu(bl_c.as_ptr() as *const __m256i);
170 let ymm_c2 = xor(loadu(bl_c.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
171
172 teq_direct!(s0, s1, 7, 25, ymm_c1);
173 teq_direct!(s1, s2, 31, 28, ymm_c2);
174 teq_direct!(s2, s3, 25, 7, ymm_c1);
175 teq_direct!(s3, s4, 23, 9, ymm_c2);
176 teq_direct!(s4, s5, 13, 19, ymm_c1);
177 teq_direct!(s5, s6, 29, 3, ymm_c2);
178 teq_direct!(s6, s7, 19, 13, ymm_c1);
179 teq_direct!(s7, s8, 17, 15, ymm_c2);
180 teq_direct!(s8, s9, 11, 21, ymm_c1);
181 teq_direct!(s9, s10, 5, 27, ymm_c2);
182 teq_direct!(s10, s11, 3, 29, ymm_c1);
183 teq_direct!(s11, s0, 2, 30, ymm_c2);
184
185 let ymm_d1 = loadu(bl_d.as_ptr() as *const __m256i);
186 let ymm_d2 = xor(loadu(bl_d.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
187
188 teq_direct!(s0, s1, 7, 25, ymm_d1);
189 teq_direct!(s1, s2, 31, 28, ymm_d2);
190 teq_direct!(s2, s3, 25, 7, ymm_d1);
191 teq_direct!(s3, s4, 23, 9, ymm_d2);
192 teq_direct!(s4, s5, 13, 19, ymm_d1);
193 teq_direct!(s5, s6, 29, 3, ymm_d2);
194 teq_direct!(s6, s7, 19, 13, ymm_d1);
195 teq_direct!(s7, s8, 17, 15, ymm_d2);
196 teq_direct!(s8, s9, 11, 21, ymm_d1);
197 teq_direct!(s9, s10, 5, 27, ymm_d2);
198 teq_direct!(s10, s11, 3, 29, ymm_d1);
199 teq_direct!(s11, s0, 2, 30, ymm_d2);
200
201 s0 = xor(s0, s11);
202
203 }
204
205 }
206
207
208 unsafe {
209 self.states[0] = self.states[0] .wrapping_add(horiz_add_avx2(s0));
210 self.states[1] = self.states[1] .wrapping_add(horiz_add_avx2(s1));
211 self.states[2] = self.states[2] .wrapping_add(horiz_add_avx2(s2));
212 self.states[3] = self.states[3] .wrapping_add(horiz_add_avx2(s3));
213 self.states[4] = self.states[4] .wrapping_add(horiz_add_avx2(s4));
214 self.states[5] = self.states[5] .wrapping_add(horiz_add_avx2(s5));
215 self.states[6] = self.states[6] .wrapping_add(horiz_add_avx2(s6));
216 self.states[7] = self.states[7] .wrapping_add(horiz_add_avx2(s7));
217 self.states[8] = self.states[8] .wrapping_add(horiz_add_avx2(s8));
218 self.states[9] = self.states[9] .wrapping_add(horiz_add_avx2(s9));
219 self.states[10] = self.states[10].wrapping_add(horiz_add_avx2(s10));
220 self.states[11] = self.states[11].wrapping_add(horiz_add_avx2(s11));
221 }
222
223
224 let remainder_128 = chunks.remainder();
225 let mut chunks_64 = remainder_128.chunks_exact(64);
226
227 for chunk in chunks_64.by_ref() {
228 unsafe {
229
230 let ymm_a1 = loadu(chunk.as_ptr() as *const __m256i);
231 let ymm_a2 = xor(loadu(chunk.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
232
233 teq_direct!(s0, s1, 7, 25, ymm_a1);
234 teq_direct!(s1, s2, 31, 28, ymm_a2);
235 teq_direct!(s2, s3, 25, 7, ymm_a1);
236 teq_direct!(s3, s4, 23, 9, ymm_a2);
237 teq_direct!(s4, s5, 13, 19, ymm_a1);
238 teq_direct!(s5, s6, 29, 3, ymm_a2);
239 teq_direct!(s6, s7, 19, 13, ymm_a1);
240 teq_direct!(s7, s8, 17, 15, ymm_a2);
241 teq_direct!(s8, s9, 11, 21, ymm_a1);
242 teq_direct!(s9, s10, 5, 27, ymm_a2);
243 teq_direct!(s10, s11, 3, 29, ymm_a1);
244 teq_direct!(s11, s0, 2, 30, ymm_a2);
245
246 }
247 }
248
249
250 let final_remainder = chunks_64.remainder();
251
252 for (idx, &byte) in final_remainder.iter().enumerate() {
253 let pos = idx % 12;
254 self.states[pos] = self.states[pos].wrapping_add((byte as u32) ^ 0x9E3779B1);
255 }
256
257 self.apply_final_mixer_64();
258
259 let mut hex_buffer = vec![0u8; 96];
260
261 for (i, &s) in self.states.iter().enumerate() {
262 let bytes = s.to_be_bytes();
263 for (j, &byte) in bytes.iter().enumerate() {
264 let offset = (i * 8) + (j * 2);
265 hex_buffer[offset] = HEX_CHARS[(byte >> 4) as usize];
266 hex_buffer[offset + 1] = HEX_CHARS[(byte & 0x0f) as usize];
267 }
268 }
269
270 unsafe { String::from_utf8_unchecked(hex_buffer) }
271
272 }
273
274
275 pub fn tqlhash_raw(&mut self, input: &[u8]) -> [u8; 48] {
309
310 self.states = [
311 0x107912FA, 0x220952EA, 0x3320212A, 0x4324312F,
312 0x5320212A, 0x9E3779B1, 0x85EBCA6B, 0xAD35744D,
313 0xCC2912FA, 0xEE0952EA, 0x1120212A, 0x2224312F,
314 ];
315
316 let mut s0 = unsafe { setzero() };
317 let mut s1 = unsafe { setzero() };
318 let mut s2 = unsafe { setzero() };
319 let mut s3 = unsafe { setzero() };
320 let mut s4 = unsafe { setzero() };
321 let mut s5 = unsafe { setzero() };
322 let mut s6 = unsafe { setzero() };
323 let mut s7 = unsafe { setzero() };
324 let mut s8 = unsafe { setzero() };
325 let mut s9 = unsafe { setzero() };
326 let mut s10 = unsafe { setzero() };
327 let mut s11 = unsafe { setzero() };
328
329 let mut chunks = input.chunks_exact(128);
330
331 for chunk in chunks.by_ref() {
332
333 unsafe {
334
335 _mm_prefetch(chunk.as_ptr().add(256) as *const i8, _MM_HINT_T0);
336
337 let bl_a = &chunk[..64];
338 let bl_b = &chunk[64..128];
339 let bl_c = &chunk[128..];
340 let bl_d = &chunk[..256];
341
342 let ymm_a1 = loadu(bl_a.as_ptr() as *const __m256i);
343 let ymm_a2 = xor(loadu(bl_a.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
344
345 teq_direct!(s0, s1, 7, 25, ymm_a1);
346 teq_direct!(s1, s2, 31, 28, ymm_a2);
347 teq_direct!(s2, s3, 25, 7, ymm_a1);
348 teq_direct!(s3, s4, 23, 9, ymm_a2);
349 teq_direct!(s4, s5, 13, 19, ymm_a1);
350 teq_direct!(s5, s6, 29, 3, ymm_a2);
351 teq_direct!(s6, s7, 19, 13, ymm_a1);
352 teq_direct!(s7, s8, 17, 15, ymm_a2);
353 teq_direct!(s8, s9, 11, 21, ymm_a1);
354 teq_direct!(s9, s10, 5, 27, ymm_a2);
355 teq_direct!(s10, s11, 3, 29, ymm_a1);
356 teq_direct!(s11, s0, 2, 30, ymm_a2);
357
358 let ymm_b1 = loadu(bl_b.as_ptr() as *const __m256i);
359 let ymm_b2 = xor(loadu(bl_b.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
360
361 teq_direct!(s0, s1, 7, 25, ymm_b1);
362 teq_direct!(s1, s2, 31, 28, ymm_b2);
363 teq_direct!(s2, s3, 25, 7, ymm_b1);
364 teq_direct!(s3, s4, 23, 9, ymm_b2);
365 teq_direct!(s4, s5, 13, 19, ymm_b1);
366 teq_direct!(s5, s6, 29, 3, ymm_b2);
367 teq_direct!(s6, s7, 19, 13, ymm_b1);
368 teq_direct!(s7, s8, 17, 15, ymm_b2);
369 teq_direct!(s8, s9, 11, 21, ymm_b1);
370 teq_direct!(s9, s10, 5, 27, ymm_b2);
371 teq_direct!(s10, s11, 3, 29, ymm_b1);
372 teq_direct!(s11, s0, 2, 30, ymm_b2);
373
374 let ymm_c1 = loadu(bl_c.as_ptr() as *const __m256i);
375 let ymm_c2 = xor(loadu(bl_c.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
376
377 teq_direct!(s0, s1, 7, 25, ymm_c1);
378 teq_direct!(s1, s2, 31, 28, ymm_c2);
379 teq_direct!(s2, s3, 25, 7, ymm_c1);
380 teq_direct!(s3, s4, 23, 9, ymm_c2);
381 teq_direct!(s4, s5, 13, 19, ymm_c1);
382 teq_direct!(s5, s6, 29, 3, ymm_c2);
383 teq_direct!(s6, s7, 19, 13, ymm_c1);
384 teq_direct!(s7, s8, 17, 15, ymm_c2);
385 teq_direct!(s8, s9, 11, 21, ymm_c1);
386 teq_direct!(s9, s10, 5, 27, ymm_c2);
387 teq_direct!(s10, s11, 3, 29, ymm_c1);
388 teq_direct!(s11, s0, 2, 30, ymm_c2);
389
390 let ymm_d1 = loadu(bl_d.as_ptr() as *const __m256i);
391 let ymm_d2 = xor(loadu(bl_d.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
392
393 teq_direct!(s0, s1, 7, 25, ymm_d1);
394 teq_direct!(s1, s2, 31, 28, ymm_d2);
395 teq_direct!(s2, s3, 25, 7, ymm_d1);
396 teq_direct!(s3, s4, 23, 9, ymm_d2);
397 teq_direct!(s4, s5, 13, 19, ymm_d1);
398 teq_direct!(s5, s6, 29, 3, ymm_d2);
399 teq_direct!(s6, s7, 19, 13, ymm_d1);
400 teq_direct!(s7, s8, 17, 15, ymm_d2);
401 teq_direct!(s8, s9, 11, 21, ymm_d1);
402 teq_direct!(s9, s10, 5, 27, ymm_d2);
403 teq_direct!(s10, s11, 3, 29, ymm_d1);
404 teq_direct!(s11, s0, 2, 30, ymm_d2);
405
406 s0 = xor(s0, s11);
407
408 }
409
410 }
411
412
413 unsafe {
414 self.states[0] = self.states[0] .wrapping_add(horiz_add_avx2(s0));
415 self.states[1] = self.states[1] .wrapping_add(horiz_add_avx2(s1));
416 self.states[2] = self.states[2] .wrapping_add(horiz_add_avx2(s2));
417 self.states[3] = self.states[3] .wrapping_add(horiz_add_avx2(s3));
418 self.states[4] = self.states[4] .wrapping_add(horiz_add_avx2(s4));
419 self.states[5] = self.states[5] .wrapping_add(horiz_add_avx2(s5));
420 self.states[6] = self.states[6] .wrapping_add(horiz_add_avx2(s6));
421 self.states[7] = self.states[7] .wrapping_add(horiz_add_avx2(s7));
422 self.states[8] = self.states[8] .wrapping_add(horiz_add_avx2(s8));
423 self.states[9] = self.states[9] .wrapping_add(horiz_add_avx2(s9));
424 self.states[10] = self.states[10].wrapping_add(horiz_add_avx2(s10));
425 self.states[11] = self.states[11].wrapping_add(horiz_add_avx2(s11));
426 }
427
428
429 let remainder_128 = chunks.remainder();
430 let mut chunks_64 = remainder_128.chunks_exact(64);
431
432 for chunk in chunks_64.by_ref() {
433 unsafe {
434
435 let ymm_a1 = loadu(chunk.as_ptr() as *const __m256i);
436 let ymm_a2 = xor(loadu(chunk.as_ptr().add(32) as *const __m256i), setone_i32(0x517CC1B7));
437
438 teq_direct!(s0, s1, 7, 25, ymm_a1);
439 teq_direct!(s1, s2, 31, 28, ymm_a2);
440 teq_direct!(s2, s3, 25, 7, ymm_a1);
441 teq_direct!(s3, s4, 23, 9, ymm_a2);
442 teq_direct!(s4, s5, 13, 19, ymm_a1);
443 teq_direct!(s5, s6, 29, 3, ymm_a2);
444 teq_direct!(s6, s7, 19, 13, ymm_a1);
445 teq_direct!(s7, s8, 17, 15, ymm_a2);
446 teq_direct!(s8, s9, 11, 21, ymm_a1);
447 teq_direct!(s9, s10, 5, 27, ymm_a2);
448 teq_direct!(s10, s11, 3, 29, ymm_a1);
449 teq_direct!(s11, s0, 2, 30, ymm_a2);
450
451 }
452 }
453
454 let final_remainder = chunks_64.remainder();
455
456 for (idx, &byte) in final_remainder.iter().enumerate() {
457 let pos = idx % 12;
458 self.states[pos] = self.states[pos].wrapping_add((byte as u32) ^ 0x9E3779B1);
459 }
460
461 self.apply_final_mixer_64();
462
463 let mut bytes = [0u8; 48];
464
465 for (i, &val) in self.states.iter().enumerate() {
466 let b = val.to_be_bytes();
467 bytes[i*4 .. i*4+4].copy_from_slice(&b);
468 }
469
470 bytes
471 }
472
473
474
475 pub fn isv_tqlhash(&mut self, hash: &String, input: &[u8]) -> bool {
507
508 let new_hash = self.tqlhash(input);
509
510 let a = new_hash.as_bytes();
511 let b = hash.as_bytes();
512
513 if a.len() != b.len() {
514 return false;
515 }
516
517 let mut result = 0u8;
518 for i in 0..a.len() {
519 result |= black_box(a[i] ^ b[i]);
520 }
521
522 result == 0
523
524 }
525
526
527 pub fn isv_tqlhash_raw(&mut self, hash: &[u8; 48], input: &[u8]) -> bool {
528
529 let a_bh = self.tqlhash_raw(input);
530
531 let mut result = 0u8;
532
533 for i in 0..48 {
534 result |= black_box(a_bh[i] ^ hash[i]);
535 }
536
537 result == 0
538
539 }
540
541
542
543 pub fn derive_key(&mut self, password: &str, iterations: u32) -> [u8; 32] {
575
576 self.iterations = if iterations > 0 { iterations } else { 30 };
577
578 let mut derived = format!("{}{}{}", self.salt, password, self.salt);
579
580 for i in 0..self.iterations {
581 let hash_hex = self.tqlhash(derived.as_bytes());
582 derived = format!("{}{}{}", i, hash_hex, self.salt);
583 }
584
585 let final_hash = self.tqlhash(derived.as_bytes());
586 let bytes = hex::decode(&final_hash).expect("Error in key closing");
587
588 let mut key = [0u8; 32];
589 key.copy_from_slice(&bytes[0..32]);
590 key
591 }
592
593
594
595 fn apply_final_mixer_64(&mut self) {
596 for r in 0..64 {
597 for i in 0..12 {
598 let prev = if i == 0 { 11 } else { i - 1 };
599 let next = (i + 1) % 12;
600
601 self.states[i] = self.states[i]
602 .wrapping_add(self.states[prev])
603 .rotate_left(((r % 31) as u32) + 1);
604 self.states[next] ^= self.states[i].wrapping_mul(0xAD35744D);
605 }
606 }
607 }
608
609}