1use crate::internal_alloc::Vec;
19use noxtls_core::{Error, Result};
20
/// Software AES block cipher with a pre-expanded key schedule.
///
/// Supports AES-128/192/256 depending on the key length passed to
/// [`AesCipher::new`].
#[derive(Debug, Clone)]
pub struct AesCipher {
    // One 16-byte round key per round, plus the initial whitening key
    // (rounds + 1 entries total).
    round_keys: Vec<[u8; 16]>,
    // Number of rounds: 10, 12, or 14 for 128/192/256-bit keys.
    rounds: usize,
}
27
impl AesCipher {
    /// Builds a cipher from a 16-, 24-, or 32-byte key (AES-128/192/256).
    ///
    /// # Errors
    /// Returns `Error::InvalidLength` for any other key length.
    pub fn new(key: &[u8]) -> Result<Self> {
        // (nk, rounds): key size in 32-bit words and round count (FIPS-197, fig. 4).
        let (nk, rounds) = match key.len() {
            16 => (4, 10),
            24 => (6, 12),
            32 => (8, 14),
            _ => {
                return Err(Error::InvalidLength(
                    "aes key length must be 16, 24, or 32 bytes",
                ))
            }
        };
        let expanded = key_expansion(key, nk, rounds);
        Ok(Self {
            round_keys: expanded,
            rounds,
        })
    }

    /// Encrypts one 16-byte block in place (FIPS-197 Cipher).
    pub fn encrypt_block(&self, block: &mut [u8; 16]) {
        // Initial whitening, then rounds 1..N-1, then the final round,
        // which omits MixColumns.
        add_round_key(block, &self.round_keys[0]);
        for round in 1..self.rounds {
            sub_bytes(block);
            shift_rows(block);
            mix_columns(block);
            add_round_key(block, &self.round_keys[round]);
        }
        sub_bytes(block);
        shift_rows(block);
        add_round_key(block, &self.round_keys[self.rounds]);
    }

    /// Decrypts one 16-byte block in place (FIPS-197 InvCipher: the
    /// encryption rounds applied in reverse with the inverse transforms).
    pub fn decrypt_block(&self, block: &mut [u8; 16]) {
        add_round_key(block, &self.round_keys[self.rounds]);
        for round in (1..self.rounds).rev() {
            inv_shift_rows(block);
            inv_sub_bytes(block);
            add_round_key(block, &self.round_keys[round]);
            inv_mix_columns(block);
        }
        inv_shift_rows(block);
        inv_sub_bytes(block);
        add_round_key(block, &self.round_keys[0]);
    }
}
88
/// Encrypts block-aligned `input` in ECB mode. ECB leaks plaintext
/// structure, hence the `hazardous-legacy-crypto` feature gate.
///
/// # Errors
/// Returns `Error::InvalidLength` unless `input.len()` is a multiple of 16.
#[cfg(feature = "hazardous-legacy-crypto")]
pub fn aes_ecb_encrypt(cipher: &AesCipher, input: &[u8]) -> Result<Vec<u8>> {
    if input.len() % 16 != 0 {
        return Err(Error::InvalidLength("aes ecb input must be block-aligned"));
    }
    let mut out = input.to_vec();
    for chunk in out.chunks_exact_mut(16) {
        let mut block: [u8; 16] = chunk.try_into().expect("exact 16-byte chunk");
        cipher.encrypt_block(&mut block);
        chunk.copy_from_slice(&block);
    }
    Ok(out)
}
111
/// Decrypts block-aligned ECB ciphertext produced by [`aes_ecb_encrypt`].
/// Feature-gated for the same reason as encryption: ECB is unsafe for
/// structured data.
///
/// # Errors
/// Returns `Error::InvalidLength` unless `input.len()` is a multiple of 16.
#[cfg(feature = "hazardous-legacy-crypto")]
pub fn aes_ecb_decrypt(cipher: &AesCipher, input: &[u8]) -> Result<Vec<u8>> {
    if input.len() % 16 != 0 {
        return Err(Error::InvalidLength("aes ecb input must be block-aligned"));
    }
    let mut out = input.to_vec();
    for chunk in out.chunks_exact_mut(16) {
        let mut block: [u8; 16] = chunk.try_into().expect("exact 16-byte chunk");
        cipher.decrypt_block(&mut block);
        chunk.copy_from_slice(&block);
    }
    Ok(out)
}
134
135pub fn aes_cbc_encrypt(cipher: &AesCipher, iv: &[u8; 16], plaintext: &[u8]) -> Result<Vec<u8>> {
145 if !plaintext.len().is_multiple_of(16) {
146 return Err(Error::InvalidLength("aes cbc input must be block-aligned"));
147 }
148 let mut out = plaintext.to_vec();
149 let mut prev = *iv;
150 for chunk in out.chunks_exact_mut(16) {
151 for (i, byte) in chunk.iter_mut().enumerate() {
152 *byte ^= prev[i];
153 }
154 let mut block = [0_u8; 16];
155 block.copy_from_slice(chunk);
156 cipher.encrypt_block(&mut block);
157 chunk.copy_from_slice(&block);
158 prev = block;
159 }
160 Ok(out)
161}
162
163pub fn aes_cbc_decrypt(cipher: &AesCipher, iv: &[u8; 16], ciphertext: &[u8]) -> Result<Vec<u8>> {
173 if !ciphertext.len().is_multiple_of(16) {
174 return Err(Error::InvalidLength("aes cbc input must be block-aligned"));
175 }
176 let mut out = ciphertext.to_vec();
177 let mut prev = *iv;
178 for chunk in out.chunks_exact_mut(16) {
179 let mut cur = [0_u8; 16];
180 cur.copy_from_slice(chunk);
181 let mut block = cur;
182 cipher.decrypt_block(&mut block);
183 for i in 0..16 {
184 block[i] ^= prev[i];
185 }
186 chunk.copy_from_slice(&block);
187 prev = cur;
188 }
189 Ok(out)
190}
191
192pub fn aes_ctr_apply(cipher: &AesCipher, nonce_counter: &[u8; 16], input: &[u8]) -> Vec<u8> {
202 let mut out = vec![0_u8; input.len()];
203 let mut counter = *nonce_counter;
204 let mut offset = 0;
205 while offset < input.len() {
206 let mut stream = counter;
207 cipher.encrypt_block(&mut stream);
208 let chunk_len = (input.len() - offset).min(16);
209 for i in 0..chunk_len {
210 out[offset + i] = input[offset + i] ^ stream[i];
211 }
212 increment_be(&mut counter);
213 offset += chunk_len;
214 }
215 out
216}
217
218pub fn aes_cfb_apply(cipher: &AesCipher, iv: &[u8; 16], input: &[u8]) -> Vec<u8> {
228 aes_cfb_encrypt(cipher, iv, input)
229}
230
231pub fn aes_cfb_encrypt(cipher: &AesCipher, iv: &[u8; 16], plaintext: &[u8]) -> Vec<u8> {
241 aes_cfb_process(cipher, iv, plaintext, true)
242}
243
244pub fn aes_cfb_decrypt(cipher: &AesCipher, iv: &[u8; 16], ciphertext: &[u8]) -> Vec<u8> {
254 aes_cfb_process(cipher, iv, ciphertext, false)
255}
256
257fn aes_cfb_process(cipher: &AesCipher, iv: &[u8; 16], input: &[u8], encrypt: bool) -> Vec<u8> {
274 let mut out = vec![0_u8; input.len()];
275 let mut reg = *iv;
276 let mut offset = 0;
277 while offset < input.len() {
278 let mut stream = reg;
279 cipher.encrypt_block(&mut stream);
280 let chunk_len = (input.len() - offset).min(16);
281 for i in 0..chunk_len {
282 out[offset + i] = input[offset + i] ^ stream[i];
283 }
284 if encrypt {
285 shift_register_append(&mut reg, &out[offset..offset + chunk_len]);
286 } else {
287 shift_register_append(&mut reg, &input[offset..offset + chunk_len]);
288 }
289 offset += chunk_len;
290 }
291 out
292}
293
294pub fn aes_ofb_apply(cipher: &AesCipher, iv: &[u8; 16], input: &[u8]) -> Vec<u8> {
304 let mut out = vec![0_u8; input.len()];
305 let mut stream = *iv;
306 let mut offset = 0;
307 while offset < input.len() {
308 cipher.encrypt_block(&mut stream);
309 let chunk_len = (input.len() - offset).min(16);
310 for i in 0..chunk_len {
311 out[offset + i] = input[offset + i] ^ stream[i];
312 }
313 offset += chunk_len;
314 }
315 out
316}
317
/// Encrypts `plaintext` with AES-GCM (NIST SP 800-38D), returning the
/// ciphertext and the 16-byte authentication tag.
///
/// Any nonce length is accepted; 12-byte nonces take the fast J0 path
/// inside [`gcm_j0`]. This function itself never fails; the `Result`
/// return keeps the signature parallel with the other AEAD entry points.
pub fn aes_gcm_encrypt(
    cipher: &AesCipher,
    nonce: &[u8],
    aad: &[u8],
    plaintext: &[u8],
) -> Result<(Vec<u8>, [u8; 16])> {
    // Hash subkey H = E_K(0^128).
    let h = {
        let mut zero = [0_u8; 16];
        cipher.encrypt_block(&mut zero);
        u128::from_be_bytes(zero)
    };
    let j0 = gcm_j0(h, nonce);
    // Encryption counter starts at inc32(J0); J0 itself is reserved for the tag.
    let mut ctr = j0;
    inc32_u128(&mut ctr);
    let ciphertext = gcm_ctr_xor(cipher, ctr, plaintext);
    let s = ghash(h, aad, &ciphertext);
    let mut e_j0 = j0.to_be_bytes();
    cipher.encrypt_block(&mut e_j0);
    // Tag = E_K(J0) XOR GHASH(H, A, C).
    let tag = (u128::from_be_bytes(e_j0) ^ s).to_be_bytes();
    Ok((ciphertext, tag))
}
349
/// Decrypts AES-GCM ciphertext, verifying the 16-byte tag before any
/// plaintext is produced.
///
/// # Errors
/// Returns `Error::CryptoFailure` when the tag does not match.
pub fn aes_gcm_decrypt(
    cipher: &AesCipher,
    nonce: &[u8],
    aad: &[u8],
    ciphertext: &[u8],
    tag: &[u8; 16],
) -> Result<Vec<u8>> {
    // Hash subkey H = E_K(0^128).
    let h = {
        let mut zero = [0_u8; 16];
        cipher.encrypt_block(&mut zero);
        u128::from_be_bytes(zero)
    };
    let j0 = gcm_j0(h, nonce);
    // Plaintext counter starts at inc32(J0); J0 itself masks the tag.
    let mut ctr = j0;
    inc32_u128(&mut ctr);
    let s = ghash(h, aad, ciphertext);
    let mut e_j0 = j0.to_be_bytes();
    cipher.encrypt_block(&mut e_j0);
    let expected_tag = (u128::from_be_bytes(e_j0) ^ s).to_be_bytes();
    // Constant-time compare; decrypt only after the tag checks out.
    if !constant_time_tag_eq(&expected_tag, tag) {
        return Err(Error::CryptoFailure("aes-gcm authentication failed"));
    }
    Ok(gcm_ctr_xor(cipher, ctr, ciphertext))
}
385
386pub fn aes_ccm_encrypt(
397 cipher: &AesCipher,
398 nonce: &[u8],
399 aad: &[u8],
400 plaintext: &[u8],
401) -> Result<(Vec<u8>, [u8; 16])> {
402 if !(7..=13).contains(&nonce.len()) {
403 return Err(Error::InvalidLength("aes-ccm nonce must be 7..13 bytes"));
404 }
405 let q = 15 - nonce.len();
406 if plaintext.len() >= (1_usize << (8 * q.min(8))) {
407 return Err(Error::InvalidLength(
408 "aes-ccm plaintext too large for nonce",
409 ));
410 }
411 let t_len = 16_usize;
412 let mut b0 = [0_u8; 16];
413 let aadata_flag = if aad.is_empty() { 0_u8 } else { 0x40 };
414 let m_prime = (((t_len - 2) / 2) as u8) << 3;
415 let l_prime = (q as u8) - 1;
416 b0[0] = aadata_flag | m_prime | l_prime;
417 b0[1..1 + nonce.len()].copy_from_slice(nonce);
418 encode_len_q(plaintext.len() as u64, q, &mut b0[16 - q..]);
419
420 let mut mac_state = [0_u8; 16];
421 xor_block_in_place(&mut mac_state, &b0);
422 cipher.encrypt_block(&mut mac_state);
423
424 if !aad.is_empty() {
425 let mut aad_blocked = Vec::new();
426 if aad.len() < 0xFF00 {
427 aad_blocked.extend_from_slice(&(aad.len() as u16).to_be_bytes());
428 } else {
429 aad_blocked.extend_from_slice(&[0xFF, 0xFE]);
430 aad_blocked.extend_from_slice(&(aad.len() as u32).to_be_bytes());
431 }
432 aad_blocked.extend_from_slice(aad);
433 pad16(&mut aad_blocked);
434 for chunk in aad_blocked.chunks_exact(16) {
435 let mut blk = [0_u8; 16];
436 blk.copy_from_slice(chunk);
437 xor_block_in_place(&mut mac_state, &blk);
438 cipher.encrypt_block(&mut mac_state);
439 }
440 }
441
442 let mut payload = plaintext.to_vec();
443 pad16(&mut payload);
444 for chunk in payload.chunks_exact(16) {
445 let mut blk = [0_u8; 16];
446 blk.copy_from_slice(chunk);
447 xor_block_in_place(&mut mac_state, &blk);
448 cipher.encrypt_block(&mut mac_state);
449 }
450 let mut tag = mac_state;
451
452 let mut ctr0 = [0_u8; 16];
453 ctr0[0] = l_prime;
454 ctr0[1..1 + nonce.len()].copy_from_slice(nonce);
455 let mut s0 = ctr0;
456 cipher.encrypt_block(&mut s0);
457 for (t, s) in tag.iter_mut().zip(s0) {
458 *t ^= s;
459 }
460
461 let mut ciphertext = vec![0_u8; plaintext.len()];
462 let mut counter = ctr0;
463 for block_idx in 0..plaintext.len().div_ceil(16) {
464 increment_q_counter(&mut counter, q);
465 let mut stream = counter;
466 cipher.encrypt_block(&mut stream);
467 let start = block_idx * 16;
468 let end = (start + 16).min(plaintext.len());
469 for i in start..end {
470 ciphertext[i] = plaintext[i] ^ stream[i - start];
471 }
472 }
473
474 Ok((ciphertext, tag))
475}
476
477pub fn aes_ccm_decrypt(
489 cipher: &AesCipher,
490 nonce: &[u8],
491 aad: &[u8],
492 ciphertext: &[u8],
493 tag: &[u8; 16],
494) -> Result<Vec<u8>> {
495 if !(7..=13).contains(&nonce.len()) {
496 return Err(Error::InvalidLength("aes-ccm nonce must be 7..13 bytes"));
497 }
498 let q = 15 - nonce.len();
499 if ciphertext.len() >= (1_usize << (8 * q.min(8))) {
500 return Err(Error::InvalidLength(
501 "aes-ccm ciphertext too large for nonce",
502 ));
503 }
504 let t_len = 16_usize;
505 let l_prime = (q as u8) - 1;
506
507 let mut ctr0 = [0_u8; 16];
508 ctr0[0] = l_prime;
509 ctr0[1..1 + nonce.len()].copy_from_slice(nonce);
510
511 let mut plaintext = vec![0_u8; ciphertext.len()];
512 let mut counter = ctr0;
513 for block_idx in 0..ciphertext.len().div_ceil(16) {
514 increment_q_counter(&mut counter, q);
515 let mut stream = counter;
516 cipher.encrypt_block(&mut stream);
517 let start = block_idx * 16;
518 let end = (start + 16).min(ciphertext.len());
519 for i in start..end {
520 plaintext[i] = ciphertext[i] ^ stream[i - start];
521 }
522 }
523
524 let mut b0 = [0_u8; 16];
525 let aadata_flag = if aad.is_empty() { 0_u8 } else { 0x40 };
526 let m_prime = (((t_len - 2) / 2) as u8) << 3;
527 b0[0] = aadata_flag | m_prime | l_prime;
528 b0[1..1 + nonce.len()].copy_from_slice(nonce);
529 encode_len_q(plaintext.len() as u64, q, &mut b0[16 - q..]);
530
531 let mut mac_state = [0_u8; 16];
532 xor_block_in_place(&mut mac_state, &b0);
533 cipher.encrypt_block(&mut mac_state);
534
535 if !aad.is_empty() {
536 let mut aad_blocked = Vec::new();
537 if aad.len() < 0xFF00 {
538 aad_blocked.extend_from_slice(&(aad.len() as u16).to_be_bytes());
539 } else {
540 aad_blocked.extend_from_slice(&[0xFF, 0xFE]);
541 aad_blocked.extend_from_slice(&(aad.len() as u32).to_be_bytes());
542 }
543 aad_blocked.extend_from_slice(aad);
544 pad16(&mut aad_blocked);
545 for chunk in aad_blocked.chunks_exact(16) {
546 let mut blk = [0_u8; 16];
547 blk.copy_from_slice(chunk);
548 xor_block_in_place(&mut mac_state, &blk);
549 cipher.encrypt_block(&mut mac_state);
550 }
551 }
552
553 let mut payload = plaintext.clone();
554 pad16(&mut payload);
555 for chunk in payload.chunks_exact(16) {
556 let mut blk = [0_u8; 16];
557 blk.copy_from_slice(chunk);
558 xor_block_in_place(&mut mac_state, &blk);
559 cipher.encrypt_block(&mut mac_state);
560 }
561 let mut expected_tag = mac_state;
562 let mut s0 = ctr0;
563 cipher.encrypt_block(&mut s0);
564 for (t, s) in expected_tag.iter_mut().zip(s0) {
565 *t ^= s;
566 }
567 if !constant_time_tag_eq(&expected_tag, tag) {
568 return Err(Error::CryptoFailure("aes-ccm authentication failed"));
569 }
570 Ok(plaintext)
571}
572
/// Compares two 16-byte tags without early exit, so the comparison time
/// does not depend on where the tags first differ.
fn constant_time_tag_eq(expected: &[u8; 16], received: &[u8; 16]) -> bool {
    expected
        .iter()
        .zip(received.iter())
        .fold(0_u8, |acc, (&a, &b)| acc | (a ^ b))
        == 0
}
594
595pub fn aes_xts_encrypt(
606 cipher_a: &AesCipher,
607 cipher_b: &AesCipher,
608 tweak: &[u8; 16],
609 plaintext: &[u8],
610) -> Result<Vec<u8>> {
611 aes_xts_crypt(cipher_a, cipher_b, tweak, plaintext, true)
612}
613
614pub fn aes_xts_decrypt(
625 cipher_a: &AesCipher,
626 cipher_b: &AesCipher,
627 tweak: &[u8; 16],
628 ciphertext: &[u8],
629) -> Result<Vec<u8>> {
630 aes_xts_crypt(cipher_a, cipher_b, tweak, ciphertext, false)
631}
632
/// Shared AES-XTS worker: `cipher_b` encrypts the tweak, `cipher_a`
/// transforms the data; `encrypt` selects the direction.
///
/// Inputs that are not a multiple of 16 bytes are handled with ciphertext
/// stealing, which is why at least one full block is required.
///
/// # Errors
/// Returns `Error::InvalidLength` when `input` is shorter than 16 bytes.
fn aes_xts_crypt(
    cipher_a: &AesCipher,
    cipher_b: &AesCipher,
    tweak: &[u8; 16],
    input: &[u8],
    encrypt: bool,
) -> Result<Vec<u8>> {
    if input.len() < 16 {
        return Err(Error::InvalidLength(
            "aes-xts input must be at least one full 16-byte block",
        ));
    }
    let mut out = vec![0_u8; input.len()];
    let full_blocks = input.len() / 16;
    let rem = input.len() % 16;

    // T_0 = E_K2(tweak); block j uses T_j = T_0 * alpha^j via xts_mul_x.
    let mut tw = *tweak;
    cipher_b.encrypt_block(&mut tw);

    if rem == 0 {
        // Aligned input: plain XEX (xor-encrypt-xor) on every block.
        for block_idx in 0..full_blocks {
            let start = block_idx * 16;
            let mut block = [0_u8; 16];
            block.copy_from_slice(&input[start..start + 16]);
            xor_block_in_place(&mut block, &tw);
            if encrypt {
                cipher_a.encrypt_block(&mut block);
            } else {
                cipher_a.decrypt_block(&mut block);
            }
            xor_block_in_place(&mut block, &tw);
            out[start..start + 16].copy_from_slice(&block);
            xts_mul_x(&mut tw);
        }
        return Ok(out);
    }

    // Ciphertext stealing: every block except the last full one is normal.
    for block_idx in 0..(full_blocks - 1) {
        let start = block_idx * 16;
        let mut block = [0_u8; 16];
        block.copy_from_slice(&input[start..start + 16]);
        xor_block_in_place(&mut block, &tw);
        if encrypt {
            cipher_a.encrypt_block(&mut block);
        } else {
            cipher_a.decrypt_block(&mut block);
        }
        xor_block_in_place(&mut block, &tw);
        out[start..start + 16].copy_from_slice(&block);
        xts_mul_x(&mut tw);
    }

    // tw is now the tweak for the last full block, tw_next for the stolen one.
    let mut tw_next = tw;
    xts_mul_x(&mut tw_next);
    let last_full_start = (full_blocks - 1) * 16;
    let partial_start = full_blocks * 16;

    if encrypt {
        // CC = E(P_{m-1} ^ T_{m-1}) ^ T_{m-1}; its first `rem` bytes become
        // the final (short) ciphertext block.
        let mut block = [0_u8; 16];
        block.copy_from_slice(&input[last_full_start..last_full_start + 16]);
        xor_block_in_place(&mut block, &tw);
        cipher_a.encrypt_block(&mut block);
        xor_block_in_place(&mut block, &tw);

        out[partial_start..].copy_from_slice(&block[..rem]);

        // PP = P_m || tail of CC, encrypted under T_m, becomes C_{m-1}.
        let mut p_star = [0_u8; 16];
        p_star[..rem].copy_from_slice(&input[partial_start..]);
        p_star[rem..].copy_from_slice(&block[rem..]);
        xor_block_in_place(&mut p_star, &tw_next);
        cipher_a.encrypt_block(&mut p_star);
        xor_block_in_place(&mut p_star, &tw_next);
        out[last_full_start..last_full_start + 16].copy_from_slice(&p_star);
    } else {
        // PP = D(C_{m-1} ^ T_m) ^ T_m; its first `rem` bytes are the final
        // (short) plaintext block.
        let mut c_m_minus_1 = [0_u8; 16];
        c_m_minus_1.copy_from_slice(&input[last_full_start..last_full_start + 16]);
        xor_block_in_place(&mut c_m_minus_1, &tw_next);
        cipher_a.decrypt_block(&mut c_m_minus_1);
        xor_block_in_place(&mut c_m_minus_1, &tw_next);

        out[partial_start..].copy_from_slice(&c_m_minus_1[..rem]);

        // CC = C_m || tail of PP, decrypted under T_{m-1}, gives P_{m-1}.
        let mut c_star = [0_u8; 16];
        c_star[..rem].copy_from_slice(&input[partial_start..]);
        c_star[rem..].copy_from_slice(&c_m_minus_1[rem..]);
        xor_block_in_place(&mut c_star, &tw);
        cipher_a.decrypt_block(&mut c_star);
        xor_block_in_place(&mut c_star, &tw);
        out[last_full_start..last_full_start + 16].copy_from_slice(&c_star);
    }

    Ok(out)
}
752
/// Expands `key` into `rounds + 1` round keys (FIPS-197 KeyExpansion).
///
/// `nk` is the key size in 32-bit words (4/6/8); the caller guarantees
/// `key.len() == 4 * nk`.
fn key_expansion(key: &[u8], nk: usize, rounds: usize) -> Vec<[u8; 16]> {
    let total_words = 4 * (rounds + 1);
    let mut w = vec![0_u32; total_words];
    // The first nk words are the key itself, read big-endian.
    for (i, word) in w.iter_mut().enumerate().take(nk) {
        let idx = i * 4;
        *word = u32::from_be_bytes([key[idx], key[idx + 1], key[idx + 2], key[idx + 3]]);
    }
    for i in nk..total_words {
        let mut temp = w[i - 1];
        if i % nk == 0 {
            // Start of a key-length group: rotate, substitute, add the
            // round constant in the word's top byte.
            temp = sub_word(rot_word(temp)) ^ (u32::from(RCON[i / nk - 1]) << 24);
        } else if nk > 6 && i % nk == 4 {
            // Extra SubWord step that only AES-256 (nk == 8) performs.
            temp = sub_word(temp);
        }
        w[i] = w[i - nk] ^ temp;
    }
    // Regroup the flat word schedule into 16-byte round keys.
    let mut keys = Vec::with_capacity(rounds + 1);
    for r in 0..=rounds {
        let mut key_block = [0_u8; 16];
        for c in 0..4 {
            key_block[c * 4..(c + 1) * 4].copy_from_slice(&w[r * 4 + c].to_be_bytes());
        }
        keys.push(key_block);
    }
    keys
}
794
/// Rotates the word left by one byte (AES `RotWord`).
fn rot_word(word: u32) -> u32 {
    (word << 8) | (word >> 24)
}
811
812fn sub_word(word: u32) -> u32 {
826 let bytes = word.to_be_bytes();
827 u32::from_be_bytes([
828 SBOX[usize::from(bytes[0])],
829 SBOX[usize::from(bytes[1])],
830 SBOX[usize::from(bytes[2])],
831 SBOX[usize::from(bytes[3])],
832 ])
833}
834
/// XORs the round key into the state byte-wise (AES `AddRoundKey`).
fn add_round_key(state: &mut [u8; 16], round_key: &[u8; 16]) {
    for (s, k) in state.iter_mut().zip(round_key.iter()) {
        *s ^= k;
    }
}
854
855fn sub_bytes(state: &mut [u8; 16]) {
869 for byte in state {
870 *byte = SBOX[usize::from(*byte)];
871 }
872}
873
874fn inv_sub_bytes(state: &mut [u8; 16]) {
888 for byte in state {
889 *byte = INV_SBOX[usize::from(*byte)];
890 }
891}
892
/// Rotates row `r` of the column-major state left by `r` positions
/// (AES `ShiftRows`). Index `4*c + r` holds row `r` of column `c`.
fn shift_rows(state: &mut [u8; 16]) {
    // SRC[i] is the old index whose byte moves into position i.
    const SRC: [usize; 16] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
    let old = *state;
    for (dst, &src) in state.iter_mut().zip(SRC.iter()) {
        *dst = old[src];
    }
}
922
/// Rotates row `r` of the column-major state right by `r` positions,
/// undoing [`shift_rows`] (AES `InvShiftRows`).
fn inv_shift_rows(state: &mut [u8; 16]) {
    // SRC[i] is the old index whose byte moves into position i.
    const SRC: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
    let old = *state;
    for (dst, &src) in state.iter_mut().zip(SRC.iter()) {
        *dst = old[src];
    }
}
952
953fn mix_columns(state: &mut [u8; 16]) {
967 for c in 0..4 {
968 let i = c * 4;
969 let a0 = state[i];
970 let a1 = state[i + 1];
971 let a2 = state[i + 2];
972 let a3 = state[i + 3];
973 state[i] = gf_mul(a0, 2) ^ gf_mul(a1, 3) ^ a2 ^ a3;
974 state[i + 1] = a0 ^ gf_mul(a1, 2) ^ gf_mul(a2, 3) ^ a3;
975 state[i + 2] = a0 ^ a1 ^ gf_mul(a2, 2) ^ gf_mul(a3, 3);
976 state[i + 3] = gf_mul(a0, 3) ^ a1 ^ a2 ^ gf_mul(a3, 2);
977 }
978}
979
980fn inv_mix_columns(state: &mut [u8; 16]) {
994 for c in 0..4 {
995 let i = c * 4;
996 let a0 = state[i];
997 let a1 = state[i + 1];
998 let a2 = state[i + 2];
999 let a3 = state[i + 3];
1000 state[i] = gf_mul(a0, 14) ^ gf_mul(a1, 11) ^ gf_mul(a2, 13) ^ gf_mul(a3, 9);
1001 state[i + 1] = gf_mul(a0, 9) ^ gf_mul(a1, 14) ^ gf_mul(a2, 11) ^ gf_mul(a3, 13);
1002 state[i + 2] = gf_mul(a0, 13) ^ gf_mul(a1, 9) ^ gf_mul(a2, 14) ^ gf_mul(a3, 11);
1003 state[i + 3] = gf_mul(a0, 11) ^ gf_mul(a1, 13) ^ gf_mul(a2, 9) ^ gf_mul(a3, 14);
1004 }
1005}
1006
/// Multiplies two elements of GF(2^8) with the AES reduction polynomial
/// x^8 + x^4 + x^3 + x + 1 (0x11b), using shift-and-add.
fn gf_mul(mut a: u8, mut b: u8) -> u8 {
    let mut product = 0_u8;
    while b != 0 {
        if b & 1 == 1 {
            product ^= a;
        }
        let carry = a & 0x80 != 0;
        a <<= 1;
        if carry {
            a ^= 0x1b;
        }
        b >>= 1;
    }
    product
}
1036
/// Increments a 16-byte big-endian counter by one, wrapping around at
/// 2^128 (used by the full-width CTR mode).
fn increment_be(counter: &mut [u8; 16]) {
    let next = u128::from_be_bytes(*counter).wrapping_add(1);
    *counter = next.to_be_bytes();
}
1058
1059fn ghash(h: u128, aad: &[u8], ciphertext: &[u8]) -> u128 {
1075 let mut y = 0_u128;
1076 let mut a = aad.to_vec();
1077 let mut c = ciphertext.to_vec();
1078 pad16(&mut a);
1079 pad16(&mut c);
1080 for chunk in a.chunks_exact(16) {
1081 let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1082 y = gf128_mul(y ^ x, h);
1083 }
1084 for chunk in c.chunks_exact(16) {
1085 let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1086 y = gf128_mul(y ^ x, h);
1087 }
1088 let lengths = ((aad.len() as u128) << 64) | ((ciphertext.len() as u128) * 8);
1089 gf128_mul(y ^ lengths, h)
1090}
1091
/// Multiplies in GF(2^128) using GCM's bit-reflected representation:
/// bit 127 of the u128 is the lowest-degree coefficient, and the field is
/// reduced by x^128 + x^7 + x^2 + x + 1 (the 0xE1 constant).
fn gf128_mul(x: u128, y: u128) -> u128 {
    const R: u128 = 0xe1_u128 << 120;
    let mut z = 0_u128;
    let mut v = y;
    for bit in 0..128 {
        if x & (1_u128 << (127 - bit)) != 0 {
            z ^= v;
        }
        let carry = v & 1 != 0;
        v >>= 1;
        if carry {
            v ^= R;
        }
    }
    z
}
1121
1122fn gcm_j0(h: u128, nonce: &[u8]) -> u128 {
1137 if nonce.len() == 12 {
1138 let mut j = [0_u8; 16];
1139 j[..12].copy_from_slice(nonce);
1140 j[15] = 1;
1141 return u128::from_be_bytes(j);
1142 }
1143 let mut n = nonce.to_vec();
1144 pad16(&mut n);
1145 let mut y = 0_u128;
1146 for chunk in n.chunks_exact(16) {
1147 let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1148 y = gf128_mul(y ^ x, h);
1149 }
1150 let len_block = (nonce.len() as u128) * 8;
1151 gf128_mul(y ^ len_block, h)
1152}
1153
1154fn gcm_ctr_xor(cipher: &AesCipher, initial_ctr: u128, input: &[u8]) -> Vec<u8> {
1170 let mut ctr = initial_ctr;
1171 let mut out = vec![0_u8; input.len()];
1172 let mut offset = 0;
1173 while offset < input.len() {
1174 let mut stream = ctr.to_be_bytes();
1175 cipher.encrypt_block(&mut stream);
1176 let chunk_len = (input.len() - offset).min(16);
1177 for i in 0..chunk_len {
1178 out[offset + i] = input[offset + i] ^ stream[i];
1179 }
1180 inc32_u128(&mut ctr);
1181 offset += chunk_len;
1182 }
1183 out
1184}
1185
/// Increments the low 32 bits of the counter modulo 2^32, leaving the
/// upper 96 bits untouched (GCM's inc32 on a u128 counter block).
fn inc32_u128(counter: &mut u128) {
    let low = (*counter as u32).wrapping_add(1);
    *counter = (*counter & !0xFFFF_FFFF_u128) | u128::from(low);
}
1204
/// Treats the last four bytes of the block as a big-endian 32-bit counter
/// and increments it, wrapping modulo 2^32; bytes 0..12 are never touched.
fn inc32(counter: &mut [u8; 16]) {
    let mut tail = [0_u8; 4];
    tail.copy_from_slice(&counter[12..]);
    let bumped = u32::from_be_bytes(tail).wrapping_add(1);
    counter[12..].copy_from_slice(&bumped.to_be_bytes());
}
1226
/// Zero-pads `data` up to the next multiple of 16 bytes; already-aligned
/// input (including empty) is left unchanged.
fn pad16(data: &mut Vec<u8>) {
    let padded_len = data.len().div_ceil(16) * 16;
    data.resize(padded_len, 0);
}
1246
/// Writes the low `q` bytes of `len` into `out[..q]` in big-endian order
/// (CCM's message-length field). `out` must hold at least `q` bytes and
/// `q` must be at most 8.
fn encode_len_q(len: u64, q: usize, out: &mut [u8]) {
    let be = len.to_be_bytes();
    out[..q].copy_from_slice(&be[8 - q..]);
}
1267
/// Increments the trailing `q`-byte big-endian counter field of a CCM
/// counter block; the carry never propagates past those `q` bytes.
fn increment_q_counter(counter: &mut [u8; 16], q: usize) {
    for byte in counter[16 - q..].iter_mut().rev() {
        *byte = byte.wrapping_add(1);
        if *byte != 0 {
            return;
        }
    }
}
1290
/// XORs `src` into `dst` byte-wise.
fn xor_block_in_place(dst: &mut [u8; 16], src: &[u8; 16]) {
    for (d, s) in dst.iter_mut().zip(src.iter()) {
        *d ^= s;
    }
}
1310
/// Shifts `segment` into the low end of the 16-byte CFB feedback register,
/// discarding the same number of bytes from the high end. `segment` must
/// be at most 16 bytes long.
fn shift_register_append(reg: &mut [u8; 16], segment: &[u8]) {
    debug_assert!(segment.len() <= 16);
    let n = segment.len();
    if n < 16 {
        // Slide the surviving 16 - n bytes to the front...
        reg.copy_within(n.., 0);
    }
    // ...and place the incoming bytes at the back.
    reg[16 - n..].copy_from_slice(segment);
}
1335
/// Multiplies the XTS tweak by x (alpha) in GF(2^128), little-endian byte
/// order: bit 0 of byte 0 is the lowest-degree coefficient, and a carry
/// out of byte 15 folds back via the 0x87 reduction constant.
fn xts_mul_x(tweak: &mut [u8; 16]) {
    let msb_out = tweak[15] >> 7;
    // Shift left by one bit, processing high bytes first so each step
    // still sees the unshifted value of the byte below it.
    for i in (1..16).rev() {
        tweak[i] = (tweak[i] << 1) | (tweak[i - 1] >> 7);
    }
    tweak[0] <<= 1;
    if msb_out != 0 {
        tweak[0] ^= 0x87;
    }
}
1360
/// Round constants for the key schedule: RCON[i] = x^i in GF(2^8).
const RCON: [u8; 10] = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36];
1362
/// The AES forward S-box, indexed by the input byte (FIPS-197).
const SBOX: [u8; 256] = [
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
    0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
    0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
    0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
    0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
    0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
    0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
    0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
    0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
    0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
];
1381
/// The AES inverse S-box: INV_SBOX[SBOX[b]] == b for every byte b.
const INV_SBOX: [u8; 256] = [
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
    0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
    0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
    0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
    0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
    0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
    0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
    0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
    0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
    0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
    0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
];