// noxtls_crypto/sym/encryption/aes.rs

1// Copyright (c) 2019-2026, Argenox Technologies LLC
2// All rights reserved.
3//
4// SPDX-License-Identifier: GPL-2.0-only OR LicenseRef-Argenox-Commercial-License
5//
6// This file is part of the NoxTLS Library.
7//
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU General Public License as published by the
10// Free Software Foundation; version 2 of the License.
11//
12// Alternatively, this file may be used under the terms of a commercial
13// license from Argenox Technologies LLC.
14//
15// See `noxtls/LICENSE` and `noxtls/LICENSE.md` in this repository for full details.
16// CONTACT: info@argenox.com
17
18use crate::internal_alloc::Vec;
19use noxtls_core::{Error, Result};
20
/// Stores expanded AES round keys for block encryption/decryption.
///
/// Construct via [`AesCipher::new`], which runs the key schedule once;
/// `encrypt_block`/`decrypt_block` then reuse the expanded keys.
#[derive(Debug, Clone)]
pub struct AesCipher {
    // Expanded key schedule: `rounds + 1` 16-byte round keys
    // (initial whitening key plus one key per round).
    round_keys: Vec<[u8; 16]>,
    // Number of AES rounds: 10, 12, or 14 for 128/192/256-bit keys.
    rounds: usize,
}
27
28impl AesCipher {
29    /// Builds an AES cipher for 128/192/256-bit keys.
30    ///
31    /// # Arguments
32    /// * `key`: AES key bytes (16, 24, or 32 bytes).
33    ///
34    /// # Returns
35    /// Initialized `AesCipher` with expanded round keys.
36    pub fn new(key: &[u8]) -> Result<Self> {
37        let (nk, rounds) = match key.len() {
38            16 => (4, 10),
39            24 => (6, 12),
40            32 => (8, 14),
41            _ => {
42                return Err(Error::InvalidLength(
43                    "aes key length must be 16, 24, or 32 bytes",
44                ))
45            }
46        };
47        let expanded = key_expansion(key, nk, rounds);
48        Ok(Self {
49            round_keys: expanded,
50            rounds,
51        })
52    }
53
54    /// Encrypts one 16-byte block in place using AES.
55    ///
56    /// # Arguments
57    /// * `block`: Mutable 16-byte block to encrypt in place.
58    pub fn encrypt_block(&self, block: &mut [u8; 16]) {
59        add_round_key(block, &self.round_keys[0]);
60        for round in 1..self.rounds {
61            sub_bytes(block);
62            shift_rows(block);
63            mix_columns(block);
64            add_round_key(block, &self.round_keys[round]);
65        }
66        sub_bytes(block);
67        shift_rows(block);
68        add_round_key(block, &self.round_keys[self.rounds]);
69    }
70
71    /// Decrypts one 16-byte block in place using AES inverse rounds.
72    ///
73    /// # Arguments
74    /// * `block`: Mutable 16-byte block to decrypt in place.
75    pub fn decrypt_block(&self, block: &mut [u8; 16]) {
76        add_round_key(block, &self.round_keys[self.rounds]);
77        for round in (1..self.rounds).rev() {
78            inv_shift_rows(block);
79            inv_sub_bytes(block);
80            add_round_key(block, &self.round_keys[round]);
81            inv_mix_columns(block);
82        }
83        inv_shift_rows(block);
84        inv_sub_bytes(block);
85        add_round_key(block, &self.round_keys[0]);
86    }
87}
88
/// Encrypts AES-ECB over full blocks; input length must be multiple of 16.
///
/// # Arguments
/// * `cipher`: Configured AES cipher instance.
/// * `input`: Block-aligned plaintext bytes.
///
/// # Returns
/// ECB ciphertext bytes with same length as `input`.
///
/// # Errors
/// Returns `Error::InvalidLength` when `input` is not block-aligned.
#[cfg(feature = "hazardous-legacy-crypto")]
pub fn aes_ecb_encrypt(cipher: &AesCipher, input: &[u8]) -> Result<Vec<u8>> {
    if input.len() % 16 != 0 {
        return Err(Error::InvalidLength("aes ecb input must be block-aligned"));
    }
    let mut out = Vec::with_capacity(input.len());
    // Each block is encrypted independently (no chaining) — ECB.
    for block_src in input.chunks_exact(16) {
        let mut block = [0_u8; 16];
        block.copy_from_slice(block_src);
        cipher.encrypt_block(&mut block);
        out.extend_from_slice(&block);
    }
    Ok(out)
}
111
/// Decrypts AES-ECB over full blocks; input length must be multiple of 16.
///
/// # Arguments
/// * `cipher`: Configured AES cipher instance.
/// * `input`: Block-aligned ciphertext bytes.
///
/// # Returns
/// ECB plaintext bytes with same length as `input`.
///
/// # Errors
/// Returns `Error::InvalidLength` when `input` is not block-aligned.
#[cfg(feature = "hazardous-legacy-crypto")]
pub fn aes_ecb_decrypt(cipher: &AesCipher, input: &[u8]) -> Result<Vec<u8>> {
    if input.len() % 16 != 0 {
        return Err(Error::InvalidLength("aes ecb input must be block-aligned"));
    }
    let mut out = Vec::with_capacity(input.len());
    // Each block is decrypted independently (no chaining) — ECB.
    for block_src in input.chunks_exact(16) {
        let mut block = [0_u8; 16];
        block.copy_from_slice(block_src);
        cipher.decrypt_block(&mut block);
        out.extend_from_slice(&block);
    }
    Ok(out)
}
134
135/// Encrypts AES-CBC with a 16-byte IV and block-aligned plaintext.
136///
137/// # Arguments
138/// * `cipher`: Configured AES cipher instance.
139/// * `iv`: 16-byte initialization vector.
140/// * `plaintext`: Block-aligned plaintext bytes.
141///
142/// # Returns
143/// CBC ciphertext bytes with same length as `plaintext`.
144pub fn aes_cbc_encrypt(cipher: &AesCipher, iv: &[u8; 16], plaintext: &[u8]) -> Result<Vec<u8>> {
145    if !plaintext.len().is_multiple_of(16) {
146        return Err(Error::InvalidLength("aes cbc input must be block-aligned"));
147    }
148    let mut out = plaintext.to_vec();
149    let mut prev = *iv;
150    for chunk in out.chunks_exact_mut(16) {
151        for (i, byte) in chunk.iter_mut().enumerate() {
152            *byte ^= prev[i];
153        }
154        let mut block = [0_u8; 16];
155        block.copy_from_slice(chunk);
156        cipher.encrypt_block(&mut block);
157        chunk.copy_from_slice(&block);
158        prev = block;
159    }
160    Ok(out)
161}
162
163/// Decrypts AES-CBC with a 16-byte IV and block-aligned ciphertext.
164///
165/// # Arguments
166/// * `cipher`: Configured AES cipher instance.
167/// * `iv`: 16-byte initialization vector.
168/// * `ciphertext`: Block-aligned ciphertext bytes.
169///
170/// # Returns
171/// CBC plaintext bytes with same length as `ciphertext`.
172pub fn aes_cbc_decrypt(cipher: &AesCipher, iv: &[u8; 16], ciphertext: &[u8]) -> Result<Vec<u8>> {
173    if !ciphertext.len().is_multiple_of(16) {
174        return Err(Error::InvalidLength("aes cbc input must be block-aligned"));
175    }
176    let mut out = ciphertext.to_vec();
177    let mut prev = *iv;
178    for chunk in out.chunks_exact_mut(16) {
179        let mut cur = [0_u8; 16];
180        cur.copy_from_slice(chunk);
181        let mut block = cur;
182        cipher.decrypt_block(&mut block);
183        for i in 0..16 {
184            block[i] ^= prev[i];
185        }
186        chunk.copy_from_slice(&block);
187        prev = cur;
188    }
189    Ok(out)
190}
191
192/// Applies AES-CTR transformation using a 16-byte initial counter block.
193///
194/// # Arguments
195/// * `cipher`: Configured AES cipher instance.
196/// * `nonce_counter`: Initial 16-byte counter block.
197/// * `input`: Input bytes to transform.
198///
199/// # Returns
200/// Transformed bytes (encryption/decryption are identical in CTR).
201pub fn aes_ctr_apply(cipher: &AesCipher, nonce_counter: &[u8; 16], input: &[u8]) -> Vec<u8> {
202    let mut out = vec![0_u8; input.len()];
203    let mut counter = *nonce_counter;
204    let mut offset = 0;
205    while offset < input.len() {
206        let mut stream = counter;
207        cipher.encrypt_block(&mut stream);
208        let chunk_len = (input.len() - offset).min(16);
209        for i in 0..chunk_len {
210            out[offset + i] = input[offset + i] ^ stream[i];
211        }
212        increment_be(&mut counter);
213        offset += chunk_len;
214    }
215    out
216}
217
218/// Applies AES-CFB-128 transformation with a 16-byte IV.
219///
220/// # Arguments
221/// * `cipher`: Configured AES cipher instance.
222/// * `iv`: 16-byte initialization vector/register.
223/// * `input`: Input bytes to transform.
224///
225/// # Returns
226/// Transformed bytes for CFB mode.
227pub fn aes_cfb_apply(cipher: &AesCipher, iv: &[u8; 16], input: &[u8]) -> Vec<u8> {
228    aes_cfb_encrypt(cipher, iv, input)
229}
230
231/// Encrypts bytes with AES-CFB-128 using a 16-byte IV/register.
232///
233/// # Arguments
234/// * `cipher`: Configured AES cipher instance.
235/// * `iv`: 16-byte initialization vector/register.
236/// * `plaintext`: Plaintext bytes to encrypt.
237///
238/// # Returns
239/// Ciphertext bytes with same length as `plaintext`.
240pub fn aes_cfb_encrypt(cipher: &AesCipher, iv: &[u8; 16], plaintext: &[u8]) -> Vec<u8> {
241    aes_cfb_process(cipher, iv, plaintext, true)
242}
243
244/// Decrypts bytes with AES-CFB-128 using a 16-byte IV/register.
245///
246/// # Arguments
247/// * `cipher`: Configured AES cipher instance.
248/// * `iv`: 16-byte initialization vector/register.
249/// * `ciphertext`: Ciphertext bytes to decrypt.
250///
251/// # Returns
252/// Plaintext bytes with same length as `ciphertext`.
253pub fn aes_cfb_decrypt(cipher: &AesCipher, iv: &[u8; 16], ciphertext: &[u8]) -> Vec<u8> {
254    aes_cfb_process(cipher, iv, ciphertext, false)
255}
256
257/// Processes AES-CFB-128 encryption/decryption while tracking register updates.
258///
259/// # Arguments
260///
261/// * `cipher` — `&AesCipher`.
262/// * `iv` — `&[u8; 16]`.
263/// * `input` — `&[u8]`.
264/// * `encrypt` — `bool`.
265///
266/// # Returns
267///
268/// `Vec<u8>` produced by `aes_cfb_process` (see implementation).
269///
270/// # Panics
271///
272/// This function does not panic unless otherwise noted.
273fn aes_cfb_process(cipher: &AesCipher, iv: &[u8; 16], input: &[u8], encrypt: bool) -> Vec<u8> {
274    let mut out = vec![0_u8; input.len()];
275    let mut reg = *iv;
276    let mut offset = 0;
277    while offset < input.len() {
278        let mut stream = reg;
279        cipher.encrypt_block(&mut stream);
280        let chunk_len = (input.len() - offset).min(16);
281        for i in 0..chunk_len {
282            out[offset + i] = input[offset + i] ^ stream[i];
283        }
284        if encrypt {
285            shift_register_append(&mut reg, &out[offset..offset + chunk_len]);
286        } else {
287            shift_register_append(&mut reg, &input[offset..offset + chunk_len]);
288        }
289        offset += chunk_len;
290    }
291    out
292}
293
294/// Applies AES-OFB transformation with a 16-byte IV.
295///
296/// # Arguments
297/// * `cipher`: Configured AES cipher instance.
298/// * `iv`: 16-byte initialization vector.
299/// * `input`: Input bytes to transform.
300///
301/// # Returns
302/// Transformed bytes for OFB mode.
303pub fn aes_ofb_apply(cipher: &AesCipher, iv: &[u8; 16], input: &[u8]) -> Vec<u8> {
304    let mut out = vec![0_u8; input.len()];
305    let mut stream = *iv;
306    let mut offset = 0;
307    while offset < input.len() {
308        cipher.encrypt_block(&mut stream);
309        let chunk_len = (input.len() - offset).min(16);
310        for i in 0..chunk_len {
311            out[offset + i] = input[offset + i] ^ stream[i];
312        }
313        offset += chunk_len;
314    }
315    out
316}
317
318/// Placeholder API for AES-GCM during ongoing porting work.
319///
320/// # Arguments
321/// * `cipher`: Configured AES cipher instance.
322/// * `nonce`: GCM nonce bytes.
323/// * `aad`: Additional authenticated data bytes.
324/// * `plaintext`: Plaintext bytes to encrypt.
325///
326/// # Returns
327/// `(ciphertext, tag)` pair with 16-byte authentication tag.
328pub fn aes_gcm_encrypt(
329    cipher: &AesCipher,
330    nonce: &[u8],
331    aad: &[u8],
332    plaintext: &[u8],
333) -> Result<(Vec<u8>, [u8; 16])> {
334    let h = {
335        let mut zero = [0_u8; 16];
336        cipher.encrypt_block(&mut zero);
337        u128::from_be_bytes(zero)
338    };
339    let j0 = gcm_j0(h, nonce);
340    let mut ctr = j0;
341    inc32_u128(&mut ctr);
342    let ciphertext = gcm_ctr_xor(cipher, ctr, plaintext);
343    let s = ghash(h, aad, &ciphertext);
344    let mut e_j0 = j0.to_be_bytes();
345    cipher.encrypt_block(&mut e_j0);
346    let tag = (u128::from_be_bytes(e_j0) ^ s).to_be_bytes();
347    Ok((ciphertext, tag))
348}
349
350/// Decrypts and authenticates AES-GCM ciphertext/tag with associated data.
351///
352/// # Arguments
353/// * `cipher`: Configured AES cipher instance.
354/// * `nonce`: GCM nonce bytes.
355/// * `aad`: Additional authenticated data bytes.
356/// * `ciphertext`: Ciphertext bytes to decrypt.
357/// * `tag`: 16-byte authentication tag to verify.
358///
359/// # Returns
360/// Decrypted plaintext bytes when tag verification succeeds.
361pub fn aes_gcm_decrypt(
362    cipher: &AesCipher,
363    nonce: &[u8],
364    aad: &[u8],
365    ciphertext: &[u8],
366    tag: &[u8; 16],
367) -> Result<Vec<u8>> {
368    let h = {
369        let mut zero = [0_u8; 16];
370        cipher.encrypt_block(&mut zero);
371        u128::from_be_bytes(zero)
372    };
373    let j0 = gcm_j0(h, nonce);
374    let mut ctr = j0;
375    inc32_u128(&mut ctr);
376    let s = ghash(h, aad, ciphertext);
377    let mut e_j0 = j0.to_be_bytes();
378    cipher.encrypt_block(&mut e_j0);
379    let expected_tag = (u128::from_be_bytes(e_j0) ^ s).to_be_bytes();
380    if !constant_time_tag_eq(&expected_tag, tag) {
381        return Err(Error::CryptoFailure("aes-gcm authentication failed"));
382    }
383    Ok(gcm_ctr_xor(cipher, ctr, ciphertext))
384}
385
386/// Placeholder API for AES-CCM during ongoing porting work.
387///
388/// # Arguments
389/// * `cipher`: Configured AES cipher instance.
390/// * `nonce`: CCM nonce bytes (7..13 bytes).
391/// * `aad`: Additional authenticated data bytes.
392/// * `plaintext`: Plaintext bytes to encrypt/authenticate.
393///
394/// # Returns
395/// `(ciphertext, tag)` pair with 16-byte authentication tag.
396pub fn aes_ccm_encrypt(
397    cipher: &AesCipher,
398    nonce: &[u8],
399    aad: &[u8],
400    plaintext: &[u8],
401) -> Result<(Vec<u8>, [u8; 16])> {
402    if !(7..=13).contains(&nonce.len()) {
403        return Err(Error::InvalidLength("aes-ccm nonce must be 7..13 bytes"));
404    }
405    let q = 15 - nonce.len();
406    if plaintext.len() >= (1_usize << (8 * q.min(8))) {
407        return Err(Error::InvalidLength(
408            "aes-ccm plaintext too large for nonce",
409        ));
410    }
411    let t_len = 16_usize;
412    let mut b0 = [0_u8; 16];
413    let aadata_flag = if aad.is_empty() { 0_u8 } else { 0x40 };
414    let m_prime = (((t_len - 2) / 2) as u8) << 3;
415    let l_prime = (q as u8) - 1;
416    b0[0] = aadata_flag | m_prime | l_prime;
417    b0[1..1 + nonce.len()].copy_from_slice(nonce);
418    encode_len_q(plaintext.len() as u64, q, &mut b0[16 - q..]);
419
420    let mut mac_state = [0_u8; 16];
421    xor_block_in_place(&mut mac_state, &b0);
422    cipher.encrypt_block(&mut mac_state);
423
424    if !aad.is_empty() {
425        let mut aad_blocked = Vec::new();
426        if aad.len() < 0xFF00 {
427            aad_blocked.extend_from_slice(&(aad.len() as u16).to_be_bytes());
428        } else {
429            aad_blocked.extend_from_slice(&[0xFF, 0xFE]);
430            aad_blocked.extend_from_slice(&(aad.len() as u32).to_be_bytes());
431        }
432        aad_blocked.extend_from_slice(aad);
433        pad16(&mut aad_blocked);
434        for chunk in aad_blocked.chunks_exact(16) {
435            let mut blk = [0_u8; 16];
436            blk.copy_from_slice(chunk);
437            xor_block_in_place(&mut mac_state, &blk);
438            cipher.encrypt_block(&mut mac_state);
439        }
440    }
441
442    let mut payload = plaintext.to_vec();
443    pad16(&mut payload);
444    for chunk in payload.chunks_exact(16) {
445        let mut blk = [0_u8; 16];
446        blk.copy_from_slice(chunk);
447        xor_block_in_place(&mut mac_state, &blk);
448        cipher.encrypt_block(&mut mac_state);
449    }
450    let mut tag = mac_state;
451
452    let mut ctr0 = [0_u8; 16];
453    ctr0[0] = l_prime;
454    ctr0[1..1 + nonce.len()].copy_from_slice(nonce);
455    let mut s0 = ctr0;
456    cipher.encrypt_block(&mut s0);
457    for (t, s) in tag.iter_mut().zip(s0) {
458        *t ^= s;
459    }
460
461    let mut ciphertext = vec![0_u8; plaintext.len()];
462    let mut counter = ctr0;
463    for block_idx in 0..plaintext.len().div_ceil(16) {
464        increment_q_counter(&mut counter, q);
465        let mut stream = counter;
466        cipher.encrypt_block(&mut stream);
467        let start = block_idx * 16;
468        let end = (start + 16).min(plaintext.len());
469        for i in start..end {
470            ciphertext[i] = plaintext[i] ^ stream[i - start];
471        }
472    }
473
474    Ok((ciphertext, tag))
475}
476
477/// Decrypts and authenticates AES-CCM ciphertext/tag with associated data.
478///
479/// # Arguments
480/// * `cipher`: Configured AES cipher instance.
481/// * `nonce`: CCM nonce bytes (7..13 bytes).
482/// * `aad`: Additional authenticated data bytes.
483/// * `ciphertext`: Ciphertext bytes to decrypt/authenticate.
484/// * `tag`: 16-byte authentication tag to verify.
485///
486/// # Returns
487/// Decrypted plaintext bytes when tag verification succeeds.
488pub fn aes_ccm_decrypt(
489    cipher: &AesCipher,
490    nonce: &[u8],
491    aad: &[u8],
492    ciphertext: &[u8],
493    tag: &[u8; 16],
494) -> Result<Vec<u8>> {
495    if !(7..=13).contains(&nonce.len()) {
496        return Err(Error::InvalidLength("aes-ccm nonce must be 7..13 bytes"));
497    }
498    let q = 15 - nonce.len();
499    if ciphertext.len() >= (1_usize << (8 * q.min(8))) {
500        return Err(Error::InvalidLength(
501            "aes-ccm ciphertext too large for nonce",
502        ));
503    }
504    let t_len = 16_usize;
505    let l_prime = (q as u8) - 1;
506
507    let mut ctr0 = [0_u8; 16];
508    ctr0[0] = l_prime;
509    ctr0[1..1 + nonce.len()].copy_from_slice(nonce);
510
511    let mut plaintext = vec![0_u8; ciphertext.len()];
512    let mut counter = ctr0;
513    for block_idx in 0..ciphertext.len().div_ceil(16) {
514        increment_q_counter(&mut counter, q);
515        let mut stream = counter;
516        cipher.encrypt_block(&mut stream);
517        let start = block_idx * 16;
518        let end = (start + 16).min(ciphertext.len());
519        for i in start..end {
520            plaintext[i] = ciphertext[i] ^ stream[i - start];
521        }
522    }
523
524    let mut b0 = [0_u8; 16];
525    let aadata_flag = if aad.is_empty() { 0_u8 } else { 0x40 };
526    let m_prime = (((t_len - 2) / 2) as u8) << 3;
527    b0[0] = aadata_flag | m_prime | l_prime;
528    b0[1..1 + nonce.len()].copy_from_slice(nonce);
529    encode_len_q(plaintext.len() as u64, q, &mut b0[16 - q..]);
530
531    let mut mac_state = [0_u8; 16];
532    xor_block_in_place(&mut mac_state, &b0);
533    cipher.encrypt_block(&mut mac_state);
534
535    if !aad.is_empty() {
536        let mut aad_blocked = Vec::new();
537        if aad.len() < 0xFF00 {
538            aad_blocked.extend_from_slice(&(aad.len() as u16).to_be_bytes());
539        } else {
540            aad_blocked.extend_from_slice(&[0xFF, 0xFE]);
541            aad_blocked.extend_from_slice(&(aad.len() as u32).to_be_bytes());
542        }
543        aad_blocked.extend_from_slice(aad);
544        pad16(&mut aad_blocked);
545        for chunk in aad_blocked.chunks_exact(16) {
546            let mut blk = [0_u8; 16];
547            blk.copy_from_slice(chunk);
548            xor_block_in_place(&mut mac_state, &blk);
549            cipher.encrypt_block(&mut mac_state);
550        }
551    }
552
553    let mut payload = plaintext.clone();
554    pad16(&mut payload);
555    for chunk in payload.chunks_exact(16) {
556        let mut blk = [0_u8; 16];
557        blk.copy_from_slice(chunk);
558        xor_block_in_place(&mut mac_state, &blk);
559        cipher.encrypt_block(&mut mac_state);
560    }
561    let mut expected_tag = mac_state;
562    let mut s0 = ctr0;
563    cipher.encrypt_block(&mut s0);
564    for (t, s) in expected_tag.iter_mut().zip(s0) {
565        *t ^= s;
566    }
567    if !constant_time_tag_eq(&expected_tag, tag) {
568        return Err(Error::CryptoFailure("aes-ccm authentication failed"));
569    }
570    Ok(plaintext)
571}
572
/// Compares fixed-size authentication tags in constant time.
///
/// Accumulates per-byte differences with OR so the comparison never
/// short-circuits on the first mismatch.
///
/// # Arguments
/// * `expected`: Locally computed 16-byte tag.
/// * `received`: Tag supplied by the peer.
///
/// # Returns
/// `true` when the tags are identical.
fn constant_time_tag_eq(expected: &[u8; 16], received: &[u8; 16]) -> bool {
    expected
        .iter()
        .zip(received.iter())
        .fold(0_u8, |acc, (a, b)| acc | (a ^ b))
        == 0
}
594
595/// Placeholder API for AES-XTS during ongoing porting work.
596///
597/// # Arguments
598/// * `cipher_a`: Data-key AES instance.
599/// * `cipher_b`: Tweak-key AES instance.
600/// * `tweak`: Initial 16-byte tweak value.
601/// * `plaintext`: Block-aligned plaintext bytes.
602///
603/// # Returns
604/// XTS ciphertext bytes with same length as `plaintext`.
605pub fn aes_xts_encrypt(
606    cipher_a: &AesCipher,
607    cipher_b: &AesCipher,
608    tweak: &[u8; 16],
609    plaintext: &[u8],
610) -> Result<Vec<u8>> {
611    aes_xts_crypt(cipher_a, cipher_b, tweak, plaintext, true)
612}
613
614/// Decrypts AES-XTS over a data unit, including ciphertext-stealing for partial trailing block.
615///
616/// # Arguments
617/// * `cipher_a`: Data-key AES instance.
618/// * `cipher_b`: Tweak-key AES instance.
619/// * `tweak`: Initial 16-byte tweak value.
620/// * `ciphertext`: Ciphertext bytes to decrypt.
621///
622/// # Returns
623/// XTS plaintext bytes with same length as `ciphertext`.
624pub fn aes_xts_decrypt(
625    cipher_a: &AesCipher,
626    cipher_b: &AesCipher,
627    tweak: &[u8; 16],
628    ciphertext: &[u8],
629) -> Result<Vec<u8>> {
630    aes_xts_crypt(cipher_a, cipher_b, tweak, ciphertext, false)
631}
632
/// Applies AES-XTS encryption or decryption with ciphertext stealing for
/// non-block-aligned inputs (IEEE 1619 style data-unit processing).
///
/// # Arguments
///
/// * `cipher_a` — Data-path AES instance.
/// * `cipher_b` — Tweak-path AES instance used to derive the initial tweak.
/// * `tweak` — 16-byte starting tweak block (encrypted once with `cipher_b`).
/// * `input` — Plaintext (encrypt) or ciphertext (decrypt), at least 16 bytes.
/// * `encrypt` — `true` to encrypt, `false` to decrypt.
///
/// # Returns
///
/// On success, output bytes matching `input` length.
///
/// # Errors
///
/// Returns `noxtls_core::Error` when `input` is shorter than one block.
///
/// # Panics
///
/// This function does not panic.
fn aes_xts_crypt(
    cipher_a: &AesCipher,
    cipher_b: &AesCipher,
    tweak: &[u8; 16],
    input: &[u8],
    encrypt: bool,
) -> Result<Vec<u8>> {
    if input.len() < 16 {
        return Err(Error::InvalidLength(
            "aes-xts input must be at least one full 16-byte block",
        ));
    }
    let mut out = vec![0_u8; input.len()];
    let full_blocks = input.len() / 16;
    let rem = input.len() % 16;

    // Initial tweak T = E_{K2}(tweak); successive blocks multiply T by x in
    // GF(2^128) via `xts_mul_x`.
    let mut tw = *tweak;
    cipher_b.encrypt_block(&mut tw);

    if rem == 0 {
        // Aligned data unit: every block is XEX-processed with its own tweak.
        for block_idx in 0..full_blocks {
            let start = block_idx * 16;
            let mut block = [0_u8; 16];
            block.copy_from_slice(&input[start..start + 16]);
            xor_block_in_place(&mut block, &tw);
            if encrypt {
                cipher_a.encrypt_block(&mut block);
            } else {
                cipher_a.decrypt_block(&mut block);
            }
            xor_block_in_place(&mut block, &tw);
            out[start..start + 16].copy_from_slice(&block);
            xts_mul_x(&mut tw);
        }
        return Ok(out);
    }

    // Process all but the final full block that participates in ciphertext stealing.
    for block_idx in 0..(full_blocks - 1) {
        let start = block_idx * 16;
        let mut block = [0_u8; 16];
        block.copy_from_slice(&input[start..start + 16]);
        xor_block_in_place(&mut block, &tw);
        if encrypt {
            cipher_a.encrypt_block(&mut block);
        } else {
            cipher_a.decrypt_block(&mut block);
        }
        xor_block_in_place(&mut block, &tw);
        out[start..start + 16].copy_from_slice(&block);
        xts_mul_x(&mut tw);
    }

    // `tw` is now the tweak for the last full block, `tw_next` for the
    // stolen partial block.
    let mut tw_next = tw;
    xts_mul_x(&mut tw_next);
    let last_full_start = (full_blocks - 1) * 16;
    let partial_start = full_blocks * 16;

    if encrypt {
        // C* = Enc(T_{m-1}, P_{m-1}) — the full block whose head is stolen.
        let mut block = [0_u8; 16];
        block.copy_from_slice(&input[last_full_start..last_full_start + 16]);
        xor_block_in_place(&mut block, &tw);
        cipher_a.encrypt_block(&mut block);
        xor_block_in_place(&mut block, &tw);

        // C_m is first r bytes of C*.
        out[partial_start..].copy_from_slice(&block[..rem]);

        // P* = P_m || C*[r..16], then encrypted with next tweak for C_{m-1}.
        let mut p_star = [0_u8; 16];
        p_star[..rem].copy_from_slice(&input[partial_start..]);
        p_star[rem..].copy_from_slice(&block[rem..]);
        xor_block_in_place(&mut p_star, &tw_next);
        cipher_a.encrypt_block(&mut p_star);
        xor_block_in_place(&mut p_star, &tw_next);
        out[last_full_start..last_full_start + 16].copy_from_slice(&p_star);
    } else {
        // P* = Dec(T_m, C_{m-1}) — decrypt the penultimate block with the
        // *later* tweak (mirror image of the encrypt path).
        let mut c_m_minus_1 = [0_u8; 16];
        c_m_minus_1.copy_from_slice(&input[last_full_start..last_full_start + 16]);
        xor_block_in_place(&mut c_m_minus_1, &tw_next);
        cipher_a.decrypt_block(&mut c_m_minus_1);
        xor_block_in_place(&mut c_m_minus_1, &tw_next);

        // P_m is the first r bytes of decrypted C_{m-1}.
        out[partial_start..].copy_from_slice(&c_m_minus_1[..rem]);

        // Reconstruct C* = C_m || tail(P*), then decrypt with current tweak for P_{m-1}.
        let mut c_star = [0_u8; 16];
        c_star[..rem].copy_from_slice(&input[partial_start..]);
        c_star[rem..].copy_from_slice(&c_m_minus_1[rem..]);
        xor_block_in_place(&mut c_star, &tw);
        cipher_a.decrypt_block(&mut c_star);
        xor_block_in_place(&mut c_star, &tw);
        out[last_full_start..last_full_start + 16].copy_from_slice(&c_star);
    }

    Ok(out)
}
752
753/// Expands AES key material into round keys for encryption and decryption.
754///
755/// # Arguments
756///
757/// * `key` — `&[u8]`.
758/// * `nk` — `usize`.
759/// * `rounds` — `usize`.
760///
761/// # Returns
762///
763/// `Vec<[u8` produced by `key_expansion` (see implementation).
764///
765/// # Panics
766///
767/// This function does not panic unless otherwise noted.
768fn key_expansion(key: &[u8], nk: usize, rounds: usize) -> Vec<[u8; 16]> {
769    let total_words = 4 * (rounds + 1);
770    let mut w = vec![0_u32; total_words];
771    for (i, word) in w.iter_mut().enumerate().take(nk) {
772        let idx = i * 4;
773        *word = u32::from_be_bytes([key[idx], key[idx + 1], key[idx + 2], key[idx + 3]]);
774    }
775    for i in nk..total_words {
776        let mut temp = w[i - 1];
777        if i % nk == 0 {
778            temp = sub_word(rot_word(temp)) ^ (u32::from(RCON[i / nk - 1]) << 24);
779        } else if nk > 6 && i % nk == 4 {
780            temp = sub_word(temp);
781        }
782        w[i] = w[i - nk] ^ temp;
783    }
784    let mut keys = Vec::with_capacity(rounds + 1);
785    for r in 0..=rounds {
786        let mut key_block = [0_u8; 16];
787        for c in 0..4 {
788            key_block[c * 4..(c + 1) * 4].copy_from_slice(&w[r * 4 + c].to_be_bytes());
789        }
790        keys.push(key_block);
791    }
792    keys
793}
794
/// Rotates one 32-bit word left by one byte (RotWord from FIPS 197).
///
/// # Arguments
/// * `word`: Big-endian key-schedule word.
///
/// # Returns
/// The word with its most significant byte moved to the least significant
/// position.
fn rot_word(word: u32) -> u32 {
    (word << 8) | (word >> 24)
}
811
812/// Applies AES S-box substitution to each byte of a 32-bit word.
813///
814/// # Arguments
815///
816/// * `word` — `u32`.
817///
818/// # Returns
819///
820/// `u32` produced by `sub_word` (see implementation).
821///
822/// # Panics
823///
824/// This function does not panic unless otherwise noted.
825fn sub_word(word: u32) -> u32 {
826    let bytes = word.to_be_bytes();
827    u32::from_be_bytes([
828        SBOX[usize::from(bytes[0])],
829        SBOX[usize::from(bytes[1])],
830        SBOX[usize::from(bytes[2])],
831        SBOX[usize::from(bytes[3])],
832    ])
833}
834
/// XORs one round key into the current AES state (AddRoundKey).
///
/// # Arguments
/// * `state`: Mutable 16-byte AES state.
/// * `round_key`: 16-byte round key to fold in.
fn add_round_key(state: &mut [u8; 16], round_key: &[u8; 16]) {
    for (state_byte, key_byte) in state.iter_mut().zip(round_key.iter()) {
        *state_byte ^= *key_byte;
    }
}
854
855/// Applies forward AES S-box to every state byte.
856///
857/// # Arguments
858///
859/// * `state` — `&mut [u8; 16]`.
860///
861/// # Returns
862///
863/// `()` when there is no return data.
864///
865/// # Panics
866///
867/// This function does not panic unless otherwise noted.
868fn sub_bytes(state: &mut [u8; 16]) {
869    for byte in state {
870        *byte = SBOX[usize::from(*byte)];
871    }
872}
873
874/// Applies inverse AES S-box to every state byte.
875///
876/// # Arguments
877///
878/// * `state` — `&mut [u8; 16]`.
879///
880/// # Returns
881///
882/// `()` when there is no return data.
883///
884/// # Panics
885///
886/// This function does not panic unless otherwise noted.
887fn inv_sub_bytes(state: &mut [u8; 16]) {
888    for byte in state {
889        *byte = INV_SBOX[usize::from(*byte)];
890    }
891}
892
/// Performs the forward ShiftRows permutation on the column-major state.
///
/// Row `r` of the 4x4 state (indices `r`, `r+4`, `r+8`, `r+12`) rotates
/// left by `r` positions.
///
/// # Arguments
/// * `state`: Mutable 16-byte AES state in column-major layout.
fn shift_rows(state: &mut [u8; 16]) {
    *state = [
        state[0], state[5], state[10], state[15],
        state[4], state[9], state[14], state[3],
        state[8], state[13], state[2], state[7],
        state[12], state[1], state[6], state[11],
    ];
}
922
/// Performs the inverse ShiftRows permutation on the column-major state.
///
/// Row `r` of the 4x4 state (indices `r`, `r+4`, `r+8`, `r+12`) rotates
/// right by `r` positions, undoing [`shift_rows`].
///
/// # Arguments
/// * `state`: Mutable 16-byte AES state in column-major layout.
fn inv_shift_rows(state: &mut [u8; 16]) {
    *state = [
        state[0], state[13], state[10], state[7],
        state[4], state[1], state[14], state[11],
        state[8], state[5], state[2], state[15],
        state[12], state[9], state[6], state[3],
    ];
}
952
/// Applies the forward AES MixColumns transform to every state column.
///
/// Each 4-byte column (bytes `4c..4c+4`) is multiplied by the fixed
/// Rijndael matrix `[2 3 1 1 / 1 2 3 1 / 1 1 2 3 / 3 1 1 2]` over GF(2^8).
///
/// # Arguments
///
/// * `state` — 16-byte AES state, mutated in place.
fn mix_columns(state: &mut [u8; 16]) {
    // Multiplication by x (i.e. by {02}) in GF(2^8), reducing by 0x1b.
    fn xtime(b: u8) -> u8 {
        (b << 1) ^ (((b >> 7) & 1) * 0x1b)
    }
    for col in state.chunks_exact_mut(4) {
        let (a0, a1, a2, a3) = (col[0], col[1], col[2], col[3]);
        // Equivalent matrix form: new_ai = ai ^ t ^ 2*(ai ^ a_{i+1}),
        // where t is the XOR of the whole column.
        let t = a0 ^ a1 ^ a2 ^ a3;
        col[0] = a0 ^ t ^ xtime(a0 ^ a1);
        col[1] = a1 ^ t ^ xtime(a1 ^ a2);
        col[2] = a2 ^ t ^ xtime(a2 ^ a3);
        col[3] = a3 ^ t ^ xtime(a3 ^ a0);
    }
}
979
/// Applies the inverse AES MixColumns transform (undoes `mix_columns`).
///
/// Each 4-byte column is multiplied by the inverse Rijndael matrix
/// `[14 11 13 9 / 9 14 11 13 / 13 9 14 11 / 11 13 9 14]` over GF(2^8),
/// computed here as a cheap pre-transform followed by the forward mix.
///
/// # Arguments
///
/// * `state` — 16-byte AES state, mutated in place.
fn inv_mix_columns(state: &mut [u8; 16]) {
    // Multiplication by x (i.e. by {02}) in GF(2^8), reducing by 0x1b.
    fn xtime(b: u8) -> u8 {
        (b << 1) ^ (((b >> 7) & 1) * 0x1b)
    }
    for col in state.chunks_exact_mut(4) {
        let (a0, a1, a2, a3) = (col[0], col[1], col[2], col[3]);
        // InvMixColumns = MixColumns after adding {04}*(a0^a2) to a0/a2
        // and {04}*(a1^a3) to a1/a3 (standard decomposition).
        let u = xtime(xtime(a0 ^ a2));
        let v = xtime(xtime(a1 ^ a3));
        let (b0, b1, b2, b3) = (a0 ^ u, a1 ^ v, a2 ^ u, a3 ^ v);
        let t = b0 ^ b1 ^ b2 ^ b3;
        col[0] = b0 ^ t ^ xtime(b0 ^ b1);
        col[1] = b1 ^ t ^ xtime(b1 ^ b2);
        col[2] = b2 ^ t ^ xtime(b2 ^ b3);
        col[3] = b3 ^ t ^ xtime(b3 ^ b0);
    }
}
1006
/// Multiplies two bytes in GF(2^8) modulo the AES polynomial x^8+x^4+x^3+x+1.
///
/// Classic shift-and-add ("Russian peasant") multiplication: for each set
/// bit of `b`, the current power-of-x multiple of `a` is XORed into the
/// product, reducing by 0x1b whenever `a` overflows eight bits.
///
/// # Arguments
///
/// * `a` — first factor.
/// * `b` — second factor.
///
/// # Returns
///
/// The product `a * b` in GF(2^8).
fn gf_mul(mut a: u8, mut b: u8) -> u8 {
    let mut product = 0_u8;
    while b != 0 {
        if b & 1 == 1 {
            product ^= a;
        }
        let overflow = a & 0x80 != 0;
        a <<= 1;
        if overflow {
            // Reduce modulo the AES polynomial (low byte of 0x11b).
            a ^= 0x1b;
        }
        b >>= 1;
    }
    product
}
1036
/// Increments a 16-byte big-endian counter block in place.
///
/// The full 128-bit value is incremented by one, wrapping to zero when
/// every byte is 0xFF.
///
/// # Arguments
///
/// * `counter` — big-endian counter block, mutated in place.
fn increment_be(counter: &mut [u8; 16]) {
    let bumped = u128::from_be_bytes(*counter).wrapping_add(1);
    *counter = bumped.to_be_bytes();
}
1058
1059/// Computes GHASH over AAD and ciphertext for GCM authentication.
1060///
1061/// # Arguments
1062///
1063/// * `h` — `u128`.
1064/// * `aad` — `&[u8]`.
1065/// * `ciphertext` — `&[u8]`.
1066///
1067/// # Returns
1068///
1069/// `u128` produced by `ghash` (see implementation).
1070///
1071/// # Panics
1072///
1073/// This function does not panic unless otherwise noted.
1074fn ghash(h: u128, aad: &[u8], ciphertext: &[u8]) -> u128 {
1075    let mut y = 0_u128;
1076    let mut a = aad.to_vec();
1077    let mut c = ciphertext.to_vec();
1078    pad16(&mut a);
1079    pad16(&mut c);
1080    for chunk in a.chunks_exact(16) {
1081        let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1082        y = gf128_mul(y ^ x, h);
1083    }
1084    for chunk in c.chunks_exact(16) {
1085        let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1086        y = gf128_mul(y ^ x, h);
1087    }
1088    let lengths = ((aad.len() as u128) << 64) | ((ciphertext.len() as u128) * 8);
1089    gf128_mul(y ^ lengths, h)
1090}
1091
/// Multiplies two elements in GF(2^128) with the GCM reduction polynomial.
///
/// Uses GCM's bit-reflected representation: bit 127 is the constant term,
/// so `1 << 127` is the multiplicative identity and reduction XORs
/// `0xe1 << 120` when a division by x shifts a set bit out.
///
/// # Arguments
///
/// * `x` — first factor.
/// * `y` — second factor.
///
/// # Returns
///
/// The product `x * y` in GF(2^128).
fn gf128_mul(x: u128, y: u128) -> u128 {
    // GCM reduction constant R = 11100001 || 0^120.
    const R: u128 = 0xe1_u128 << 120;
    let mut v = y;
    let mut z = 0_u128;
    for i in (0..128).rev() {
        if (x >> i) & 1 == 1 {
            z ^= v;
        }
        // v := v * x^-1 (right shift), reducing when the low bit falls off.
        v = if v & 1 == 1 { (v >> 1) ^ R } else { v >> 1 };
    }
    z
}
1121
1122/// Builds J0 nonce block per GCM specification.
1123///
1124/// # Arguments
1125///
1126/// * `h` — `u128`.
1127/// * `nonce` — `&[u8]`.
1128///
1129/// # Returns
1130///
1131/// `u128` produced by `gcm_j0` (see implementation).
1132///
1133/// # Panics
1134///
1135/// This function does not panic unless otherwise noted.
1136fn gcm_j0(h: u128, nonce: &[u8]) -> u128 {
1137    if nonce.len() == 12 {
1138        let mut j = [0_u8; 16];
1139        j[..12].copy_from_slice(nonce);
1140        j[15] = 1;
1141        return u128::from_be_bytes(j);
1142    }
1143    let mut n = nonce.to_vec();
1144    pad16(&mut n);
1145    let mut y = 0_u128;
1146    for chunk in n.chunks_exact(16) {
1147        let x = u128::from_be_bytes(chunk.try_into().expect("16-byte chunk"));
1148        y = gf128_mul(y ^ x, h);
1149    }
1150    let len_block = (nonce.len() as u128) * 8;
1151    gf128_mul(y ^ len_block, h)
1152}
1153
1154/// Applies GCM counter-mode keystream XOR starting from provided counter block.
1155///
1156/// # Arguments
1157///
1158/// * `cipher` — `&AesCipher`.
1159/// * `initial_ctr` — `u128`.
1160/// * `input` — `&[u8]`.
1161///
1162/// # Returns
1163///
1164/// `Vec<u8>` produced by `gcm_ctr_xor` (see implementation).
1165///
1166/// # Panics
1167///
1168/// This function does not panic unless otherwise noted.
1169fn gcm_ctr_xor(cipher: &AesCipher, initial_ctr: u128, input: &[u8]) -> Vec<u8> {
1170    let mut ctr = initial_ctr;
1171    let mut out = vec![0_u8; input.len()];
1172    let mut offset = 0;
1173    while offset < input.len() {
1174        let mut stream = ctr.to_be_bytes();
1175        cipher.encrypt_block(&mut stream);
1176        let chunk_len = (input.len() - offset).min(16);
1177        for i in 0..chunk_len {
1178            out[offset + i] = input[offset + i] ^ stream[i];
1179        }
1180        inc32_u128(&mut ctr);
1181        offset += chunk_len;
1182    }
1183    out
1184}
1185
/// Increments the low 32-bit GCM counter field of a 128-bit counter block.
///
/// Only the low 32 bits are advanced; they wrap modulo 2^32 without
/// carrying into the upper 96 bits, matching GCM's inc32 function.
///
/// # Arguments
///
/// * `counter` — counter block as a big-endian 128-bit integer, mutated in place.
fn inc32_u128(counter: &mut u128) {
    let bumped = (*counter as u32).wrapping_add(1);
    *counter = (*counter & !u128::from(u32::MAX)) | u128::from(bumped);
}
1204
/// Increments the low 32-bit GCM counter field of a 16-byte block in place.
///
/// Bytes 12..16 are treated as a big-endian u32 and incremented with
/// wraparound; bytes 0..12 are never modified.
///
/// # Arguments
///
/// * `counter` — 16-byte counter block, mutated in place.
fn inc32(counter: &mut [u8; 16]) {
    let tail: [u8; 4] = counter[12..16].try_into().expect("4-byte tail");
    let bumped = u32::from_be_bytes(tail).wrapping_add(1);
    counter[12..16].copy_from_slice(&bumped.to_be_bytes());
}
1226
/// Pads a byte vector with zeroes up to the next multiple of 16.
///
/// Input whose length is already a multiple of 16 (including empty input)
/// is left unchanged.
///
/// # Arguments
///
/// * `data` — buffer to pad, mutated in place.
fn pad16(data: &mut Vec<u8>) {
    while data.len() % 16 != 0 {
        data.push(0);
    }
}
1246
/// Encodes a length as a `q`-byte big-endian field for CCM formatting.
///
/// Writes the low `q` bytes of `len` into `out[..q]`, most significant
/// byte first.
///
/// # Arguments
///
/// * `len` — length value to encode.
/// * `q` — number of output bytes (CCM uses 2..=8).
/// * `out` — destination buffer; only the first `q` bytes are written.
///
/// # Panics
///
/// Panics if `out.len() < q`; may also panic (shift overflow) if `q > 8`.
fn encode_len_q(len: u64, q: usize, out: &mut [u8]) {
    for (i, slot) in out[..q].iter_mut().rev().enumerate() {
        *slot = (len >> (8 * i)) as u8;
    }
}
1267
/// Increments the `q`-byte CCM counter region (bytes `16-q..16`) in place.
///
/// The region is treated as a big-endian integer; the carry never
/// propagates past byte `16-q`, so the counter wraps within its own field.
///
/// # Arguments
///
/// * `counter` — 16-byte CCM counter block, mutated in place.
/// * `q` — width of the counter field in bytes.
///
/// # Panics
///
/// Panics if `q > 16` (index underflow).
fn increment_q_counter(counter: &mut [u8; 16], q: usize) {
    let mut idx = 16;
    while idx > 16 - q {
        idx -= 1;
        let (bumped, wrapped) = counter[idx].overflowing_add(1);
        counter[idx] = bumped;
        if !wrapped {
            break;
        }
    }
}
1290
/// XORs `src` into `dst` byte-by-byte, storing the result in `dst`.
///
/// # Arguments
///
/// * `dst` — destination block, mutated in place.
/// * `src` — block XORed into `dst`.
fn xor_block_in_place(dst: &mut [u8; 16], src: &[u8; 16]) {
    for (d, s) in dst.iter_mut().zip(src.iter()) {
        *d ^= s;
    }
}
1310
/// Shifts the CFB register left by `segment.len()` bytes and appends `segment`.
///
/// The oldest `segment.len()` bytes are discarded; the surviving tail moves
/// to the front and the new segment fills the end. A full 16-byte segment
/// replaces the register outright; an empty segment leaves it unchanged.
///
/// # Arguments
///
/// * `reg` — 16-byte CFB feedback register, mutated in place.
/// * `segment` — bytes to append; at most 16 (debug-asserted).
fn shift_register_append(reg: &mut [u8; 16], segment: &[u8]) {
    debug_assert!(segment.len() <= 16);
    let keep = 16 - segment.len();
    let mut next = [0_u8; 16];
    next[..keep].copy_from_slice(&reg[segment.len()..]);
    next[keep..].copy_from_slice(segment);
    *reg = next;
}
1335
/// Multiplies the XTS tweak by x in GF(2^128) (polynomial x^128+x^7+x^2+x+1).
///
/// The tweak is interpreted little-endian (byte 0 holds the lowest bits,
/// per IEEE 1619); the value is doubled and, if a bit shifts out of the
/// top, reduced by XORing 0x87 into the lowest byte.
///
/// # Arguments
///
/// * `tweak` — 16-byte XTS tweak, mutated in place.
fn xts_mul_x(tweak: &mut [u8; 16]) {
    let value = u128::from_le_bytes(*tweak);
    let mut doubled = value << 1;
    if value >> 127 != 0 {
        // Carry out of bit 127: reduce by the low byte of the polynomial.
        doubled ^= 0x87;
    }
    *tweak = doubled.to_le_bytes();
}
1360
/// AES key-schedule round constants (FIPS-197 `Rcon`): entry `i` is x^i in
/// GF(2^8) reduced by the AES polynomial (hence 0x80 -> 0x1B -> 0x36).
const RCON: [u8; 10] = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36];
1362
/// Forward AES S-box (FIPS-197): `SBOX[b]` is the affine transform of the
/// multiplicative inverse of `b` in GF(2^8), indexed by the input byte.
const SBOX: [u8; 256] = [
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
    0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
    0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
    0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
    0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
    0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
    0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
    0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
    0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
    0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
];
1381
/// Inverse AES S-box (FIPS-197): satisfies `INV_SBOX[SBOX[b]] == b` for every
/// byte `b`; used by `inv_sub_bytes` during decryption rounds.
const INV_SBOX: [u8; 256] = [
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
    0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
    0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
    0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
    0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
    0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
    0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
    0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
    0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
    0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
    0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
];