sequoia_openpgp/crypto/aead.rs
//! Authenticated encryption with additional data.
//!
//! This module provides both a uniform streaming (chunked) and a
//! non-streaming (non-chunked) interface to authenticated symmetric
//! encryption and decryption using different block ciphers and AEAD
//! modes.
//!
//! Note: this is a very low-level interface. It is not about OpenPGP
//! encryption or decryption. If you are looking for that, see
//! [`crate::serialize::stream::Encryptor`] and
//! [`crate::parse::stream::Decryptor`] instead.
//!
//! # Examples
//!
//! This example demonstrates streaming (chunked) encryption and
//! decryption.
//!
//! ```rust
//! # use std::io::{Read, Write};
//! # use sequoia_openpgp::crypto::SessionKey;
//! # use sequoia_openpgp::crypto::{AEADAlgorithm, SymmetricAlgorithm};
//! # use sequoia_openpgp::crypto::aead::*;
//! # use sequoia_openpgp::parse::buffered_reader::{self, BufferedReader};
//! # fn main() -> sequoia_openpgp::Result<()> {
//! let text = b"Hello World :)";
//! let algo = SymmetricAlgorithm::default();
//! let aead = AEADAlgorithm::default();
//! let key = SessionKey::new(algo.key_size()?)?;
//! let chunk_size = 4096;
//! let schedule = SEIPv2Schedule::new(&key, algo, aead, chunk_size, b"salt")?;
//!
//! // Encrypt the `text`.
//! let mut ciphertext = Vec::new();
//! let mut encryptor = Encryptor::new(
//!     algo, aead, chunk_size, schedule.clone(), &mut ciphertext)?;
//! encryptor.write_all(text)?;
//! encryptor.finalize()?;
//!
//! // Decrypt the `ciphertext`.
//! let mut plaintext = Vec::new();
//! let reader = buffered_reader::Memory::with_cookie(
//!     &ciphertext, Default::default());
//!
//! let mut decryptor = Decryptor::new(
//!     algo, aead, chunk_size, schedule.clone(), reader.into_boxed())?;
//!
//! decryptor.read_to_end(&mut plaintext)?;
//!
//! // Check that we recovered it.
//! assert_eq!(&plaintext[..], text);
//! # Ok(()) }
//! ```
//!
//! This example demonstrates non-streaming (non-chunked) encryption
//! and decryption.
//!
//! ```rust
//! # use std::io::{Read, Write};
//! # use sequoia_openpgp::crypto::{self, SessionKey};
//! # use sequoia_openpgp::crypto::{AEADAlgorithm, SymmetricAlgorithm};
//! # use sequoia_openpgp::crypto::aead::*;
//! # fn main() -> sequoia_openpgp::Result<()> {
//! let text = b"Hello World :)";
//! let aad = b"Not secret, but authenticated";
//! let algo = SymmetricAlgorithm::default();
//! let aead = AEADAlgorithm::default();
//! let key = SessionKey::new(algo.key_size()?)?;
//! let mut nonce = vec![0; aead.nonce_size()?];
//! crypto::random(&mut nonce)?;
//!
//! // Encrypt the `text`.
//! let mut ciphertext = vec![0; text.len() + aead.digest_size()?];
//! aead.context(algo, &key, aad, &nonce)?
//!     .for_encryption()?
//!     .encrypt_seal(&mut ciphertext, text)?;
//!
//! // Decrypt the `ciphertext`.
//! let mut plaintext = vec![0; ciphertext.len() - aead.digest_size()?];
//! aead.context(algo, &key, aad, &nonce)?
//!     .for_decryption()?
//!     .decrypt_verify(&mut plaintext, &ciphertext)?;
//!
//! // Check that we recovered it.
//! assert_eq!(&plaintext[..], text);
//! # Ok(()) }
//! ```

use std::cmp;
use std::convert::TryInto;
use std::fmt;
use std::io;

use buffered_reader::BufferedReader;

use crate::types::{
    AEADAlgorithm,
    SymmetricAlgorithm,
};
use crate::utils::{
    write_be_u64,
};
use crate::Error;
use crate::Result;
use crate::crypto::SessionKey;
use crate::seal;
use crate::parse::Cookie;
use crate::crypto::backend::{Backend, interface::Kdf};

/// Maximum size of any Nonce used by an AEAD mode.
pub const MAX_NONCE_LEN: usize = 16;

/// Converts a chunk size to a usize.
pub(crate) fn chunk_size_usize(chunk_size: u64) -> Result<usize> {
    chunk_size.try_into()
        .map_err(|_| Error::InvalidOperation(
            format!("AEAD chunk size exceeds size of \
                     virtual memory: {}", chunk_size)).into())
}

/// Builds AEAD contexts.
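///
/// A `Builder` is obtained from [`AEADAlgorithm::context`], and is
/// turned into a ready-to-use context with
/// [`Builder::for_encryption`] or [`Builder::for_decryption`].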
pub struct Builder<'a> {
    symm: SymmetricAlgorithm,
    aead: AEADAlgorithm,
    key: &'a SessionKey,
    aad: &'a [u8],
    nonce: &'a [u8],
}

impl AEADAlgorithm {
    /// Creates a new AEAD context builder for this algorithm.
    ///
    /// # Errors
    ///
    /// Fails with [`Error::UnsupportedSymmetricAlgorithm`] if Sequoia
    /// does not support the given symmetric algorithm, and
    /// [`Error::UnsupportedAEADAlgorithm`] if Sequoia does not
    /// support the given AEAD algorithm, or the combination of
    /// symmetric algorithm and AEAD algorithm.
    pub fn context<'s>(self,
                       symm: SymmetricAlgorithm,
                       key: &'s SessionKey,
                       aad: &'s [u8],
                       nonce: &'s [u8])
                       -> Result<Builder<'s>>
    {
        if ! symm.is_supported() {
            return Err(Error::UnsupportedSymmetricAlgorithm(symm).into());
        }

        use crate::crypto::backend::{Backend, interface::Aead};
        if ! Backend::supports_algo_with_symmetric(self, symm) {
            return Err(Error::UnsupportedAEADAlgorithm(self).into());
        }

        Ok(Builder {
            symm,
            aead: self,
            key,
            aad,
            nonce,
        })
    }
}

impl Builder<'_> {
    /// Returns an AEAD context for encryption.
    pub fn for_encryption(self) -> Result<EncryptionContext> {
        use crate::crypto::backend::{Backend, interface::Aead};
        Ok(EncryptionContext(
            Backend::context(self.aead, self.symm, self.key.as_protected(),
                             self.aad, self.nonce, CipherOp::Encrypt)?))
    }

    /// Returns an AEAD context for decryption.
    pub fn for_decryption(self) -> Result<DecryptionContext> {
        use crate::crypto::backend::{Backend, interface::Aead};
        Ok(DecryptionContext(
            Backend::context(self.aead, self.symm, self.key.as_protected(),
                             self.aad, self.nonce, CipherOp::Decrypt)?))
    }
}

/// A block cipher state and AEAD mode for encryption.
pub struct EncryptionContext(Box<dyn Context>);

impl EncryptionContext {
    /// Encrypts `src` to `dst`.
    ///
    /// Encrypts the given plaintext, and adds an authentication tag.
    ///
    /// `dst` must be exactly large enough to accommodate both the
    /// ciphertext and the digest, i.e. its length must be exactly
    /// `src.len() + self.digest_size()`.
    pub fn encrypt_seal(&mut self, dst: &mut [u8], src: &[u8]) -> Result<()> {
        if dst.len() != src.len() + self.digest_size() {
            return Err(Error::InvalidOperation(
                "invalid buffer length".into()).into());
        }

        self.0.encrypt_seal(dst, src)
    }

    /// Length of the digest in bytes.
    pub fn digest_size(&self) -> usize {
        self.0.digest_size()
    }
}

/// A block cipher state and AEAD mode for decryption.
pub struct DecryptionContext(Box<dyn Context>);

impl DecryptionContext {
    /// Decrypts `src` to `dst`.
    ///
    /// Decrypts the given ciphertext, and checks the authentication
    /// tag. If the authentication tag is not correct, an error is
    /// returned.
    ///
    /// `src` contains both the ciphertext and the digest, i.e. its
    /// length must be exactly `dst.len() + self.digest_size()`.
    pub fn decrypt_verify(&mut self, dst: &mut [u8], src: &[u8]) -> Result<()> {
        if dst.len() + self.digest_size() != src.len() {
            return Err(Error::InvalidOperation(
                "invalid buffer length".into()).into());
        }

        self.0.decrypt_verify(dst, src)
    }

    /// Length of the digest in bytes.
    pub fn digest_size(&self) -> usize {
        self.0.digest_size()
    }
}

/// A block cipher state and AEAD mode of operation.
///
/// # Sealed trait
///
/// This trait is [sealed] and cannot be implemented for types outside this crate.
/// Therefore it can be extended in a non-breaking way.
/// If you want to implement the trait inside the crate
/// you also need to implement the `seal::Sealed` marker trait.
///
/// [sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed
pub(crate) trait Context: seal::Sealed {
    /// Encrypts one chunk `src` to `dst` adding a digest.
    ///
    /// Note: `dst` must be exactly large enough to accommodate both
    /// the ciphertext and the digest!
    fn encrypt_seal(&mut self, dst: &mut [u8], src: &[u8]) -> Result<()>;

    /// Length of the digest in bytes.
    #[allow(dead_code)] // Used in debug assertions.
    fn digest_size(&self) -> usize;

    /// Decrypts one chunk `src` to `dst` and verifies that the digest
    /// is correct.
    fn decrypt_verify(&mut self, dst: &mut [u8], src: &[u8]) -> Result<()>;
}

/// Whether the AEAD cipher is used for data encryption or decryption.
pub(crate) enum CipherOp {
    /// Cipher is used for data encryption.
    Encrypt,
    /// Cipher is used for data decryption.
    Decrypt,
}

impl AEADAlgorithm {
    /// Returns the digest size of the AEAD algorithm.
    pub fn digest_size(&self) -> Result<usize> {
        use self::AEADAlgorithm::*;
        match self {
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-eax-mode
            EAX => Ok(16),
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-ocb-mode
            OCB => Ok(16),
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-gcm-mode
            GCM => Ok(16),
            _ => Err(Error::UnsupportedAEADAlgorithm(*self).into()),
        }
    }

    /// Returns the nonce size of the AEAD algorithm.
    pub fn nonce_size(&self) -> Result<usize> {
        use self::AEADAlgorithm::*;
        match self {
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-eax-mode
            EAX => Ok(16),
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-ocb-mode
            OCB => Ok(15),
            // See https://www.rfc-editor.org/rfc/rfc9580.html#name-gcm-mode
            GCM => Ok(12),
            _ => Err(Error::UnsupportedAEADAlgorithm(*self).into()),
        }
    }
}

/// Schedules key, nonce, and additional authenticated data (AAD) for
/// use with chunked AEAD encryption.
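///
/// # Examples
///
/// A minimal sketch of a custom schedule, using a hypothetical
/// `FixedSchedule` type (illustrative only, hence not compiled as a
/// doctest: the bare eight-byte chunk index used as the nonce here
/// does not match the nonce sizes of the supported AEAD modes; see
/// [`SEIPv2Schedule`] for a real implementation):
///
/// ```ignore
/// struct FixedSchedule {
///     key: SessionKey,
///     ad: Vec<u8>,
/// }
///
/// impl<T> Schedule<T> for FixedSchedule {
///     fn chunk(&self, index: u64,
///              fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
///              -> Result<T>
///     {
///         // Derive the per-chunk nonce from the chunk index.
///         fun(&self.key, &index.to_be_bytes(), &self.ad)
///     }
///
///     fn finalizer(&self, index: u64, length: u64,
///                  fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
///                  -> Result<T>
///     {
///         // Bind the total stream length into the AAD to protect
///         // against truncation.
///         let mut ad = self.ad.clone();
///         ad.extend_from_slice(&length.to_be_bytes());
///         fun(&self.key, &index.to_be_bytes(), &ad)
///     }
/// }
/// ```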
pub trait Schedule<T>: Send + Sync {
    /// Computes key, nonce, and AAD for a chunk.
    ///
    /// For every chunk, implementations must produce a key, a nonce,
    /// and the additional authenticated data (AAD), then invoke `fun`
    /// with key, nonce, and AAD.
    ///
    /// `index` is the current chunk index.
    fn chunk(&self,
             index: u64,
             fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
             -> Result<T>;

    /// Computes key, nonce, and AAD for the final authentication tag.
    ///
    /// When doing chunked AEAD, we need to protect against truncation
    /// of the chunked stream. In OpenPGP this is done by adding a
    /// final empty chunk that includes the length of the stream in
    /// the additional authenticated data (AAD).
    ///
    /// Implementations must produce a key, a nonce, and the AAD
    /// (which SHOULD include the length of the stream), then invoke
    /// `fun` with key, nonce, and AAD.
    ///
    /// `index` is the current chunk index. `length` is the total
    /// length of the stream.
    fn finalizer(&self,
                 index: u64,
                 length: u64,
                 fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
                 -> Result<T>;
}

/// The key, nonce, and AAD schedule for the version 2 SEIPD packet.
///
/// See [Section 5.13.2 of RFC 9580].
///
/// [Section 5.13.2 of RFC 9580]: https://www.rfc-editor.org/rfc/rfc9580.html#section-5.13.2
#[derive(Clone)]
pub struct SEIPv2Schedule {
    key: SessionKey,
    nonce: Box<[u8]>,
    ad: [u8; Self::AD_PREFIX_LEN],
    nonce_len: usize,
}

impl SEIPv2Schedule {
    /// Minimum AEAD chunk size.
    ///
    /// Implementations MUST support chunk sizes down to 64B.
    const MIN_CHUNK_SIZE: usize = 1 << 6; // 64B

    /// Maximum AEAD chunk size.
    ///
    /// Implementations MUST support chunk sizes up to 4MiB.
    const MAX_CHUNK_SIZE: usize = 1 << 22; // 4MiB

    /// The length of the additional authenticated data prefix.
    ///
    /// For the final tag, the stream length as big-endian u64 is
    /// appended to this prefix.
    const AD_PREFIX_LEN: usize = 5;

    /// Creates a new schedule to encrypt or decrypt version 2 SEIPD
    /// packets.
    pub fn new(session_key: &SessionKey,
               sym_algo: SymmetricAlgorithm,
               aead: AEADAlgorithm,
               chunk_size: usize,
               salt: &[u8]) -> Result<Self>
    {
        if !(Self::MIN_CHUNK_SIZE..=Self::MAX_CHUNK_SIZE).contains(&chunk_size)
        {
            return Err(Error::InvalidArgument(
                format!("Invalid AEAD chunk size: {}", chunk_size)).into());
        }

        // Derive the message key and initialization vector.
        let key_size = sym_algo.key_size()?;
        // The NONCE size is NONCE_LEN - 8 bytes taken from the KDF.
        let nonce_size = aead.nonce_size()? - 8;
        let mut key_nonce: SessionKey =
            vec![0; key_size + nonce_size].into();
        let ad = [
            0xd2, // Tag.
            2,    // Version.
            sym_algo.into(),
            aead.into(),
            chunk_size.trailing_zeros() as u8 - 6,
        ];
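        // Note: these five bytes double as the HKDF info and,
        // unchanged, as the additional authenticated data for every
        // chunk (see Section 5.13.2 of RFC 9580).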
        Backend::hkdf_sha256(session_key, Some(salt), &ad, &mut key_nonce)?;
        let key = Vec::from(&key_nonce[..key_size]).into();
        let nonce = Vec::from(&key_nonce[key_size..]).into();

        Ok(Self {
            key,
            nonce,
            ad,
            nonce_len: aead.nonce_size()?,
        })
    }
}

impl<T> Schedule<T> for SEIPv2Schedule {
    fn chunk(&self,
             index: u64,
             fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
             -> Result<T>
    {
        // The nonce is the NONCE (NONCE_LEN - 8 bytes taken from the
        // KDF) concatenated with the chunk index.
        let index_be: [u8; 8] = index.to_be_bytes();
        let mut nonce_store = [0u8; MAX_NONCE_LEN];
        let nonce = &mut nonce_store[..self.nonce_len];
        nonce[..self.nonce.len()].copy_from_slice(&self.nonce);
        nonce[self.nonce.len()..].copy_from_slice(&index_be);

        fun(&self.key, nonce, &self.ad)
    }

    fn finalizer(&self,
                 index: u64,
                 length: u64,
                 fun: &mut dyn FnMut(&SessionKey, &[u8], &[u8]) -> Result<T>)
                 -> Result<T>
    {
        // Prepare the associated data.
        let mut ad = [0u8; Self::AD_PREFIX_LEN + 8];
        ad[..Self::AD_PREFIX_LEN].copy_from_slice(&self.ad);
        write_be_u64(&mut ad[Self::AD_PREFIX_LEN..], length);

        // The nonce is the NONCE (NONCE_LEN - 8 bytes taken from the
        // KDF) concatenated with the chunk index.
        let index_be: [u8; 8] = index.to_be_bytes();
        let mut nonce_store = [0u8; MAX_NONCE_LEN];
        let nonce = &mut nonce_store[..self.nonce_len];
        nonce[..self.nonce.len()].copy_from_slice(&self.nonce);
        nonce[self.nonce.len()..].copy_from_slice(&index_be);

        fun(&self.key, nonce, &ad)
    }
}

/// A `Read`er for decrypting AEAD-encrypted data.
pub(crate) struct InternalDecryptor<'a, 's> {
    // The encrypted data.
    source: Box<dyn BufferedReader<Cookie> + 'a>,

    sym_algo: SymmetricAlgorithm,
    aead: AEADAlgorithm,
    schedule: Box<dyn Schedule<DecryptionContext> + 's>,

    digest_size: usize,
    chunk_size: usize,
    chunk_index: u64,
    bytes_decrypted: u64,
    // Up to a chunk of unread data.
    buffer: Vec<u8>,
}
assert_send_and_sync!(InternalDecryptor<'_, '_>);

impl<'a, 's> InternalDecryptor<'a, 's> {
    /// Instantiate a new AEAD decryptor.
    ///
    /// `source` is the source to wrap.
    pub fn new<R, S>(sym_algo: SymmetricAlgorithm,
                     aead: AEADAlgorithm, chunk_size: usize,
                     schedule: S, source: R)
                     -> Result<Self>
    where
        R: BufferedReader<Cookie> + 'a,
        S: Schedule<DecryptionContext> + 's,
    {
        Ok(InternalDecryptor {
            source: source.into_boxed(),
            sym_algo,
            aead,
            schedule: Box::new(schedule),
            digest_size: aead.digest_size()?,
            chunk_size,
            chunk_index: 0,
            bytes_decrypted: 0,
            buffer: Vec::with_capacity(chunk_size),
        })
    }

    // Note: this implementation tries *very* hard to make sure we don't
    // gratuitously do a short read. Specifically, if the return value
    // is less than `plaintext.len()`, then it is either because we
    // reached the end of the input or an error occurred.
    fn read_helper(&mut self, plaintext: &mut [u8]) -> Result<usize> {
        let mut pos = 0;

        // 1. Copy any buffered data.
        if !self.buffer.is_empty() {
            let to_copy = cmp::min(self.buffer.len(), plaintext.len());
            plaintext[..to_copy].copy_from_slice(&self.buffer[..to_copy]);
            crate::vec_drain_prefix(&mut self.buffer, to_copy);

            pos = to_copy;
            if pos == plaintext.len() {
                return Ok(pos);
            }
        }

        // 2. Decrypt the data a chunk at a time until we've filled
        // `plaintext`.
        //
        // Unfortunately, framing is hard.
        //
        // Recall: AEAD data is of the form:
        //
        //   [ chunk1 ][ tag1 ] ... [ chunkN ][ tagN ][ tagF ]
        //
        // And, all chunks are the same size except for the last
        // chunk, which may be shorter.
        //
        // The naive approach to decryption is to read a chunk and a
        // tag at a time. Unfortunately, this may not work if the
        // last chunk is a partial chunk.
        //
        // Assume that the chunk size is 32 bytes and the digest size
        // is 16 bytes, and consider a message with 17 bytes of data.
        // That message will be encrypted as follows:
        //
        //   [ chunk1 ][ tag1 ][ tagF ]
        //      17B      16B     16B
        //
        // If we read a chunk and a digest, we'll successfully read 48
        // bytes of data. Unfortunately, we'll have over-read: the
        // last 15 bytes are from the final tag.
        //
        // To correctly handle this case, we have to make sure that
        // there are at least a tag worth of bytes left over when we
        // read a chunk and a tag.
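        //
        // So, for every iteration, we ask the source for a chunk, a
        // tag, *and* a final tag's worth of data. If we get less
        // than that, the source is exhausted and the final tag is
        // among the bytes we got back.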

        let n_chunks
            = (plaintext.len() - pos + self.chunk_size - 1) / self.chunk_size;
        let chunk_digest_size = self.chunk_size + self.digest_size;
        let final_digest_size = self.digest_size;

        for _ in 0..n_chunks {
            // Do a little dance to avoid exclusively locking
            // `self.source`.
            let to_read = chunk_digest_size + final_digest_size;
            let result = {
                match self.source.data(to_read) {
                    Ok(_) => Ok(self.source.buffer()),
                    Err(err) => Err(err),
                }
            };

            let check_final_tag;
            let chunk = match result {
                Ok(chunk) => {
                    if chunk.is_empty() {
                        // Exhausted source.
                        return Ok(pos);
                    }

                    if chunk.len() < final_digest_size {
                        return Err(Error::ManipulatedMessage.into());
                    }

                    check_final_tag = chunk.len() < to_read;

                    // Return the chunk.
                    &chunk[..cmp::min(chunk.len(), to_read) - final_digest_size]
                },
                Err(e) => return Err(e.into()),
            };

            assert!(chunk.len() <= chunk_digest_size);

            if chunk.is_empty() {
                // There is nothing to decrypt: all that is left is
                // the final tag.
            } else if chunk.len() <= self.digest_size {
                // A chunk has to include at least one byte and a tag.
                return Err(Error::ManipulatedMessage.into());
            } else {
                let mut aead = self.schedule.chunk(
                    self.chunk_index,
                    &mut |key, iv, ad| {
                        self.aead.context(self.sym_algo, key, ad, iv)?
                            .for_decryption()
                    })?;

                // Decrypt the chunk and check the tag.
                let to_decrypt = chunk.len() - self.digest_size;

                // If plaintext doesn't have enough room for the whole
                // chunk, then we have to double buffer.
                let double_buffer = to_decrypt > plaintext.len() - pos;
                let buffer = if double_buffer {
                    self.buffer.resize(to_decrypt, 0);
                    &mut self.buffer[..]
                } else {
                    &mut plaintext[pos..pos + to_decrypt]
                };

                aead.decrypt_verify(buffer, chunk)?;

                if double_buffer {
                    let to_copy = plaintext.len() - pos;
                    assert!(0 < to_copy);
                    assert!(to_copy < self.chunk_size);

                    plaintext[pos..pos + to_copy]
                        .copy_from_slice(&self.buffer[..to_copy]);
                    crate::vec_drain_prefix(&mut self.buffer, to_copy);
                    pos += to_copy;
                } else {
                    pos += to_decrypt;
                }

                // Increase the chunk index and the decrypted byte count.
                self.chunk_index += 1;
                self.bytes_decrypted += to_decrypt as u64;

                // Consume the data only on success so that we keep
                // returning the error.
                let chunk_len = chunk.len();
                self.source.consume(chunk_len);
            }

            if check_final_tag {
                // We read the whole ciphertext, now check the final digest.
                let mut aead = self.schedule.finalizer(
                    self.chunk_index, self.bytes_decrypted,
                    &mut |key, iv, ad| {
                        self.aead.context(self.sym_algo, key, ad, iv)?
                            .for_decryption()
                    })?;

                let final_digest = self.source.data(final_digest_size)?;

                aead.decrypt_verify(&mut [], final_digest)?;

                // Consume the data only on success so that we keep
                // returning the error.
                self.source.consume(final_digest_size);
                break;
            }
        }

        Ok(pos)
    }
}

// Note: this implementation tries *very* hard to make sure we don't
// gratuitously do a short read. Specifically, if the return value
// is less than `plaintext.len()`, then it is either because we
// reached the end of the input or an error occurred.
impl io::Read for InternalDecryptor<'_, '_> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self.read_helper(buf) {
            Ok(n) => Ok(n),
            Err(e) => match e.downcast::<io::Error>() {
                // An io::Error. Pass as-is.
                Ok(e) => Err(e),
                // A failure. Wrap it.
                Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
            },
        }
    }
}

/// A `BufferedReader` that decrypts AEAD-encrypted data as it is
/// read.
pub struct Decryptor<'a, 's> {
    reader: buffered_reader::Generic<InternalDecryptor<'a, 's>, Cookie>,
}

impl<'a, 's> Decryptor<'a, 's> {
    /// Instantiate a new AEAD decryptor.
    ///
    /// `source` is the ciphertext to decrypt.
    pub fn new<S>(symm: SymmetricAlgorithm,
                  aead: AEADAlgorithm,
                  chunk_size: usize,
                  schedule: S,
                  source: Box<dyn BufferedReader<Cookie> + 'a>)
                  -> Result<Self>
    where
        S: Schedule<DecryptionContext> + 's,
    {
        Self::with_cookie(symm, aead, chunk_size, schedule, source,
                          Default::default())
    }

    /// Like [`Decryptor::new`], but sets a cookie.
    pub fn with_cookie<S>(symm: SymmetricAlgorithm,
                          aead: AEADAlgorithm,
                          chunk_size: usize,
                          schedule: S,
                          source: Box<dyn BufferedReader<Cookie> + 'a>,
                          cookie: Cookie)
                          -> Result<Self>
    where
        S: Schedule<DecryptionContext> + 's,
    {
        Ok(Decryptor {
            reader: buffered_reader::Generic::with_cookie(
                InternalDecryptor::new(
                    symm, aead, chunk_size, schedule, source)?,
                None, cookie),
        })
    }
}

impl io::Read for Decryptor<'_, '_> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.reader.read(buf)
    }
}

impl fmt::Display for Decryptor<'_, '_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Decryptor")
    }
}

impl fmt::Debug for Decryptor<'_, '_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Decryptor")
            .field("reader", &self.get_ref().unwrap())
            .finish()
    }
}

impl BufferedReader<Cookie> for Decryptor<'_, '_> {
    fn buffer(&self) -> &[u8] {
        self.reader.buffer()
    }

    fn data(&mut self, amount: usize) -> io::Result<&[u8]> {
        self.reader.data(amount)
    }

    fn data_hard(&mut self, amount: usize) -> io::Result<&[u8]> {
        self.reader.data_hard(amount)
    }

    fn data_eof(&mut self) -> io::Result<&[u8]> {
        self.reader.data_eof()
    }

    fn consume(&mut self, amount: usize) -> &[u8] {
        self.reader.consume(amount)
    }

    fn data_consume(&mut self, amount: usize)
                    -> io::Result<&[u8]> {
        self.reader.data_consume(amount)
    }

    fn data_consume_hard(&mut self, amount: usize) -> io::Result<&[u8]> {
        self.reader.data_consume_hard(amount)
    }

    fn read_be_u16(&mut self) -> io::Result<u16> {
        self.reader.read_be_u16()
    }

    fn read_be_u32(&mut self) -> io::Result<u32> {
        self.reader.read_be_u32()
    }

    fn steal(&mut self, amount: usize) -> io::Result<Vec<u8>> {
        self.reader.steal(amount)
    }

    fn steal_eof(&mut self) -> io::Result<Vec<u8>> {
        self.reader.steal_eof()
    }

    fn get_mut(&mut self) -> Option<&mut dyn BufferedReader<Cookie>> {
        Some(&mut self.reader.reader_mut().source)
    }

    fn get_ref(&self) -> Option<&dyn BufferedReader<Cookie>> {
        Some(&self.reader.reader_ref().source)
    }

    fn into_inner<'b>(self: Box<Self>)
        -> Option<Box<dyn BufferedReader<Cookie> + 'b>> where Self: 'b {
        Some(self.reader.into_reader().source.into_boxed())
    }

    fn cookie_set(&mut self, cookie: Cookie) -> Cookie {
        self.reader.cookie_set(cookie)
    }

    fn cookie_ref(&self) -> &Cookie {
        self.reader.cookie_ref()
    }

    fn cookie_mut(&mut self) -> &mut Cookie {
        self.reader.cookie_mut()
    }
}

/// A `Write`r for AEAD encrypting data.
pub struct Encryptor<'s, W: io::Write> {
    inner: Option<W>,

    sym_algo: SymmetricAlgorithm,
    aead: AEADAlgorithm,
    schedule: Box<dyn Schedule<EncryptionContext> + 's>,

    digest_size: usize,
    chunk_size: usize,
    chunk_index: u64,
    bytes_encrypted: u64,
    // Up to a chunk of unencrypted data.
    buffer: Vec<u8>,

    // A place to write encrypted data into.
    scratch: Vec<u8>,
}
assert_send_and_sync!(Encryptor<'_, W> where W: io::Write);

impl<'s, W: io::Write> Encryptor<'s, W> {
    /// Instantiate a new AEAD encryptor.
    pub fn new<S>(sym_algo: SymmetricAlgorithm, aead: AEADAlgorithm,
                  chunk_size: usize, schedule: S, sink: W)
                  -> Result<Self>
    where
        S: Schedule<EncryptionContext> + 's,
    {
        Ok(Encryptor {
            inner: Some(sink),
            sym_algo,
            aead,
            schedule: Box::new(schedule),
            digest_size: aead.digest_size()?,
            chunk_size,
            chunk_index: 0,
            bytes_encrypted: 0,
            buffer: Vec::with_capacity(chunk_size),
            scratch: vec![0; chunk_size + aead.digest_size()?],
        })
    }

    // Like io::Write, but returns our Result.
    fn write_helper(&mut self, mut buf: &[u8]) -> Result<usize> {
        if self.inner.is_none() {
            return Err(io::Error::new(io::ErrorKind::BrokenPipe,
                                      "Inner writer was taken").into());
        }
        let amount = buf.len();

        // First, fill the buffer if there is something in it.
        if !self.buffer.is_empty() {
            let n = cmp::min(buf.len(), self.chunk_size - self.buffer.len());
            self.buffer.extend_from_slice(&buf[..n]);
            assert!(self.buffer.len() <= self.chunk_size);
            buf = &buf[n..];

            // And possibly encrypt the chunk.
            if self.buffer.len() == self.chunk_size {
                let mut aead =
                    self.schedule.chunk(self.chunk_index, &mut |key, iv, ad| {
                        self.aead.context(self.sym_algo, key, ad, iv)?
                            .for_encryption()
                    })?;

                let inner = self.inner.as_mut().unwrap();

                // Encrypt the chunk.
                aead.encrypt_seal(&mut self.scratch, &self.buffer)?;
                self.bytes_encrypted += self.chunk_size as u64;
                self.chunk_index += 1;
                // XXX: clear plaintext buffer.
                crate::vec_truncate(&mut self.buffer, 0);
                inner.write_all(&self.scratch)?;
            }
        }

        // Then, encrypt all whole chunks.
        for chunk in buf.chunks(self.chunk_size) {
            if chunk.len() == self.chunk_size {
                // Complete chunk.
                let mut aead =
                    self.schedule.chunk(self.chunk_index, &mut |key, iv, ad| {
                        self.aead.context(self.sym_algo, key, ad, iv)?
                            .for_encryption()
                    })?;

                let inner = self.inner.as_mut().unwrap();

                // Encrypt the chunk.
                aead.encrypt_seal(&mut self.scratch, chunk)?;
                self.bytes_encrypted += self.chunk_size as u64;
                self.chunk_index += 1;
                inner.write_all(&self.scratch)?;
            } else {
                // Stash for later.
                assert!(self.buffer.is_empty());
                self.buffer.extend_from_slice(chunk);
            }
        }

        Ok(amount)
    }

    /// Finish encryption and write the last partial chunk.
    pub fn finalize(mut self) -> Result<W> {
        self.finalize_intern()
    }

    /// Like [`Self::finalize`], but with a mutable reference.
    ///
    /// This can be used in [`Self::drop`], whereas [`Self::finalize`]
    /// consumes self, and is convenient for callers because consuming
    /// self makes Rust understand that any borrow on the writer
    /// terminates.
    fn finalize_intern(&mut self) -> Result<W> {
        if let Some(mut inner) = self.inner.take() {
            if !self.buffer.is_empty() {
                let mut aead =
                    self.schedule.chunk(self.chunk_index, &mut |key, iv, ad| {
                        self.aead.context(self.sym_algo, key, ad, iv)?
                            .for_encryption()
                    })?;

                // Encrypt the chunk.
                unsafe {
                    // Safety: remaining data is less than the chunk
                    // size. The vector has capacity chunk size plus
                    // digest size.
                    debug_assert!(self.buffer.len() < self.chunk_size);
                    self.scratch.set_len(self.buffer.len() + self.digest_size)
                }
                aead.encrypt_seal(&mut self.scratch, &self.buffer)?;
                self.bytes_encrypted += self.buffer.len() as u64;
                self.chunk_index += 1;
                // XXX: clear plaintext buffer.
                crate::vec_truncate(&mut self.buffer, 0);
                inner.write_all(&self.scratch)?;
            }

            // Write the final digest.
            let mut aead = self.schedule.finalizer(
                self.chunk_index, self.bytes_encrypted,
                &mut |key, iv, ad| {
                    self.aead.context(self.sym_algo, key, ad, iv)?
                        .for_encryption()
                })?;
            debug_assert!(self.digest_size <= self.scratch.len());
            aead.encrypt_seal(&mut self.scratch[..self.digest_size], b"")?;
            inner.write_all(&self.scratch[..self.digest_size])?;

            Ok(inner)
        } else {
            Err(io::Error::new(io::ErrorKind::BrokenPipe,
                               "Inner writer was taken").into())
        }
    }

    /// Acquires a reference to the underlying writer.
    pub(crate) fn get_ref(&self) -> Option<&W> {
        self.inner.as_ref()
    }

    /// Acquires a mutable reference to the underlying writer.
    #[allow(dead_code)]
    pub(crate) fn get_mut(&mut self) -> Option<&mut W> {
        self.inner.as_mut()
    }
}

impl<W: io::Write> io::Write for Encryptor<'_, W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match self.write_helper(buf) {
            Ok(n) => Ok(n),
            Err(e) => match e.downcast::<io::Error>() {
                // An io::Error. Pass as-is.
                Ok(e) => Err(e),
                // A failure. Wrap it.
                Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
            },
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        // It is not clear how we can implement this, because we can
        // only operate on chunk sizes. We will, however, ask our
        // inner writer to flush.
        if let Some(ref mut inner) = self.inner {
            inner.flush()
        } else {
            Err(io::Error::new(io::ErrorKind::BrokenPipe,
                               "Inner writer was taken"))
        }
    }
}

impl<W: io::Write> Drop for Encryptor<'_, W> {
    fn drop(&mut self) {
        // Unfortunately, we cannot handle errors here. If error
        // handling is a concern, call finalize() and properly handle
        // errors there.
        let _ = self.finalize_intern();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::{Read, Write};

    /// This test tries to encrypt, then decrypt some data.
    #[test]
    fn roundtrip() {
        // EAX and OCB can be used with all symmetric algorithms using
        // a 16-byte block size.
        for sym_algo in [SymmetricAlgorithm::AES128,
                         SymmetricAlgorithm::AES192,
                         SymmetricAlgorithm::AES256,
                         SymmetricAlgorithm::Twofish,
                         SymmetricAlgorithm::Camellia128,
                         SymmetricAlgorithm::Camellia192,
                         SymmetricAlgorithm::Camellia256]
            .iter()
            .filter(|algo| algo.is_supported()) {

            for aead in [
                AEADAlgorithm::EAX,
                AEADAlgorithm::OCB,
                AEADAlgorithm::GCM,
            ].iter().filter(|algo| {
                use crate::crypto::backend::{Backend, interface::Aead};
                Backend::supports_algo_with_symmetric(**algo, *sym_algo)
            }) {
                let chunk_size = 64;
                let mut key = vec![0; sym_algo.key_size().unwrap()];
                crate::crypto::random(&mut key).unwrap();
                let key: SessionKey = key.into();
                let mut iv = vec![0; aead.nonce_size().unwrap()];
                crate::crypto::random(&mut iv).unwrap();

                let mut ciphertext = Vec::new();
                {
                    let schedule = SEIPv2Schedule::new(
                        &key,
                        *sym_algo,
                        *aead,
                        chunk_size,
                        &iv).expect("valid parameters");
                    let mut encryptor = Encryptor::new(*sym_algo,
                                                       *aead,
                                                       chunk_size,
                                                       schedule,
                                                       &mut ciphertext)
                        .unwrap();

                    encryptor.write_all(crate::tests::manifesto()).unwrap();
                }

                let mut plaintext = Vec::new();
                {
                    let cur = buffered_reader::Memory::with_cookie(
                        &ciphertext, Default::default());
                    let schedule = SEIPv2Schedule::new(
                        &key,
                        *sym_algo,
                        *aead,
                        chunk_size,
                        &iv).expect("valid parameters");
                    let mut decryptor = Decryptor::new(*sym_algo,
                                                       *aead,
                                                       chunk_size,
                                                       schedule,
                                                       cur.into_boxed())
                        .unwrap();

                    decryptor.read_to_end(&mut plaintext).unwrap();
                }

                assert_eq!(&plaintext[..], crate::tests::manifesto());
            }
        }
    }
}