// sequoia_openpgp/crypto/mem.rs

//! Memory protection and encryption.
//!
//! Sequoia makes an effort to protect secrets stored in memory.  Even
//! though a process's memory should be protected from being read by an
//! adversary, there may be bugs in the program, or in the architecture
//! the program is running on, that allow (partial) recovery of data.
//! Or, the process may be serialized to persistent storage, and its
//! memory may be inspected while it is not running.
//!
//! To reduce the window for these kinds of exfiltration, we use
//! [`Protected`] to clear the memory once it is no longer in use, and
//! [`Encrypted`] to protect long-term secrets like passwords and
//! secret keys.
//!
//! Furthermore, operations involving secrets must be carried out in a
//! way that avoids leaking information.  For example, comparison
//! must be done in constant time with [`secure_cmp`].
//!
//!   [`secure_cmp`]: secure_cmp()
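//!
//! # Examples
//!
//! A small sketch using only items from this module: the secret is
//! copied into protected memory, then compared in constant time.
//!
//! ```rust
//! use std::cmp::Ordering;
//! use sequoia_openpgp::crypto::mem::{Protected, secure_cmp};
//!
//! // Copy the secret into protected memory; it is zeroed on drop.
//! let secret: Protected = (&b"hunter2"[..]).into();
//!
//! // Compare without leaking timing information about the contents.
//! assert_eq!(secure_cmp(&secret, b"hunter2"), Ordering::Equal);
//! ```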

use std::cmp::{min, Ordering};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};

/// Whether to trace execution by default (on stderr).
const TRACE: bool = false;

/// Protected memory.
///
/// The memory is guaranteed not to be copied around, and is cleared
/// when the object is dropped.
///
/// # Examples
///
/// ```rust
/// use sequoia_openpgp::crypto::mem::Protected;
///
/// {
///     let p: Protected = vec![0, 1, 2].into();
///     assert_eq!(p.as_ref(), &[0, 1, 2]);
/// }
///
/// // p is cleared once it goes out of scope.
/// ```
// # Note on the implementation
//
// We use a boxed slice, then Box::leak the Box.  This takes the
// knowledge about the shape of the heap allocation away from Rust,
// preventing any optimization based on that.
//
// For example, Rust could conceivably compact the heap: The borrow
// checker knows when no references exist, and this is an excellent
// opportunity to move the object on the heap because only one pointer
// needs to be updated.
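//
// In isolation, the technique looks like this (a sketch mirroring
// `From<Box<[u8]>>` and `Drop` below):
//
//     let b: Box<[u8]> = vec![0u8; 16].into_boxed_slice();
//     let p: *mut [u8] = Box::leak(b);    // Hide the allocation from Rust.
//     // ... use p ...
//     drop(unsafe { Box::from_raw(p) });  // Reconstitute it to free it.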
pub struct Protected(*mut [u8]);

// Safety: Box<[u8]> is Send and Sync, we do not expose any
// functionality that was not possible before, hence Protected may
// still be Send and Sync.
unsafe impl Send for Protected {}
unsafe impl Sync for Protected {}

impl Clone for Protected {
    fn clone(&self) -> Self {
        // Make a vector with the correct size to avoid potential
        // reallocations when turning it into a `Protected`.
        let mut p = Vec::with_capacity(self.len());
        p.extend_from_slice(self);
        p.into_boxed_slice().into()
    }
}

impl PartialEq for Protected {
    fn eq(&self, other: &Self) -> bool {
        secure_cmp(self, other) == Ordering::Equal
    }
}

impl Eq for Protected {}

impl Hash for Protected {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.as_ref().hash(state);
    }
}

impl Protected {
    /// Allocates a chunk of protected memory.
    ///
    /// Effective protection of sensitive values requires avoiding any
    /// copying and reallocations.  Therefore, the size must be
    /// provided upfront at allocation time; the secrets are then
    /// copied into this protected memory region.
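    ///
    /// # Examples
    ///
    /// A sketch of the intended pattern: allocate with the final size,
    /// then fill the buffer in place.
    ///
    /// ```rust
    /// use sequoia_openpgp::crypto::mem::Protected;
    ///
    /// let mut p = Protected::new(3);
    /// p.iter_mut().zip([1u8, 2, 3].iter()).for_each(|(b, s)| *b = *s);
    /// assert_eq!(p.as_ref(), &[1, 2, 3]);
    /// ```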
    pub fn new(size: usize) -> Protected {
        vec![0; size].into_boxed_slice().into()
    }

    /// Converts to a buffer for modification.
    ///
    /// Don't expose `Protected` values unless you know what you're doing.
    pub(crate) fn expose_into_unprotected_vec(self) -> Vec<u8> {
        let mut p = Vec::with_capacity(self.len());
        p.extend_from_slice(&self);
        p
    }
}

impl Deref for Protected {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.as_ref()
    }
}

impl AsRef<[u8]> for Protected {
    fn as_ref(&self) -> &[u8] {
        unsafe { &*self.0 }
    }
}

impl AsMut<[u8]> for Protected {
    fn as_mut(&mut self) -> &mut [u8] {
        unsafe { &mut *self.0 }
    }
}

impl DerefMut for Protected {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl From<Vec<u8>> for Protected {
    fn from(mut v: Vec<u8>) -> Self {
        // Make a careful copy of the data.  We do this instead of
        // reusing v's allocation so that our allocation has the exact
        // size.
        let p = Protected::from(&v[..]);

        // Now clear the previous allocation.  Just to be safe, we
        // clear the whole allocation.
        let capacity = v.capacity();
        unsafe {
            // Safety: New size is equal to the capacity, and we
            // initialize all elements.
            v.set_len(capacity);
            memsec::memzero(v.as_mut_ptr(), capacity);
        }

        p
    }
}

/// Zeros N bytes on the stack after running the given closure.
///
/// Note: In general, don't use this function directly; use the more
/// convenient and robust macro `zero_stack!` instead, like so:
///
/// ```ignore
/// zero_stack!(128 bytes after running {
///     let mut a = [0; 6];
///     a.copy_from_slice(b"secret");
/// })
/// ```
///
/// Or, if you need to specify the type of the expression:
///
/// ```ignore
/// zero_stack!(128 bytes after running || -> () {
///     let mut a = [0; 6];
///     a.copy_from_slice(b"secret");
/// })
/// ```
///
/// If you must use this function directly, make sure to declare `fun`
/// as `#[inline(never)]`.
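///
/// A hedged sketch of direct use (`compute_secret` is a hypothetical
/// callee; the `#[inline(never)]` is essential, see above):
///
/// ```ignore
/// #[inline(never)]
/// fn compute_secret() -> [u8; 32] {
///     // ... key material lives on this stack frame ...
///     [0; 32]
/// }
///
/// // Zero 128 bytes of stack after compute_secret has returned.
/// let r = zero_stack_after::<128, _>(compute_secret);
/// ```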
#[allow(dead_code)]
#[inline(never)]
pub(crate) fn zero_stack_after<const N: usize, T>(fun: impl FnOnce() -> T) -> T
{
    zero_stack::<N, T>(fun())
}

/// Zeros N bytes on the stack, returning the given value.
///
/// Note: In general, don't use this function directly.  This is only
/// effective if `v` has been computed by a function that has been
/// marked as `#[inline(never)]`.  However, since the inline attribute
/// is only a hint that may be freely ignored by the compiler, it is
/// sometimes necessary to use this function directly.
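///
/// For example (a sketch; `compute` stands in for any
/// `#[inline(never)]` function producing a secret):
///
/// ```ignore
/// let s = zero_stack::<128, _>(compute());
/// ```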
#[allow(dead_code)]
#[inline(never)]
pub(crate) fn zero_stack<const N: usize, T>(v: T) -> T {
    tracer!(TRACE, "zero_stack");
    let mut a = [0xffu8; N];
    t!("zeroing {:?}..{:?}", a.as_ptr(), unsafe { a.as_ptr().offset(N as _) });
    unsafe {
        memsec::memzero(a.as_mut_ptr(), a.len());
    }
    std::hint::black_box(a);
    v
}

/// Very carefully copies the slice.
///
/// The obvious `to.copy_from_slice(from);` indeed leaks secrets.
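///
/// A sketch of the intended use (not a doctest, as this item is
/// crate-private):
///
/// ```ignore
/// let mut buf = Protected::new(6);
/// careful_memcpy(b"secret", &mut buf);
/// assert_eq!(buf.as_ref(), b"secret");
/// ```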
pub(crate) fn careful_memcpy(from: &[u8], to: &mut [u8]) {
    from.iter().zip(to.iter_mut()).for_each(|(f, t)| *t = *f);
}

impl From<Box<[u8]>> for Protected {
    fn from(v: Box<[u8]>) -> Self {
        Protected(Box::leak(v))
    }
}

impl From<&[u8]> for Protected {
    fn from(v: &[u8]) -> Self {
        let mut p = Protected::new(v.len());
        careful_memcpy(v, &mut p);
        p
    }
}

impl<const N: usize> From<[u8; N]> for Protected {
    fn from(mut v: [u8; N]) -> Self {
        let mut p = Protected::new(v.len());
        careful_memcpy(&v, &mut p);
        unsafe {
            memsec::memzero(v.as_mut_ptr(), v.len());
        }
        p
    }
}

impl Drop for Protected {
    fn drop(&mut self) {
        unsafe {
            let len = self.len();
            memsec::memzero(self.as_mut().as_mut_ptr(), len);
            drop(Box::from_raw(self.0));
        }
    }
}

impl fmt::Debug for Protected {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if cfg!(debug_assertions) {
            // In debug builds, show the contents, not the raw pointer.
            write!(f, "{:?}", self.as_ref())
        } else {
            f.write_str("[<Redacted>]")
        }
    }
}

/// Encrypted memory.
///
/// This type encrypts sensitive data, such as secret keys, in memory
/// while they are unused, and decrypts them on demand.  This protects
/// against cross-protection-boundary readout via microarchitectural
/// flaws like Spectre or Meltdown, via attacks on physical layout
/// like Rowhammer, and even via coldboot attacks.
///
/// The key insight is that these kinds of attacks are imperfect,
/// i.e. the recovered data contains bitflips, or the attack only
/// provides a probability for any given bit.  Applied to
/// cryptographic keys, however, such imperfect attacks are enough to
/// recover the actual key.
///
/// This implementation, on the other hand, derives a sealing key from
/// a large area of memory, the "pre-key", using a key derivation
/// function.  Now, any single bitflip in the readout of the pre-key
/// will avalanche through all the bits in the sealing key, rendering
/// it unusable with no indication of where the error occurred.
///
/// This kind of protection was pioneered by OpenSSH.  The commit
/// adding it can be found
/// [here](https://marc.info/?l=openbsd-cvs&m=156109087822676).
///
/// # Examples
///
/// ```rust
/// # fn main() -> sequoia_openpgp::Result<()> {
/// use sequoia_openpgp::crypto::mem::Encrypted;
///
/// let e = Encrypted::new(vec![0, 1, 2].into())?;
/// e.map(|p| {
///     // e is temporarily decrypted and made available to the closure.
///     assert_eq!(p.as_ref(), &[0, 1, 2]);
///     // p is cleared once the function returns.
/// });
/// # Ok(()) }
/// ```
#[derive(Clone, Debug)]
pub struct Encrypted {
    ciphertext: Protected,
    salt: [u8; 32],
    plaintext_len: usize,
}
assert_send_and_sync!(Encrypted);

impl PartialEq for Encrypted {
    fn eq(&self, other: &Self) -> bool {
        // Protected::eq is time-constant.
        self.map(|a| other.map(|b| a == b))
    }
}

impl Eq for Encrypted {}

impl Hash for Encrypted {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.map(|k| Hash::hash(k, state));
    }
}

/// Opt out of memory encryption.
const DANGER_DISABLE_ENCRYPTED_MEMORY: bool = false;

/// The number of pages containing random bytes to derive the prekey
/// from.
const ENCRYPTED_MEMORY_PREKEY_PAGES: usize = 4;

/// Page size.
const ENCRYPTED_MEMORY_PAGE_SIZE: usize = 4096;

/// This module contains the code that needs to access the prekey.
///
/// Code outside of it cannot access it, because `PREKEY` is private.
mod has_access_to_prekey {
    use crate::Result;
    use crate::types::{AEADAlgorithm, HashAlgorithm, SymmetricAlgorithm};
    use crate::crypto::{aead, SessionKey};
    use super::*;

    /// Returns the pre-key.
    ///
    /// Access to this function is restricted to this module and its
    /// descendants.
    fn prekey() -> Result<&'static Box<[Box<[u8]>]>> {
        use std::sync::OnceLock;

        static PREKEY: OnceLock<Result<Box<[Box<[u8]>]>>>
            = OnceLock::new();
        PREKEY.get_or_init(|| -> Result<Box<[Box<[u8]>]>> {
            let mut pages = Vec::new();
            for _ in 0..ENCRYPTED_MEMORY_PREKEY_PAGES {
                let mut page = vec![0; ENCRYPTED_MEMORY_PAGE_SIZE];
                crate::crypto::random(&mut page)?;
                pages.push(page.into());
            }
            Ok(pages.into())
        }).as_ref().map_err(|e| anyhow::anyhow!("{}", e))
    }

    // Algorithms used for the memory encryption.
    //
    // The digest of the hash algorithm must be at least as large as
    // the size of the key used by the symmetric algorithm.  All
    // algorithms MUST be supported by the cryptographic library.
    const HASH_ALGO: HashAlgorithm = HashAlgorithm::SHA256;
    const SYMMETRIC_ALGO: SymmetricAlgorithm = SymmetricAlgorithm::AES256;

    impl Encrypted {
        /// Computes the sealing key used to encrypt the memory.
        fn sealing_key(salt: &[u8; 32]) -> Result<SessionKey> {
            let mut ctx = HASH_ALGO.context()
                .expect("Mandatory algorithm unsupported")
                .for_digest();
            ctx.update(salt);
            prekey()?
                .iter().for_each(|page| ctx.update(page));
            let mut sk: SessionKey = Protected::new(256/8).into();
            let _ = ctx.digest(&mut sk);
            Ok(sk)
        }

        /// Returns a zero nonce.
        ///
        /// The key is unique to every memory object and we don't do
        /// chunking, so a zero nonce is safe.
        fn nonce(aead_algo: AEADAlgorithm) -> &'static [u8] {
            const NONCE_STORE: [u8; aead::MAX_NONCE_LEN] =
                [0u8; aead::MAX_NONCE_LEN];
            let nonce_len = aead_algo.nonce_size()
                .expect("Mandatory algorithm unsupported");
            debug_assert!(nonce_len >= 8 && nonce_len <= aead::MAX_NONCE_LEN);
            &NONCE_STORE[..nonce_len]
        }

        /// Encrypts the given chunk of memory.
        pub fn new(p: Protected) -> Result<Self> {
            if DANGER_DISABLE_ENCRYPTED_MEMORY {
                return Ok(Encrypted {
                    plaintext_len: p.len(),
                    ciphertext: p,
                    salt: Default::default(),
                });
            }

            let aead_algo = AEADAlgorithm::default();
            let mut salt = [0; 32];
            crate::crypto::random(&mut salt)?;
            let mut ciphertext = Protected::new(
                p.len() + aead_algo.digest_size().expect("supported"));

            aead_algo.context(SYMMETRIC_ALGO,
                              &Self::sealing_key(&salt)?,
                              &[],
                              Self::nonce(aead_algo))?
                .for_encryption()?
                .encrypt_seal(&mut ciphertext, &p)?;

            Ok(Encrypted {
                plaintext_len: p.len(),
                ciphertext,
                salt,
            })
        }

        /// Maps the given function over the temporarily decrypted
        /// memory.
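        ///
        /// # Examples
        ///
        /// The plaintext is only available inside the closure, and is
        /// cleared again when the closure returns:
        ///
        /// ```rust
        /// # fn main() -> sequoia_openpgp::Result<()> {
        /// use sequoia_openpgp::crypto::mem::Encrypted;
        ///
        /// let e = Encrypted::new(vec![3, 1, 4].into())?;
        /// let sum: u8 = e.map(|p| p.iter().sum());
        /// assert_eq!(sum, 8);
        /// # Ok(()) }
        /// ```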
        pub fn map<F, T>(&self, mut fun: F) -> T
            where F: FnMut(&Protected) -> T
        {
            if DANGER_DISABLE_ENCRYPTED_MEMORY {
                return fun(&self.ciphertext);
            }

            let aead_algo = AEADAlgorithm::default();
            let mut plaintext = Protected::new(self.plaintext_len);

            let r = aead_algo.context(SYMMETRIC_ALGO,
                              &Self::sealing_key(&self.salt).unwrap(),
                              &[],
                              Self::nonce(aead_algo)).unwrap()
                .for_decryption().unwrap()
                .decrypt_verify(&mut plaintext, &self.ciphertext);

            // Be careful not to leak partially decrypted plain text.
            if r.is_err() {
                drop(plaintext); // Securely erase partial plaintext.
                panic!("Encrypted memory modified or corrupted");
            }
            fun(&plaintext)
        }
    }
}

/// Time-constant comparison.
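///
/// If the operands differ in length, the result is determined by the
/// length comparison alone.
///
/// # Examples
///
/// ```rust
/// use std::cmp::Ordering;
/// use sequoia_openpgp::crypto::mem::secure_cmp;
///
/// assert_eq!(secure_cmp(b"correct", b"correct"), Ordering::Equal);
/// assert_eq!(secure_cmp(b"a", b"b"), Ordering::Less);
/// ```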
pub fn secure_cmp(a: &[u8], b: &[u8]) -> Ordering {
    let ord1 = a.len().cmp(&b.len());
    let ord2 = unsafe {
        memsec::memcmp(a.as_ptr(), b.as_ptr(), min(a.len(), b.len()))
    };
    let ord2 = match ord2 {
        1..=i32::MAX => Ordering::Greater,
        0 => Ordering::Equal,
        i32::MIN..=-1 => Ordering::Less,
    };

    if ord1 == Ordering::Equal { ord2 } else { ord1 }
}