fog_pack/
entry.rs

//! Serialized data associated with a parent Document and key string.
//!
//! Entries are created by calling [`NewEntry::new`] with serializable data, the parent
//! document, and the key string. Once created, they can be signed and have their compression
//! settings chosen. Entries (new or otherwise) are verified and encoded using a
//! [`Schema`][crate::schema::Schema], which should match the schema used by the parent document.
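//!
//! A rough usage sketch (not compiled here): `doc` is assumed to be a
//! [`Document`][crate::document::Document] that uses a schema, and `signing_key` an
//! [`IdentityKey`] from fog-crypto; the exact construction of both lives outside this module.
//!
//! ```ignore
//! fn attach_note(doc: &Document, signing_key: &IdentityKey) -> Result<NewEntry> {
//!     // Serialize a string payload under the "notes" key of the parent document,
//!     // then sign it. Compression could also be overridden with `compression()`.
//!     let entry = NewEntry::new("notes", doc, "hello from an entry")?.sign(signing_key)?;
//!     Ok(entry)
//! }
//! ```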

use crate::error::{Error, Result};
use crate::{
    compress::CompressType,
    de::FogDeserializer,
    document::Document,
    element::{serialize_elem, Element},
    ser::FogSerializer,
    MAX_ENTRY_SIZE,
};
use byteorder::{LittleEndian, ReadBytesExt};
use fog_crypto::{
    hash::{Hash, HashState},
    identity::{Identity, IdentityKey},
};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;

pub(crate) const ENTRY_PREFIX_LEN: usize = 3;

pub(crate) struct SplitEntry<'a> {
    pub compress_raw: u8,
    pub data: &'a [u8],
    pub signature_raw: &'a [u8],
}

impl<'a> SplitEntry<'a> {
    pub(crate) fn split(buf: &'a [u8]) -> Result<SplitEntry> {
        // Compression marker
        let (&compress_raw, mut buf) = buf.split_first().ok_or(Error::LengthTooShort {
            step: "get compress type",
            actual: 0,
            expected: 1,
        })?;
        // Data length
        let data_len = buf
            .read_u16::<LittleEndian>()
            .map_err(|_| Error::LengthTooShort {
                step: "get data length",
                actual: buf.len(),
                expected: 2,
            })? as usize;
        if data_len > buf.len() {
            return Err(Error::LengthTooShort {
                step: "get entry data",
                actual: buf.len(),
                expected: data_len,
            });
        }
        // Data & signature
        let (data, signature_raw) = buf.split_at(data_len);
        Ok(Self {
            compress_raw,
            data,
            signature_raw,
        })
    }
}

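// A minimal illustration of the raw layout `split` expects: one compression marker byte, a
// little-endian u16 data length, the data itself, and any trailing bytes treated as the
// signature. The marker and signature bytes below are arbitrary placeholders, not real encodings.
#[cfg(test)]
mod split_layout_tests {
    use super::*;

    #[test]
    fn split_reads_prefix_data_and_signature() {
        let mut buf = vec![0u8]; // placeholder compression marker
        buf.extend_from_slice(&3u16.to_le_bytes()); // data length = 3, little-endian
        buf.extend_from_slice(&[0xAA, 0xBB, 0xCC]); // the data payload
        buf.extend_from_slice(&[0x01, 0x02]); // leftover bytes are returned as the signature
        let split = SplitEntry::split(&buf).unwrap();
        assert_eq!(split.compress_raw, 0);
        assert_eq!(split.data, &[0xAA, 0xBB, 0xCC][..]);
        assert_eq!(split.signature_raw, &[0x01, 0x02][..]);
    }
}
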
/// A reference triplet to an [`Entry`], containing the hash of the entry's parent document, the
/// key string for the entry, and the hash of the entry itself. Note that the entry hash is still
/// formed in a way that includes the parent & key, so changing either means the entry hash would
/// also change.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct EntryRef {
    /// Hash of the parent document
    pub parent: Hash,
    /// Key for the entry
    pub key: String,
    /// Hash of the entry itself
    pub hash: Hash,
}

impl std::fmt::Display for EntryRef {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}-{}", self.parent, self.key, self.hash)
    }
}

#[derive(Clone, Debug)]
struct EntryInner {
    buf: Vec<u8>,
    /// Working memory for hash calculations. Should only be created by signing or new(), and only
    /// modified & read within signing operations.
    hash_state: Option<HashState>,
    id: EntryRef,
    schema_hash: Hash,
    signer: Option<Identity>,
    set_compress: Option<Option<u8>>,
}

impl EntryInner {
    fn data(&self) -> &[u8] {
        SplitEntry::split(&self.buf).unwrap().data
    }

    /// Get the hash of the Entry's parent [`Document`][crate::document::Document].
    fn parent(&self) -> &Hash {
        &self.id.parent
    }

    /// Get the hash of the [`Schema`][crate::schema::Schema] of the Entry's parent
    /// [`Document`][crate::document::Document].
    fn schema_hash(&self) -> &Hash {
        &self.schema_hash
    }

    /// Get the Entry's string key.
    fn key(&self) -> &str {
        &self.id.key
    }

    /// Get the Identity of the signer of this entry, if the entry is signed.
    fn signer(&self) -> Option<&Identity> {
        self.signer.as_ref()
    }

    /// Get the hash of the complete entry. This can change if the entry is signed again with the
    /// [`sign`][Self::sign] function.
    fn hash(&self) -> &Hash {
        &self.id.hash
    }

    fn reference(&self) -> &EntryRef {
        &self.id
    }

    /// Deserialize the entry's contained data into a value.
    fn deserialize<'de, D: Deserialize<'de>>(&'de self) -> Result<D> {
        let buf = self.data();
        let mut de = FogDeserializer::new(buf);
        D::deserialize(&mut de)
    }

    /// Override the default compression settings. `None` will disable compression. `Some(level)`
    /// will compress with the provided level as the setting for the algorithm.
    fn compression(&mut self, setting: Option<u8>) -> &mut Self {
        self.set_compress = Some(setting);
        self
    }

    /// Set up the hash state for an entry. The data passed in must not include the prefix bytes.
    fn setup_hash_state(parent_hash: Hash, key: &str, data: &[u8]) -> HashState {
        let mut hash_state = HashState::new();
        let mut prefix = Vec::new();
        serialize_elem(&mut prefix, Element::Hash(parent_hash));
        serialize_elem(&mut prefix, Element::Str(key));
        hash_state.update(&prefix);
        hash_state.update(data);
        hash_state
    }

    /// Sign the entry, or replace the existing signature if one exists already. Fails if the
    /// signature would grow the entry size beyond the maximum allowed. In the event of a failure,
    /// the entry is dropped.
    fn sign(mut self, key: &IdentityKey) -> Result<Self> {
        // If a signature already exists, reload the hash state
        let pre_sign_len = if self.signer.is_some() {
            let split = SplitEntry::split(&self.buf).unwrap();
            let new_len = split.data.len() + ENTRY_PREFIX_LEN;
            self.hash_state = Some(Self::setup_hash_state(
                self.id.parent.clone(),
                &self.id.key,
                split.data,
            ));
            new_len
        } else {
            self.buf.len()
        };

        // Load the hash state
        if self.hash_state.is_none() {
            let split = SplitEntry::split(&self.buf).unwrap();
            let state = Self::setup_hash_state(self.id.parent.clone(), &self.id.key, split.data);
            self.hash_state = Some(state);
        }
        let hash_state = self.hash_state.as_mut().unwrap();

        // Hash state does not yet contain the signature - thus, it holds the hash we're going to
        // sign
        let entry_hash = hash_state.hash();

        // Sign and check for size violation
        let signature = key.sign(&entry_hash);
        let new_len = pre_sign_len + signature.size();
        if new_len > MAX_ENTRY_SIZE {
            return Err(Error::LengthTooLong {
                max: MAX_ENTRY_SIZE,
                actual: self.buf.len(),
            });
        }

        // Append the signature and update the hasher
        self.buf.resize(pre_sign_len, 0);
        signature.encode_vec(&mut self.buf);
        hash_state.update(&self.buf[pre_sign_len..]);
        self.id.hash = hash_state.hash();
        self.signer = Some(key.id().clone());
        Ok(self)
    }

    fn complete(self) -> (EntryRef, Vec<u8>, Option<Option<u8>>) {
        (self.id, self.buf, self.set_compress)
    }
}

/// A new Entry that has not yet been validated.
///
/// This struct acts like an Entry, but cannot be decoded until it has passed through a
/// [`Schema`][crate::schema::Schema].
#[derive(Clone, Debug)]
pub struct NewEntry(EntryInner);

impl NewEntry {
    fn new_from<F>(key: &str, parent: &Document, encoder: F) -> Result<Self>
    where
        F: FnOnce(Vec<u8>) -> Result<Vec<u8>>,
    {
        // Serialize the data
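        // The buffer starts with the 3-byte prefix (ENTRY_PREFIX_LEN): a compression marker and a
        // placeholder for the 2-byte little-endian data length, which is filled in below.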
        let buf: Vec<u8> = vec![CompressType::None.into(), 0u8, 0u8];
        let mut buf = encoder(buf)?;

        // Check the total size and update the data length
        if buf.len() > MAX_ENTRY_SIZE {
            return Err(Error::LengthTooLong {
                max: MAX_ENTRY_SIZE,
                actual: buf.len(),
            });
        }
        let data_len = (buf.len() - ENTRY_PREFIX_LEN).to_le_bytes();
        buf[1] = data_len[0];
        buf[2] = data_len[1];

        // Create and update the Hash state
        let hash_state =
            EntryInner::setup_hash_state(parent.hash().clone(), key, &buf[ENTRY_PREFIX_LEN..]);
        let this_hash = hash_state.hash();

        let schema_hash = match parent.schema_hash() {
            Some(h) => h.clone(),
            None => {
                return Err(Error::FailValidate(
                    "Entries can only be created for documents that use a schema.".into(),
                ))
            }
        };

        Ok(Self(EntryInner {
            buf,
            hash_state: Some(hash_state),
            id: EntryRef {
                parent: parent.hash().clone(),
                key: key.to_owned(),
                hash: this_hash,
            },
            schema_hash,
            signer: None,
            set_compress: None,
        }))
    }

    /// Create a new Entry from any serializable data, a key, and the parent document.
    pub fn new<S: Serialize>(key: &str, parent: &Document, data: S) -> Result<Self> {
        Self::new_from(key, parent, |buf| {
            // Serialize the data
            let mut ser = FogSerializer::from_vec(buf, false);
            data.serialize(&mut ser)?;
            Ok(ser.finish())
        })
    }

    /// Create a new Entry from a key, the parent document, and any serializable data
    /// whose keys are all ordered. For structs, this means all fields are declared in
    /// lexicographic order. For maps, this means a `BTreeMap` type must be used, whose keys are
    /// ordered such that they serialize to lexicographically ordered strings. All sub-structs and
    /// sub-maps must be similarly ordered.
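    ///
    /// A minimal sketch (not compiled here) of the ordering requirement, using a hypothetical
    /// struct whose field names are already in lexicographic order; `parent_doc` is assumed to be
    /// an existing [`Document`][crate::document::Document] that uses a schema:
    ///
    /// ```ignore
    /// #[derive(Serialize)]
    /// struct Vote {
    ///     choice: String, // "choice" sorts before "weight"
    ///     weight: u32,
    /// }
    ///
    /// let vote = Vote { choice: "yes".into(), weight: 1 };
    /// let entry = NewEntry::new_ordered(vote, "votes", &parent_doc)?;
    /// ```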
    pub fn new_ordered<S: Serialize>(data: S, key: &str, parent: &Document) -> Result<Self> {
        Self::new_from(key, parent, |buf| {
            // Serialize the data
            let mut ser = FogSerializer::from_vec(buf, true);
            data.serialize(&mut ser)?;
            Ok(ser.finish())
        })
    }

    /// Override the default compression settings. `None` will disable compression. `Some(level)`
    /// will compress with the provided level as the setting for the algorithm.
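    ///
    /// A brief sketch (not compiled here), assuming `entry` is a `NewEntry` built as above; the
    /// level `19` is just an illustrative value for the underlying algorithm:
    ///
    /// ```ignore
    /// let uncompressed = entry.clone().compression(None); // never compress this entry
    /// let heavily_compressed = entry.compression(Some(19)); // request a specific level
    /// ```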
    pub fn compression(mut self, setting: Option<u8>) -> Self {
        self.0.compression(setting);
        self
    }

    /// Sign the entry, or replace the existing signature if one exists already. Fails if the
    /// signature would grow the entry size beyond the maximum allowed.
    pub fn sign(self, key: &IdentityKey) -> Result<Self> {
        Ok(Self(self.0.sign(key)?))
    }

    /// Get what the entry's hash will be, given its current state
    pub fn hash(&self) -> &Hash {
        self.0.hash()
    }

    pub(crate) fn data(&self) -> &[u8] {
        self.0.data()
    }

    /// Get the hash of the Entry's parent [`Document`][crate::document::Document].
    pub fn parent(&self) -> &Hash {
        self.0.parent()
    }

    /// Get the hash of the [`Schema`][crate::schema::Schema] of the Entry's parent
    /// [`Document`][crate::document::Document].
    pub fn schema_hash(&self) -> &Hash {
        self.0.schema_hash()
    }

    /// Get the Entry's string key.
    pub fn key(&self) -> &str {
        self.0.key()
    }

    /// Get an [`EntryRef`] containing a full reference to the entry.
    pub fn reference(&self) -> &EntryRef {
        self.0.reference()
    }
}

/// Holds serialized data associated with a parent document and a key string.
///
/// An Entry holds a piece of serialized data, which may be deserialized by calling
/// [`deserialize`][Entry::deserialize].
#[derive(Clone, Debug)]
pub struct Entry(EntryInner);

impl Entry {
    pub(crate) fn from_new(entry: NewEntry) -> Entry {
        Self(entry.0)
    }

    pub(crate) fn trusted_new(
        buf: Vec<u8>,
        key: &str,
        parent: &Document,
        entry: &Hash,
    ) -> Result<Self> {
        if buf.len() > MAX_ENTRY_SIZE {
            return Err(Error::LengthTooLong {
                max: MAX_ENTRY_SIZE,
                actual: buf.len(),
            });
        }

        let split = SplitEntry::split(&buf)?;

        let signer = if !split.signature_raw.is_empty() {
            let unverified =
                fog_crypto::identity::UnverifiedSignature::try_from(split.signature_raw)?;
            Some(unverified.signer().clone())
        } else {
            None
        };

        let schema_hash = match parent.schema_hash() {
            Some(h) => h.clone(),
            None => {
                return Err(Error::FailValidate(
                    "Entries can only be created for documents that use a schema.".into(),
                ))
            }
        };

        Ok(Self(EntryInner {
            buf,
            hash_state: None,
            id: EntryRef {
                parent: parent.hash().to_owned(),
                key: key.to_owned(),
                hash: entry.to_owned(),
            },
            schema_hash,
            signer,
            set_compress: None,
        }))
    }

    pub(crate) fn new(buf: Vec<u8>, key: &str, parent: &Document) -> Result<Self> {
        if buf.len() > MAX_ENTRY_SIZE {
            return Err(Error::LengthTooLong {
                max: MAX_ENTRY_SIZE,
                actual: buf.len(),
            });
        }

        let split = SplitEntry::split(&buf)?;

        let mut hash_state = EntryInner::setup_hash_state(parent.hash().clone(), key, split.data);
        let entry_hash = hash_state.hash();
        if !split.signature_raw.is_empty() {
            hash_state.update(split.signature_raw);
        }
        let this_hash = hash_state.hash();

        let signer = if !split.signature_raw.is_empty() {
            let unverified =
                fog_crypto::identity::UnverifiedSignature::try_from(split.signature_raw)?;
            let verified = unverified.verify(&entry_hash)?;
            Some(verified.signer().clone())
        } else {
            None
        };

        let schema_hash = match parent.schema_hash() {
            Some(h) => h.clone(),
            None => {
                return Err(Error::FailValidate(
                    "Entries can only be created for documents that use a schema.".into(),
                ))
            }
        };

        Ok(Self(EntryInner {
            buf,
            hash_state: Some(hash_state),
            id: EntryRef {
                parent: parent.hash().to_owned(),
                key: key.to_owned(),
                hash: this_hash,
            },
            schema_hash,
            signer,
            set_compress: None,
        }))
    }

    pub(crate) fn data(&self) -> &[u8] {
        self.0.data()
    }

    /// Find all hashes in this entry and return them.
    pub fn find_hashes(&self) -> Vec<Hash> {
        crate::find_hashes(self.data())
    }

    /// Get the hash of the Entry's parent [`Document`][crate::document::Document].
    pub fn parent(&self) -> &Hash {
        self.0.parent()
    }

    /// Get the hash of the [`Schema`][crate::schema::Schema] of the Entry's parent
    /// [`Document`][crate::document::Document].
    pub fn schema_hash(&self) -> &Hash {
        self.0.schema_hash()
    }

    /// Get the Entry's string key.
    pub fn key(&self) -> &str {
        self.0.key()
    }

    /// Get an [`EntryRef`] containing a full reference to the entry.
    pub fn reference(&self) -> &EntryRef {
        self.0.reference()
    }

    /// Get the Identity of the signer of this entry, if the entry is signed.
    pub fn signer(&self) -> Option<&Identity> {
        self.0.signer()
    }

    /// Get the hash of the complete entry. This can change if the entry is signed again with the
    /// [`sign`][Self::sign] function.
    pub fn hash(&self) -> &Hash {
        self.0.hash()
    }

    /// Deserialize the entry's contained data into a value.
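    ///
    /// A minimal sketch (not compiled here), assuming the entry was originally created from a
    /// string payload:
    ///
    /// ```ignore
    /// let text: String = entry.deserialize()?;
    /// ```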
    pub fn deserialize<'de, D: Deserialize<'de>>(&'de self) -> Result<D> {
        self.0.deserialize()
    }

    /// Override the default compression settings. `None` will disable compression. `Some(level)`
    /// will compress with the provided level as the setting for the algorithm.
    pub fn compression(mut self, setting: Option<u8>) -> Self {
        self.0.compression(setting);
        self
    }

    /// Sign the entry, or replace the existing signature if one exists already. Fails if the
    /// signature would grow the entry size beyond the maximum allowed. In the event of a failure,
    /// the entry is dropped.
    pub fn sign(self, key: &IdentityKey) -> Result<Self> {
        Ok(Self(self.0.sign(key)?))
    }

    pub(crate) fn complete(self) -> (EntryRef, Vec<u8>, Option<Option<u8>>) {
        self.0.complete()
    }
}