1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
//! A library for parsing and generating ESP-IDF partition tables, both in the
//! binary and CSV formats as described in the ESP-IDF documentation.
//!
//! For additional information regarding the partition table format please refer
//! to the ESP-IDF documentation:  
//! <https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/partition-tables.html>
//!
//! ## Features
//!
//! There is currently only a single feature, `std`; this feature is enabled by
//! default.
//!
//! The following functionality is unavailable if the `std` feature is disabled:
//!
//! - (De)serializing a [PartitionTable] from/to CSV or binary format
//! - Writing a [Partition] to a CSV or binary writer
//!
//! ## Examples
//!
//! ```rust,ignore
//! // Read a partition table from a CSV file:
//! let csv = std::fs::read_to_string("partitions.csv").unwrap();
//! let table = PartitionTable::try_from_str(csv).unwrap();
//!
//! // Read a partition table from a binary file:
//! let bin = std::fs::read("partitions.bin").unwrap();
//! let table = PartitionTable::try_from_bytes(bin).unwrap();
//!
//! // Or, you can automatically determine which format is being passed:
//! let table = PartitionTable::try_from(csv).unwrap();
//! let table = PartitionTable::try_from(bin).unwrap();
//!
//! // You can find a partition by name, type, or subtype:
//! let foo = table.find("factory").unwrap();
//! let bar = table.find_by_type(Type::App).unwrap();
//! let baz = table.find_by_subtype(Type::Data, SubType::Data(DataType::Ota)).unwrap();
//! ```

#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]

use core::ops::Rem as _;
#[cfg(feature = "std")]
use std::io::Write as _;

#[cfg(feature = "std")]
use deku::prelude::DekuContainerRead as _;
use serde::{Deserialize, Serialize};

pub use self::{
    error::Error,
    partition::{AppType, DataType, Partition, SubType, Type},
};
#[cfg(feature = "std")]
use self::{
    hash_writer::HashWriter,
    partition::{DeserializedBinPartition, DeserializedCsvPartition},
};

mod error;
mod partition;

// Without `std`, fall back to a fixed-capacity `heapless` vector; in this
// configuration a table can hold at most `PARTITION_SIZE` (32) partitions.
#[cfg(not(feature = "std"))]
type Vec<T> = heapless::Vec<T, PARTITION_SIZE>;

// Length (in bytes) of the magic-byte prefix of the MD5 checksum row
pub(crate) const MD5_NUM_MAGIC_BYTES: usize = 16;
// Marker bytes identifying the MD5 checksum row within a binary table
#[cfg(feature = "std")]
const MD5_PART_MAGIC_BYTES: [u8; MD5_NUM_MAGIC_BYTES] = [
    0xEB, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
];
// Size in bytes of a single serialized partition table entry
const PARTITION_SIZE: usize = 32;

/// A partition table; a collection of partitions
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct PartitionTable {
    // Partitions in the order they were parsed/constructed. `Vec` is the
    // fixed-capacity `heapless::Vec` alias when the `std` feature is disabled.
    partitions: Vec<Partition>,
}

impl PartitionTable {
    /// Construct a new partition table from zero or more partitions
    ///
    /// Note that in order for a partition table to pass validation, it must
    /// have at least one partition with type [`Type::App`].
    pub fn new(partitions: Vec<Partition>) -> Self {
        Self { partitions }
    }

    /// Attempt to parse either a binary or CSV partition table from the given
    /// input.
    ///
    /// For more information on the partition table format see:  
    /// <https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/partition-tables.html>
    #[cfg(feature = "std")]
    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
    pub fn try_from<D>(data: D) -> Result<Self, Error>
    where
        D: Into<Vec<u8>>,
    {
        let input: Vec<u8> = data.into();

        // If a partition table was detected from ESP-IDF (eg. using `esp-idf-sys`) then
        // it will be passed in its _binary_ form. Otherwise, it will be provided as a
        // CSV. Binary parsing is attempted first; on failure we fall back to
        // interpreting the input as UTF-8 CSV text. Note that non-UTF-8 input
        // which also fails binary parsing surfaces as a `FromUtf8Error` (via
        // `?`) rather than `InvalidPartitionTable`.
        if let Ok(part_table) = Self::try_from_bytes(&*input) {
            Ok(part_table)
        } else if let Ok(part_table) = Self::try_from_str(String::from_utf8(input)?) {
            Ok(part_table)
        } else {
            Err(Error::InvalidPartitionTable)
        }
    }

    /// Attempt to parse a binary partition table from the given bytes.
    ///
    /// For more information on the partition table format see:  
    /// <https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/partition-tables.html>
    #[cfg(feature = "std")]
    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
    pub fn try_from_bytes<B>(bytes: B) -> Result<Self, Error>
    where
        B: Into<Vec<u8>>,
    {
        // A row of 32 `0xFF` bytes terminates the table.
        const END_MARKER: [u8; 32] = [0xFF; 32];

        let data = bytes.into();

        // The data's length MUST be a multiple of 32: every row (partition
        // entry, checksum row, and end marker) is exactly 32 bytes.
        if data.len() % 32 != 0 {
            return Err(Error::LengthNotMultipleOf32);
        }

        // Running MD5 over every partition entry seen so far, to be compared
        // against the checksum row if one is present.
        let mut ctx = md5::Context::new();

        let mut partitions = vec![];
        for line in data.chunks_exact(PARTITION_SIZE) {
            if line.starts_with(&MD5_PART_MAGIC_BYTES) {
                // The first 16 bytes are just the marker. The next 16 bytes is
                // the actual MD5 string.
                let digest_in_file = &line[16..32];
                // Clone the context so hashing state is preserved; the
                // checksum row itself is never fed into the hash.
                let digest_computed = *ctx.clone().compute();

                if digest_computed != digest_in_file {
                    return Err(Error::InvalidChecksum {
                        expected: digest_in_file.to_vec(),
                        computed: digest_computed.to_vec(),
                    });
                }
            } else if line != END_MARKER {
                // An ordinary partition entry: deserialize it and fold its raw
                // bytes into the running checksum.
                let (_, partition) = DeserializedBinPartition::from_bytes((line, 0))?;

                let partition = Partition::from(partition);
                partitions.push(partition);

                ctx.consume(line);
            } else {
                // We're finished parsing the binary data, time to construct and return the
                // [PartitionTable].
                let table = Self::new(partitions);
                table.validate()?;

                return Ok(table);
            }
        }

        // Ran out of input without ever seeing the end-marker row.
        Err(Error::NoEndMarker)
    }

    /// Attempt to parse a CSV partition table from the given string.
    ///
    /// For more information on the partition table format see:  
    /// <https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/partition-tables.html>
    #[cfg(feature = "std")]
    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
    pub fn try_from_str<S>(string: S) -> Result<Self, Error>
    where
        S: Into<String>,
    {
        let data = string.into();
        // `#` starts a comment line; fields are whitespace-trimmed; `flexible`
        // permits rows with a variable number of fields (e.g. a trailing
        // flags field may be omitted).
        let mut reader = csv::ReaderBuilder::new()
            .comment(Some(b'#'))
            .flexible(true)
            .has_headers(false)
            .trim(csv::Trim::All)
            .from_reader(data.as_bytes());

        // The table's default offset is 0x8000 in ESP-IDF and the table itself
        // occupies 0x1000 bytes, so the first partition defaults to
        // 0x8000 + 0x1000 = 0x9000.
        let mut offset = 0x9000;

        let mut partitions = vec![];
        for record in reader.deserialize() {
            // Since offsets are optional, we need to update the deserialized
            // partition when this field is omitted
            let mut partition: DeserializedCsvPartition = record?;
            offset = partition.fix_offset(offset);

            let partition = Partition::from(partition);
            partitions.push(partition);
        }

        let table = Self::new(partitions);
        table.validate()?;

        Ok(table)
    }

    /// Return a reference to a vector containing each partition in the
    /// partition table
    pub fn partitions(&self) -> &Vec<Partition> {
        &self.partitions
    }

    /// Find a partition with the given name in the partition table
    pub fn find(&self, name: &str) -> Option<&Partition> {
        self.partitions.iter().find(|p| p.name() == name)
    }

    /// Find a partition with the given type in the partition table
    ///
    /// Returns the first match in table order.
    pub fn find_by_type(&self, ty: Type) -> Option<&Partition> {
        self.partitions.iter().find(|p| p.ty() == ty)
    }

    /// Find a partition with the given type and subtype in the partition table
    ///
    /// Returns the first match in table order.
    pub fn find_by_subtype(&self, ty: Type, subtype: SubType) -> Option<&Partition> {
        self.partitions
            .iter()
            .find(|p| p.ty() == ty && p.subtype() == subtype)
    }

    /// Convert a partition table to binary
    #[cfg(feature = "std")]
    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
    pub fn to_bin(&self) -> Result<Vec<u8>, Error> {
        // The serialized output (entries + checksum row + padding) totals
        // MAX_PARTITION_LENGTH bytes; capacity for the full table region is
        // reserved up front.
        const MAX_PARTITION_LENGTH: usize = 0xC00;
        const PARTITION_TABLE_SIZE: usize = 0x1000;

        let mut result = Vec::with_capacity(PARTITION_TABLE_SIZE);
        // Hash each serialized entry as it is written so the MD5 checksum row
        // can be appended afterwards.
        let mut hasher = HashWriter::new(&mut result);

        for partition in &self.partitions {
            partition.write_bin(&mut hasher)?;
        }

        let (writer, hash) = hasher.compute();

        // Checksum row: 16 magic bytes followed by the 16-byte MD5 digest.
        writer.write_all(&MD5_PART_MAGIC_BYTES)?;
        writer.write_all(&hash.0)?;

        // Pad the remainder with 0xFF (erased-flash bytes).
        // NOTE(review): if `written` exceeds MAX_PARTITION_LENGTH (more than
        // 95 partitions) this subtraction underflows — confirm an upper bound
        // is enforced elsewhere.
        let written = self.partitions.len() * PARTITION_SIZE + 32;
        let padding = std::iter::repeat(0xFF)
            .take(MAX_PARTITION_LENGTH - written)
            .collect::<Vec<_>>();

        writer.write_all(&padding)?;

        Ok(result)
    }

    /// Convert a partition table to a CSV string
    #[cfg(feature = "std")]
    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
    pub fn to_csv(&self) -> Result<String, Error> {
        let mut csv = String::new();

        // We will use the same common "header" that is used in ESP-IDF
        csv.push_str("# ESP-IDF Partition Table\n");
        csv.push_str("# Name,Type,SubType,Offset,Size,Flags\n");

        // Serialize each partition using a [csv::Writer]
        let mut writer = csv::WriterBuilder::new()
            .has_headers(false)
            .from_writer(vec![]);

        for partition in &self.partitions {
            partition.write_csv(&mut writer)?;
        }

        // Append the serialized partitions to the header text, leaving us with our
        // completed CSV text. The `unwrap` here relies on the inner writer
        // being a `Vec<u8>`, whose flush cannot fail.
        csv.push_str(&String::from_utf8_lossy(&writer.into_inner().unwrap()));

        Ok(csv)
    }

    /// Validate a partition table
    ///
    /// Checks, in order: at least one 'app' partition exists; at most one
    /// factory app partition; per-partition alignment and maximum size; no
    /// duplicate names or overlapping ranges; 'otadata' uniqueness and size.
    pub fn validate(&self) -> Result<(), Error> {
        use self::partition::{APP_PARTITION_ALIGNMENT, DATA_PARTITION_ALIGNMENT};

        const MAX_PART_SIZE: u32 = 0x100_0000; // 16MB
        const OTADATA_SIZE: u32 = 0x2000; // 8kB

        // There must be at least one partition with type 'app'
        if self.find_by_type(Type::App).is_none() {
            return Err(Error::NoAppPartition);
        }

        // There can be at most one partition of type 'app' and of subtype 'factory'
        if self
            .partitions
            .iter()
            .filter(|p| p.ty() == Type::App && p.subtype() == SubType::App(AppType::Factory))
            .count()
            > 1
        {
            return Err(Error::MultipleFactoryPartitions);
        }

        for partition in &self.partitions {
            // Partitions of type 'app' have to be placed at offsets aligned to 0x10000
            // (64k)
            if partition.ty() == Type::App && partition.offset().rem(APP_PARTITION_ALIGNMENT) != 0 {
                return Err(Error::UnalignedPartition);
            }

            // Partitions of type 'data' have to be placed at offsets aligned to 0x1000 (4k)
            if partition.ty() == Type::Data && partition.offset().rem(DATA_PARTITION_ALIGNMENT) != 0
            {
                return Err(Error::UnalignedPartition);
            }

            // Partitions cannot exceed 16MB; see:
            // https://github.com/espressif/esp-idf/blob/c212305/components/bootloader_support/src/esp_image_format.c#L158-L161
            if partition.size() > MAX_PART_SIZE {
                return Err(Error::PartitionTooLarge(partition.name()));
            }
        }

        // Pairwise checks are quadratic, but partition tables are small so
        // this is acceptable in practice
        for partition_a in &self.partitions {
            for partition_b in &self.partitions {
                // Do not compare partitions with themselves :)
                if partition_a == partition_b {
                    continue;
                }

                // Partitions cannot have conflicting names
                if partition_a.name() == partition_b.name() {
                    return Err(Error::DuplicatePartitions(partition_a.name()));
                }

                // Partitions cannot overlap each other
                if partition_a.overlaps(partition_b) {
                    return Err(Error::OverlappingPartitions(
                        partition_a.name(),
                        partition_b.name(),
                    ));
                }
            }
        }

        // Check that otadata should be unique
        let ota_duplicates = self
            .partitions
            .iter()
            .filter(|p| p.ty() == Type::Data && p.subtype() == SubType::Data(DataType::Ota))
            .collect::<Vec<_>>();

        if ota_duplicates.len() > 1 {
            return Err(Error::MultipleOtadataPartitions);
        }

        // A lone otadata partition must be exactly 0x2000 bytes in size
        if ota_duplicates.len() == 1 && ota_duplicates[0].size() != OTADATA_SIZE {
            return Err(Error::InvalidOtadataPartitionSize);
        }

        Ok(())
    }
}

#[cfg(feature = "std")]
mod hash_writer {
    use md5::{Context, Digest};

    /// An [`std::io::Write`] adaptor which maintains a running MD5 digest of
    /// every byte successfully written to the wrapped writer.
    pub(crate) struct HashWriter<W> {
        inner: W,
        hasher: Context,
    }

    impl<W> std::io::Write for HashWriter<W>
    where
        W: std::io::Write,
    {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            // Write to the inner writer FIRST: `Write::write` may accept fewer
            // bytes than provided, and only the bytes actually written must be
            // hashed. (The previous implementation hashed all of `buf` before
            // forwarding it, so a partial write would desynchronize the digest
            // from the data in the wrapped writer.)
            let written = self.inner.write(buf)?;
            self.hasher.consume(&buf[..written]);

            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.inner.flush()
        }
    }

    impl<W> HashWriter<W>
    where
        W: std::io::Write,
    {
        /// Wrap the given writer, starting from a fresh MD5 context.
        pub fn new(inner: W) -> Self {
            Self {
                inner,
                hasher: Context::new(),
            }
        }

        /// Consume the wrapper, returning the inner writer and the MD5 digest
        /// of all bytes written through it.
        pub fn compute(self) -> (W, Digest) {
            (self.inner, self.hasher.compute())
        }
    }
}