1use std::{io::Write as _, ops::Rem as _};
40
41use deku::prelude::DekuContainerRead as _;
42use serde::{Deserialize, Serialize};
43
44pub use self::{
45 error::Error,
46 partition::{AppType, DataType, Flags, Partition, SubType, Type},
47};
48use self::{
49 hash_writer::HashWriter,
50 partition::{DeserializedBinPartition, DeserializedCsvPartition},
51};
52
53mod error;
54mod partition;
55
/// Number of magic-prefix bytes in the MD5 checksum row of a binary table.
pub(crate) const MD5_NUM_MAGIC_BYTES: usize = 16;
/// Magic prefix identifying the MD5 checksum row in a binary partition
/// table: `0xEB 0xEB` followed by fourteen `0xFF` bytes. The 16 bytes that
/// follow this prefix hold the MD5 digest of all preceding entries.
const MD5_PART_MAGIC_BYTES: [u8; MD5_NUM_MAGIC_BYTES] = [
    0xEB, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
];
/// Size in bytes of a single entry (row) in a binary partition table.
const PARTITION_SIZE: usize = 32;
61
/// A partition table: an ordered collection of [`Partition`]s describing the
/// layout of a flash image.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct PartitionTable {
    // Invariants (alignment, no overlaps, unique names, …) are NOT enforced
    // by construction; they are checked by `PartitionTable::validate`.
    partitions: Vec<Partition>,
}
67
68impl PartitionTable {
69 pub fn new(partitions: Vec<Partition>) -> Self {
74 Self { partitions }
75 }
76
77 pub fn try_from<D>(data: D) -> Result<Self, Error>
83 where
84 D: Into<Vec<u8>>,
85 {
86 let input: Vec<u8> = data.into();
87
88 if input[..2] == [0xAA, 0x50] {
92 Self::try_from_bytes(&*input)
93 } else {
94 Self::try_from_str(String::from_utf8(input)?)
95 }
96 }
97
98 pub fn try_from_bytes<B>(bytes: B) -> Result<Self, Error>
103 where
104 B: Into<Vec<u8>>,
105 {
106 use md5::Digest;
107
108 const END_MARKER: [u8; 32] = [0xFF; 32];
109
110 let data = bytes.into();
111
112 if data.len() % 32 != 0 {
114 return Err(Error::LengthNotMultipleOf32);
115 }
116
117 let mut ctx = md5::Md5::new();
118
119 let mut partitions = vec![];
120 for line in data.chunks_exact(PARTITION_SIZE) {
121 if line.starts_with(&MD5_PART_MAGIC_BYTES) {
122 let digest_in_file = &line[16..32];
125 let digest_computed = ctx.clone().finalize();
126
127 if digest_computed.as_slice() != digest_in_file {
128 return Err(Error::InvalidChecksum {
129 expected: digest_in_file.to_vec(),
130 computed: digest_computed.to_vec(),
131 });
132 }
133 } else if line != END_MARKER {
134 let (_, partition) = DeserializedBinPartition::from_bytes((line, 0))?;
135
136 let partition = Partition::from(partition);
137 partitions.push(partition);
138
139 ctx.update(line);
140 } else {
141 let table = Self::new(partitions);
144 table.validate()?;
145
146 return Ok(table);
147 }
148 }
149
150 Err(Error::NoEndMarker)
151 }
152
153 pub fn try_from_str<S>(string: S) -> Result<Self, Error>
158 where
159 S: Into<String>,
160 {
161 let data = string.into();
162 let mut reader = csv::ReaderBuilder::new()
163 .comment(Some(b'#'))
164 .flexible(true)
165 .has_headers(false)
166 .trim(csv::Trim::All)
167 .from_reader(data.as_bytes());
168
169 let mut offset = 0x9000;
171
172 let mut partitions = vec![];
173 for record in reader.deserialize() {
174 let mut partition: DeserializedCsvPartition = record?;
177 offset = partition.fix_offset(offset);
178
179 let partition = Partition::from(partition);
180 partitions.push(partition);
181 }
182
183 let table = Self::new(partitions);
184 table.validate()?;
185
186 Ok(table)
187 }
188
189 pub fn partitions(&self) -> &Vec<Partition> {
192 &self.partitions
193 }
194
195 pub fn find(&self, name: &str) -> Option<&Partition> {
197 self.partitions.iter().find(|p| p.name() == name)
198 }
199
200 pub fn find_by_type(&self, ty: Type) -> Option<&Partition> {
202 self.partitions.iter().find(|p| p.ty() == ty)
203 }
204
205 pub fn find_by_subtype(&self, ty: Type, subtype: SubType) -> Option<&Partition> {
207 self.partitions
208 .iter()
209 .find(|p| p.ty() == ty && p.subtype() == subtype)
210 }
211
212 pub fn to_bin(&self) -> Result<Vec<u8>, Error> {
214 const MAX_PARTITION_LENGTH: usize = 0xC00;
215 const PARTITION_TABLE_SIZE: usize = 0x1000;
216
217 let mut result = Vec::with_capacity(PARTITION_TABLE_SIZE);
218 let mut hasher = HashWriter::new(&mut result);
219
220 for partition in &self.partitions {
221 partition.write_bin(&mut hasher)?;
222 }
223
224 let (writer, hash) = hasher.compute();
225
226 writer.write_all(&MD5_PART_MAGIC_BYTES)?;
227 writer.write_all(hash.as_slice())?;
228
229 let written = self.partitions.len() * PARTITION_SIZE + 32;
230 let padding = std::iter::repeat_n(0xFF, MAX_PARTITION_LENGTH - written).collect::<Vec<_>>();
231
232 writer.write_all(&padding)?;
233
234 Ok(result)
235 }
236
237 pub fn to_csv(&self) -> Result<String, Error> {
239 let mut csv = String::new();
240
241 csv.push_str("# ESP-IDF Partition Table\n");
243 csv.push_str("# Name,Type,SubType,Offset,Size,Flags\n");
244
245 let mut writer = csv::WriterBuilder::new()
247 .has_headers(false)
248 .from_writer(vec![]);
249
250 for partition in &self.partitions {
251 partition.write_csv(&mut writer)?;
252 }
253
254 csv.push_str(&String::from_utf8_lossy(&writer.into_inner().unwrap()));
257
258 Ok(csv)
259 }
260
261 pub fn validate(&self) -> Result<(), Error> {
263 use self::partition::{APP_PARTITION_ALIGNMENT, DATA_PARTITION_ALIGNMENT};
264
265 const MAX_APP_PART_SIZE: u32 = 0x100_0000; const OTADATA_SIZE: u32 = 0x2000; if self.find_by_type(Type::App).is_none() {
270 return Err(Error::NoAppPartition);
271 }
272
273 if self
275 .partitions
276 .iter()
277 .filter(|p| p.ty() == Type::App && p.subtype() == SubType::App(AppType::Factory))
278 .count()
279 > 1
280 {
281 return Err(Error::MultipleFactoryPartitions);
282 }
283
284 if self
286 .partitions
287 .iter()
288 .filter(|p| p.ty() == Type::Data && p.subtype() == SubType::Data(DataType::Ota))
289 .count()
290 > 1
291 {
292 return Err(Error::MultipleOtadataPartitions);
293 }
294
295 for partition in &self.partitions {
296 if partition.ty() == Type::App && partition.offset().rem(APP_PARTITION_ALIGNMENT) != 0 {
299 return Err(Error::UnalignedPartition);
300 }
301
302 if partition.ty() == Type::Data && partition.offset().rem(DATA_PARTITION_ALIGNMENT) != 0
304 {
305 return Err(Error::UnalignedPartition);
306 }
307
308 if partition.ty() == Type::App && partition.size() > MAX_APP_PART_SIZE {
311 return Err(Error::PartitionTooLarge(partition.name()));
312 }
313
314 if partition.ty() == Type::Data
315 && partition.subtype() == SubType::Data(DataType::Ota)
316 && partition.size() != OTADATA_SIZE
317 {
318 return Err(Error::InvalidOtadataPartitionSize);
319 }
320 }
321
322 for partition_a in &self.partitions {
323 for partition_b in &self.partitions {
324 if partition_a == partition_b {
326 continue;
327 }
328
329 if partition_a.name() == partition_b.name() {
331 return Err(Error::DuplicatePartitions(partition_a.name()));
332 }
333
334 if partition_a.overlaps(partition_b) {
336 return Err(Error::OverlappingPartitions(
337 partition_a.name(),
338 partition_b.name(),
339 ));
340 }
341 }
342 }
343
344 Ok(())
345 }
346}
347
348mod hash_writer {
349 use md5::{
350 Digest,
351 Md5,
352 digest::{consts::U16, generic_array::GenericArray},
353 };
354
355 pub(crate) struct HashWriter<W> {
356 inner: W,
357 hasher: Md5,
358 }
359
360 impl<W> std::io::Write for HashWriter<W>
361 where
362 W: std::io::Write,
363 {
364 fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
365 self.hasher.update(buf);
366 self.inner.write(buf)
367 }
368
369 fn flush(&mut self) -> std::io::Result<()> {
370 self.inner.flush()
371 }
372 }
373
374 impl<W> HashWriter<W>
375 where
376 W: std::io::Write,
377 {
378 pub fn new(inner: W) -> Self {
379 Self {
380 inner,
381 hasher: Md5::new(),
382 }
383 }
384
385 pub fn compute(self) -> (W, GenericArray<u8, U16>) {
386 (self.inner, self.hasher.finalize())
387 }
388 }
389}