gpt_disk_io/disk.rs
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use crate::BlockIo;
use bytemuck::{bytes_of, from_bytes};
use core::fmt::{self, Debug, Display, Formatter};
use core::mem;
use gpt_disk_types::{
    GptHeader, GptPartitionEntry, GptPartitionEntryArray,
    GptPartitionEntryArrayError, GptPartitionEntryArrayLayout, Lba,
    MasterBootRecord,
};

/// Iterator over entries in a partition entry array.
struct GptPartitionEntryIter<'disk, 'buf, Io: BlockIo> {
    disk: &'disk mut Disk<Io>,
    block_buf: &'buf mut [u8],
    layout: GptPartitionEntryArrayLayout,
    next_index: u32,
    current_lba: Lba,
    byte_offset_within_lba: usize,
    entry_size: usize,
}

impl<'disk, 'buf, Io: BlockIo> GptPartitionEntryIter<'disk, 'buf, Io> {
    fn new(
        disk: &'disk mut Disk<Io>,
        layout: GptPartitionEntryArrayLayout,
        block_buf: &'buf mut [u8],
    ) -> Result<Self, DiskError<Io::Error>> {
        let mut iter = Self {
            disk,
            block_buf,
            next_index: 0,
            current_lba: layout.start_lba,
            byte_offset_within_lba: 0,
            layout,
            entry_size: layout
                .entry_size
                .to_usize()
                .ok_or(DiskError::Overflow)?,
        };
        iter.set_current_lba(iter.current_lba)?;
        Ok(iter)
    }

    fn set_current_lba(
        &mut self,
        lba: Lba,
    ) -> Result<(), DiskError<Io::Error>> {
        self.current_lba = lba;
        self.byte_offset_within_lba = 0;
        Ok(self.disk.io.read_blocks(self.current_lba, self.block_buf)?)
    }

    fn read_current_entry(&mut self) -> Option<<Self as Iterator>::Item> {
        let entry_bytes = self.block_buf.get(
            self.byte_offset_within_lba
                ..self.byte_offset_within_lba + self.entry_size,
        )?;

        self.byte_offset_within_lba += self.entry_size;

        self.next_index += 1;

        Some(Ok(*from_bytes::<GptPartitionEntry>(
            &entry_bytes[..mem::size_of::<GptPartitionEntry>()],
        )))
    }
}

impl<Io: BlockIo> Iterator for GptPartitionEntryIter<'_, '_, Io> {
    type Item = Result<GptPartitionEntry, DiskError<Io::Error>>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_index >= self.layout.num_entries {
            return None;
        }

        if let Some(entry) = self.read_current_entry() {
            Some(entry)
        } else {
            let next_lba = Lba(self.current_lba.to_u64() + 1);
            if let Err(err) = self.set_current_lba(next_lba) {
                Some(Err(err))
            } else {
                self.read_current_entry()
            }
        }
    }
}

/// Workaround for using `impl Trait` with multiple lifetimes. See
/// <https://stackoverflow.com/a/50548538>.
pub trait Captures<'a, 'b> {}

impl<T: ?Sized> Captures<'_, '_> for T {}

/// Error type used by [`Disk`] methods.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, PartialEq)]
pub enum DiskError<IoError: Debug + Display> {
    /// The storage buffer is not large enough.
    BufferTooSmall,

    /// Numeric overflow occurred.
    Overflow,

    /// The partition entry size is larger than a single block.
    BlockSizeSmallerThanPartitionEntry,

    /// Error from a [`BlockIo`] implementation (see [`BlockIo::Error`]).
    ///
    /// [`BlockIo`]: crate::BlockIo
    /// [`BlockIo::Error`]: crate::BlockIo::Error
    Io(IoError),
}

impl<IoError> From<IoError> for DiskError<IoError>
where
    IoError: Debug + Display,
{
    fn from(err: IoError) -> Self {
        DiskError::Io(err)
    }
}

impl<IoError> Display for DiskError<IoError>
where
    IoError: Debug + Display,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Self::BufferTooSmall => f.write_str("storage buffer is too small"),
            Self::Overflow => f.write_str("numeric overflow occurred"),
            Self::BlockSizeSmallerThanPartitionEntry => {
                f.write_str("partition entries are larger than a single block")
            }
            Self::Io(io) => Display::fmt(io, f),
        }
    }
}

impl<IoError> core::error::Error for DiskError<IoError> where
    IoError: Debug + Display
{
}

/// Read and write GPT disk data.
///
/// The disk is accessed via an object implementing the [`BlockIo`]
/// trait, so all reads and writes are on block boundaries. Writes are
/// not guaranteed to be completed until [`flush`] is called. Flushing
/// happens automatically when the `Disk` is dropped, but any error at
/// that point is silently ignored, so it is recommended to call
/// [`flush`] directly before dropping the disk.
///
/// Many of the methods on `Disk` take a `block_buf` argument, which is
/// a mutable byte buffer with a length of at least one block. (The
/// [`read_gpt_partition_entry_array`] and
/// [`write_gpt_partition_entry_array`] methods take a larger `storage`
/// argument that is multiple blocks in size.) These buffer arguments
/// allow `Disk` to avoid doing any internal memory allocation.
///
/// # Partition entry arrays
///
/// Partition entry arrays can be read in two ways: one block at a time
/// with [`gpt_partition_entry_array_iter`], or all at once with
/// [`read_gpt_partition_entry_array`]. The former bounds memory usage
/// to a single block, while the latter may be more efficient since
/// all of the blocks can be read at once.
///
/// Writing the array can currently only be done all at once via
/// [`write_gpt_partition_entry_array`]; a block-at-a-time method may be
/// added in the future.
///
/// [`flush`]: Self::flush
/// [`gpt_partition_entry_array_iter`]: Self::gpt_partition_entry_array_iter
/// [`read_gpt_partition_entry_array`]: Self::read_gpt_partition_entry_array
/// [`write_gpt_partition_entry_array`]: Self::write_gpt_partition_entry_array
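///
/// # Example
///
/// A minimal sketch of reading the primary GPT header and flushing,
/// assuming `io` is some [`BlockIo`] implementation for a disk with
/// 512-byte blocks (the concrete `io` value here is hypothetical):
///
/// ```ignore
/// let mut block_buf = [0u8; 512];
/// let mut disk = Disk::new(io)?;
/// let header = disk.read_primary_gpt_header(&mut block_buf)?;
/// // Inspect the `header` fields here.
/// disk.flush()?;
/// ```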
pub struct Disk<Io: BlockIo> {
    io: Io,
}

impl<Io: BlockIo> Disk<Io> {
    /// Create a `Disk`.
    pub fn new(io: Io) -> Result<Self, DiskError<Io::Error>> {
        Ok(Self { io })
    }

    /// Clip the size of `block_buf` to a single block. Return
    /// `BufferTooSmall` if the buffer isn't big enough.
    fn clip_block_buf_size<'buf>(
        &self,
        block_buf: &'buf mut [u8],
    ) -> Result<&'buf mut [u8], DiskError<Io::Error>> {
        if let Some(block_size) = self.io.block_size().to_usize() {
            block_buf
                .get_mut(..block_size)
                .ok_or(DiskError::BufferTooSmall)
        } else {
            Err(DiskError::BufferTooSmall)
        }
    }

    /// Read the primary GPT header from the second block. No validation
    /// of the header is performed.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn read_primary_gpt_header(
        &mut self,
        block_buf: &mut [u8],
    ) -> Result<GptHeader, DiskError<Io::Error>> {
        self.read_gpt_header(Lba(1), block_buf)
    }

    /// Read the secondary GPT header from the last block. No validation
    /// of the header is performed.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn read_secondary_gpt_header(
        &mut self,
        block_buf: &mut [u8],
    ) -> Result<GptHeader, DiskError<Io::Error>> {
        let num_blocks = self.io.num_blocks()?;
        let last_block =
            Lba(num_blocks.checked_sub(1).ok_or(DiskError::Overflow)?);
        self.read_gpt_header(last_block, block_buf)
    }

    /// Read a GPT header at the given [`Lba`]. No validation of the
    /// header is performed.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn read_gpt_header(
        &mut self,
        lba: Lba,
        mut block_buf: &mut [u8],
    ) -> Result<GptHeader, DiskError<Io::Error>> {
        block_buf = self.clip_block_buf_size(block_buf)?;
        self.io.read_blocks(lba, block_buf)?;
        let bytes = block_buf
            .get(..mem::size_of::<GptHeader>())
            // OK to unwrap since the block size type guarantees a
            // minimum size larger than the size of GptHeader.
            .unwrap();
        Ok(*from_bytes(bytes))
    }

    /// Read the entire partition entry array. The `storage` buffer must
    /// be at least [`layout.num_bytes_rounded_to_block`] in size.
    ///
    /// [`layout.num_bytes_rounded_to_block`]: GptPartitionEntryArrayLayout::num_bytes_rounded_to_block
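    ///
    /// A minimal usage sketch (the `layout` value is assumed to have
    /// been obtained elsewhere, e.g. from a previously read header;
    /// with 512-byte blocks and the conventional 128 entries of 128
    /// bytes each, 16 KiB of storage is enough):
    ///
    /// ```ignore
    /// let mut storage = vec![0u8; 16 * 1024];
    /// let entry_array =
    ///     disk.read_gpt_partition_entry_array(layout, &mut storage)?;
    /// ```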
    pub fn read_gpt_partition_entry_array<'buf>(
        &mut self,
        layout: GptPartitionEntryArrayLayout,
        storage: &'buf mut [u8],
    ) -> Result<GptPartitionEntryArray<'buf>, DiskError<Io::Error>> {
        let mut entry_array =
            GptPartitionEntryArray::new(layout, self.io.block_size(), storage)
                .map_err(|err| match err {
                    GptPartitionEntryArrayError::BufferTooSmall => {
                        DiskError::BufferTooSmall
                    }
                    GptPartitionEntryArrayError::Overflow => {
                        DiskError::Overflow
                    }
                })?;
        self.io
            .read_blocks(layout.start_lba, entry_array.storage_mut())?;
        Ok(entry_array)
    }

    /// Write an entire [`GptPartitionEntryArray`] to disk.
    pub fn write_gpt_partition_entry_array(
        &mut self,
        entry_array: &GptPartitionEntryArray,
    ) -> Result<(), DiskError<Io::Error>> {
        Ok(self.io.write_blocks(
            entry_array.layout().start_lba,
            entry_array.storage(),
        )?)
    }

    /// Get an iterator over partition entries. The `layout` parameter
    /// indicates where to read the entries from; see
    /// [`GptPartitionEntryArrayLayout`] for more.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
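    ///
    /// A minimal usage sketch (the `layout` value is assumed to have
    /// been obtained elsewhere; the disk is assumed to use 512-byte
    /// blocks):
    ///
    /// ```ignore
    /// let mut block_buf = [0u8; 512];
    /// let iter = disk.gpt_partition_entry_array_iter(layout, &mut block_buf)?;
    /// for entry in iter {
    ///     let entry = entry?;
    ///     // Process each `entry` here.
    /// }
    /// ```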
    #[allow(clippy::type_complexity)]
    pub fn gpt_partition_entry_array_iter<'disk, 'buf>(
        &'disk mut self,
        layout: GptPartitionEntryArrayLayout,
        mut block_buf: &'buf mut [u8],
    ) -> Result<
        impl Iterator<Item = Result<GptPartitionEntry, DiskError<Io::Error>>>
            + Captures<'disk, 'buf>,
        DiskError<Io::Error>,
    > {
        block_buf = self.clip_block_buf_size(block_buf)?;

        let entry_size =
            layout.entry_size.to_usize().ok_or(DiskError::Overflow)?;
        if entry_size > block_buf.len() {
            return Err(DiskError::BlockSizeSmallerThanPartitionEntry);
        }

        GptPartitionEntryIter::<'disk, 'buf>::new(self, layout, block_buf)
    }

    /// Write a protective MBR to the first block. If the block size is
    /// bigger than the MBR, the rest of the block will be filled with
    /// zeroes.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
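    ///
    /// A sketch of laying out a new GPT disk, assuming the primary and
    /// secondary headers (and the partition entry array, not shown)
    /// have already been constructed consistently and the disk uses
    /// 512-byte blocks:
    ///
    /// ```ignore
    /// let mut block_buf = [0u8; 512];
    /// disk.write_protective_mbr(&mut block_buf)?;
    /// disk.write_primary_gpt_header(&primary_header, &mut block_buf)?;
    /// disk.write_secondary_gpt_header(&secondary_header, &mut block_buf)?;
    /// disk.flush()?;
    /// ```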
    pub fn write_protective_mbr(
        &mut self,
        block_buf: &mut [u8],
    ) -> Result<(), DiskError<Io::Error>> {
        let mbr = MasterBootRecord::protective_mbr(self.io.num_blocks()?);
        self.write_mbr(&mbr, block_buf)
    }

    /// Write an MBR to the first block. If the block size is bigger
    /// than the MBR, the rest of the block will be filled with zeroes.
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn write_mbr(
        &mut self,
        mbr: &MasterBootRecord,
        mut block_buf: &mut [u8],
    ) -> Result<(), DiskError<Io::Error>> {
        block_buf = self.clip_block_buf_size(block_buf)?;

        let mbr_bytes = bytes_of(mbr);

        // This should always be true because the block_buf size is
        // already known to match the block size, and the block size is
        // enforced to be at least 512 bytes, which is the size of the
        // MBR struct.
        assert!(block_buf.len() >= mbr_bytes.len());

        {
            let (left, right) = block_buf.split_at_mut(mbr_bytes.len());
            left.copy_from_slice(mbr_bytes);
            right.fill(0);
        }

        self.io.write_blocks(Lba(0), block_buf)?;
        Ok(())
    }

    /// Write the primary GPT header to the second block.
    ///
    /// The header is written to the beginning of the block, and all
    /// remaining bytes in the block are set to zero (see Table 5-5 "GPT
    /// Header" in the UEFI Specification: "The rest of the block is
    /// reserved by UEFI and must be zero").
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn write_primary_gpt_header(
        &mut self,
        header: &GptHeader,
        block_buf: &mut [u8],
    ) -> Result<(), DiskError<Io::Error>> {
        self.write_gpt_header(Lba(1), header, block_buf)
    }

    /// Write the secondary GPT header to the last block.
    ///
    /// The header is written to the beginning of the block, and all
    /// remaining bytes in the block are set to zero (see Table 5-5 "GPT
    /// Header" in the UEFI Specification: "The rest of the block is
    /// reserved by UEFI and must be zero").
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn write_secondary_gpt_header(
        &mut self,
        header: &GptHeader,
        block_buf: &mut [u8],
    ) -> Result<(), DiskError<Io::Error>> {
        let num_blocks = self.io.num_blocks()?;
        let last_block =
            Lba(num_blocks.checked_sub(1).ok_or(DiskError::Overflow)?);
        self.write_gpt_header(last_block, header, block_buf)
    }

    /// Write a [`GptHeader`] to the specified [`Lba`].
    ///
    /// The header is written to the beginning of the block, and all
    /// remaining bytes in the block are set to zero (see Table 5-5 "GPT
    /// Header" in the UEFI Specification: "The rest of the block is
    /// reserved by UEFI and must be zero").
    ///
    /// `block_buf` is a mutable byte buffer with a length of at least one block.
    pub fn write_gpt_header(
        &mut self,
        lba: Lba,
        header: &GptHeader,
        mut block_buf: &mut [u8],
    ) -> Result<(), DiskError<Io::Error>> {
        block_buf = self.clip_block_buf_size(block_buf)?;

        let header_bytes = bytes_of(header);

        // This should always be true because the block_buf size is
        // already known to match the block size, and the block size is
        // enforced to be at least 512 bytes, which is much larger than
        // the size of the GptHeader struct.
        assert!(block_buf.len() >= header_bytes.len());

        {
            let (left, right) = block_buf.split_at_mut(header_bytes.len());
            left.copy_from_slice(header_bytes);
            right.fill(0);
        }

        self.io.write_blocks(lba, block_buf)?;
        Ok(())
    }

    /// Flush any pending writes to the disk.
    ///
    /// This is called automatically when the disk is dropped, but if an
    /// error occurs at that point it will be silently ignored. It is
    /// recommended to call this method directly before dropping the disk.
    pub fn flush(&mut self) -> Result<(), DiskError<Io::Error>> {
        Ok(self.io.flush()?)
    }
}

impl<Io: BlockIo> Drop for Disk<Io> {
    fn drop(&mut self) {
        // Throw away any errors.
        let _r = self.flush();
    }
}