//! doublecrypt_core — `block_store.rs`: fixed-size block store backends.

1use crate::error::{FsError, FsResult};
2use rand::RngCore;
3use std::collections::HashMap;
4use std::fs::{File, OpenOptions};
5use std::io::{Seek, SeekFrom, Write};
6use std::os::unix::fs::FileExt;
7use std::sync::Mutex;
8
9/// Trait for a fixed-size block store backend.
10/// All blocks are the same size. Block IDs are u64.
11pub trait BlockStore: Send + Sync {
12    /// Block size in bytes.
13    fn block_size(&self) -> usize;
14
15    /// Total number of blocks in the store.
16    fn total_blocks(&self) -> u64;
17
18    /// Read a full block. Returns exactly `block_size()` bytes.
19    fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>>;
20
21    /// Write a full block. `data` must be exactly `block_size()` bytes.
22    fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()>;
23
24    /// Sync / flush all writes. No-op for in-memory stores.
25    fn sync(&self) -> FsResult<()> {
26        Ok(())
27    }
28
29    /// Read multiple blocks in one call.
30    ///
31    /// The default implementation reads them sequentially; network-backed
32    /// stores should override this with pipelined I/O.
33    fn read_blocks(&self, block_ids: &[u64]) -> FsResult<Vec<Vec<u8>>> {
34        block_ids.iter().map(|&id| self.read_block(id)).collect()
35    }
36
37    /// Write multiple blocks in one call.
38    ///
39    /// The default implementation writes them sequentially; network-backed
40    /// stores should override this with pipelined I/O.
41    fn write_blocks(&self, blocks: &[(u64, &[u8])]) -> FsResult<()> {
42        for &(id, data) in blocks {
43            self.write_block(id, data)?;
44        }
45        Ok(())
46    }
47}
48
/// Simple in-memory block store for testing and development.
pub struct MemoryBlockStore {
    // Fixed size of every block, in bytes.
    block_size: usize,
    // Capacity of the store; valid block IDs are 0..total_blocks.
    total_blocks: u64,
    // Sparse map of written blocks; blocks absent from the map read back
    // as all zeroes (see `read_block`).
    blocks: Mutex<HashMap<u64, Vec<u8>>>,
    /// Counter incremented on every `write_block` call (for perf testing).
    write_count: std::sync::atomic::AtomicU64,
}
57
58impl MemoryBlockStore {
59    pub fn new(block_size: usize, total_blocks: u64) -> Self {
60        Self {
61            block_size,
62            total_blocks,
63            blocks: Mutex::new(HashMap::new()),
64            write_count: std::sync::atomic::AtomicU64::new(0),
65        }
66    }
67
68    /// Return the total number of `write_block` calls made so far.
69    pub fn stats_writes(&self) -> u64 {
70        self.write_count.load(std::sync::atomic::Ordering::Relaxed)
71    }
72}
73
74impl BlockStore for MemoryBlockStore {
75    fn block_size(&self) -> usize {
76        self.block_size
77    }
78
79    fn total_blocks(&self) -> u64 {
80        self.total_blocks
81    }
82
83    fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
84        if block_id >= self.total_blocks {
85            return Err(FsError::BlockOutOfRange(block_id));
86        }
87        let blocks = self
88            .blocks
89            .lock()
90            .map_err(|e| FsError::Internal(e.to_string()))?;
91        match blocks.get(&block_id) {
92            Some(data) => Ok(data.clone()),
93            None => {
94                // Unwritten blocks return zeroes.
95                Ok(vec![0u8; self.block_size])
96            }
97        }
98    }
99
100    fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
101        if block_id >= self.total_blocks {
102            return Err(FsError::BlockOutOfRange(block_id));
103        }
104        if data.len() != self.block_size {
105            return Err(FsError::BlockSizeMismatch {
106                expected: self.block_size,
107                got: data.len(),
108            });
109        }
110        self.write_count
111            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
112        let mut blocks = self
113            .blocks
114            .lock()
115            .map_err(|e| FsError::Internal(e.to_string()))?;
116        blocks.insert(block_id, data.to_vec());
117        Ok(())
118    }
119}
120
/// File-backed block store. Uses a regular file as a virtual block device.
///
/// Uses `pread`/`pwrite` (via `FileExt`) for positioned I/O without seeking,
/// which is safe for concurrent reads without a mutex on the file descriptor.
pub struct DiskBlockStore {
    // Backing regular file; accessed only through positioned I/O, so the
    // cursor position is irrelevant after construction.
    file: File,
    // Fixed size of every block, in bytes.
    block_size: usize,
    // Valid block IDs are 0..total_blocks.
    total_blocks: u64,
}
130
131impl DiskBlockStore {
132    /// Open an existing file as a block store.
133    ///
134    /// The file must already exist and be at least `block_size * total_blocks` bytes.
135    /// If `total_blocks` is 0, it is inferred from the file size.
136    pub fn open(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
137        let file = OpenOptions::new()
138            .read(true)
139            .write(true)
140            .open(path)
141            .map_err(|e| FsError::Internal(format!("open {path}: {e}")))?;
142
143        let file_len = file
144            .metadata()
145            .map_err(|e| FsError::Internal(format!("stat {path}: {e}")))?
146            .len();
147
148        let total_blocks = if total_blocks == 0 {
149            file_len / block_size as u64
150        } else {
151            total_blocks
152        };
153
154        let required = total_blocks * block_size as u64;
155        if file_len < required {
156            return Err(FsError::Internal(format!(
157                "file too small: {file_len} bytes, need {required}"
158            )));
159        }
160
161        Ok(Self {
162            file,
163            block_size,
164            total_blocks,
165        })
166    }
167
168    /// Create a new file of the given size and open it as a block store.
169    ///
170    /// Every block is filled with cryptographically random data so that
171    /// unallocated blocks are indistinguishable from encrypted ones.
172    pub fn create(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
173        let mut file = OpenOptions::new()
174            .read(true)
175            .write(true)
176            .create_new(true)
177            .open(path)
178            .map_err(|e| FsError::Internal(format!("create {path}: {e}")))?;
179
180        // Fill every block with random bytes so free space looks like ciphertext.
181        let mut rng = rand::thread_rng();
182        let mut buf = vec![0u8; block_size];
183        for _ in 0..total_blocks {
184            rng.fill_bytes(&mut buf);
185            file.write_all(&buf)
186                .map_err(|e| FsError::Internal(format!("write {path}: {e}")))?;
187        }
188        file.sync_all()
189            .map_err(|e| FsError::Internal(format!("sync {path}: {e}")))?;
190
191        Ok(Self {
192            file,
193            block_size,
194            total_blocks,
195        })
196    }
197}
198
199impl BlockStore for DiskBlockStore {
200    fn block_size(&self) -> usize {
201        self.block_size
202    }
203
204    fn total_blocks(&self) -> u64 {
205        self.total_blocks
206    }
207
208    fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
209        if block_id >= self.total_blocks {
210            return Err(FsError::BlockOutOfRange(block_id));
211        }
212        let offset = block_id * self.block_size as u64;
213        let mut buf = vec![0u8; self.block_size];
214        self.file
215            .read_exact_at(&mut buf, offset)
216            .map_err(|e| FsError::Internal(format!("read block {block_id}: {e}")))?;
217        Ok(buf)
218    }
219
220    fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
221        if block_id >= self.total_blocks {
222            return Err(FsError::BlockOutOfRange(block_id));
223        }
224        if data.len() != self.block_size {
225            return Err(FsError::BlockSizeMismatch {
226                expected: self.block_size,
227                got: data.len(),
228            });
229        }
230        let offset = block_id * self.block_size as u64;
231        self.file
232            .write_all_at(data, offset)
233            .map_err(|e| FsError::Internal(format!("write block {block_id}: {e}")))?;
234        Ok(())
235    }
236
237    fn sync(&self) -> FsResult<()> {
238        self.file
239            .sync_all()
240            .map_err(|e| FsError::Internal(format!("fsync: {e}")))
241    }
242}
243
/// Block-device-backed block store for raw devices such as EBS volumes.
///
/// Unlike [`DiskBlockStore`] which operates on regular files, this backend
/// targets raw block devices (e.g. `/dev/xvdf`, `/dev/nvme1n1p1`).  The
/// device must already exist; Linux does not allow creating device nodes
/// from userspace in the normal flow.
///
/// Device size is discovered via `lseek(SEEK_END)` because `stat()` reports
/// `st_size = 0` for block devices.  I/O uses `pread`/`pwrite` (via
/// [`FileExt`]) exactly like `DiskBlockStore`.
pub struct DeviceBlockStore {
    // Open handle to the device node; accessed only via positioned I/O.
    file: File,
    // Fixed size of every block, in bytes.
    block_size: usize,
    // Valid block IDs are 0..total_blocks.
    total_blocks: u64,
}
259
260impl DeviceBlockStore {
261    /// Open an existing block device.
262    ///
263    /// `total_blocks` – pass 0 to infer from the device size.
264    pub fn open(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
265        let mut file = OpenOptions::new()
266            .read(true)
267            .write(true)
268            .open(path)
269            .map_err(|e| FsError::Internal(format!("open device {path}: {e}")))?;
270
271        let device_size = file
272            .seek(SeekFrom::End(0))
273            .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
274
275        let total_blocks = if total_blocks == 0 {
276            device_size / block_size as u64
277        } else {
278            total_blocks
279        };
280
281        let required = total_blocks * block_size as u64;
282        if device_size < required {
283            return Err(FsError::Internal(format!(
284                "device too small: {device_size} bytes, need {required}"
285            )));
286        }
287
288        Ok(Self {
289            file,
290            block_size,
291            total_blocks,
292        })
293    }
294
295    /// Initialize a block device by filling every block with random data so
296    /// that free space is indistinguishable from ciphertext.
297    ///
298    /// **Warning:** this writes to *every* block and can take a long time on
299    /// large volumes.  Call this once when first provisioning the device.
300    ///
301    /// `total_blocks` – pass 0 to use the entire device.
302    pub fn initialize(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
303        let mut file = OpenOptions::new()
304            .read(true)
305            .write(true)
306            .open(path)
307            .map_err(|e| FsError::Internal(format!("open device {path}: {e}")))?;
308
309        let device_size = file
310            .seek(SeekFrom::End(0))
311            .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
312
313        let total_blocks = if total_blocks == 0 {
314            device_size / block_size as u64
315        } else {
316            total_blocks
317        };
318
319        let required = total_blocks * block_size as u64;
320        if device_size < required {
321            return Err(FsError::Internal(format!(
322                "device too small: {device_size} bytes, need {required}"
323            )));
324        }
325
326        // Seek back to the start before writing.
327        file.seek(SeekFrom::Start(0))
328            .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
329
330        let mut rng = rand::thread_rng();
331        let mut buf = vec![0u8; block_size];
332        for _ in 0..total_blocks {
333            rng.fill_bytes(&mut buf);
334            file.write_all(&buf)
335                .map_err(|e| FsError::Internal(format!("write device {path}: {e}")))?;
336        }
337        file.sync_all()
338            .map_err(|e| FsError::Internal(format!("sync device {path}: {e}")))?;
339
340        Ok(Self {
341            file,
342            block_size,
343            total_blocks,
344        })
345    }
346}
347
348impl BlockStore for DeviceBlockStore {
349    fn block_size(&self) -> usize {
350        self.block_size
351    }
352
353    fn total_blocks(&self) -> u64 {
354        self.total_blocks
355    }
356
357    fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
358        if block_id >= self.total_blocks {
359            return Err(FsError::BlockOutOfRange(block_id));
360        }
361        let offset = block_id * self.block_size as u64;
362        let mut buf = vec![0u8; self.block_size];
363        self.file
364            .read_exact_at(&mut buf, offset)
365            .map_err(|e| FsError::Internal(format!("read block {block_id}: {e}")))?;
366        Ok(buf)
367    }
368
369    fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
370        if block_id >= self.total_blocks {
371            return Err(FsError::BlockOutOfRange(block_id));
372        }
373        if data.len() != self.block_size {
374            return Err(FsError::BlockSizeMismatch {
375                expected: self.block_size,
376                got: data.len(),
377            });
378        }
379        let offset = block_id * self.block_size as u64;
380        self.file
381            .write_all_at(data, offset)
382            .map_err(|e| FsError::Internal(format!("write block {block_id}: {e}")))?;
383        Ok(())
384    }
385
386    fn sync(&self) -> FsResult<()> {
387        self.file
388            .sync_all()
389            .map_err(|e| FsError::Internal(format!("fsync: {e}")))
390    }
391}
392
#[cfg(test)]
mod tests {
    use super::*;

    /// Per-process temp file path so concurrent test binaries don't collide.
    /// Produces exactly `{stem}_{pid}.img` in the system temp directory.
    fn temp_img(stem: &str) -> std::path::PathBuf {
        std::env::temp_dir().join(format!("{stem}_{}.img", std::process::id()))
    }

    #[test]
    fn test_memory_block_store_roundtrip() {
        let store = MemoryBlockStore::new(64, 10);
        let payload = vec![0xAB; 64];
        store.write_block(0, &payload).unwrap();
        assert_eq!(store.read_block(0).unwrap(), payload);
    }

    #[test]
    fn test_unwritten_block_returns_zeroes() {
        let store = MemoryBlockStore::new(64, 10);
        assert_eq!(store.read_block(5).unwrap(), vec![0u8; 64]);
    }

    #[test]
    fn test_out_of_range_read() {
        let store = MemoryBlockStore::new(64, 10);
        assert!(store.read_block(10).is_err());
    }

    #[test]
    fn test_block_size_mismatch() {
        let store = MemoryBlockStore::new(64, 10);
        assert!(store.write_block(0, &[0u8; 32]).is_err());
    }

    #[test]
    fn test_disk_block_store_roundtrip() {
        let path = temp_img("doublecrypt_test");
        let path_str = path.to_str().unwrap();
        // Cleanup if leftover from a previous run.
        let _ = std::fs::remove_file(&path);

        let store = DiskBlockStore::create(path_str, 512, 16).unwrap();
        let payload = vec![0xAB; 512];
        store.write_block(0, &payload).unwrap();
        store.sync().unwrap();
        assert_eq!(store.read_block(0).unwrap(), payload);

        // Blocks were random-filled at create time, so an unwritten block
        // must not be all zeroes (512 zero bytes is astronomically unlikely).
        let untouched = store.read_block(10).unwrap();
        assert_eq!(untouched.len(), 512);
        assert!(untouched.iter().any(|&b| b != 0));

        // Bounds validation.
        assert!(store.read_block(16).is_err());
        assert!(store.write_block(16, &payload).is_err());

        // Length validation.
        assert!(store.write_block(0, &[0u8; 64]).is_err());

        drop(store);
        std::fs::remove_file(&path).unwrap();
    }

    #[test]
    fn test_disk_block_store_open_existing() {
        let path = temp_img("doublecrypt_test_open");
        let path_str = path.to_str().unwrap();
        let _ = std::fs::remove_file(&path);

        // Create, write one block, flush, close.
        {
            let store = DiskBlockStore::create(path_str, 256, 8).unwrap();
            store.write_block(3, &[0xCD_u8; 256]).unwrap();
            store.sync().unwrap();
        }

        // Reopen with explicit geometry and verify the block survived.
        {
            let store = DiskBlockStore::open(path_str, 256, 8).unwrap();
            assert_eq!(store.read_block(3).unwrap(), vec![0xCD; 256]);
        }

        // Reopen with total_blocks = 0: geometry inferred from file size.
        {
            let store = DiskBlockStore::open(path_str, 256, 0).unwrap();
            assert_eq!(store.total_blocks(), 8);
        }

        std::fs::remove_file(&path).unwrap();
    }

    #[test]
    fn test_disk_block_store_file_too_small() {
        let path = temp_img("doublecrypt_test_small");
        let path_str = path.to_str().unwrap();
        let _ = std::fs::remove_file(&path);

        // 100 bytes cannot hold 8 blocks of 256 bytes each.
        std::fs::write(&path, vec![0u8; 100]).unwrap();
        assert!(DiskBlockStore::open(path_str, 256, 8).is_err());

        std::fs::remove_file(&path).unwrap();
    }
}