1use crate::error::{FsError, FsResult};
2use rand::RngCore;
3use std::collections::HashMap;
4use std::fs::{File, OpenOptions};
5use std::io::{Seek, SeekFrom, Write};
6use std::os::unix::fs::FileExt;
7use std::sync::Mutex;
8
/// Abstraction over a fixed-geometry array of equally sized blocks.
///
/// Implementations must be shareable across threads (`Send + Sync`); every
/// method takes `&self`, so interior synchronization is the implementor's job.
pub trait BlockStore: Send + Sync {
    /// Size in bytes of every block in this store.
    fn block_size(&self) -> usize;

    /// Total number of addressable blocks; valid ids are `0..total_blocks()`.
    fn total_blocks(&self) -> u64;

    /// Reads block `block_id`. Implementations in this file return exactly
    /// `block_size()` bytes on success.
    fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>>;

    /// Writes `data` to block `block_id`. Implementations in this file
    /// reject `data` whose length differs from `block_size()`.
    fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()>;

    /// Flushes pending writes to durable storage. Default: no-op, for
    /// stores with no durability (e.g. in-memory).
    fn sync(&self) -> FsResult<()> {
        Ok(())
    }

    /// Reads several blocks. Default implementation reads them one at a
    /// time and stops at the first error.
    fn read_blocks(&self, block_ids: &[u64]) -> FsResult<Vec<Vec<u8>>> {
        block_ids.iter().map(|&id| self.read_block(id)).collect()
    }

    /// Writes several blocks. Default implementation writes them one at a
    /// time and stops at the first error; earlier writes are NOT rolled back.
    fn write_blocks(&self, blocks: &[(u64, &[u8])]) -> FsResult<()> {
        for &(id, data) in blocks {
            self.write_block(id, data)?;
        }
        Ok(())
    }
}
48
/// Block store backed by an in-memory hash map.
///
/// Storage is sparse: only blocks that have been written occupy memory, and
/// reading a never-written block yields an all-zero buffer.
pub struct MemoryBlockStore {
    // Size in bytes of every block.
    block_size: usize,
    // Number of addressable blocks; valid ids are 0..total_blocks.
    total_blocks: u64,
    // Sparse block contents, keyed by block id.
    blocks: Mutex<HashMap<u64, Vec<u8>>>,
    // Count of write_block calls that passed validation (see stats_writes).
    write_count: std::sync::atomic::AtomicU64,
}
57
58impl MemoryBlockStore {
59 pub fn new(block_size: usize, total_blocks: u64) -> Self {
60 Self {
61 block_size,
62 total_blocks,
63 blocks: Mutex::new(HashMap::new()),
64 write_count: std::sync::atomic::AtomicU64::new(0),
65 }
66 }
67
68 pub fn stats_writes(&self) -> u64 {
70 self.write_count.load(std::sync::atomic::Ordering::Relaxed)
71 }
72}
73
74impl BlockStore for MemoryBlockStore {
75 fn block_size(&self) -> usize {
76 self.block_size
77 }
78
79 fn total_blocks(&self) -> u64 {
80 self.total_blocks
81 }
82
83 fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
84 if block_id >= self.total_blocks {
85 return Err(FsError::BlockOutOfRange(block_id));
86 }
87 let blocks = self
88 .blocks
89 .lock()
90 .map_err(|e| FsError::Internal(e.to_string()))?;
91 match blocks.get(&block_id) {
92 Some(data) => Ok(data.clone()),
93 None => {
94 Ok(vec![0u8; self.block_size])
96 }
97 }
98 }
99
100 fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
101 if block_id >= self.total_blocks {
102 return Err(FsError::BlockOutOfRange(block_id));
103 }
104 if data.len() != self.block_size {
105 return Err(FsError::BlockSizeMismatch {
106 expected: self.block_size,
107 got: data.len(),
108 });
109 }
110 self.write_count
111 .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
112 let mut blocks = self
113 .blocks
114 .lock()
115 .map_err(|e| FsError::Internal(e.to_string()))?;
116 blocks.insert(block_id, data.to_vec());
117 Ok(())
118 }
119}
120
/// Block store backed by a regular file.
///
/// Reads and writes use positioned I/O (`read_exact_at` / `write_all_at`
/// via `FileExt`), so no seek cursor is shared between concurrent callers.
pub struct DiskBlockStore {
    // Backing image file, opened read+write.
    file: File,
    // Size in bytes of every block.
    block_size: usize,
    // Number of addressable blocks; valid ids are 0..total_blocks.
    total_blocks: u64,
}
130
131impl DiskBlockStore {
132 pub fn open(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
137 let file = OpenOptions::new()
138 .read(true)
139 .write(true)
140 .open(path)
141 .map_err(|e| FsError::Internal(format!("open {path}: {e}")))?;
142
143 let file_len = file
144 .metadata()
145 .map_err(|e| FsError::Internal(format!("stat {path}: {e}")))?
146 .len();
147
148 let total_blocks = if total_blocks == 0 {
149 file_len / block_size as u64
150 } else {
151 total_blocks
152 };
153
154 let required = total_blocks * block_size as u64;
155 if file_len < required {
156 return Err(FsError::Internal(format!(
157 "file too small: {file_len} bytes, need {required}"
158 )));
159 }
160
161 Ok(Self {
162 file,
163 block_size,
164 total_blocks,
165 })
166 }
167
168 pub fn create(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
173 let mut file = OpenOptions::new()
174 .read(true)
175 .write(true)
176 .create_new(true)
177 .open(path)
178 .map_err(|e| FsError::Internal(format!("create {path}: {e}")))?;
179
180 let mut rng = rand::thread_rng();
182 let mut buf = vec![0u8; block_size];
183 for _ in 0..total_blocks {
184 rng.fill_bytes(&mut buf);
185 file.write_all(&buf)
186 .map_err(|e| FsError::Internal(format!("write {path}: {e}")))?;
187 }
188 file.sync_all()
189 .map_err(|e| FsError::Internal(format!("sync {path}: {e}")))?;
190
191 Ok(Self {
192 file,
193 block_size,
194 total_blocks,
195 })
196 }
197}
198
199impl BlockStore for DiskBlockStore {
200 fn block_size(&self) -> usize {
201 self.block_size
202 }
203
204 fn total_blocks(&self) -> u64 {
205 self.total_blocks
206 }
207
208 fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
209 if block_id >= self.total_blocks {
210 return Err(FsError::BlockOutOfRange(block_id));
211 }
212 let offset = block_id * self.block_size as u64;
213 let mut buf = vec![0u8; self.block_size];
214 self.file
215 .read_exact_at(&mut buf, offset)
216 .map_err(|e| FsError::Internal(format!("read block {block_id}: {e}")))?;
217 Ok(buf)
218 }
219
220 fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
221 if block_id >= self.total_blocks {
222 return Err(FsError::BlockOutOfRange(block_id));
223 }
224 if data.len() != self.block_size {
225 return Err(FsError::BlockSizeMismatch {
226 expected: self.block_size,
227 got: data.len(),
228 });
229 }
230 let offset = block_id * self.block_size as u64;
231 self.file
232 .write_all_at(data, offset)
233 .map_err(|e| FsError::Internal(format!("write block {block_id}: {e}")))?;
234 Ok(())
235 }
236
237 fn sync(&self) -> FsResult<()> {
238 self.file
239 .sync_all()
240 .map_err(|e| FsError::Internal(format!("fsync: {e}")))
241 }
242}
243
/// Block store backed by a file opened at a device path.
///
/// Unlike `DiskBlockStore`, the size is probed with `seek(SeekFrom::End(0))`
/// rather than file metadata. I/O uses the positioned `FileExt` calls, so no
/// seek cursor is shared between concurrent readers/writers.
pub struct DeviceBlockStore {
    // Handle to the device, opened read+write.
    file: File,
    // Size in bytes of every block.
    block_size: usize,
    // Number of addressable blocks; valid ids are 0..total_blocks.
    total_blocks: u64,
}
259
260impl DeviceBlockStore {
261 pub fn open(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
265 let mut file = OpenOptions::new()
266 .read(true)
267 .write(true)
268 .open(path)
269 .map_err(|e| FsError::Internal(format!("open device {path}: {e}")))?;
270
271 let device_size = file
272 .seek(SeekFrom::End(0))
273 .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
274
275 let total_blocks = if total_blocks == 0 {
276 device_size / block_size as u64
277 } else {
278 total_blocks
279 };
280
281 let required = total_blocks * block_size as u64;
282 if device_size < required {
283 return Err(FsError::Internal(format!(
284 "device too small: {device_size} bytes, need {required}"
285 )));
286 }
287
288 Ok(Self {
289 file,
290 block_size,
291 total_blocks,
292 })
293 }
294
295 pub fn initialize(path: &str, block_size: usize, total_blocks: u64) -> FsResult<Self> {
303 let mut file = OpenOptions::new()
304 .read(true)
305 .write(true)
306 .open(path)
307 .map_err(|e| FsError::Internal(format!("open device {path}: {e}")))?;
308
309 let device_size = file
310 .seek(SeekFrom::End(0))
311 .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
312
313 let total_blocks = if total_blocks == 0 {
314 device_size / block_size as u64
315 } else {
316 total_blocks
317 };
318
319 let required = total_blocks * block_size as u64;
320 if device_size < required {
321 return Err(FsError::Internal(format!(
322 "device too small: {device_size} bytes, need {required}"
323 )));
324 }
325
326 file.seek(SeekFrom::Start(0))
328 .map_err(|e| FsError::Internal(format!("seek device {path}: {e}")))?;
329
330 let mut rng = rand::thread_rng();
331 let mut buf = vec![0u8; block_size];
332 for _ in 0..total_blocks {
333 rng.fill_bytes(&mut buf);
334 file.write_all(&buf)
335 .map_err(|e| FsError::Internal(format!("write device {path}: {e}")))?;
336 }
337 file.sync_all()
338 .map_err(|e| FsError::Internal(format!("sync device {path}: {e}")))?;
339
340 Ok(Self {
341 file,
342 block_size,
343 total_blocks,
344 })
345 }
346}
347
348impl BlockStore for DeviceBlockStore {
349 fn block_size(&self) -> usize {
350 self.block_size
351 }
352
353 fn total_blocks(&self) -> u64 {
354 self.total_blocks
355 }
356
357 fn read_block(&self, block_id: u64) -> FsResult<Vec<u8>> {
358 if block_id >= self.total_blocks {
359 return Err(FsError::BlockOutOfRange(block_id));
360 }
361 let offset = block_id * self.block_size as u64;
362 let mut buf = vec![0u8; self.block_size];
363 self.file
364 .read_exact_at(&mut buf, offset)
365 .map_err(|e| FsError::Internal(format!("read block {block_id}: {e}")))?;
366 Ok(buf)
367 }
368
369 fn write_block(&self, block_id: u64, data: &[u8]) -> FsResult<()> {
370 if block_id >= self.total_blocks {
371 return Err(FsError::BlockOutOfRange(block_id));
372 }
373 if data.len() != self.block_size {
374 return Err(FsError::BlockSizeMismatch {
375 expected: self.block_size,
376 got: data.len(),
377 });
378 }
379 let offset = block_id * self.block_size as u64;
380 self.file
381 .write_all_at(data, offset)
382 .map_err(|e| FsError::Internal(format!("write block {block_id}: {e}")))?;
383 Ok(())
384 }
385
386 fn sync(&self) -> FsResult<()> {
387 self.file
388 .sync_all()
389 .map_err(|e| FsError::Internal(format!("fsync: {e}")))
390 }
391}
392
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds the per-process temp image path used by the disk tests and
    /// removes any stale leftover from a previous run.
    fn scratch_file(tag: &str) -> std::path::PathBuf {
        let path = std::env::temp_dir()
            .join(format!("doublecrypt_test_{}{}.img", tag, std::process::id()));
        let _ = std::fs::remove_file(&path);
        path
    }

    #[test]
    fn test_memory_block_store_roundtrip() {
        let store = MemoryBlockStore::new(64, 10);
        let payload = vec![0xAB; 64];
        store.write_block(0, &payload).unwrap();
        assert_eq!(store.read_block(0).unwrap(), payload);
    }

    #[test]
    fn test_unwritten_block_returns_zeroes() {
        let store = MemoryBlockStore::new(64, 10);
        assert_eq!(store.read_block(5).unwrap(), vec![0u8; 64]);
    }

    #[test]
    fn test_out_of_range_read() {
        // id == total_blocks is one past the last valid block.
        assert!(MemoryBlockStore::new(64, 10).read_block(10).is_err());
    }

    #[test]
    fn test_block_size_mismatch() {
        assert!(MemoryBlockStore::new(64, 10).write_block(0, &[0u8; 32]).is_err());
    }

    #[test]
    fn test_disk_block_store_roundtrip() {
        let path = scratch_file("");
        let path_str = path.to_str().unwrap();

        let store = DiskBlockStore::create(path_str, 512, 16).unwrap();
        let payload = vec![0xAB; 512];
        store.write_block(0, &payload).unwrap();
        store.sync().unwrap();
        assert_eq!(store.read_block(0).unwrap(), payload);

        // A never-written block still holds the random fill from create().
        let untouched = store.read_block(10).unwrap();
        assert_eq!(untouched.len(), 512);
        assert!(untouched.iter().any(|&b| b != 0));

        // Out-of-range ids and wrong-size payloads are rejected.
        assert!(store.read_block(16).is_err());
        assert!(store.write_block(16, &payload).is_err());
        assert!(store.write_block(0, &[0u8; 64]).is_err());

        drop(store);
        std::fs::remove_file(&path).unwrap();
    }

    #[test]
    fn test_disk_block_store_open_existing() {
        let path = scratch_file("open_");
        let path_str = path.to_str().unwrap();

        {
            let store = DiskBlockStore::create(path_str, 256, 8).unwrap();
            let payload = vec![0xCD; 256];
            store.write_block(3, &payload).unwrap();
            store.sync().unwrap();
        }

        {
            let store = DiskBlockStore::open(path_str, 256, 8).unwrap();
            assert_eq!(store.read_block(3).unwrap(), vec![0xCD; 256]);
        }

        {
            // total_blocks == 0 derives the count from the file length.
            let store = DiskBlockStore::open(path_str, 256, 0).unwrap();
            assert_eq!(store.total_blocks(), 8);
        }

        std::fs::remove_file(&path).unwrap();
    }

    #[test]
    fn test_disk_block_store_file_too_small() {
        let path = scratch_file("small_");

        // 100 bytes cannot hold 8 blocks of 256 bytes.
        std::fs::write(&path, vec![0u8; 100]).unwrap();
        assert!(DiskBlockStore::open(path.to_str().unwrap(), 256, 8).is_err());

        std::fs::remove_file(&path).unwrap();
    }
}