use crate::block_index::FsBlockIndex;
use crate::block_size::BlockSize;
use crate::error::CorruptKind;
use crate::error::Ext4Error;
use crate::util::usize_from_u32;
use alloc::boxed::Box;
use alloc::collections::VecDeque;
use alloc::vec;
/// A single cached filesystem block.
#[derive(Clone)]
struct CacheEntry {
    /// Absolute index of the cached block within the filesystem.
    block_index: FsBlockIndex,
    /// The block's contents; allocated to exactly one block length
    /// (see `BlockCache::with_opts`).
    data: Box<[u8]>,
}
/// LRU cache of filesystem blocks with read-ahead.
pub(crate) struct BlockCache {
    /// Scratch buffer filled by read callbacks; sized to hold
    /// `max_blocks_per_read` whole blocks.
    read_buf: Box<[u8]>,
    /// Maximum number of blocks fetched by a single read (read-ahead
    /// amount).
    max_blocks_per_read: u32,
    /// Cache entries, ordered most-recently-used first.
    entries: VecDeque<CacheEntry>,
    /// Size of one filesystem block.
    block_size: BlockSize,
    /// Total number of blocks in the filesystem; bounds both lookups
    /// and read-ahead.
    num_fs_blocks: u64,
}
impl BlockCache {
pub(crate) fn new(
block_size: BlockSize,
num_fs_blocks: u64,
) -> Result<Self, Ext4Error> {
Self::with_opts(CacheOpts::new(block_size), num_fs_blocks)
}
fn with_opts(
opts: CacheOpts,
num_fs_blocks: u64,
) -> Result<Self, Ext4Error> {
assert!(usize_from_u32(opts.max_blocks_per_read) <= opts.num_entries);
let read_buf_len = opts.read_buf_size_in_bytes();
let entries = vec![
CacheEntry {
block_index: 0,
data: vec![0; opts.block_size.to_usize()].into_boxed_slice(),
};
opts.num_entries
];
Ok(Self {
entries: VecDeque::from(entries),
max_blocks_per_read: opts.max_blocks_per_read,
read_buf: vec![0; read_buf_len].into_boxed_slice(),
block_size: opts.block_size,
num_fs_blocks,
})
}
fn num_blocks_to_read(&self, block_index: FsBlockIndex) -> u32 {
assert!(block_index < self.num_fs_blocks);
let end_block = block_index
.saturating_add(u64::from(self.max_blocks_per_read))
.min(self.num_fs_blocks);
let num_blocks = end_block.checked_sub(block_index).unwrap();
u32::try_from(num_blocks).unwrap()
}
pub(crate) fn get_or_insert_blocks<F>(
&mut self,
block_index: FsBlockIndex,
f: F,
) -> Result<&[u8], Ext4Error>
where
F: FnOnce(&mut [u8]) -> Result<(), Ext4Error>,
{
assert!(block_index < self.num_fs_blocks);
if let Some(index) = self
.entries
.iter()
.position(|entry| entry.block_index == block_index)
{
if index != 0 {
let entry = self.entries.remove(index).unwrap();
self.entries.push_front(entry);
}
return Ok(&*self.entries[0].data);
}
let num_blocks = self.num_blocks_to_read(block_index);
let num_bytes = usize_from_u32(num_blocks)
.checked_mul(self.block_size.to_usize())
.ok_or(CorruptKind::BlockCacheReadTooLarge {
num_blocks,
block_size: self.block_size,
})?;
f(&mut self.read_buf[..num_bytes])?;
for i in (0..num_blocks).rev() {
let block_index = block_index.checked_add(u64::from(i)).unwrap();
self.insert_block(block_index, i);
}
let entry = &self.entries[0];
assert_eq!(entry.block_index, block_index);
Ok(&*entry.data)
}
fn insert_block(
&mut self,
block_index: FsBlockIndex,
block_within_read_buf: u32,
) {
assert!(block_within_read_buf < self.max_blocks_per_read);
let start = usize_from_u32(block_within_read_buf)
.checked_mul(self.block_size.to_usize())
.unwrap();
let end = start.checked_add(self.block_size.to_usize()).unwrap();
let src = &self.read_buf[start..end];
let mut entry = self.entries.pop_back().unwrap();
entry.block_index = block_index;
entry.data.copy_from_slice(src);
self.entries.push_front(entry);
}
}
/// Tuning parameters for `BlockCache`.
#[derive(Debug, PartialEq)]
struct CacheOpts {
    /// Size of one filesystem block.
    block_size: BlockSize,
    /// Maximum number of blocks fetched by a single read.
    max_blocks_per_read: u32,
    /// Total number of cache entries; must be at least
    /// `max_blocks_per_read`.
    num_entries: usize,
}
impl CacheOpts {
    /// Derive cache parameters from the filesystem block size.
    ///
    /// Reads fetch roughly 32 KiB at a time (at least one block), and
    /// the cache holds eight reads' worth of blocks.
    fn new(block_size: BlockSize) -> Self {
        // Target size of a single read request, in bytes.
        const MAX_BYTES_PER_READ: u32 = 8 * 4096;

        // For block sizes larger than the read target, still read one
        // whole block.
        let max_blocks_per_read =
            (MAX_BYTES_PER_READ / block_size.to_nz_u32()).max(1);
        Self {
            block_size,
            max_blocks_per_read,
            num_entries: usize_from_u32(
                max_blocks_per_read.checked_mul(8).unwrap(),
            ),
        }
    }

    /// Size in bytes of the scratch buffer backing a single read:
    /// `max_blocks_per_read` whole blocks.
    fn read_buf_size_in_bytes(&self) -> usize {
        usize_from_u32(self.max_blocks_per_read)
            .checked_mul(self.block_size.to_usize())
            .unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a `BlockSize` from a byte count (must be a power of two).
    fn get_block_size(sz: u32) -> BlockSize {
        // The superblock stores log2(block_size) - 10.
        let bs = BlockSize::from_superblock_value(sz.ilog2() - 10).unwrap();
        assert_eq!(bs.to_u32(), sz);
        bs
    }

    /// Check the derived cache options across small, typical, and
    /// maximal block sizes.
    #[test]
    fn test_cache_opts() {
        let block_size = get_block_size(1024);
        assert_eq!(
            CacheOpts::new(block_size),
            CacheOpts {
                block_size,
                max_blocks_per_read: 32,
                num_entries: 256,
            }
        );
        let block_size = get_block_size(4096);
        assert_eq!(
            CacheOpts::new(block_size),
            CacheOpts {
                block_size,
                max_blocks_per_read: 8,
                num_entries: 64,
            }
        );
        // 64 KiB blocks exceed the per-read byte target, so reads are
        // clamped to a single block.
        let block_size = get_block_size(65536);
        assert_eq!(
            CacheOpts::new(block_size),
            CacheOpts {
                block_size,
                max_blocks_per_read: 1,
                num_entries: 8,
            }
        );
    }

    /// Read-ahead is clamped at the end of the filesystem.
    #[test]
    fn test_num_blocks_to_read() {
        let num_fs_blocks = 8;
        let cache = BlockCache::with_opts(
            CacheOpts {
                block_size: get_block_size(1024),
                max_blocks_per_read: 4,
                num_entries: 4,
            },
            num_fs_blocks,
        )
        .unwrap();
        // Full read-ahead is possible away from the end.
        assert_eq!(cache.num_blocks_to_read(0), 4);
        assert_eq!(cache.num_blocks_to_read(4), 4);
        // Near the end, reads shrink to fit.
        assert_eq!(cache.num_blocks_to_read(5), 3);
        assert_eq!(cache.num_blocks_to_read(7), 1);
    }

    /// Inserting copies the right chunk of `read_buf`, pushes the
    /// entry to the front, and reuses allocations instead of making
    /// new ones.
    #[test]
    fn test_insert_block() {
        let num_fs_blocks = 8;
        let mut cache = BlockCache::with_opts(
            CacheOpts {
                block_size: get_block_size(1024),
                max_blocks_per_read: 4,
                num_entries: 4,
            },
            num_fs_blocks,
        )
        .unwrap();
        // Tag the first byte of the first two blocks in the read
        // buffer so the copies can be identified.
        cache.read_buf[0] = 6;
        cache.read_buf[1024] = 7;
        cache.insert_block(123, 0);
        assert_eq!(cache.entries[0].block_index, 123);
        assert_eq!(cache.entries[0].data[0], 6);
        let block123_ptr = cache.entries[0].data.as_ptr();
        cache.insert_block(456, 1);
        assert_eq!(cache.entries[0].block_index, 456);
        assert_eq!(cache.entries[0].data[0], 7);
        assert_eq!(cache.entries[1].block_index, 123);
        assert_eq!(cache.entries[1].data[0], 6);
        // Same pointer: the earlier entry's buffer was recycled, not
        // reallocated.
        assert_eq!(cache.entries[1].data.as_ptr(), block123_ptr);
    }

    /// End-to-end: miss-then-fill, hit without re-reading, LRU
    /// ordering, and eviction.
    #[test]
    fn test_get_or_insert_blocks() {
        let num_fs_blocks = 8;
        let mut cache = BlockCache::with_opts(
            CacheOpts {
                block_size: get_block_size(1024),
                max_blocks_per_read: 2,
                num_entries: 4,
            },
            num_fs_blocks,
        )
        .unwrap();
        // An error from the read closure is propagated to the caller.
        assert_eq!(
            cache
                .get_or_insert_blocks(1, |_| {
                    Err(CorruptKind::TooManyBlocksInFile.into())
                })
                .unwrap_err(),
            CorruptKind::TooManyBlocksInFile
        );
        // Miss on block 1: reads two blocks (1 and 2) in one request.
        let data = cache
            .get_or_insert_blocks(1, |buf| {
                assert_eq!(buf.len(), 1024 * 2);
                buf[0] = 3;
                buf[1024] = 4;
                Ok(())
            })
            .unwrap();
        assert_eq!(data[0], 3);
        assert_eq!(cache.entries[0].block_index, 1);
        assert_eq!(cache.entries[0].data[0], 3);
        assert_eq!(cache.entries[1].block_index, 2);
        assert_eq!(cache.entries[1].data[0], 4);
        // Hit on read-ahead block 2: the closure must not be called,
        // and block 2 moves to the front of the LRU queue.
        let data = cache
            .get_or_insert_blocks(2, |_| {
                panic!("read closure called unexpectedly");
            })
            .unwrap();
        assert_eq!(data[0], 4);
        assert_eq!(cache.entries[0].block_index, 2);
        assert_eq!(cache.entries[1].block_index, 1);
        // Miss on block 3 pulls in 3 and 4 ahead of the existing
        // entries.
        cache.get_or_insert_blocks(3, |_| Ok(())).unwrap();
        assert_eq!(cache.entries[0].block_index, 3);
        assert_eq!(cache.entries[1].block_index, 4);
        assert_eq!(cache.entries[2].block_index, 2);
        assert_eq!(cache.entries[3].block_index, 1);
        // The cache is full (4 entries), so reading blocks 5 and 6
        // evicts the two least-recently-used entries (1 and 2... the
        // back of the queue).
        cache.get_or_insert_blocks(5, |_| Ok(())).unwrap();
        assert_eq!(cache.entries[0].block_index, 5);
        assert_eq!(cache.entries[1].block_index, 6);
        assert_eq!(cache.entries[2].block_index, 3);
        assert_eq!(cache.entries[3].block_index, 4);
    }
}