use crate::cache::{get_or_create_cached_device, open_device_uncached, CachedDevice};
use crate::options::Options;
use crate::state::State;
use blkmap::{Fiemap, FiemapExtent};
use std::fs::File;
use std::io;
use std::os::unix::fs::FileExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;
/// Extension trait for reading a file's contents by way of its extent map,
/// optionally going straight to the backing block device.
pub trait BlkReader {
    /// Reads into `buf` at logical `offset` using default [`Options`],
    /// returning only the byte count from the resulting [`State`].
    fn blk_read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
        self.blk_read_at_opt(buf, offset, &Options::default())
            .map(|state| state.bytes_read)
    }

    /// Reads into `buf` at logical `offset` with explicit `options`,
    /// returning the full [`State`] describing how the read was satisfied.
    fn blk_read_at_opt(&self, buf: &mut [u8], offset: u64, options: &Options) -> io::Result<State>;
}
/// Bundles the file being read with the caller-supplied options so the
/// per-read helper methods don't have to thread both through every call.
struct ReadContext<'a> {
    // The file whose logical contents are being read.
    file: &'a File,
    // Read behavior flags (caching, fallback, hole filling, dry-run, ...).
    options: &'a Options,
}
impl<'a> ReadContext<'a> {
    /// Creates a read context borrowing the target file and options.
    fn new(file: &'a File, options: &'a Options) -> Self {
        Self { file, options }
    }

    /// Core read entry point: maps the logical range
    /// `[offset, offset + buf.len())` to extents, then either reads
    /// through the filesystem (fallback) or directly from the device.
    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<State> {
        // An empty buffer reads nothing; report a trivial fallback state.
        if buf.is_empty() {
            return Ok(State::fallback(Vec::new(), 0));
        }
        let length = buf.len() as u64;
        let extents = self.file.fiemap_range(offset, length)?;
        // No extents in the requested range: nothing is mapped there.
        if extents.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "file has no extents",
            ));
        }
        // Prefer an ordinary filesystem read when the caller allows it and
        // the range is fully covered by plain, written extents.
        if self.options.allow_fallback && self.can_use_fallback(&extents, offset, length) {
            return self.fallback_read(buf, offset, extents);
        }
        let device = self.get_device_handle()?;
        let bytes_read = self.read_from_device(&device, buf, offset, &extents)?;
        Ok(State::new(
            device.path().clone(),
            extents,
            bytes_read,
            false,
        ))
    }

    /// Returns true when `extents` contiguously cover the whole range
    /// `[offset, offset + length)` with no holes and no
    /// unwritten/unknown/delalloc extents — i.e. when a plain filesystem
    /// read is guaranteed to yield the same bytes as a device read.
    // NOTE(review): assumes `fiemap_range` returns extents sorted by
    // `logical` — confirm blkmap guarantees this ordering.
    fn can_use_fallback(&self, extents: &[FiemapExtent], offset: u64, length: u64) -> bool {
        if extents.is_empty() {
            return false;
        }
        let end = offset + length;
        let mut current = offset;
        for extent in extents {
            // Gap before this extent => a hole inside the range.
            if extent.logical > current {
                return false;
            }
            // Unwritten extents read as zeros via the fs but may hold
            // stale data on the device; don't treat them as plain data.
            if extent.flags.is_unwritten() {
                return false;
            }
            // Unknown/delalloc extents have no reliable device location.
            if extent.flags.is_unknown() || extent.flags.is_delalloc() {
                return false;
            }
            let extent_end = extent.logical + extent.length;
            // Coverage reaches the end of the requested range: done.
            if extent_end >= end {
                return true;
            }
            current = extent_end;
        }
        // Ran out of extents before reaching `end`.
        false
    }

    /// Reads through the filesystem instead of the block device. In
    /// dry-run mode no I/O is performed and the full buffer length is
    /// reported as read; with `read_exact` a short read is an error.
    fn fallback_read(
        &self,
        buf: &mut [u8],
        offset: u64,
        extents: Vec<FiemapExtent>,
    ) -> io::Result<State> {
        let bytes_read = if self.options.dry_run {
            buf.len()
        } else if self.options.read_exact {
            self.file.read_exact_at(buf, offset)?;
            buf.len()
        } else {
            self.file.read_at(buf, offset)?
        };
        Ok(State::fallback(extents, bytes_read))
    }

    /// Obtains a handle to the block device backing the file, shared via
    /// the process-wide cache when `enable_cache` is set.
    fn get_device_handle(&self) -> io::Result<DeviceHandle> {
        if self.options.enable_cache {
            let cached = get_or_create_cached_device(self.file)?;
            Ok(DeviceHandle::Cached(cached))
        } else {
            let uncached = open_device_uncached(self.file)?;
            Ok(DeviceHandle::Uncached(uncached))
        }
    }

    /// Walks `extents` and reads the requested logical range straight from
    /// the device, translating logical offsets to physical ones.
    ///
    /// Invariant throughout the loop: `bytes_read == current_offset - offset`,
    /// so `buf[bytes_read..]` is always the destination for the data at
    /// logical `current_offset`.
    fn read_from_device(
        &self,
        device: &DeviceHandle,
        buf: &mut [u8],
        offset: u64,
        extents: &[FiemapExtent],
    ) -> io::Result<usize> {
        let length = buf.len() as u64;
        let end = offset + length;
        let mut bytes_read = 0usize;
        let mut current_offset = offset;
        for extent in extents {
            if current_offset >= end {
                break;
            }
            let extent_end = extent.logical + extent.length;
            // Hole between `current_offset` and the start of this extent.
            if extent.logical > current_offset {
                let hole_end = extent.logical.min(end);
                let hole_len = (hole_end - current_offset) as usize;
                // Without fill_holes, stop at the first hole and report a
                // short read rather than fabricating zeros.
                if !self.options.fill_holes {
                    return Ok(bytes_read);
                }
                let buf_start = bytes_read;
                let buf_end = buf_start + hole_len;
                buf[buf_start..buf_end].fill(0);
                bytes_read += hole_len;
                current_offset = hole_end;
                if current_offset >= end {
                    break;
                }
            }
            // Unwritten extents read back as zeros through the filesystem;
            // synthesize that here instead of exposing stale device bytes.
            // NOTE(review): with zero_unwritten disabled this falls through
            // to a raw device read of the unwritten extent — confirm that
            // exposing on-device bytes in that case is intentional.
            if extent.flags.is_unwritten() && self.options.zero_unwritten {
                let read_start = current_offset.max(extent.logical);
                let read_end = extent_end.min(end);
                let read_len = (read_end - read_start) as usize;
                let buf_start = bytes_read;
                let buf_end = buf_start + read_len;
                buf[buf_start..buf_end].fill(0);
                bytes_read += read_len;
                current_offset = read_end;
                continue;
            }
            // Unknown/delalloc extents have no trustworthy physical
            // location; treat them like holes (zero or stop short).
            if extent.flags.is_unknown() || extent.flags.is_delalloc() {
                let read_start = current_offset.max(extent.logical);
                let read_end = extent_end.min(end);
                let hole_len = (read_end - read_start) as usize;
                if !self.options.fill_holes {
                    return Ok(bytes_read);
                }
                let buf_start = bytes_read;
                let buf_end = buf_start + hole_len;
                buf[buf_start..buf_end].fill(0);
                bytes_read += hole_len;
                current_offset = read_end;
                continue;
            }
            // Ordinary data extent: translate the logical position into a
            // physical device offset and read from there.
            let read_start = current_offset.max(extent.logical);
            let read_end = extent_end.min(end);
            let read_len = (read_end - read_start) as usize;
            let physical_offset = extent.physical + (read_start - extent.logical);
            let buf_start = bytes_read;
            let buf_end = buf_start + read_len;
            let actual_read = device.read_at(
                &mut buf[buf_start..buf_end],
                physical_offset,
                self.options.dry_run,
            )?;
            bytes_read += actual_read;
            current_offset = read_start + actual_read as u64;
            // Short device read: stop walking extents.
            if actual_read < read_len {
                break;
            }
        }
        // Trailing region past the last extent (or after a short device
        // read) is zero-filled when fill_holes is set.
        // NOTE(review): this also pads a short *device* read with zeros and
        // counts them as read — confirm that masking short reads this way
        // is the intended fill_holes semantics.
        if current_offset < end && self.options.fill_holes {
            let remaining = (end - current_offset) as usize;
            let buf_start = bytes_read;
            let buf_end = buf_start + remaining;
            if buf_end <= buf.len() {
                buf[buf_start..buf_end].fill(0);
                bytes_read += remaining;
            }
        }
        // With read_exact, anything short of a full buffer is an error.
        if self.options.read_exact && bytes_read < buf.len() {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "failed to fill entire buffer: expected {} bytes, got {} bytes",
                    buf.len(),
                    bytes_read
                ),
            ));
        }
        Ok(bytes_read)
    }
}
/// Handle to the block device backing a file: either shared through the
/// process-wide cache or opened privately for a single read.
enum DeviceHandle {
    // Reference-counted device obtained from the cache.
    Cached(Arc<CachedDevice>),
    // Privately opened device, dropped when this handle is dropped.
    Uncached(CachedDevice),
}
impl DeviceHandle {
fn path(&self) -> &PathBuf {
match self {
DeviceHandle::Cached(cached) => &cached.path,
DeviceHandle::Uncached(uncached) => &uncached.path,
}
}
fn read_at(&self, buf: &mut [u8], offset: u64, dry_run: bool) -> io::Result<usize> {
let file = match self {
DeviceHandle::Cached(cached) => &cached.file,
DeviceHandle::Uncached(uncached) => &uncached.file,
};
let bytes = if dry_run {
buf.len()
} else {
FileExt::read_at(file, buf, offset)?
};
Ok(bytes)
}
}
impl BlkReader for Path {
    /// Opens the file at this path and performs the block-level read.
    fn blk_read_at_opt(&self, buf: &mut [u8], offset: u64, options: &Options) -> io::Result<State> {
        let file = File::open(self)?;
        ReadContext::new(&file, options).read_at(buf, offset)
    }
}
impl BlkReader for PathBuf {
fn blk_read_at_opt(&self, buf: &mut [u8], offset: u64, options: &Options) -> io::Result<State> {
self.as_path().blk_read_at_opt(buf, offset, options)
}
}
impl BlkReader for File {
    /// Reads using this already-open file handle directly.
    fn blk_read_at_opt(&self, buf: &mut [u8], offset: u64, options: &Options) -> io::Result<State> {
        ReadContext::new(self, options).read_at(buf, offset)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // The Options builder should record every flag it is handed.
    #[test]
    fn test_options_builder() {
        let opts = Options::new()
            .with_cache(false)
            .with_fill_holes(true)
            .with_zero_unwritten(true)
            .with_allow_fallback(true)
            .with_read_exact(false)
            .with_dry_run(true);
        assert!(!opts.enable_cache);
        assert!(opts.fill_holes);
        assert!(opts.zero_unwritten);
        assert!(opts.allow_fallback);
        assert!(!opts.read_exact);
        assert!(opts.dry_run);
    }

    #[test]
    fn test_can_use_fallback() {
        use blkmap::ExtentFlags;

        // Builds a single-extent list at `logical` with the given flags.
        let single_extent = |logical, flags| {
            vec![FiemapExtent {
                logical,
                physical: 1000,
                length: 4096,
                flags,
            }]
        };

        let file = File::open("/proc/self/exe").unwrap();
        let options = Options::new().with_allow_fallback(true);
        let ctx = ReadContext::new(&file, &options);

        // No extents: fallback impossible.
        assert!(!ctx.can_use_fallback(&[], 0, 100));
        // Plain extent covering the whole range: fallback allowed.
        assert!(ctx.can_use_fallback(&single_extent(0, ExtentFlags::empty()), 0, 100));
        // Unwritten extent: must take the device path.
        assert!(!ctx.can_use_fallback(&single_extent(0, ExtentFlags::UNWRITTEN), 0, 100));
        // Hole before the first extent: fallback rejected.
        assert!(!ctx.can_use_fallback(&single_extent(100, ExtentFlags::empty()), 0, 200));
    }

    // read_exact is independently toggleable on an existing Options.
    #[test]
    fn test_read_exact_builder() {
        let opts = Options::new().with_read_exact(false);
        assert!(!opts.read_exact);
        let opts = opts.with_read_exact(true);
        assert!(opts.read_exact);
    }
}