use crate::{Blob, Error};
use commonware_utils::StableBuf;
use std::num::NonZeroUsize;
/// A buffered reader over a [`Blob`], reading ahead in fixed-size chunks so
/// that small sequential reads do not each hit the underlying blob.
pub struct Read<B: Blob> {
// Underlying blob being read.
blob: B,
// Scratch buffer holding the most recently fetched chunk of the blob.
buffer: StableBuf,
// Blob offset corresponding to the start of `buffer`.
blob_position: u64,
// Total size of the blob in bytes (fixed at construction).
blob_size: u64,
// Read cursor within `buffer`; always <= `buffer_valid_len`.
buffer_position: usize,
// Number of leading bytes in `buffer` that contain valid blob data.
buffer_valid_len: usize,
// Capacity of `buffer` (the configured chunk size).
buffer_size: usize,
}
impl<B: Blob> Read<B> {
/// Creates a buffered reader over `blob` with a read-ahead chunk of
/// `buffer_size` bytes. `blob_size` is the total length of the blob;
/// no data is fetched until the first read.
pub fn new(blob: B, blob_size: u64, buffer_size: NonZeroUsize) -> Self {
    let capacity = buffer_size.get();
    Self {
        blob,
        buffer: vec![0u8; capacity].into(),
        blob_position: 0,
        blob_size,
        buffer_position: 0,
        buffer_valid_len: 0,
        buffer_size: capacity,
    }
}

/// Returns how many already-buffered bytes remain unconsumed.
pub fn buffer_remaining(&self) -> usize {
    self.buffer_valid_len - self.buffer_position
}

/// Returns how many bytes remain between the current logical position
/// and the end of the blob (buffered or not).
pub fn blob_remaining(&self) -> u64 {
    let consumed = self.blob_position + self.buffer_position as u64;
    self.blob_size.saturating_sub(consumed)
}

/// Returns the total size of the blob in bytes.
pub fn blob_size(&self) -> u64 {
    self.blob_size
}

/// Refills the internal buffer from the current logical position.
///
/// Returns the number of bytes now buffered, or
/// [`Error::BlobInsufficientLength`] if the position is already at the
/// end of the blob.
async fn refill(&mut self) -> Result<usize, Error> {
    // Fold whatever was consumed from the buffer into the blob cursor,
    // then invalidate the buffer before issuing a new read.
    self.blob_position += self.buffer_position as u64;
    self.buffer_position = 0;
    self.buffer_valid_len = 0;

    let remaining = self.blob_size.saturating_sub(self.blob_position);
    if remaining == 0 {
        return Err(Error::BlobInsufficientLength);
    }

    let fill_len = remaining.min(self.buffer_size as u64) as usize;
    if fill_len == self.buffer_size {
        // Full chunk available: hand the whole buffer to the blob.
        self.buffer = self
            .blob
            .read_at(std::mem::take(&mut self.buffer), self.blob_position)
            .await?;
    } else {
        // Tail of the blob is shorter than the buffer: read into a
        // right-sized temporary and copy it into the front of the buffer.
        let tmp: Vec<u8> = self
            .blob
            .read_at(vec![0u8; fill_len], self.blob_position)
            .await?
            .into();
        self.buffer.as_mut()[..fill_len].copy_from_slice(&tmp);
    }
    self.buffer_valid_len = fill_len;
    Ok(fill_len)
}

/// Reads exactly `size` bytes into the front of `buf`, advancing the
/// logical position, refilling the buffer as needed.
///
/// # Errors
///
/// Returns [`Error::BlobInsufficientLength`] if fewer than `size` bytes
/// remain before the end of the blob.
///
/// # Panics
///
/// Panics if `buf` is shorter than `size`.
pub async fn read_exact(&mut self, buf: &mut [u8], size: usize) -> Result<(), Error> {
    assert!(
        size <= buf.len(),
        "provided buffer is too small for requested size"
    );
    // Check availability up front so a partial copy never happens.
    let available = self.buffer_remaining() + self.blob_remaining() as usize;
    if available < size {
        return Err(Error::BlobInsufficientLength);
    }
    let mut copied = 0;
    while copied < size {
        if self.buffer_position >= self.buffer_valid_len {
            self.refill().await?;
        }
        let chunk = (size - copied).min(self.buffer_remaining());
        let src = &self.buffer.as_ref()[self.buffer_position..self.buffer_position + chunk];
        buf[copied..copied + chunk].copy_from_slice(src);
        self.buffer_position += chunk;
        copied += chunk;
    }
    Ok(())
}

/// Returns the current logical position within the blob.
pub fn position(&self) -> u64 {
    self.blob_position + self.buffer_position as u64
}

/// Moves the logical position to `position` without reading any data.
///
/// If the target is already inside the buffered window, only the buffer
/// cursor moves; otherwise the buffer is invalidated and the next read
/// fetches from `position`.
///
/// # Errors
///
/// Returns [`Error::BlobInsufficientLength`] if `position` is past the
/// end of the blob.
pub fn seek_to(&mut self, position: u64) -> Result<(), Error> {
    if position > self.blob_size {
        return Err(Error::BlobInsufficientLength);
    }
    let start = self.blob_position;
    let end = start + self.buffer_valid_len as u64;
    if (start..end).contains(&position) {
        // Target is already buffered: just move the in-buffer cursor.
        self.buffer_position = (position - start) as usize;
    } else {
        // Outside the buffered window: drop it and read lazily later.
        self.blob_position = position;
        self.buffer_position = 0;
        self.buffer_valid_len = 0;
    }
    Ok(())
}

/// Consumes the reader, resizes the underlying blob to `len`, and syncs it.
pub async fn resize(self, len: u64) -> Result<(), Error> {
    self.blob.resize(len).await?;
    self.blob.sync().await
}
}