use crate::Result;
use crate::error::Error;
use crate::format::{
BINTERNAL_HEADER_SIZE, BLEAF_HEADER_SIZE, NOVFLSIZE, P_BIGDATA, P_BIGKEY, P_BINTERNAL, P_BLEAF,
P_OVERFLOW, P_TYPE, PAGE_HEADER_SIZE,
};
/// Zero-copy view over a single on-disk btree page.
///
/// Holds the raw page bytes plus pre-decoded header fields; entry
/// accessors decode lazily and bounds-check every read.
pub(crate) struct PageView<'a> {
    /// Page number this view was parsed from (reported in corruption errors).
    pub(crate) pgno: u32,
    /// Previous sibling page number (header word at byte offset 4).
    pub(crate) prev_pg: u32,
    /// Next sibling page number (header word at byte offset 8).
    pub(crate) next_pg: u32,
    /// Raw page bytes, including the header.
    bytes: &'a [u8],
    /// When true, on-disk integers are opposite-endian and get byte-swapped on read.
    swap: bool,
    /// Page flag word; `flags & P_TYPE` is the page type.
    flags: u32,
    /// Entry count, derived from the `lower` header field during parse.
    nentries: usize,
}
/// Key/data pair decoded from one leaf-page entry.
#[derive(Clone, Copy, Debug)]
pub(crate) struct LeafEntry {
    /// The entry's key bytes (inline or overflow reference).
    pub(crate) key: ItemRef,
    /// The entry's data bytes (inline or overflow reference).
    pub(crate) data: ItemRef,
}
/// Separator key plus child page pointer decoded from one internal-page entry.
#[derive(Clone, Copy, Debug)]
pub(crate) struct InternalEntry {
    /// Separator key bytes (inline or overflow reference).
    pub(crate) key: ItemRef,
    /// Page number of the child subtree.
    pub(crate) child: u32,
}
/// Location of a key or data item's bytes.
#[derive(Clone, Copy, Debug)]
pub(crate) enum ItemRef {
    /// Item stored in the page itself: `len` bytes at page byte offset `off`.
    Inline { off: u32, len: u32 },
    /// Item stored on overflow page `pgno`; `size` is its total byte length.
    Overflow { pgno: u32, size: u32 },
}
impl<'a> PageView<'a> {
    /// Parses and validates the page header of `bytes`.
    ///
    /// `swap` selects byte-swapped integer reads for files written on an
    /// opposite-endian machine. Header layout as read here: prev page at
    /// offset 4, next page at 8, flags at 12, `lower` at 16, `upper` at 18.
    ///
    /// # Errors
    /// Returns [`Error::CorruptPage`] when the buffer is shorter than the
    /// header, the page type is unsupported, or the `lower`/`upper`
    /// bounds are inconsistent.
    pub(crate) fn parse(pgno: u32, bytes: &'a [u8], swap: bool) -> Result<Self> {
        if bytes.len() < PAGE_HEADER_SIZE {
            return Err(Error::CorruptPage {
                pgno,
                reason: "page shorter than header",
            });
        }
        let prev_pg = read_u32(bytes, 4, swap);
        let next_pg = read_u32(bytes, 8, swap);
        let flags = read_u32(bytes, 12, swap);
        let lower = read_u16(bytes, 16, swap) as usize;
        let upper = read_u16(bytes, 18, swap) as usize;
        // Identify the page type before interpreting lower/upper, so
        // overflow pages can get the relaxed bounds treatment below.
        let page_type = flags & P_TYPE;
        match page_type {
            P_BINTERNAL | P_BLEAF | P_OVERFLOW => {}
            _ => {
                return Err(Error::CorruptPage {
                    pgno,
                    reason: "unsupported page type",
                });
            }
        }
        let nentries = if page_type == P_OVERFLOW && lower == 0 && upper == 0 {
            // NOTE(review): classic BDB 1.85 writers leave lower/upper
            // zeroed on overflow pages (they carry no entry index).
            // Accept such pages with zero entries instead of rejecting
            // them via the bounds check; previously-accepted pages are
            // unaffected.
            0
        } else {
            // `lower` is the exclusive end of the u16 entry-index array
            // (so it must cover the header and be 2-byte aligned);
            // `upper` is the start of the payload region and must lie
            // within the page.
            if lower < PAGE_HEADER_SIZE
                || upper > bytes.len()
                || lower > upper
                || (lower - PAGE_HEADER_SIZE) % 2 != 0
            {
                return Err(Error::CorruptPage {
                    pgno,
                    reason: "page bounds inconsistent",
                });
            }
            (lower - PAGE_HEADER_SIZE) / 2
        };
        Ok(Self {
            pgno,
            prev_pg,
            next_pg,
            flags,
            bytes,
            swap,
            nentries,
        })
    }

    /// True when this page is a btree leaf page.
    #[inline]
    #[must_use]
    pub(crate) const fn is_leaf(&self) -> bool {
        // Compare the masked type exactly rather than testing a single
        // bit: a bit test misreports if a type constant is zero or if
        // constants share bits. Equivalent for every page `parse` accepts.
        self.flags & P_TYPE == P_BLEAF
    }

    /// True when this page is a btree internal page.
    #[inline]
    #[must_use]
    pub(crate) const fn is_internal(&self) -> bool {
        self.flags & P_TYPE == P_BINTERNAL
    }

    /// True when this page is an overflow (big key/data) page.
    #[inline]
    #[must_use]
    pub(crate) const fn is_overflow(&self) -> bool {
        self.flags & P_TYPE == P_OVERFLOW
    }

    /// Number of entries recorded in this page's index array.
    #[inline]
    #[must_use]
    pub(crate) const fn nentries(&self) -> usize {
        self.nentries
    }

    /// All bytes following the page header; on an overflow page this is
    /// the raw item payload.
    #[inline]
    #[must_use]
    pub(crate) const fn overflow_payload(&self) -> &'a [u8] {
        // `parse` guarantees bytes.len() >= PAGE_HEADER_SIZE, so this
        // split cannot panic.
        let (_, rest) = self.bytes.split_at(PAGE_HEADER_SIZE);
        rest
    }

    /// Resolves an [`ItemRef::Inline`] to its byte slice within the page.
    ///
    /// # Errors
    /// Returns a corruption error when `item` is an overflow reference or
    /// its range does not fit inside the page.
    pub(crate) fn inline(&self, item: ItemRef) -> Result<&'a [u8]> {
        match item {
            ItemRef::Inline { off, len } => {
                let start = off as usize;
                let end = start
                    .checked_add(len as usize)
                    .ok_or_else(|| self.corrupt("inline offset + length overflows"))?;
                self.bytes
                    .get(start..end)
                    .ok_or_else(|| self.corrupt("inline offset out of range"))
            }
            ItemRef::Overflow { .. } => Err(self.corrupt("expected inline item, found overflow")),
        }
    }

    /// Decodes the leaf entry at index `idx`.
    ///
    /// On-disk entry layout as read here: u32 key size at +0, u32 data
    /// size at +4, u8 flags at +8; the payload (key bytes then data
    /// bytes) begins `BLEAF_HEADER_SIZE` bytes past the entry start.
    /// `P_BIGKEY`/`P_BIGDATA` mark the respective payload as an overflow
    /// reference instead of inline bytes.
    ///
    /// # Errors
    /// Returns a corruption error when the index or any recorded size
    /// reaches outside the page.
    pub(crate) fn leaf_entry(&self, idx: usize) -> Result<LeafEntry> {
        let off = self.entry_offset(idx)?;
        let ksize = self.u32_at(off)? as usize;
        let dsize = self.u32_at(off + 4)? as usize;
        let eflags = self.byte_at(off + 8)?;
        let payload = off + BLEAF_HEADER_SIZE;
        let payload_end = payload
            .checked_add(ksize)
            .and_then(|p| p.checked_add(dsize))
            .ok_or_else(|| self.corrupt("leaf payload length overflows"))?;
        if payload_end > self.bytes.len() {
            return Err(self.corrupt("leaf payload out of range"));
        }
        let key = self.decode_item(payload, ksize, eflags & P_BIGKEY != 0)?;
        let data = self.decode_item(payload + ksize, dsize, eflags & P_BIGDATA != 0)?;
        Ok(LeafEntry { key, data })
    }

    /// Decodes the internal entry at index `idx`.
    ///
    /// On-disk entry layout as read here: u32 key size at +0, u32 child
    /// page number at +4, u8 flags at +8; the key payload begins
    /// `BINTERNAL_HEADER_SIZE` bytes past the entry start.
    ///
    /// # Errors
    /// Returns a corruption error when the index or recorded key size
    /// reaches outside the page.
    pub(crate) fn internal_entry(&self, idx: usize) -> Result<InternalEntry> {
        let off = self.entry_offset(idx)?;
        let ksize = self.u32_at(off)? as usize;
        let child = self.u32_at(off + 4)?;
        let eflags = self.byte_at(off + 8)?;
        let payload = off + BINTERNAL_HEADER_SIZE;
        let payload_end = payload
            .checked_add(ksize)
            .ok_or_else(|| self.corrupt("internal payload length overflows"))?;
        if payload_end > self.bytes.len() {
            return Err(self.corrupt("internal payload out of range"));
        }
        let key = self.decode_item(payload, ksize, eflags & P_BIGKEY != 0)?;
        Ok(InternalEntry { key, child })
    }

    /// Looks up the byte offset of entry `idx` in the u16 index array
    /// that starts immediately after the page header.
    fn entry_offset(&self, idx: usize) -> Result<usize> {
        if idx >= self.nentries {
            return Err(self.corrupt("entry index out of range"));
        }
        Ok(self.u16_at(PAGE_HEADER_SIZE + idx * 2)? as usize)
    }

    /// Classifies a payload: inline bytes at `off`, or — when `big` is
    /// set — an overflow reference whose first `NOVFLSIZE` bytes hold a
    /// u32 page number followed by a u32 total size.
    // Truncating casts are fine here: supported page sizes keep offsets
    // and lengths well within u32 range.
    #[allow(clippy::cast_possible_truncation)]
    fn decode_item(&self, off: usize, len: usize, big: bool) -> Result<ItemRef> {
        if !big {
            return Ok(ItemRef::Inline {
                off: off as u32,
                len: len as u32,
            });
        }
        if len < NOVFLSIZE {
            return Err(self.corrupt("overflow ref too short"));
        }
        let pgno = self.u32_at(off)?;
        let size = self.u32_at(off + 4)?;
        Ok(ItemRef::Overflow { pgno, size })
    }

    /// Bounds-checked single-byte read.
    #[inline]
    fn byte_at(&self, off: usize) -> Result<u8> {
        self.bytes
            .get(off)
            .copied()
            .ok_or_else(|| self.corrupt("byte read out of range"))
    }

    /// Bounds-checked u16 read in file byte order.
    #[inline]
    fn u16_at(&self, off: usize) -> Result<u16> {
        let end = off
            .checked_add(2)
            .ok_or_else(|| self.corrupt("u16 offset overflows"))?;
        let b = self
            .bytes
            .get(off..end)
            .ok_or_else(|| self.corrupt("u16 read out of range"))?;
        Ok(read_u16_bytes([b[0], b[1]], self.swap))
    }

    /// Bounds-checked u32 read in file byte order.
    #[inline]
    fn u32_at(&self, off: usize) -> Result<u32> {
        let end = off
            .checked_add(4)
            .ok_or_else(|| self.corrupt("u32 offset overflows"))?;
        let b = self
            .bytes
            .get(off..end)
            .ok_or_else(|| self.corrupt("u32 read out of range"))?;
        Ok(read_u32_bytes([b[0], b[1], b[2], b[3]], self.swap))
    }

    /// Builds a [`Error::CorruptPage`] carrying this page's number.
    #[inline]
    const fn corrupt(&self, reason: &'static str) -> Error {
        Error::CorruptPage {
            pgno: self.pgno,
            reason,
        }
    }
}
/// Reads the u16 at `off` in file byte order, byte-swapping when `swap`
/// is set. Panics on out-of-bounds access; callers are expected to have
/// length-checked the slice first.
#[inline]
fn read_u16(bytes: &[u8], off: usize, swap: bool) -> u16 {
    let native = u16::from_ne_bytes([bytes[off], bytes[off + 1]]);
    if swap { native.swap_bytes() } else { native }
}
/// Reads the u32 at `off` in file byte order, byte-swapping when `swap`
/// is set. Panics on out-of-bounds access; callers are expected to have
/// length-checked the slice first.
#[inline]
fn read_u32(bytes: &[u8], off: usize, swap: bool) -> u32 {
    let raw = [bytes[off], bytes[off + 1], bytes[off + 2], bytes[off + 3]];
    let native = u32::from_ne_bytes(raw);
    if swap { native.swap_bytes() } else { native }
}
/// Interprets two on-disk bytes as a u16, honoring the file's byte order.
#[inline]
const fn read_u16_bytes(b: [u8; 2], swap: bool) -> u16 {
    match swap {
        true => u16::from_ne_bytes(b).swap_bytes(),
        false => u16::from_ne_bytes(b),
    }
}
/// Interprets four on-disk bytes as a u32, honoring the file's byte order.
#[inline]
const fn read_u32_bytes(b: [u8; 4], swap: bool) -> u32 {
    match swap {
        true => u32::from_ne_bytes(b).swap_bytes(),
        false => u32::from_ne_bytes(b),
    }
}