//! sqjson 0.1.9
//!
//! A simple JSON-based embedded database.
use crate::{error::DbError, file::open_and_resize, util::PAGE_SIZE};
use memmap2::MmapMut;
use std::fs::File;

/// Page-granular storage layer: memory-maps the database file and serves
/// fixed-size (`PAGE_SIZE`) pages out of the mapping.
pub struct Pager {
    // Writable map over `file`; replaced wholesale whenever the file grows
    // (see `ensure_capacity`), since a mapping's length is fixed at creation.
    mmap: MmapMut,
    // Kept open so the mapping stays backed and so the file can be resized
    // with `set_len`.
    file: File,
}

impl Pager {
    /// Opens (and, via `open_and_resize`, creates/sizes) the database file at
    /// `path` and memory-maps it read-write.
    ///
    /// # Errors
    /// Returns a [`DbError`] if the file cannot be opened, resized, or mapped.
    pub fn new(path: &str) -> Result<Self, DbError> {
        let file = open_and_resize(path)?;
        // SAFETY: the mapping is backed by a file this `Pager` owns for the
        // mapping's entire lifetime. Per the memmap2 contract, callers must
        // not mutate the underlying file through other handles while the
        // map is live.
        let mmap = unsafe { MmapMut::map_mut(&file)? };
        Ok(Self { mmap, file })
    }

    /// Grows the backing file and remaps it so that byte offsets
    /// `[0, required_end)` are addressable through `self.mmap`.
    ///
    /// Growth is amortized: the file is extended to the next multiple of a
    /// 100-page chunk (never below one full chunk), so repeated single-page
    /// writes don't pay a `set_len` + remap each time.
    fn ensure_capacity(&mut self, required_end: usize) -> Result<(), DbError> {
        if required_end <= self.mmap.len() {
            return Ok(());
        }

        let grow_chunk = PAGE_SIZE * 100;
        let current_len = self.file.metadata()?.len() as usize;

        // Target at least the required end, never shrink below the current
        // on-disk size, and never go below one full chunk.
        let target = required_end.max(current_len).max(grow_chunk);
        // Round `target` up to the next multiple of `grow_chunk`, checked so
        // a pathological `required_end` near usize::MAX errors instead of
        // overflowing. (The previous loop-based growth could also leave the
        // file at a non-chunk-aligned length when the on-disk size wasn't
        // chunk-aligned.)
        let new_len = target
            .checked_add(grow_chunk - 1)
            .ok_or(DbError::PageOutOfBounds)?
            / grow_chunk
            * grow_chunk;

        self.file.set_len(new_len as u64)?;
        // The existing mapping still has the old length; remap to expose the
        // newly extended region.
        // SAFETY: same contract as in `new` — the file is exclusively owned
        // by this `Pager` while the map is live.
        self.mmap = unsafe { MmapMut::map_mut(&self.file)? };
        Ok(())
    }

    /// Returns a read-only view of page `page_id`.
    ///
    /// # Errors
    /// [`DbError::PageOutOfBounds`] if the page lies beyond the mapped
    /// region, or if the byte-offset computation would overflow `usize`
    /// (possible for large `page_id` on 32-bit targets).
    pub fn get_page(&self, page_id: u32) -> Result<&[u8], DbError> {
        let offset = (page_id as usize)
            .checked_mul(PAGE_SIZE)
            .ok_or(DbError::PageOutOfBounds)?;
        let end = offset
            .checked_add(PAGE_SIZE)
            .ok_or(DbError::PageOutOfBounds)?;
        self.mmap.get(offset..end).ok_or(DbError::PageOutOfBounds)
    }

    /// Writes `data` at the start of page `page_id`, growing the file first
    /// if needed.
    ///
    /// If `data` is shorter than `PAGE_SIZE`, the remaining bytes of the page
    /// keep their previous contents.
    ///
    /// # Errors
    /// `DbError::Other` if `data` exceeds one page; `DbError::PageOutOfBounds`
    /// if the offset computation overflows; any error from growing/remapping.
    pub fn write_page(&mut self, page_id: u32, data: &[u8]) -> Result<(), DbError> {
        if data.len() > PAGE_SIZE {
            return Err(DbError::Other("Data too large for page".into()));
        }

        let offset = (page_id as usize)
            .checked_mul(PAGE_SIZE)
            .ok_or(DbError::PageOutOfBounds)?;
        let page_end = offset
            .checked_add(PAGE_SIZE)
            .ok_or(DbError::PageOutOfBounds)?;

        // Ensure the whole page is addressable, even for a partial write.
        self.ensure_capacity(page_end)?;

        self.mmap[offset..offset + data.len()].copy_from_slice(data);
        Ok(())
    }

    /// Flushes outstanding changes in the mapping to disk (msync).
    ///
    /// # Errors
    /// Propagates the underlying I/O error from the flush.
    pub fn flush(&mut self) -> Result<(), DbError> {
        self.mmap.flush()?;
        Ok(())
    }
}