use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::path::{Path, PathBuf};
use crate::error::{Result, SQLRiteError};
use crate::sql::pager::file::FileStorage;
use crate::sql::pager::header::{DbHeader, decode_header, encode_header};
use crate::sql::pager::page::PAGE_SIZE;
use crate::sql::pager::wal::Wal;
/// Derive the WAL sidecar path for a main database file by appending
/// `-wal` to the full file name (extension included), e.g.
/// `db.sqlrite` -> `db.sqlrite-wal`.
pub(crate) fn wal_path_for(main: &Path) -> PathBuf {
    let mut raw = main.as_os_str().to_os_string();
    raw.push("-wal");
    raw.into()
}
/// How a `Pager` was opened. With the `file-locks` feature enabled,
/// `ReadWrite` takes an exclusive OS lock and `ReadOnly` a shared one.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessMode {
// Full access: staging, commit, and checkpoint are all permitted.
ReadWrite,
// Mutating operations are rejected (see `Pager::require_writable`).
ReadOnly,
}
/// Take an inter-process advisory lock on the open database file:
/// exclusive for read-write, shared for read-only. On contention the
/// error message names the path and explains which kind of holder is
/// blocking us.
#[cfg(feature = "file-locks")]
pub(crate) fn acquire_lock(file: &File, path: &Path, mode: AccessMode) -> Result<()> {
    use fs2::FileExt;
    let attempt = if mode == AccessMode::ReadWrite {
        file.try_lock_exclusive()
    } else {
        file.try_lock_shared()
    };
    if let Err(e) = attempt {
        let how = if mode == AccessMode::ReadWrite {
            "is in use (another process has it open; readers and writers are exclusive)"
        } else {
            "is locked for writing by another process (read-only open blocked until the writer closes)"
        };
        return Err(SQLRiteError::General(format!(
            "database '{}' {how} ({e})",
            path.display()
        )));
    }
    Ok(())
}
/// No-op fallback when the `file-locks` feature is disabled: every open
/// succeeds without any inter-process locking.
#[cfg(not(feature = "file-locks"))]
pub(crate) fn acquire_lock(_file: &File, _path: &Path, _mode: AccessMode) -> Result<()> {
Ok(())
}
// Once a commit leaves the WAL holding at least this many frames,
// `Pager::commit` triggers an automatic checkpoint.
const AUTO_CHECKPOINT_THRESHOLD_FRAMES: usize = 100;
/// Page cache plus write-ahead-log coordinator for one database file.
///
/// Read precedence (see `read_page`): `staged` (uncommitted edits),
/// then `wal_cache` (committed but not yet checkpointed), then
/// `on_disk` (main-file contents loaded at open).
pub struct Pager {
// Handle to the main database file (seek/read/write/truncate).
storage: FileStorage,
// Header as of the last commit; authoritative page_count/schema root.
current_header: DbHeader,
// Full snapshot of the main file's data pages, keyed by page number.
on_disk: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
// Pages written since the last commit; discarded on clear_staged().
staged: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
// Committed WAL frames not yet folded into the main file (page 0 holds
// the encoded header).
wal_cache: HashMap<u32, Box<[u8; PAGE_SIZE]>>,
// Always Some for read-write pagers; None only for read-only opens
// with no WAL sidecar on disk.
wal: Option<Wal>,
// Mode this pager was opened with; gates commit/checkpoint.
access_mode: AccessMode,
}
impl Pager {
/// Open an existing database read-write (exclusive lock).
pub fn open(path: &Path) -> Result<Self> {
Self::open_with_mode(path, AccessMode::ReadWrite)
}
/// Open an existing database read-only (shared lock); mutations are rejected.
pub fn open_read_only(path: &Path) -> Result<Self> {
Self::open_with_mode(path, AccessMode::ReadOnly)
}
/// Open `path` in the given mode: lock the file, load every data page
/// into `on_disk`, replay committed WAL frames into `wal_cache`, and
/// reconstruct the effective header (WAL state wins over the main file).
pub fn open_with_mode(path: &Path, mode: AccessMode) -> Result<Self> {
let file = match mode {
AccessMode::ReadWrite => OpenOptions::new().read(true).write(true).open(path)?,
AccessMode::ReadOnly => OpenOptions::new().read(true).open(path)?,
};
acquire_lock(&file, path, mode)?;
let mut storage = FileStorage::new(file);
let mut header = storage.read_header()?;
// Page 0 is the header; cache data pages 1..page_count from the main file.
let mut on_disk = HashMap::with_capacity(header.page_count.saturating_sub(1) as usize);
for page_num in 1..header.page_count {
let buf = read_raw_page(&mut storage, page_num)?;
on_disk.insert(page_num, buf);
}
let wal_path = wal_path_for(path);
let (wal_handle, wal_cache) = match mode {
AccessMode::ReadWrite => {
// Read-write pagers always end up with a WAL handle: open the
// existing sidecar or create a fresh (header-only) one.
let mut wal = if wal_path.exists() {
Wal::open_with_mode(&wal_path, mode)?
} else {
Wal::create(&wal_path)?
};
let mut cache: HashMap<u32, Box<[u8; PAGE_SIZE]>> = HashMap::new();
// Only committed frames are replayed; orphan dirty frames stay invisible.
wal.load_committed_into(&mut cache)?;
(Some(wal), cache)
}
AccessMode::ReadOnly => {
// Read-only opens must not create files; a missing WAL is fine.
if wal_path.exists() {
let mut wal = Wal::open_with_mode(&wal_path, mode)?;
let mut cache: HashMap<u32, Box<[u8; PAGE_SIZE]>> = HashMap::new();
wal.load_committed_into(&mut cache)?;
(Some(wal), cache)
} else {
(None, HashMap::new())
}
}
};
// The WAL's committed header frame (page 0) supersedes the main-file
// header; failing that, fall back to the last committed page count
// recorded in the WAL, if any.
if let Some(page0) = wal_cache.get(&0) {
header = decode_header(page0.as_ref())?;
} else if let Some(w) = wal_handle.as_ref()
&& let Some(committed_pc) = w.last_commit_page_count()
{
header.page_count = committed_pc;
}
Ok(Self {
storage,
current_header: header,
on_disk,
staged: HashMap::new(),
wal_cache,
wal: wal_handle,
access_mode: mode,
})
}
/// Create a brand-new database at `path` (truncating any existing file):
/// header + an empty master table on page 1, plus a fresh WAL sidecar.
pub fn create(path: &Path) -> Result<Self> {
use crate::sql::pager::page::{PAGE_HEADER_SIZE, PageType};
use crate::sql::pager::table_page::TablePage;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(path)?;
acquire_lock(&file, path, AccessMode::ReadWrite)?;
let mut storage = FileStorage::new(file);
let empty_master = TablePage::empty();
// Hand-assemble page 1: page-type byte, two zeroed header fields,
// then the empty master-table payload.
let mut page1 = Box::new([0u8; PAGE_SIZE]);
page1[0] = PageType::TableLeaf as u8;
page1[1..5].copy_from_slice(&0u32.to_le_bytes());
page1[5..7].copy_from_slice(&0u16.to_le_bytes());
page1[PAGE_HEADER_SIZE..].copy_from_slice(empty_master.as_bytes());
// page_count = 2: page 0 (header) + page 1 (schema root).
let header = DbHeader {
page_count: 2,
schema_root_page: 1,
};
storage.seek_to(0)?;
storage.write_all(&encode_header(&header))?;
storage.write_all(page1.as_ref())?;
storage.flush()?;
let wal = Wal::create(&wal_path_for(path))?;
let mut on_disk = HashMap::new();
on_disk.insert(1, page1);
Ok(Self {
storage,
current_header: header,
on_disk,
staged: HashMap::new(),
wal_cache: HashMap::new(),
wal: Some(wal),
access_mode: AccessMode::ReadWrite,
})
}
/// Current (last committed) header.
pub fn header(&self) -> DbHeader {
self.current_header
}
/// Mode this pager was opened with.
pub fn access_mode(&self) -> AccessMode {
self.access_mode
}
/// Guard: error out of mutating operations on a read-only pager.
fn require_writable(&self, op: &'static str) -> Result<()> {
if self.access_mode == AccessMode::ReadOnly {
return Err(SQLRiteError::General(format!(
"cannot {op}: database is opened read-only"
)));
}
Ok(())
}
/// Read a page, newest view first: staged, then WAL cache, then main
/// file. Note that staged pages are visible even past the committed
/// page_count (new pages staged before the commit that grows the file);
/// unstaged pages at or beyond page_count read as `None`.
pub fn read_page(&self, page_num: u32) -> Option<&[u8; PAGE_SIZE]> {
if let Some(b) = self.staged.get(&page_num) {
return Some(b);
}
if page_num >= self.current_header.page_count {
return None;
}
if let Some(b) = self.wal_cache.get(&page_num) {
return Some(b.as_ref());
}
self.on_disk.get(&page_num).map(|b| b.as_ref())
}
/// Stage a full page image for the next commit (overwrites any prior
/// staging of the same page).
pub fn stage_page(&mut self, page_num: u32, bytes: [u8; PAGE_SIZE]) {
self.staged.insert(page_num, Box::new(bytes));
}
/// Drop all staged pages without committing them.
pub fn clear_staged(&mut self) {
self.staged.clear();
}
/// Commit the staged pages under `new_header`. Appends one WAL frame
/// per page that actually differs from the current committed view, then
/// a final header frame (page 0) that marks the commit point. Returns
/// the number of data frames written; may trigger an auto-checkpoint.
pub fn commit(&mut self, new_header: DbHeader) -> Result<usize> {
self.require_writable("commit")?;
let wal = self
.wal
.as_mut()
.expect("read-write Pager must carry a WAL handle");
let staged = std::mem::take(&mut self.staged);
// Diff against the committed view (WAL cache first, else main file)
// so identical re-stagings cost nothing.
let mut dirty: Vec<(u32, Box<[u8; PAGE_SIZE]>)> = staged
.into_iter()
.filter(|(n, bytes)| {
let existing = self.wal_cache.get(n).or_else(|| self.on_disk.get(n));
match existing {
Some(e) => e.as_ref() != bytes.as_ref(),
None => true,
}
})
.collect();
// Deterministic frame order for reproducible WAL contents.
dirty.sort_by_key(|(n, _)| *n);
let writes = dirty.len();
for (n, bytes) in &dirty {
wal.append_frame(*n, bytes.as_ref(), None)?;
}
// The header frame carries the committed page_count and is appended
// last: frames before it without a following header frame are orphans.
let page0 = encode_header(&new_header);
wal.append_frame(0, &page0, Some(new_header.page_count))?;
let frame_count_after_commit = wal.frame_count();
// Only after the WAL append succeeds do we publish into wal_cache.
for (n, bytes) in dirty {
self.wal_cache.insert(n, bytes);
}
self.wal_cache.insert(0, Box::new(page0));
self.current_header = new_header;
if frame_count_after_commit >= AUTO_CHECKPOINT_THRESHOLD_FRAMES {
self.checkpoint()?;
}
Ok(writes)
}
/// Fold the committed WAL contents back into the main file, truncate
/// the main file to the committed page_count, and reset the WAL to
/// header-only. Returns the number of data pages written to the main
/// file; a no-op (Ok(0)) when both WAL and cache are already empty.
pub fn checkpoint(&mut self) -> Result<usize> {
self.require_writable("checkpoint")?;
let wal_frame_count = self.wal.as_ref().map(|w| w.frame_count()).unwrap_or(0);
if wal_frame_count == 0 && self.wal_cache.is_empty() {
return Ok(0);
}
let page_count = self.current_header.page_count;
// Skip page 0 (the header is written separately below) and any cached
// pages that fell beyond the committed page_count after a shrink.
let mut pages: Vec<u32> = self
.wal_cache
.keys()
.copied()
.filter(|&n| n != 0 && n < page_count)
.collect();
pages.sort_unstable();
let written = pages.len();
for page_num in &pages {
let bytes = self
.wal_cache
.get(page_num)
.expect("iterated key must resolve");
self.storage
.seek_to((*page_num as u64) * (PAGE_SIZE as u64))?;
self.storage.write_all(bytes.as_ref())?;
}
if written > 0 {
self.storage.flush()?;
}
// Data pages land before the header; the WAL is truncated only after
// the main file is fully flushed, so a crash in between is recoverable
// by WAL replay on reopen.
self.storage.write_header(&self.current_header)?;
self.storage.truncate_to_pages(page_count)?;
self.storage.flush()?;
self.wal
.as_mut()
.expect("read-write Pager must carry a WAL handle")
.truncate()?;
// Promote checkpointed pages into the main-file cache; drop anything
// past the (possibly shrunk) page_count.
for (n, bytes) in self.wal_cache.drain().filter(|(n, _)| *n != 0) {
if n < page_count {
self.on_disk.insert(n, bytes);
}
}
self.on_disk.retain(|&n, _| n < page_count);
Ok(written)
}
}
/// Read page `page_num` directly from the main database file into a
/// fresh heap-allocated buffer. Errors if the file is shorter than the
/// requested page.
fn read_raw_page(storage: &mut FileStorage, page_num: u32) -> Result<Box<[u8; PAGE_SIZE]>> {
    let mut page = Box::new([0u8; PAGE_SIZE]);
    let offset = u64::from(page_num) * PAGE_SIZE as u64;
    storage.seek_to(offset)?;
    storage.read_exact(page.as_mut())?;
    Ok(page)
}
impl std::fmt::Debug for Pager {
    /// Summarize the pager without dumping page contents: mode, header
    /// fields, and the sizes of each cache layer plus the WAL frame count.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let wal_frames = self.wal.as_ref().map_or(0, |w| w.frame_count());
        let mut dbg = f.debug_struct("Pager");
        dbg.field("access_mode", &self.access_mode);
        dbg.field("page_count", &self.current_header.page_count);
        dbg.field("schema_root_page", &self.current_header.schema_root_page);
        dbg.field("cached_pages", &self.on_disk.len());
        dbg.field("staged_pages", &self.staged.len());
        dbg.field("wal_pages", &self.wal_cache.len());
        dbg.field("wal_frames", &wal_frames);
        dbg.finish()
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Unique temp path per test run (pid + nanosecond timestamp) so
// parallel test threads never collide on a database file.
fn tmp_path(name: &str) -> std::path::PathBuf {
let mut p = std::env::temp_dir();
let pid = std::process::id();
let nanos = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_nanos())
.unwrap_or(0);
p.push(format!("sqlrite-pager-{pid}-{nanos}-{name}.sqlrite"));
p
}
// Best-effort removal of the main file and its WAL sidecar.
fn cleanup(path: &Path) {
let _ = std::fs::remove_file(path);
let _ = std::fs::remove_file(wal_path_for(path));
}
// A zeroed page whose first byte tags it for identification in asserts.
fn make_page(first_byte: u8) -> [u8; PAGE_SIZE] {
let mut buf = [0u8; PAGE_SIZE];
buf[0] = first_byte;
buf
}
// create() writes a valid header that a subsequent open() reads back.
#[test]
fn create_then_open_round_trips() {
let path = tmp_path("create_open");
{
let p = Pager::create(&path).unwrap();
assert_eq!(p.header().page_count, 2);
assert_eq!(p.header().schema_root_page, 1);
}
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.header().page_count, 2);
cleanup(&path);
}
// create() also materializes an empty (header-only) WAL sidecar file.
#[test]
fn create_spawns_wal_sidecar() {
use crate::sql::pager::wal::WAL_HEADER_SIZE;
let path = tmp_path("wal_sidecar");
let _p = Pager::create(&path).unwrap();
let wal = wal_path_for(&path);
assert!(wal.exists(), "WAL sidecar should exist after create");
let len = std::fs::metadata(&wal).unwrap().len();
assert_eq!(
len, WAL_HEADER_SIZE as u64,
"fresh WAL should be header-only"
);
cleanup(&path);
}
// commit() diffs staged pages against the committed view and only
// appends frames for pages whose bytes actually changed.
#[test]
fn commit_writes_only_dirty_pages() {
let path = tmp_path("diff");
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0xAA));
p.stage_page(3, make_page(0xBB));
p.stage_page(4, make_page(0xCC));
let writes = p
.commit(DbHeader {
page_count: 5,
schema_root_page: 1,
})
.unwrap();
assert_eq!(writes, 3);
// Re-stage two identical pages and one changed page.
p.stage_page(2, make_page(0xAA));
p.stage_page(3, make_page(0xBB));
p.stage_page(4, make_page(0xDD));
let writes = p
.commit(DbHeader {
page_count: 5,
schema_root_page: 1,
})
.unwrap();
assert_eq!(writes, 1, "only the changed page should have been written");
drop(p);
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.read_page(2).unwrap()[0], 0xAA);
assert_eq!(p2.read_page(3).unwrap()[0], 0xBB);
assert_eq!(p2.read_page(4).unwrap()[0], 0xDD);
cleanup(&path);
}
// The exclusive read-write lock rejects a second opener until the
// first pager is dropped (requires the file-locks feature to matter).
#[test]
fn second_pager_on_same_file_is_rejected() {
let path = tmp_path("lock_contention");
let _first = Pager::create(&path).unwrap();
let second = Pager::open(&path);
assert!(second.is_err(), "expected lock-contention error, got Ok");
let msg = format!("{}", second.unwrap_err());
assert!(
msg.contains("in use"),
"error message should signal lock contention; got: {msg}"
);
drop(_first);
let third = Pager::open(&path);
assert!(third.is_ok(), "reopen after drop should succeed: {third:?}");
cleanup(&path);
}
// Commits only touch the WAL: the main file stays frozen, and a commit
// with a smaller page_count hides dropped pages from read_page.
#[test]
fn commit_leaves_main_file_untouched_and_shrink_hides_dropped_pages() {
let path = tmp_path("shrink");
let mut p = Pager::create(&path).unwrap();
let main_size_after_create = std::fs::metadata(&path).unwrap().len();
p.stage_page(2, make_page(1));
p.stage_page(3, make_page(2));
p.stage_page(4, make_page(3));
p.commit(DbHeader {
page_count: 5,
schema_root_page: 1,
})
.unwrap();
assert_eq!(
std::fs::metadata(&path).unwrap().len(),
main_size_after_create,
"main file must stay frozen across commits"
);
let wal_size = std::fs::metadata(wal_path_for(&path)).unwrap().len();
assert!(
wal_size > 32,
"WAL should contain frames after a commit, got size {wal_size}"
);
// Shrink from 5 pages to 3; page 4 must disappear from reads.
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
assert!(p.read_page(4).is_none());
assert_eq!(p.read_page(2).unwrap()[0], 1);
drop(p);
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.header().page_count, 3);
assert!(p2.read_page(4).is_none());
cleanup(&path);
}
// Committed WAL frames are replayed into the cache when reopening.
#[test]
fn wal_replay_on_reopen_restores_committed_state() {
let path = tmp_path("wal_replay");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x11));
p.stage_page(3, make_page(0x22));
p.commit(DbHeader {
page_count: 4,
schema_root_page: 1,
})
.unwrap();
}
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.header().page_count, 4);
assert_eq!(p2.read_page(2).unwrap()[0], 0x11);
assert_eq!(p2.read_page(3).unwrap()[0], 0x22);
cleanup(&path);
}
// A data frame appended after the last header frame (i.e. with no
// commit marker following it) must not be replayed on reopen.
#[test]
fn orphan_dirty_frame_in_wal_is_invisible_on_reopen() {
let path = tmp_path("orphan_dirty");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0xCC));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
{
// Simulate a crash mid-transaction: append a raw uncommitted frame.
let mut w = crate::sql::pager::wal::Wal::open(&wal_path_for(&path)).unwrap();
let mut other = Box::new([0u8; PAGE_SIZE]);
other[0] = 0x99;
w.append_frame(2, &other, None).unwrap();
}
let p = Pager::open(&path).unwrap();
assert_eq!(
p.read_page(2).unwrap()[0],
0xCC,
"orphan dirty frame must not shadow the last committed page"
);
cleanup(&path);
}
// Re-staging identical bytes produces zero data frames on commit.
#[test]
fn two_commits_only_stage_the_delta() {
let path = tmp_path("diff_delta");
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x77));
let first = p
.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
assert_eq!(first, 1);
p.stage_page(2, make_page(0x77));
let second = p
.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
assert_eq!(second, 0, "no data frames should be re-appended");
cleanup(&path);
}
// checkpoint() writes cached pages into the main file, updates its
// header, and resets the WAL back to header-only size.
#[test]
fn explicit_checkpoint_folds_wal_into_main_file_and_truncates_wal() {
use crate::sql::pager::wal::WAL_HEADER_SIZE;
let path = tmp_path("ckpt_explicit");
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0xA1));
p.stage_page(3, make_page(0xB2));
p.commit(DbHeader {
page_count: 4,
schema_root_page: 1,
})
.unwrap();
let wal = wal_path_for(&path);
assert!(std::fs::metadata(&wal).unwrap().len() > WAL_HEADER_SIZE as u64);
let written = p.checkpoint().unwrap();
assert_eq!(written, 2, "both data pages should flush to main file");
let wal_len = std::fs::metadata(&wal).unwrap().len();
assert_eq!(wal_len, WAL_HEADER_SIZE as u64);
let main_len = std::fs::metadata(&path).unwrap().len();
assert_eq!(main_len, 4 * PAGE_SIZE as u64);
drop(p);
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.header().page_count, 4);
assert_eq!(p2.read_page(2).unwrap()[0], 0xA1);
assert_eq!(p2.read_page(3).unwrap()[0], 0xB2);
cleanup(&path);
}
// A second checkpoint with no intervening commit is a no-op.
#[test]
fn checkpoint_is_idempotent() {
let path = tmp_path("ckpt_idempotent");
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x42));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
let first = p.checkpoint().unwrap();
assert_eq!(first, 1);
let second = p.checkpoint().unwrap();
assert_eq!(second, 0, "second checkpoint should be a no-op");
cleanup(&path);
}
// Checkpointing after a shrinking commit truncates the main file down
// to the new page_count.
#[test]
fn checkpoint_with_shrink_truncates_main_file() {
let path = tmp_path("ckpt_shrink");
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(1));
p.stage_page(3, make_page(2));
p.stage_page(4, make_page(3));
p.commit(DbHeader {
page_count: 5,
schema_root_page: 1,
})
.unwrap();
p.checkpoint().unwrap();
assert_eq!(
std::fs::metadata(&path).unwrap().len(),
5 * PAGE_SIZE as u64
);
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
p.checkpoint().unwrap();
assert_eq!(
std::fs::metadata(&path).unwrap().len(),
3 * PAGE_SIZE as u64,
"main file should shrink to new page_count after checkpoint"
);
assert!(p.read_page(4).is_none());
cleanup(&path);
}
// Enough commits (2 frames each: data + header) cross the frame
// threshold and trigger an automatic checkpoint inside commit().
#[test]
fn auto_checkpoint_fires_past_frame_threshold() {
use crate::sql::pager::wal::WAL_HEADER_SIZE;
let path = tmp_path("ckpt_auto");
let mut p = Pager::create(&path).unwrap();
let commits_needed = AUTO_CHECKPOINT_THRESHOLD_FRAMES.div_ceil(2);
for i in 0..commits_needed {
p.stage_page(2, make_page((i & 0xff) as u8));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
let wal_len = std::fs::metadata(wal_path_for(&path)).unwrap().len();
assert_eq!(
wal_len, WAL_HEADER_SIZE as u64,
"auto-checkpoint should have truncated the WAL"
);
let expected = ((commits_needed - 1) & 0xff) as u8;
assert_eq!(p.read_page(2).unwrap()[0], expected);
cleanup(&path);
}
// Shared locks: multiple read-only pagers may hold the file at once.
#[test]
fn two_read_only_openers_coexist() {
let path = tmp_path("ro_coexist");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x55));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
let reader1 = Pager::open_read_only(&path).unwrap();
let reader2 = Pager::open_read_only(&path).unwrap();
assert_eq!(reader1.read_page(2).unwrap()[0], 0x55);
assert_eq!(reader2.read_page(2).unwrap()[0], 0x55);
assert_eq!(reader1.access_mode(), AccessMode::ReadOnly);
cleanup(&path);
}
// Exclusive vs shared lock interaction, and the distinct error
// messages each direction of contention produces.
#[test]
fn read_write_blocks_read_only_and_vice_versa() {
let path = tmp_path("rw_vs_ro");
let _writer = Pager::create(&path).unwrap();
let reader_attempt = Pager::open_read_only(&path);
assert!(reader_attempt.is_err());
let msg = format!("{}", reader_attempt.unwrap_err());
assert!(
msg.contains("locked for writing"),
"read-only open while writer holds lock should mention writer; got: {msg}"
);
drop(_writer);
let _reader = Pager::open_read_only(&path).unwrap();
let writer_attempt = Pager::open(&path);
assert!(writer_attempt.is_err());
let msg = format!("{}", writer_attempt.unwrap_err());
assert!(
msg.contains("in use"),
"read-write open while reader holds lock should mention contention; got: {msg}"
);
cleanup(&path);
}
// commit() and checkpoint() on a read-only pager fail with a clear
// "read-only" error, while reads still work.
#[test]
fn read_only_pager_rejects_mutations() {
let path = tmp_path("ro_rejects");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x33));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
let mut ro = Pager::open_read_only(&path).unwrap();
let commit_err = ro
.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap_err();
assert!(
format!("{commit_err}").contains("read-only"),
"commit on RO pager should surface 'read-only'; got: {commit_err}"
);
let ckpt_err = ro.checkpoint().unwrap_err();
assert!(
format!("{ckpt_err}").contains("read-only"),
"checkpoint on RO pager should surface 'read-only'; got: {ckpt_err}"
);
assert_eq!(ro.read_page(2).unwrap()[0], 0x33);
cleanup(&path);
}
// A read-only open must not require (or create) the WAL sidecar.
#[test]
fn read_only_open_without_wal_sidecar_succeeds() {
let path = tmp_path("ro_no_wal");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0x44));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
p.checkpoint().unwrap();
}
std::fs::remove_file(wal_path_for(&path)).unwrap();
let ro = Pager::open_read_only(&path).unwrap();
assert_eq!(ro.read_page(2).unwrap()[0], 0x44);
assert!(!wal_path_for(&path).exists());
cleanup(&path);
}
// Simulates a crash where a checkpoint wrote data pages but not the
// header: the WAL still holds the commit, so reopen recovers via replay.
#[test]
fn reopen_after_crash_between_data_write_and_header_write_recovers_via_wal() {
use std::io::{Seek, SeekFrom, Write};
let path = tmp_path("ckpt_crash_mid_flush");
{
let mut p = Pager::create(&path).unwrap();
p.stage_page(2, make_page(0xEE));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
{
// Hand-write page 2 into the main file, bypassing the Pager, as if
// a checkpoint died right after the data write.
let mut f = std::fs::OpenOptions::new().write(true).open(&path).unwrap();
f.seek(SeekFrom::Start(2 * PAGE_SIZE as u64)).unwrap();
f.write_all(&make_page(0xEE)).unwrap();
f.sync_all().unwrap();
}
let p2 = Pager::open(&path).unwrap();
assert_eq!(p2.header().page_count, 3);
assert_eq!(p2.read_page(2).unwrap()[0], 0xEE);
cleanup(&path);
}
// The WAL carries frames right up to the threshold-crossing commit,
// then that commit's auto-checkpoint empties it.
#[test]
fn auto_checkpoint_crosses_threshold_mid_loop() {
let path = tmp_path("ckpt_threshold_crossing");
let mut p = Pager::create(&path).unwrap();
let commits_to_cross = AUTO_CHECKPOINT_THRESHOLD_FRAMES.div_ceil(2);
for i in 0..commits_to_cross - 1 {
p.stage_page(2, make_page((i & 0xff) as u8));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
}
let pre = std::fs::metadata(wal_path_for(&path)).unwrap().len();
assert!(
pre > crate::sql::pager::wal::WAL_HEADER_SIZE as u64,
"WAL should still carry frames right before the crossing commit"
);
p.stage_page(2, make_page(0xff));
p.commit(DbHeader {
page_count: 3,
schema_root_page: 1,
})
.unwrap();
let post = std::fs::metadata(wal_path_for(&path)).unwrap().len();
assert_eq!(
post,
crate::sql::pager::wal::WAL_HEADER_SIZE as u64,
"WAL must be header-only right after the threshold-crossing commit"
);
cleanup(&path);
}
}