use std::io::Write;
use colored::Colorize;
use serde::Serialize;
use crate::cli::{wprint, wprintln};
use crate::innodb::checksum;
use crate::innodb::compression;
use crate::innodb::encryption;
use crate::innodb::health::compute_fill_factor;
use crate::innodb::index::{FsegHeader, IndexHeader, SystemRecords};
use crate::innodb::lob::{BlobPageHeader, LobChainInfo, LobFirstPageHeader};
use crate::innodb::page::{FilHeader, FspHeader};
use crate::innodb::page_types::PageType;
use crate::innodb::record::walk_compact_records;
use crate::innodb::tablespace::Tablespace;
use crate::innodb::undo::{UndoPageHeader, UndoSegmentHeader};
use crate::util::hex::format_offset;
use crate::IdbError;
/// Options controlling the `pages` subcommand: what to inspect and how the
/// result is rendered.
pub struct PagesOptions {
    /// Path to the tablespace (.ibd) file to inspect.
    pub file: String,
    /// Inspect only this page number instead of scanning the whole file.
    pub page: Option<u64>,
    /// Add explanatory / diagnostic detail to the output.
    pub verbose: bool,
    /// Include never-written pages (zero checksum, ALLOCATED page type),
    /// which are skipped by default.
    pub show_empty: bool,
    /// Print one summary line per page instead of the full detail view.
    pub list_mode: bool,
    /// Only show pages whose type matches this filter
    /// (see `matches_page_type_filter` for accepted values).
    pub filter_type: Option<String>,
    /// Page size passed to `open_tablespace`; presumably overrides
    /// auto-detection when set — TODO confirm against open_tablespace.
    pub page_size: Option<u32>,
    /// Emit JSON instead of human-readable text.
    pub json: bool,
    /// Path to a keyring file used to set up decryption for encrypted
    /// tablespaces (see `setup_decryption`).
    pub keyring: Option<String>,
    /// Flag forwarded to `open_tablespace`; presumably selects
    /// memory-mapped I/O — TODO confirm.
    pub mmap: bool,
    /// Include delete-marked record statistics for INDEX pages.
    pub deleted: bool,
    /// Emit CSV instead of human-readable text.
    pub csv: bool,
    /// Walk and report LOB page chains for BLOB/LOB start pages.
    pub lob_chain: bool,
}
/// JSON representation of one page, as emitted by `execute_json`.
///
/// All `Option` fields use `skip_serializing_if`, so `None` sections are
/// omitted from the output entirely rather than serialized as `null`.
#[derive(Serialize)]
struct PageDetailJson {
    page_number: u64,
    /// Parsed FIL header (present for every emitted page).
    header: FilHeader,
    page_type_name: String,
    page_type_description: String,
    /// Absolute byte offset of the page's first byte in the file.
    byte_start: u64,
    /// Absolute byte offset one past the page's last byte.
    byte_end: u64,
    /// INDEX pages only.
    #[serde(skip_serializing_if = "Option::is_none")]
    index_header: Option<IndexHeader>,
    /// Page 0 only.
    #[serde(skip_serializing_if = "Option::is_none")]
    fsp_header: Option<FspHeader>,
    /// Fill factor as a fraction rounded to 4 decimals (INDEX pages only).
    #[serde(skip_serializing_if = "Option::is_none")]
    fill_factor: Option<f64>,
    /// Populated only for INDEX pages when `--deleted` is requested.
    #[serde(skip_serializing_if = "Option::is_none")]
    delete_marked_count: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    total_record_count: Option<usize>,
    /// Percentage (0-100, two decimals) of records that are delete-marked.
    #[serde(skip_serializing_if = "Option::is_none")]
    delete_marked_pct: Option<f64>,
    /// Populated only with `--lob-chain` on LOB/BLOB chain-start pages.
    #[serde(skip_serializing_if = "Option::is_none")]
    lob_chain: Option<LobChainInfo>,
}
/// Entry point for the `pages` subcommand.
///
/// Dispatches to the JSON or CSV renderers when requested; otherwise prints
/// the human-readable report — either a single page (`--page`) or a scan of
/// every page in the tablespace, as one-line summaries in list mode or full
/// per-page detail otherwise.
pub fn execute(opts: &PagesOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let mut ts = crate::cli::open_tablespace(&opts.file, opts.page_size, opts.mmap)?;
    // Decryption must be configured before any page reads.
    if let Some(ref keyring_path) = opts.keyring {
        crate::cli::setup_decryption(&mut ts, keyring_path)?;
    }
    let page_size = ts.page_size();
    // Machine-readable formats bypass the text renderer entirely.
    if opts.json {
        return execute_json(opts, &mut ts, page_size, writer);
    }
    if opts.csv {
        return execute_csv(opts, &mut ts, page_size, writer);
    }
    // Single-page mode: always the full detail view (list mode does not
    // apply to a single page).
    if let Some(page_num) = opts.page {
        let page_data = ts.read_page(page_num)?;
        print_full_page(
            &page_data,
            page_num,
            page_size,
            opts.verbose,
            opts.deleted,
            writer,
        )?;
        if opts.lob_chain {
            print_lob_chain_if_applicable(&page_data, page_num, &mut ts, writer)?;
        }
        return Ok(());
    }
    // Full scan: lead with the FSP header from page 0, but only when no
    // type filter is active (the header would be noise in filtered output).
    if opts.filter_type.is_none() {
        let page0 = ts.read_page(0)?;
        if let Some(fsp) = FspHeader::parse(&page0) {
            print_fsp_header_detail(&fsp, &page0, opts.verbose, ts.vendor_info(), writer)?;
        }
    }
    for page_num in 0..ts.page_count() {
        let page_data = ts.read_page(page_num)?;
        let header = match FilHeader::parse(&page_data) {
            Some(h) => h,
            None => continue,
        };
        // Skip never-written pages (zero checksum + ALLOCATED) unless the
        // user asked for them with --show-empty.
        if !opts.show_empty && header.checksum == 0 && header.page_type == PageType::Allocated {
            continue;
        }
        if let Some(ref filter) = opts.filter_type {
            if !matches_page_type_filter(&header.page_type, filter) {
                continue;
            }
        }
        if opts.list_mode {
            print_list_line(&page_data, page_num, page_size, opts.deleted, writer)?;
        } else {
            print_full_page(
                &page_data,
                page_num,
                page_size,
                opts.verbose,
                opts.deleted,
                writer,
            )?;
            if opts.lob_chain {
                print_lob_chain_if_applicable(&page_data, page_num, &mut ts, writer)?;
            }
        }
    }
    Ok(())
}
/// Render the page scan as CSV.
///
/// The column set depends on `--lob-chain`: three extra columns describing
/// the LOB chain are appended when it is enabled. Cells that do not apply to
/// a page are left empty so every row keeps the same arity as the header.
fn execute_csv(
    opts: &PagesOptions,
    ts: &mut Tablespace,
    page_size: u32,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    // Header row first; its shape must match the per-row branches below.
    if opts.lob_chain {
        wprintln!(
            writer,
            "page_number,page_type,byte_start,index_id,fill_factor,lob_chain_type,lob_chain_pages,lob_chain_total_bytes"
        )?;
    } else {
        wprintln!(
            writer,
            "page_number,page_type,byte_start,index_id,fill_factor"
        )?;
    }
    // Either the single requested page or the whole tablespace.
    let range: Box<dyn Iterator<Item = u64>> = if let Some(p) = opts.page {
        Box::new(std::iter::once(p))
    } else {
        Box::new(0..ts.page_count())
    };
    for page_num in range {
        let page_data = ts.read_page(page_num)?;
        let header = match FilHeader::parse(&page_data) {
            Some(h) => h,
            None => continue,
        };
        // Same skip/filter rules as the text renderer in `execute`.
        if !opts.show_empty && header.checksum == 0 && header.page_type == PageType::Allocated {
            continue;
        }
        if let Some(ref filter) = opts.filter_type {
            if !matches_page_type_filter(&header.page_type, filter) {
                continue;
            }
        }
        let pt = header.page_type;
        let byte_start = page_num * page_size as u64;
        // index_id / fill_factor only exist for INDEX pages; blank otherwise.
        // Fill factor is formatted as a fraction with 4 decimal places.
        let (index_id, fill_factor) = if pt == PageType::Index {
            let idx = IndexHeader::parse(&page_data);
            match idx {
                Some(i) => {
                    let ff = compute_fill_factor(i.heap_top, i.garbage, page_size);
                    (Some(i.index_id), Some(format!("{:.4}", ff)))
                }
                None => (None, None),
            }
        } else {
            (None, None)
        };
        if opts.lob_chain {
            // Walk the chain only for page types that can start one; other
            // pages (and failed walks) get empty LOB cells.
            let (lob_type, lob_pages, lob_bytes) = if matches!(
                pt,
                PageType::Blob
                    | PageType::ZBlob
                    | PageType::ZBlob2
                    | PageType::LobFirst
                    | PageType::ZlobFirst
            ) {
                // 10000-page cap guards against corrupt/cyclic chains.
                match crate::innodb::lob::walk_lob_chain(ts, page_num, 10000) {
                    Ok(Some(chain)) => (
                        chain.chain_type,
                        chain.page_count.to_string(),
                        chain.total_data_len.to_string(),
                    ),
                    _ => (String::new(), String::new(), String::new()),
                }
            } else {
                (String::new(), String::new(), String::new())
            };
            wprintln!(
                writer,
                "{},{},{},{},{},{},{},{}",
                page_num,
                crate::cli::csv_escape(pt.name()),
                byte_start,
                index_id.map(|id| id.to_string()).unwrap_or_default(),
                fill_factor.unwrap_or_default(),
                crate::cli::csv_escape(&lob_type),
                lob_pages,
                lob_bytes
            )?;
        } else {
            wprintln!(
                writer,
                "{},{},{},{},{}",
                page_num,
                crate::cli::csv_escape(pt.name()),
                byte_start,
                index_id.map(|id| id.to_string()).unwrap_or_default(),
                fill_factor.unwrap_or_default()
            )?;
        }
    }
    Ok(())
}
/// Render the page scan as pretty-printed JSON: an array of
/// `PageDetailJson`, one element per page that survives the skip/filter
/// rules shared with the other renderers.
fn execute_json(
    opts: &PagesOptions,
    ts: &mut Tablespace,
    page_size: u32,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    let mut pages = Vec::new();
    // Either the single requested page or the whole tablespace.
    let range: Box<dyn Iterator<Item = u64>> = if let Some(p) = opts.page {
        Box::new(std::iter::once(p))
    } else {
        Box::new(0..ts.page_count())
    };
    for page_num in range {
        let page_data = ts.read_page(page_num)?;
        let header = match FilHeader::parse(&page_data) {
            Some(h) => h,
            None => continue,
        };
        // Skip never-written pages unless --show-empty was given.
        if !opts.show_empty && header.checksum == 0 && header.page_type == PageType::Allocated {
            continue;
        }
        if let Some(ref filter) = opts.filter_type {
            if !matches_page_type_filter(&header.page_type, filter) {
                continue;
            }
        }
        let pt = header.page_type;
        let byte_start = page_num * page_size as u64;
        let index_header = if pt == PageType::Index {
            IndexHeader::parse(&page_data)
        } else {
            None
        };
        // Fill factor as a fraction, rounded to 4 decimal places.
        let fill_factor = index_header.as_ref().map(|idx| {
            (compute_fill_factor(idx.heap_top, idx.garbage, page_size) * 10000.0).round() / 10000.0
        });
        // Delete-mark stats only for INDEX pages with --deleted; the
        // percentage is rounded to 2 decimal places.
        let (delete_marked_count, total_record_count, delete_marked_pct) =
            if pt == PageType::Index && opts.deleted {
                let recs = walk_compact_records(&page_data);
                let total = recs.len();
                let deleted = recs.iter().filter(|r| r.header.delete_mark()).count();
                let pct = if total > 0 {
                    (deleted as f64 / total as f64 * 10000.0).round() / 100.0
                } else {
                    0.0
                };
                (Some(deleted), Some(total), Some(pct))
            } else {
                (None, None, None)
            };
        // The FSP header only exists on page 0.
        let fsp_header = if page_num == 0 {
            FspHeader::parse(&page_data)
        } else {
            None
        };
        // LOB chain info only for chain-starting page types, walk capped at
        // 10000 pages; walk errors are swallowed and reported as absent.
        let lob_chain = if opts.lob_chain
            && matches!(
                pt,
                PageType::Blob
                    | PageType::ZBlob
                    | PageType::ZBlob2
                    | PageType::LobFirst
                    | PageType::ZlobFirst
            ) {
            crate::innodb::lob::walk_lob_chain(ts, page_num, 10000)
                .ok()
                .flatten()
        } else {
            None
        };
        pages.push(PageDetailJson {
            page_number: page_num,
            page_type_name: pt.name().to_string(),
            page_type_description: pt.description().to_string(),
            byte_start,
            byte_end: byte_start + page_size as u64,
            header,
            index_header,
            fsp_header,
            fill_factor,
            delete_marked_count,
            total_record_count,
            delete_marked_pct,
            lob_chain,
        });
    }
    let json = serde_json::to_string_pretty(&pages)
        .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
    wprintln!(writer, "{}", json)?;
    Ok(())
}
/// Emit the one-line summary for a single page (used in `--list` mode).
///
/// INDEX pages additionally get their index id, a color-coded fill factor,
/// and (with `show_deleted`) a delete-mark ratio appended to the line.
fn print_list_line(
    page_data: &[u8],
    page_num: u64,
    page_size: u32,
    show_deleted: bool,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    // Pages without a parseable FIL header are silently skipped.
    let header = match FilHeader::parse(page_data) {
        Some(h) => h,
        None => return Ok(()),
    };
    let page_type = header.page_type;
    wprint!(
        writer,
        "-- Page {} - {}: {}",
        page_num,
        page_type.name(),
        page_type.description()
    )?;
    if page_type == PageType::Index {
        if let Some(idx) = IndexHeader::parse(page_data) {
            wprint!(writer, ", Index ID: {}", idx.index_id)?;
            // Color thresholds: green >= 80%, yellow >= 50%, red below.
            let pct = compute_fill_factor(idx.heap_top, idx.garbage, page_size) * 100.0;
            let label = format!("{:.1}%", pct);
            let colored_label = match pct {
                p if p >= 80.0 => label.green(),
                p if p >= 50.0 => label.yellow(),
                _ => label.red(),
            }
            .to_string();
            wprint!(writer, ", Fill: {}", colored_label)?;
            if show_deleted {
                let records = walk_compact_records(page_data);
                let deleted = records.iter().filter(|r| r.header.delete_mark()).count();
                let total = records.len();
                // Only meaningful (and division-safe) when records exist.
                if total > 0 {
                    let ratio = deleted as f64 / total as f64 * 100.0;
                    wprint!(writer, " (del: {}/{}, {:.1}%)", deleted, total, ratio)?;
                }
            }
        }
    }
    wprintln!(
        writer,
        ", Byte Start: {}",
        format_offset(page_num * page_size as u64)
    )?;
    Ok(())
}
/// Print the full multi-section detail view for one page: FIL header,
/// type-specific sections (INDEX / RTREE / BLOB / LOB first page / UNDO),
/// and the FIL trailer (with checksum and LSN validation when `verbose`).
///
/// Fix: the UNDO section previously passed `undo_hdr.page_type.name()` for
/// both placeholders of "Undo Type: {} ({})", printing the same name twice;
/// it is now printed once.
fn print_full_page(
    page_data: &[u8],
    page_num: u64,
    page_size: u32,
    verbose: bool,
    show_deleted: bool,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    let header = match FilHeader::parse(page_data) {
        Some(h) => h,
        None => {
            // Diagnostic goes to stderr so piped/captured output stays clean.
            eprintln!("Could not parse FIL header for page {}", page_num);
            return Ok(());
        }
    };
    let byte_start = page_num * page_size as u64;
    let byte_end = byte_start + page_size as u64;
    let pt = header.page_type;
    wprintln!(writer)?;
    wprintln!(writer, "=== HEADER: Page {}", header.page_number)?;
    wprintln!(writer, "Byte Start: {}", format_offset(byte_start))?;
    wprintln!(
        writer,
        "Page Type: {}\n-- {}: {} - {}",
        pt.as_u16(),
        pt.name(),
        pt.description(),
        pt.usage()
    )?;
    // Prev/next pointers: `has_prev`/`has_next` encode the "no neighbor"
    // sentinel, rendered as "Not used.".
    wprint!(writer, "Prev Page: ")?;
    if !header.has_prev() {
        wprintln!(writer, "Not used.")?;
    } else {
        wprintln!(writer, "{}", header.prev_page)?;
    }
    wprint!(writer, "Next Page: ")?;
    if !header.has_next() {
        wprintln!(writer, "Not used.")?;
    } else {
        wprintln!(writer, "{}", header.next_page)?;
    }
    wprintln!(writer, "LSN: {}", header.lsn)?;
    wprintln!(writer, "Space ID: {}", header.space_id)?;
    wprintln!(writer, "Checksum: {}", header.checksum)?;
    // --- INDEX pages: index header, fill factor, optional delete stats,
    // FSEG headers, and infimum/supremum system records. ---
    if pt == PageType::Index {
        if let Some(idx) = IndexHeader::parse(page_data) {
            wprintln!(writer)?;
            print_index_header(&idx, header.page_number, verbose, writer)?;
            // Color thresholds: green >= 80%, yellow >= 50%, red below.
            let ff = compute_fill_factor(idx.heap_top, idx.garbage, page_size);
            let pct = ff * 100.0;
            let fill_str = if pct >= 80.0 {
                format!("{:.1}%", pct).green().to_string()
            } else if pct >= 50.0 {
                format!("{:.1}%", pct).yellow().to_string()
            } else {
                format!("{:.1}%", pct).red().to_string()
            };
            wprintln!(writer, "Fill Factor: {}", fill_str)?;
            if show_deleted {
                let recs = walk_compact_records(page_data);
                let total = recs.len();
                let deleted = recs.iter().filter(|r| r.header.delete_mark()).count();
                wprintln!(writer)?;
                wprintln!(
                    writer,
                    "=== Delete-Marked Records: Page {}",
                    header.page_number
                )?;
                wprintln!(writer, "Total Records: {}", total)?;
                wprintln!(writer, "Delete-Marked: {}", deleted)?;
                if total > 0 {
                    let del_pct = deleted as f64 / total as f64 * 100.0;
                    wprintln!(writer, "Delete-Marked Ratio: {:.1}%", del_pct)?;
                }
            }
            wprintln!(writer)?;
            print_fseg_headers(page_data, header.page_number, &idx, verbose, writer)?;
            wprintln!(writer)?;
            print_system_records(page_data, header.page_number, writer)?;
        }
    }
    // --- R-tree (spatial index) pages. ---
    if pt == PageType::Rtree {
        if let Some(info) = crate::innodb::rtree::parse_rtree_page(page_data) {
            wprintln!(writer)?;
            wprintln!(writer, "=== RTREE Detail: Page {}", header.page_number)?;
            wprintln!(
                writer,
                "Level: {} ({})",
                info.level,
                if info.level == 0 { "leaf" } else { "non-leaf" }
            )?;
            wprintln!(writer, "Records: {}", info.record_count)?;
            wprintln!(writer, "MBRs Extracted: {}", info.mbrs.len())?;
            if let Some(ref enc) = info.enclosing_mbr {
                wprintln!(
                    writer,
                    "MBR Coverage: ({:.6}, {:.6}) \u{2014} ({:.6}, {:.6})",
                    enc.min_x,
                    enc.min_y,
                    enc.max_x,
                    enc.max_y
                )?;
                wprintln!(writer, "MBR Area: {:.6}", enc.area())?;
            }
            // Per-record MBR dump only under --verbose.
            if verbose {
                for (i, mbr) in info.mbrs.iter().enumerate() {
                    wprintln!(
                        writer,
                        " [{:>3}] ({:.6}, {:.6}) \u{2014} ({:.6}, {:.6}) area={:.6}",
                        i,
                        mbr.min_x,
                        mbr.min_y,
                        mbr.max_x,
                        mbr.max_y,
                        mbr.area()
                    )?;
                }
            }
        }
    }
    // --- Externally stored BLOB pages (old-style and compressed). ---
    if matches!(pt, PageType::Blob | PageType::ZBlob | PageType::ZBlob2) {
        if let Some(blob_hdr) = BlobPageHeader::parse(page_data) {
            wprintln!(writer)?;
            wprintln!(writer, "=== BLOB Header: Page {}", header.page_number)?;
            wprintln!(writer, "Data Length: {} bytes", blob_hdr.part_len)?;
            if blob_hdr.has_next() {
                wprintln!(writer, "Next BLOB Page: {}", blob_hdr.next_page_no)?;
            } else {
                wprintln!(writer, "Next BLOB Page: None (last in chain)")?;
            }
        }
    }
    // --- New-style LOB first pages. ---
    if pt == PageType::LobFirst {
        if let Some(lob_hdr) = LobFirstPageHeader::parse(page_data) {
            wprintln!(writer)?;
            wprintln!(
                writer,
                "=== LOB First Page Header: Page {}",
                header.page_number
            )?;
            wprintln!(writer, "Version: {}", lob_hdr.version)?;
            wprintln!(writer, "Flags: {}", lob_hdr.flags)?;
            wprintln!(writer, "Total Data Length: {} bytes", lob_hdr.data_len)?;
            if lob_hdr.trx_id > 0 {
                wprintln!(writer, "Transaction ID: {}", lob_hdr.trx_id)?;
            }
        }
    }
    // --- UNDO log pages. ---
    if pt == PageType::UndoLog {
        if let Some(undo_hdr) = UndoPageHeader::parse(page_data) {
            wprintln!(writer)?;
            wprintln!(writer, "=== UNDO Header: Page {}", header.page_number)?;
            // BUGFIX: previously printed the same name twice as
            // "Undo Type: {name} ({name})".
            wprintln!(writer, "Undo Type: {}", undo_hdr.page_type.name())?;
            wprintln!(writer, "Log Start Offset: {}", undo_hdr.start)?;
            wprintln!(writer, "Free Offset: {}", undo_hdr.free)?;
            wprintln!(
                writer,
                "Used Bytes: {}",
                undo_hdr.free.saturating_sub(undo_hdr.start)
            )?;
            if let Some(seg_hdr) = UndoSegmentHeader::parse(page_data) {
                wprintln!(writer, "Segment State: {}", seg_hdr.state.name())?;
                wprintln!(writer, "Last Log Offset: {}", seg_hdr.last_log)?;
            }
        }
    }
    wprintln!(writer)?;
    // --- FIL trailer: last 8 bytes of the page. ---
    let ps = page_size as usize;
    if page_data.len() >= ps {
        let trailer_offset = ps - 8;
        if let Some(trailer) = crate::innodb::page::FilTrailer::parse(&page_data[trailer_offset..])
        {
            wprintln!(writer, "=== TRAILER: Page {}", header.page_number)?;
            wprintln!(writer, "Old-style Checksum: {}", trailer.checksum)?;
            wprintln!(writer, "Low 32 bits of LSN: {}", trailer.lsn_low32)?;
            wprintln!(writer, "Byte End: {}", format_offset(byte_end))?;
            if verbose {
                // Validate the page checksum and the header/trailer LSN pair.
                let csum_result = checksum::validate_checksum(page_data, page_size, None);
                let status = if csum_result.valid {
                    "OK".green().to_string()
                } else {
                    "MISMATCH".red().to_string()
                };
                wprintln!(
                    writer,
                    "Checksum Status: {} ({:?})",
                    status,
                    csum_result.algorithm
                )?;
                let lsn_valid = checksum::validate_lsn(page_data, page_size);
                let lsn_status = if lsn_valid {
                    "OK".green().to_string()
                } else {
                    "MISMATCH".red().to_string()
                };
                wprintln!(writer, "LSN Consistency: {}", lsn_status)?;
            }
        }
    }
    Ok(())
}
/// Print the INDEX page-header section; `verbose` adds "--" explanation
/// lines under selected fields.
fn print_index_header(
    idx: &IndexHeader,
    page_num: u32,
    verbose: bool,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    wprintln!(writer, "=== INDEX Header: Page {}", page_num)?;
    wprintln!(writer, "Index ID: {}", idx.index_id)?;
    wprintln!(writer, "Node Level: {}", idx.level)?;
    // NOTE(review): a zero max_trx_id is labeled "Secondary Index" here.
    // In InnoDB PAGE_MAX_TRX_ID is normally maintained on secondary-index
    // leaf pages (clustered index pages leave it 0), so confirm this label
    // matches the parser's intended semantics.
    if idx.max_trx_id > 0 {
        wprintln!(writer, "Max Transaction ID: {}", idx.max_trx_id)?;
    } else {
        wprintln!(writer, "-- Secondary Index")?;
    }
    wprintln!(writer, "Directory Slots: {}", idx.n_dir_slots)?;
    if verbose {
        wprintln!(writer, "-- Number of slots in page directory")?;
    }
    wprintln!(writer, "Heap Top: {}", idx.heap_top)?;
    if verbose {
        wprintln!(writer, "-- Pointer to record heap top")?;
    }
    wprintln!(writer, "Records in Page: {}", idx.n_recs)?;
    wprintln!(
        writer,
        "Records in Heap: {} (compact: {})",
        idx.n_heap(),
        idx.is_compact()
    )?;
    if verbose {
        wprintln!(writer, "-- Number of records in heap")?;
    }
    wprintln!(writer, "Start of Free Record List: {}", idx.free)?;
    wprintln!(writer, "Garbage Bytes: {}", idx.garbage)?;
    if verbose {
        wprintln!(writer, "-- Number of bytes in deleted records.")?;
    }
    wprintln!(writer, "Last Insert: {}", idx.last_insert)?;
    wprintln!(
        writer,
        "Last Insert Direction: {} - {}",
        idx.direction,
        idx.direction_name()
    )?;
    wprintln!(writer, "Inserts in this direction: {}", idx.n_direction)?;
    if verbose {
        wprintln!(
            writer,
            "-- Number of consecutive inserts in this direction."
        )?;
    }
    Ok(())
}
/// Print the FSEG (file segment) header section of an INDEX page: pointers
/// to the inode entries for the index's leaf and non-leaf segments.
fn print_fseg_headers(
    page_data: &[u8],
    page_num: u32,
    idx: &IndexHeader,
    verbose: bool,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    wprintln!(
        writer,
        "=== FSEG_HDR - File Segment Header: Page {}",
        page_num
    )?;
    if let Some(leaf) = FsegHeader::parse_leaf(page_data) {
        wprintln!(writer, "Inode Space ID: {}", leaf.space_id)?;
        wprintln!(writer, "Inode Page Number: {}", leaf.page_no)?;
        wprintln!(writer, "Inode Offset: {}", leaf.offset)?;
    }
    // NOTE(review): the non-leaf segment header is printed only when the
    // page is a leaf, and its page/offset only under --verbose — both gates
    // are asymmetric with the leaf branch above. Confirm this is intended
    // (FSEG headers are typically meaningful on the index root page).
    if idx.is_leaf() {
        if let Some(internal) = FsegHeader::parse_internal(page_data) {
            wprintln!(writer, "Non-leaf Space ID: {}", internal.space_id)?;
            if verbose {
                wprintln!(writer, "Non-leaf Page Number: {}", internal.page_no)?;
                wprintln!(writer, "Non-leaf Offset: {}", internal.offset)?;
            }
        }
    }
    Ok(())
}
/// Print the infimum/supremum system-record section of an INDEX page.
fn print_system_records(
    page_data: &[u8],
    page_num: u32,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    // Pages without parseable system records are silently skipped.
    let sys = match SystemRecords::parse(page_data) {
        Some(s) => s,
        None => return Ok(()),
    };
    wprintln!(writer, "=== INDEX System Records: Page {}", page_num)?;
    wprintln!(
        writer,
        "Index Record Status: {} - (Decimal: {}) {}",
        sys.rec_status,
        sys.rec_status,
        sys.rec_status_name()
    )?;
    wprintln!(writer, "Number of records owned: {}", sys.n_owned)?;
    // Boolean flags are rendered as "1"/"0".
    wprintln!(writer, "Deleted: {}", if sys.deleted { "1" } else { "0" })?;
    wprintln!(writer, "Heap Number: {}", sys.heap_no)?;
    wprintln!(writer, "Next Record Offset (Infimum): {}", sys.infimum_next)?;
    wprintln!(
        writer,
        "Next Record Offset (Supremum): {}",
        sys.supremum_next
    )?;
    wprintln!(
        writer,
        "Left-most node on non-leaf level: {}",
        if sys.min_rec { "1" } else { "0" }
    )?;
    Ok(())
}
/// Print the FSP header block from page 0 (file-space metadata), including
/// detected compression/encryption and the first unused segment ID.
fn print_fsp_header_detail(
    fsp: &FspHeader,
    page0: &[u8],
    verbose: bool,
    vendor_info: &crate::innodb::vendor::VendorInfo,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    wprintln!(writer, "=== File Header")?;
    wprintln!(writer, "Vendor: {}", vendor_info)?;
    wprintln!(writer, "Space ID: {}", fsp.space_id)?;
    if verbose {
        wprintln!(writer, "-- Offset 38, Length 4")?;
    }
    wprintln!(writer, "Size: {}", fsp.size)?;
    wprintln!(writer, "Flags: {}", fsp.flags)?;
    wprintln!(
        writer,
        "Page Free Limit: {} (this should always be 64 on a single-table file)",
        fsp.free_limit
    )?;
    // Compression/encryption are derived from the FSP flags; the vendor
    // info is passed because flag interpretation is vendor-dependent.
    let comp = compression::detect_compression(fsp.flags, Some(vendor_info));
    let enc = encryption::detect_encryption(fsp.flags, Some(vendor_info));
    if comp != compression::CompressionAlgorithm::None {
        wprintln!(writer, "Compression: {}", comp)?;
    }
    if enc != encryption::EncryptionAlgorithm::None {
        wprintln!(writer, "Encryption: {}", enc)?;
        if let Some(info) = encryption::parse_encryption_info(
            page0,
            fsp.page_size_from_flags_with_vendor(vendor_info),
        ) {
            // Magic version distinguishes the on-disk encryption-info layouts.
            let version_desc = match info.magic_version {
                1 => "V1",
                2 => "V2",
                3 => "V3 (MySQL 8.0.5+)",
                _ => "Unknown",
            };
            wprintln!(writer, " Master Key ID: {}", info.master_key_id)?;
            wprintln!(writer, " Server UUID: {}", info.server_uuid)?;
            wprintln!(writer, " Magic: {}", version_desc)?;
        }
    }
    // 8-byte big-endian segment counter at FIL_PAGE_DATA + 72; the length
    // check guards against a truncated page-0 buffer.
    let seg_id_offset = crate::innodb::constants::FIL_PAGE_DATA + 72;
    if page0.len() >= seg_id_offset + 8 {
        use byteorder::ByteOrder;
        let seg_id = byteorder::BigEndian::read_u64(&page0[seg_id_offset..]);
        wprintln!(writer, "First Unused Segment ID: {}", seg_id)?;
    }
    Ok(())
}
/// If the given page can start a LOB chain, walk the chain and print a
/// summary line plus one line per chain page.
///
/// Fix: the start-page check previously matched only BLOB / ZBLOB /
/// LOB_FIRST, silently skipping ZBLOB2 and ZLOB_FIRST pages even though the
/// CSV and JSON paths (`execute_csv` / `execute_json`) walk chains for all
/// five types. The set is now consistent across all three output modes.
fn print_lob_chain_if_applicable(
    page_data: &[u8],
    page_num: u64,
    ts: &mut Tablespace,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    use crate::innodb::lob;
    let header = match FilHeader::parse(page_data) {
        Some(h) => h,
        None => return Ok(()),
    };
    // Must match the chain-start sets used by execute_csv/execute_json.
    let is_lob_start = matches!(
        header.page_type,
        PageType::Blob
            | PageType::ZBlob
            | PageType::ZBlob2
            | PageType::LobFirst
            | PageType::ZlobFirst
    );
    if !is_lob_start {
        return Ok(());
    }
    // 10000-page cap guards against corrupt/cyclic chains.
    match lob::walk_lob_chain(ts, page_num, 10000) {
        Ok(Some(chain)) => {
            wprintln!(writer)?;
            wprintln!(
                writer,
                "=== LOB Chain: Page {} ({}, {} pages, {} bytes total)",
                page_num,
                chain.chain_type,
                chain.page_count,
                chain.total_data_len
            )?;
            for (i, cp) in chain.pages.iter().enumerate() {
                wprintln!(
                    writer,
                    " [{:>3}] Page {:<8} {:.<20} {} bytes",
                    i,
                    cp.page_no,
                    cp.page_type,
                    cp.data_len
                )?;
            }
        }
        // A start page with no walkable chain is not an error; stay quiet.
        Ok(None) => {}
        Err(e) => {
            wprintln!(writer, " LOB chain error: {}", e)?;
        }
    }
    Ok(())
}
/// Decide whether `page_type` satisfies the user-supplied type filter.
///
/// Matching is case-insensitive. An exact match on the page-type name wins
/// immediately; otherwise a handful of convenience aliases ("UNDO", "BLOB",
/// "LOB", "ZLOB", "SDI", "COMPRESSED", "ENCRYPTED", "INSTANT") expand to
/// families of related page types, and any other string falls back to a
/// substring match against the type name.
fn matches_page_type_filter(page_type: &PageType, filter: &str) -> bool {
    let needle = filter.to_uppercase();
    let name = page_type.name();
    if name == needle {
        return true;
    }
    match needle.as_str() {
        "UNDO" => matches!(page_type, PageType::UndoLog),
        "BLOB" => matches!(
            page_type,
            PageType::Blob | PageType::ZBlob | PageType::ZBlob2
        ),
        "LOB" => matches!(
            page_type,
            PageType::LobIndex
                | PageType::LobData
                | PageType::LobFirst
                | PageType::ZlobFirst
                | PageType::ZlobData
                | PageType::ZlobIndex
                | PageType::ZlobFrag
                | PageType::ZlobFragEntry
        ),
        "ZLOB" => matches!(
            page_type,
            PageType::ZlobFirst
                | PageType::ZlobData
                | PageType::ZlobIndex
                | PageType::ZlobFrag
                | PageType::ZlobFragEntry
        ),
        "SDI" => matches!(
            page_type,
            PageType::Sdi | PageType::SdiBlob | PageType::SdiZblob
        ),
        "COMPRESSED" | "COMP" => matches!(
            page_type,
            PageType::Compressed
                | PageType::CompressedEncrypted
                | PageType::PageCompressed
                | PageType::PageCompressedEncrypted
        ),
        "ENCRYPTED" | "ENC" => matches!(
            page_type,
            PageType::Encrypted
                | PageType::CompressedEncrypted
                | PageType::EncryptedRtree
                | PageType::PageCompressedEncrypted
        ),
        "INSTANT" => matches!(page_type, PageType::Instant),
        // Unknown alias: treat it as a substring of the type name.
        other => name.contains(other),
    }
}