use std::collections::BTreeMap;
use std::io::Write;
use std::path::Path;
use byteorder::{BigEndian, ByteOrder};
use colored::Colorize;
use serde::Serialize;
use crate::cli::wprintln;
use crate::innodb::constants::*;
use crate::innodb::page::FilHeader;
use crate::util::fs::find_tablespace_files;
use crate::IdbError;
/// Options for the `inno info` subcommand, assembled from CLI flags.
pub struct InfoOptions {
    /// Read and display the ibdata1 page-0 FIL header (`--ibdata`).
    pub ibdata: bool,
    /// Compare the ibdata1 LSN with the redo log checkpoint LSN (`--lsn-check`).
    pub lsn_check: bool,
    /// Data directory to scan; defaults to /var/lib/mysql when absent.
    pub datadir: Option<String>,
    /// Database name for the MySQL-backed table-info path (`-D`).
    pub database: Option<String>,
    /// Table name for the MySQL-backed table-info path (`-t`).
    pub table: Option<String>,
    /// MySQL connection host override.
    pub host: Option<String>,
    /// MySQL connection port override.
    pub port: Option<u16>,
    /// MySQL connection user override.
    pub user: Option<String>,
    /// MySQL connection password override.
    pub password: Option<String>,
    /// Explicit MySQL defaults file; when unset, one is auto-discovered.
    pub defaults_file: Option<String>,
    /// Map every .ibd file under the datadir to its space ID (`--tablespace-map`).
    pub tablespace_map: bool,
    /// Emit machine-readable JSON instead of formatted text.
    pub json: bool,
    /// Explicit page size forwarded to open_tablespace; autodetected when None.
    pub page_size: Option<u32>,
    /// Open tablespaces via memory mapping (forwarded to open_tablespace).
    pub mmap: bool,
}
/// JSON payload for `--ibdata --json`: the parsed page-0 FIL header of
/// ibdata1 plus the redo checkpoint LSNs when a redo log was found.
#[derive(Serialize)]
struct IbdataInfoJson {
    /// Path of the ibdata1 file that was read.
    ibdata_file: String,
    /// FIL header checksum field.
    page_checksum: u32,
    /// FIL header page number (0 for the first page).
    page_number: u32,
    /// Raw FIL page type code.
    page_type: u16,
    /// FIL header LSN of the page.
    lsn: u64,
    /// FIL header flush LSN field.
    flush_lsn: u64,
    /// Tablespace (space) ID from the FIL header.
    space_id: u32,
    /// LSN read from redo checkpoint slot 1; omitted when unreadable.
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_1_lsn: Option<u64>,
    /// LSN read from redo checkpoint slot 2; omitted when unreadable.
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_2_lsn: Option<u64>,
}
/// JSON payload for `--lsn-check --json`: the two LSNs compared and the
/// verdict of the comparison.
#[derive(Serialize)]
struct LsnCheckJson {
    /// LSN read from the ibdata1 page-0 FIL header.
    ibdata_lsn: u64,
    /// Checkpoint LSN read from the redo log (0 if none was readable).
    redo_checkpoint_lsn: u64,
    /// True when the two LSNs are exactly equal.
    in_sync: bool,
}
/// JSON payload for `--tablespace-map --json`: the scanned directory and
/// one entry per successfully identified .ibd file.
#[derive(Serialize)]
struct TablespaceMapJson {
    /// Data directory that was scanned.
    datadir: String,
    /// File-to-space-ID entries, sorted by file path.
    tablespaces: Vec<TablespaceMapEntryJson>,
}
/// One row of the tablespace map: an .ibd file (relative to the datadir
/// when possible) and its tablespace ID.
#[derive(Serialize)]
struct TablespaceMapEntryJson {
    /// Path of the .ibd file, stripped of the datadir prefix when it applies.
    file: String,
    /// Tablespace (space) ID of that file.
    space_id: u32,
}
/// Entry point for `inno info`: dispatches to the selected mode
/// (tablespace map, ibdata header, LSN check, or MySQL table info) and
/// prints usage when no mode was requested.
///
/// # Errors
/// Returns `IdbError::Argument` for a missing/invalid data directory or
/// when MySQL support was not compiled in; otherwise propagates errors
/// from the selected sub-mode.
pub fn execute(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    if opts.tablespace_map {
        return execute_tablespace_map(opts, writer);
    }
    if opts.ibdata || opts.lsn_check {
        // Fall back to the conventional MySQL data directory when none was given.
        let dir = opts.datadir.as_deref().unwrap_or("/var/lib/mysql");
        let dir_path = std::path::Path::new(dir);
        if !dir_path.is_dir() {
            return Err(IdbError::Argument(format!(
                "Data directory does not exist: {}",
                dir
            )));
        }
        // Exactly one of the two flags is handled here; --ibdata wins
        // when both are set (same precedence as before).
        return if opts.ibdata {
            execute_ibdata(opts, dir_path, writer)
        } else {
            execute_lsn_check(opts, dir_path, writer)
        };
    }
    if opts.database.is_some() || opts.table.is_some() {
        // Table info needs a live MySQL connection, which is feature-gated.
        #[cfg(feature = "mysql")]
        return execute_table_info(opts, writer);
        #[cfg(not(feature = "mysql"))]
        return Err(IdbError::Argument(
            "MySQL support not compiled. Rebuild with: cargo build --features mysql"
                .to_string(),
        ));
    }
    // No mode selected: show a short usage summary.
    wprintln!(writer, "Usage:")?;
    wprintln!(writer, " inno info --ibdata -d <datadir> Read ibdata1 page 0 header")?;
    wprintln!(writer, " inno info --lsn-check -d <datadir> Compare ibdata1 and redo log LSNs")?;
    wprintln!(writer, " inno info --tablespace-map -d <datadir> Map .ibd files to tablespace IDs")?;
    wprintln!(writer, " inno info -D <database> -t <table> Show table/index info (requires --features mysql)")?;
    Ok(())
}
/// Scans the data directory for `.ibd` files and prints a mapping from
/// file path to tablespace (space) ID, either as an aligned text table
/// or as JSON.
///
/// Files that cannot be opened or identified are silently skipped, so
/// the resulting map may be smaller than the set of files found.
///
/// # Errors
/// `IdbError::Argument` when no datadir was given or it is not a
/// directory; `IdbError::Parse` on JSON serialization failure; I/O
/// errors from the macro writes.
fn execute_tablespace_map(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let dir_str = opts.datadir.as_deref().ok_or_else(|| {
        IdbError::Argument("--tablespace-map requires a data directory (-d <datadir>)".to_string())
    })?;
    let dir = Path::new(dir_str);
    if !dir.is_dir() {
        return Err(IdbError::Argument(format!(
            "Data directory does not exist: {}",
            dir_str
        )));
    }
    let files = find_tablespace_files(dir, &["ibd"], None)?;
    if files.is_empty() {
        if opts.json {
            // Keep JSON output shape stable even when nothing was found.
            let empty = TablespaceMapJson {
                datadir: dir_str.to_string(),
                tablespaces: Vec::new(),
            };
            let rendered = serde_json::to_string_pretty(&empty)
                .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
            wprintln!(writer, "{}", rendered)?;
        } else {
            wprintln!(writer, "No .ibd files found in {}", dir_str)?;
        }
        return Ok(());
    }
    // BTreeMap keeps the output sorted by file path.
    let mut mapping: BTreeMap<String, u32> = BTreeMap::new();
    for file in &files {
        let lossy = file.to_string_lossy();
        let mut ts = match crate::cli::open_tablespace(&lossy, opts.page_size, opts.mmap) {
            Ok(ts) => ts,
            Err(_) => continue,
        };
        // Prefer the parsed FSP header; otherwise fall back to the raw
        // 4-byte space ID at FIL_PAGE_DATA on page 0. Skip the file on
        // any failure along the way.
        let sid = match ts.fsp_header() {
            Some(fsp) => fsp.space_id,
            None => {
                let page0 = match ts.read_page(0) {
                    Ok(p) => p,
                    Err(_) => continue,
                };
                if page0.len() < FIL_PAGE_DATA + 4 {
                    continue;
                }
                BigEndian::read_u32(&page0[FIL_PAGE_DATA..])
            }
        };
        // Show paths relative to the datadir when the file lives under it.
        let shown = file
            .strip_prefix(dir)
            .unwrap_or(file)
            .to_string_lossy()
            .into_owned();
        mapping.insert(shown, sid);
    }
    if opts.json {
        let entries: Vec<TablespaceMapEntryJson> = mapping
            .iter()
            .map(|(file, &space_id)| TablespaceMapEntryJson {
                file: file.clone(),
                space_id,
            })
            .collect();
        let payload = TablespaceMapJson {
            datadir: dir_str.to_string(),
            tablespaces: entries,
        };
        let rendered = serde_json::to_string_pretty(&payload)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", rendered)?;
    } else {
        // Column width: widest path, but never narrower than the header.
        let name_col = mapping
            .keys()
            .map(String::len)
            .max()
            .unwrap_or(4)
            .max("FILE".len());
        wprintln!(writer, "{:<width$} {}", "FILE", "SPACE_ID", width = name_col)?;
        wprintln!(
            writer,
            "{:<width$} {}",
            "-".repeat(name_col),
            "-".repeat("SPACE_ID".len()),
            width = name_col
        )?;
        for (file, sid) in &mapping {
            wprintln!(writer, "{:<width$} {}", file, sid, width = name_col)?;
        }
        wprintln!(writer)?;
        wprintln!(writer, "Total: {} tablespace(s)", mapping.len())?;
    }
    Ok(())
}
/// Reads page 0 of `ibdata1` inside `datadir` and prints its FIL header
/// fields, followed by whichever redo checkpoint LSNs could be read.
/// Emits JSON instead when `opts.json` is set.
///
/// # Errors
/// `IdbError::Io` when ibdata1 is missing or unreadable; `IdbError::Parse`
/// when the FIL header cannot be parsed or JSON serialization fails.
fn execute_ibdata(
    opts: &InfoOptions,
    datadir: &std::path::Path,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    let ibdata = datadir.join("ibdata1");
    if !ibdata.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }
    // One default-sized page is enough to cover the FIL header.
    let first_page = read_file_bytes(&ibdata, 0, SIZE_PAGE_DEFAULT as usize)?;
    let header = FilHeader::parse(&first_page)
        .ok_or_else(|| IdbError::Parse("Cannot parse ibdata1 page 0 FIL header".to_string()))?;
    let (checkpoint1, checkpoint2) = read_redo_checkpoint_lsns(datadir);
    if opts.json {
        let info = IbdataInfoJson {
            ibdata_file: ibdata.display().to_string(),
            page_checksum: header.checksum,
            page_number: header.page_number,
            page_type: header.page_type.as_u16(),
            lsn: header.lsn,
            flush_lsn: header.flush_lsn,
            space_id: header.space_id,
            redo_checkpoint_1_lsn: checkpoint1,
            redo_checkpoint_2_lsn: checkpoint2,
        };
        let rendered = serde_json::to_string_pretty(&info)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", rendered)?;
        return Ok(());
    }
    // Human-readable listing of the same fields.
    wprintln!(writer, "{}", "ibdata1 Page 0 Header".bold())?;
    wprintln!(writer, " File: {}", ibdata.display())?;
    wprintln!(writer, " Checksum: {}", header.checksum)?;
    wprintln!(writer, " Page No: {}", header.page_number)?;
    wprintln!(
        writer,
        " Page Type: {} ({})",
        header.page_type.as_u16(),
        header.page_type.name()
    )?;
    wprintln!(writer, " LSN: {}", header.lsn)?;
    wprintln!(writer, " Flush LSN: {}", header.flush_lsn)?;
    wprintln!(writer, " Space ID: {}", header.space_id)?;
    wprintln!(writer)?;
    // Print whichever checkpoint slots were readable.
    for (slot, lsn) in [(1, checkpoint1), (2, checkpoint2)] {
        if let Some(lsn) = lsn {
            wprintln!(writer, "Redo Log Checkpoint {} LSN: {}", slot, lsn)?;
        }
    }
    Ok(())
}
/// Compares the ibdata1 page-0 FIL-header LSN with the redo log
/// checkpoint LSN and reports whether they are in sync.
///
/// InnoDB writes checkpoints alternately into two header slots of the
/// redo log; the current checkpoint is the slot holding the larger LSN.
/// Both slots are therefore consulted here (previously only slot 1 was
/// used, which could compare against a stale checkpoint and report a
/// spurious OUT OF SYNC).
///
/// # Errors
/// `IdbError::Io` when ibdata1 is missing or unreadable; `IdbError::Parse`
/// on JSON serialization failure.
fn execute_lsn_check(
    opts: &InfoOptions,
    datadir: &std::path::Path,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }
    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    // Big-endian u64 at FIL_PAGE_LSN within the page-0 FIL header.
    let ibdata_lsn = BigEndian::read_u64(&page0[FIL_PAGE_LSN..]);
    let (cp1_lsn, cp2_lsn) = read_redo_checkpoint_lsns(datadir);
    // The valid/current checkpoint is whichever slot has the higher LSN;
    // unreadable slots contribute 0 and thus never win.
    let redo_lsn = cp1_lsn.unwrap_or(0).max(cp2_lsn.unwrap_or(0));
    let in_sync = ibdata_lsn == redo_lsn;
    if opts.json {
        let check = LsnCheckJson {
            ibdata_lsn,
            redo_checkpoint_lsn: redo_lsn,
            in_sync,
        };
        let json = serde_json::to_string_pretty(&check)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }
    wprintln!(writer, "{}", "LSN Sync Check".bold())?;
    wprintln!(writer, " ibdata1 LSN: {}", ibdata_lsn)?;
    wprintln!(writer, " Redo checkpoint LSN: {}", redo_lsn)?;
    if in_sync {
        wprintln!(writer, " Status: {}", "IN SYNC".green())?;
    } else {
        wprintln!(writer, " Status: {}", "OUT OF SYNC".red())?;
        wprintln!(
            writer,
            " Difference: {} bytes",
            ibdata_lsn.abs_diff(redo_lsn)
        )?;
    }
    Ok(())
}
/// Locates the redo log under `datadir` and returns the LSNs stored in
/// checkpoint slots 1 and 2, or `None` for any slot that cannot be read.
///
/// Looks for the MySQL 8.0.30+ `#innodb_redo/#ib_redo*` layout first
/// (using the lexicographically first file) and falls back to the legacy
/// `ib_logfile0`. The slot offsets used are the legacy header layout:
/// 512-byte log blocks 1 and 3, with the LSN 8 bytes into each block.
/// NOTE(review): assumes the same offsets hold for the 8.0.30+ redo
/// file format — confirm against the server version in use.
fn read_redo_checkpoint_lsns(datadir: &std::path::Path) -> (Option<u64>, Option<u64>) {
    // Checkpoint LSN field sits 8 bytes into checkpoint blocks 1 and 3.
    const CP1_OFFSET: u64 = 512 + 8;
    const CP2_OFFSET: u64 = 1536 + 8;
    // Modern layout: a dedicated #innodb_redo directory of #ib_redo files.
    if let Ok(entries) = std::fs::read_dir(datadir.join("#innodb_redo")) {
        let mut redo_paths: Vec<std::path::PathBuf> = entries
            .flatten()
            .filter(|e| e.file_name().to_string_lossy().starts_with("#ib_redo"))
            .map(|e| e.path())
            .collect();
        // All candidates share the same parent, so sorting the full
        // paths orders them by file name.
        redo_paths.sort();
        if let Some(path) = redo_paths.first() {
            return (read_u64_at(path, CP1_OFFSET), read_u64_at(path, CP2_OFFSET));
        }
    }
    // Legacy layout: ib_logfile0 directly in the datadir.
    let legacy = datadir.join("ib_logfile0");
    if legacy.exists() {
        return (read_u64_at(&legacy, CP1_OFFSET), read_u64_at(&legacy, CP2_OFFSET));
    }
    (None, None)
}
/// Reads exactly `length` bytes from `path` starting at byte `offset`.
///
/// # Errors
/// `IdbError::Io` when the file cannot be opened, seeked, or fully read
/// (including EOF before `length` bytes).
fn read_file_bytes(
    path: &std::path::Path,
    offset: u64,
    length: usize,
) -> Result<Vec<u8>, IdbError> {
    use std::io::{Read, Seek, SeekFrom};
    // Shared builder so all three failure sites produce uniform messages.
    let io_err = |verb: &str, e: std::io::Error| {
        IdbError::Io(format!("Cannot {} {}: {}", verb, path.display(), e))
    };
    let mut f = std::fs::File::open(path).map_err(|e| io_err("open", e))?;
    f.seek(SeekFrom::Start(offset))
        .map_err(|e| io_err("seek in", e))?;
    let mut out = vec![0u8; length];
    f.read_exact(&mut out)
        .map_err(|e| io_err("read from", e))?;
    Ok(out)
}
fn read_u64_at(path: &std::path::Path, offset: u64) -> Option<u64> {
let bytes = read_file_bytes(path, offset, 8).ok()?;
Some(BigEndian::read_u64(&bytes))
}
#[cfg(feature = "mysql")]
fn execute_table_info(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
use mysql_async::prelude::*;
let database = opts
.database
.as_deref()
.ok_or_else(|| IdbError::Argument("Database name required (-D <database>)".to_string()))?;
let table = opts
.table
.as_deref()
.ok_or_else(|| IdbError::Argument("Table name required (-t <table>)".to_string()))?;
let mut config = crate::util::mysql::MysqlConfig::default();
if let Some(ref df) = opts.defaults_file {
if let Some(parsed) = crate::util::mysql::parse_defaults_file(std::path::Path::new(df)) {
config = parsed;
}
} else if let Some(df) = crate::util::mysql::find_defaults_file() {
if let Some(parsed) = crate::util::mysql::parse_defaults_file(&df) {
config = parsed;
}
}
if let Some(ref h) = opts.host {
config.host = h.clone();
}
if let Some(p) = opts.port {
config.port = p;
}
if let Some(ref u) = opts.user {
config.user = u.clone();
}
if opts.password.is_some() {
config.password = opts.password.clone();
}
config.database = Some(database.to_string());
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.map_err(|e| IdbError::Io(format!("Cannot create async runtime: {}", e)))?;
rt.block_on(async {
let pool = mysql_async::Pool::new(config.to_opts());
let mut conn = pool
.get_conn()
.await
.map_err(|e| IdbError::Io(format!("MySQL connection failed: {}", e)))?;
let table_query = format!(
"SELECT SPACE, TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}'",
database, table
);
let table_rows: Vec<(u64, u64)> = conn
.query(&table_query)
.await
.unwrap_or_default();
if table_rows.is_empty() {
let sys_query = format!(
"SELECT SPACE, TABLE_ID FROM information_schema.innodb_sys_tables WHERE NAME = '{}/{}'",
database, table
);
let sys_rows: Vec<(u64, u64)> = conn
.query(&sys_query)
.await
.unwrap_or_default();
if sys_rows.is_empty() {
wprintln!(writer, "Table {}.{} not found in InnoDB system tables.", database, table)?;
pool.disconnect().await.ok();
return Ok(());
}
print_table_info(writer, database, table, &sys_rows)?;
} else {
print_table_info(writer, database, table, &table_rows)?;
}
let idx_query = format!(
"SELECT NAME, INDEX_ID, PAGE_NO FROM information_schema.innodb_indexes \
WHERE TABLE_ID = (SELECT TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}')",
database, table
);
let idx_rows: Vec<(String, u64, u64)> = conn
.query(&idx_query)
.await
.unwrap_or_default();
if !idx_rows.is_empty() {
wprintln!(writer)?;
wprintln!(writer, "{}", "Indexes:".bold())?;
for (name, index_id, root_page) in &idx_rows {
wprintln!(writer, " {} (index_id={}, root_page={})", name, index_id, root_page)?;
}
}
let status_rows: Vec<(String, String, String)> = conn
.query("SHOW ENGINE INNODB STATUS")
.await
.unwrap_or_default();
if let Some((_type, _name, status)) = status_rows.first() {
wprintln!(writer)?;
wprintln!(writer, "{}", "InnoDB Status:".bold())?;
for line in status.lines() {
if line.starts_with("Log sequence number") || line.starts_with("Log flushed up to") {
wprintln!(writer, " {}", line.trim())?;
}
if line.starts_with("Trx id counter") {
wprintln!(writer, " {}", line.trim())?;
}
}
}
pool.disconnect().await.ok();
Ok(())
})
}
#[cfg(feature = "mysql")]
fn print_table_info(
writer: &mut dyn Write,
database: &str,
table: &str,
rows: &[(u64, u64)],
) -> Result<(), IdbError> {
wprintln!(
writer,
"{}",
format!("Table: {}.{}", database, table).bold()
)?;
for (space_id, table_id) in rows {
wprintln!(writer, " Space ID: {}", space_id)?;
wprintln!(writer, " Table ID: {}", table_id)?;
}
Ok(())
}