use std::io::Write;

use byteorder::{BigEndian, ByteOrder};
use colored::Colorize;
use serde::Serialize;

use crate::cli::wprintln;
use crate::innodb::constants::*;
use crate::innodb::page::FilHeader;
use crate::IdbError;

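/// Options for the `idb info` subcommand.
///
/// The mode is chosen by `ibdata`, `lsn_check`, or the presence of
/// `database`/`table`; the remaining fields carry the data directory,
/// MySQL connection settings, and output preferences.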
pub struct InfoOptions {
    pub ibdata: bool,
    pub lsn_check: bool,
    pub datadir: Option<String>,
    pub database: Option<String>,
    pub table: Option<String>,
    pub host: Option<String>,
    pub port: Option<u16>,
    pub user: Option<String>,
    pub password: Option<String>,
    pub defaults_file: Option<String>,
    pub json: bool,
    pub page_size: Option<u32>,
}

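/// JSON output for `idb info --ibdata --json`.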
#[derive(Serialize)]
struct IbdataInfoJson {
    ibdata_file: String,
    page_checksum: u32,
    page_number: u32,
    page_type: u16,
    lsn: u64,
    flush_lsn: u64,
    space_id: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_1_lsn: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_2_lsn: Option<u64>,
}

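/// JSON output for `idb info --lsn-check --json`.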
#[derive(Serialize)]
struct LsnCheckJson {
    ibdata_lsn: u64,
    redo_checkpoint_lsn: u64,
    in_sync: bool,
}

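/// Entry point for `idb info`.
///
/// Dispatches to the ibdata header dump, the LSN sync check, or the
/// MySQL-backed table/index lookup depending on which options are set;
/// prints usage when no mode is selected.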
pub fn execute(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    if opts.ibdata || opts.lsn_check {
        let datadir = opts.datadir.as_deref().unwrap_or("/var/lib/mysql");
        let datadir_path = std::path::Path::new(datadir);

        if !datadir_path.is_dir() {
            return Err(IdbError::Argument(format!(
                "Data directory does not exist: {}",
                datadir
            )));
        }

        if opts.ibdata {
            return execute_ibdata(opts, datadir_path, writer);
        }
        if opts.lsn_check {
            return execute_lsn_check(opts, datadir_path, writer);
        }
    }

    #[cfg(feature = "mysql")]
    {
        if opts.database.is_some() || opts.table.is_some() {
            return execute_table_info(opts, writer);
        }
    }

    #[cfg(not(feature = "mysql"))]
    {
        if opts.database.is_some() || opts.table.is_some() {
            return Err(IdbError::Argument(
                "MySQL support not compiled. Rebuild with: cargo build --features mysql".to_string(),
            ));
        }
    }

120 wprintln!(writer, "Usage:")?;
122 wprintln!(writer, " idb info --ibdata -d <datadir> Read ibdata1 page 0 header")?;
123 wprintln!(writer, " idb info --lsn-check -d <datadir> Compare ibdata1 and redo log LSNs")?;
124 wprintln!(writer, " idb info -D <database> -t <table> Show table/index info (requires --features mysql)")?;
125 Ok(())
126}
127
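/// Reads the FIL header of page 0 in `ibdata1` and prints it as text or JSON,
/// along with the redo log checkpoint LSNs when they can be read.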
fn execute_ibdata(opts: &InfoOptions, datadir: &std::path::Path, writer: &mut dyn Write) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }

    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    let header = FilHeader::parse(&page0).ok_or_else(|| {
        IdbError::Parse("Cannot parse ibdata1 page 0 FIL header".to_string())
    })?;

    let (cp1_lsn, cp2_lsn) = read_redo_checkpoint_lsns(datadir);

    if opts.json {
        let info = IbdataInfoJson {
            ibdata_file: ibdata_path.display().to_string(),
            page_checksum: header.checksum,
            page_number: header.page_number,
            page_type: header.page_type.as_u16(),
            lsn: header.lsn,
            flush_lsn: header.flush_lsn,
            space_id: header.space_id,
            redo_checkpoint_1_lsn: cp1_lsn,
            redo_checkpoint_2_lsn: cp2_lsn,
        };
        let json = serde_json::to_string_pretty(&info)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }

    wprintln!(writer, "{}", "ibdata1 Page 0 Header".bold())?;
    wprintln!(writer, " File: {}", ibdata_path.display())?;
    wprintln!(writer, " Checksum: {}", header.checksum)?;
    wprintln!(writer, " Page No: {}", header.page_number)?;
    wprintln!(writer, " Page Type: {} ({})", header.page_type.as_u16(), header.page_type.name())?;
    wprintln!(writer, " LSN: {}", header.lsn)?;
    wprintln!(writer, " Flush LSN: {}", header.flush_lsn)?;
    wprintln!(writer, " Space ID: {}", header.space_id)?;
    wprintln!(writer)?;

    if let Some(lsn) = cp1_lsn {
        wprintln!(writer, "Redo Log Checkpoint 1 LSN: {}", lsn)?;
    }
    if let Some(lsn) = cp2_lsn {
        wprintln!(writer, "Redo Log Checkpoint 2 LSN: {}", lsn)?;
    }

    Ok(())
}

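/// Compares the LSN stored in page 0 of `ibdata1` against the first redo log
/// checkpoint LSN and reports whether the two match; a missing checkpoint is
/// treated as 0.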
fn execute_lsn_check(opts: &InfoOptions, datadir: &std::path::Path, writer: &mut dyn Write) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }

    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    let ibdata_lsn = BigEndian::read_u64(&page0[FIL_PAGE_LSN..]);

    let (cp1_lsn, _cp2_lsn) = read_redo_checkpoint_lsns(datadir);

    let redo_lsn = cp1_lsn.unwrap_or(0);
    let in_sync = ibdata_lsn == redo_lsn;

    if opts.json {
        let check = LsnCheckJson {
            ibdata_lsn,
            redo_checkpoint_lsn: redo_lsn,
            in_sync,
        };
        let json = serde_json::to_string_pretty(&check)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }

    wprintln!(writer, "{}", "LSN Sync Check".bold())?;
    wprintln!(writer, " ibdata1 LSN: {}", ibdata_lsn)?;
    wprintln!(writer, " Redo checkpoint LSN: {}", redo_lsn)?;

    if in_sync {
        wprintln!(writer, " Status: {}", "IN SYNC".green())?;
    } else {
        wprintln!(writer, " Status: {}", "OUT OF SYNC".red())?;
        wprintln!(
            writer,
            " Difference: {} bytes",
            ibdata_lsn.abs_diff(redo_lsn)
        )?;
    }

    Ok(())
}

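/// Reads the two checkpoint LSNs from the redo log header, trying the
/// `#innodb_redo/#ib_redo*` layout first and falling back to the legacy
/// `ib_logfile0`. Read failures surface as `None` rather than errors.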
fn read_redo_checkpoint_lsns(datadir: &std::path::Path) -> (Option<u64>, Option<u64>) {
    // Checkpoint blocks sit in the redo log file header: checkpoint 1 at
    // offset 512, checkpoint 2 at offset 1536, with the checkpoint LSN
    // stored 8 bytes into each block.
    const CP1_OFFSET: u64 = 512 + 8;
    const CP2_OFFSET: u64 = 1536 + 8;

    // MySQL 8.0.30+ keeps redo logs as #innodb_redo/#ib_redo* files.
    let redo_dir = datadir.join("#innodb_redo");
    if redo_dir.is_dir() {
        if let Ok(entries) = std::fs::read_dir(&redo_dir) {
            let mut redo_files: Vec<_> = entries
                .filter_map(|e| e.ok())
                .filter(|e| {
                    e.file_name()
                        .to_string_lossy()
                        .starts_with("#ib_redo")
                })
                .collect();
            redo_files.sort_by_key(|e| e.file_name());
            if let Some(first) = redo_files.first() {
                let path = first.path();
                let cp1 = read_u64_at(&path, CP1_OFFSET);
                let cp2 = read_u64_at(&path, CP2_OFFSET);
                return (cp1, cp2);
            }
        }
    }

    // Older installations keep the redo log in ib_logfile0.
    let logfile0 = datadir.join("ib_logfile0");
    if logfile0.exists() {
        let cp1 = read_u64_at(&logfile0, CP1_OFFSET);
        let cp2 = read_u64_at(&logfile0, CP2_OFFSET);
        return (cp1, cp2);
    }

    (None, None)
}

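/// Reads exactly `length` bytes from `path` starting at byte `offset`.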
fn read_file_bytes(
    path: &std::path::Path,
    offset: u64,
    length: usize,
) -> Result<Vec<u8>, IdbError> {
    use std::io::{Read, Seek, SeekFrom};

    let mut file = std::fs::File::open(path)
        .map_err(|e| IdbError::Io(format!("Cannot open {}: {}", path.display(), e)))?;

    file.seek(SeekFrom::Start(offset))
        .map_err(|e| IdbError::Io(format!("Cannot seek in {}: {}", path.display(), e)))?;

    let mut buf = vec![0u8; length];
    file.read_exact(&mut buf)
        .map_err(|e| IdbError::Io(format!("Cannot read from {}: {}", path.display(), e)))?;

    Ok(buf)
}

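/// Reads a big-endian `u64` at `offset` in `path`; returns `None` on any I/O error.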
fn read_u64_at(path: &std::path::Path, offset: u64) -> Option<u64> {
    let bytes = read_file_bytes(path, offset, 8).ok()?;
    Some(BigEndian::read_u64(&bytes))
}

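/// Resolves a table's space and table IDs via `information_schema.innodb_tables`
/// (falling back to the pre-8.0 `innodb_sys_tables`), lists its indexes, and
/// echoes the LSN and transaction counter lines from `SHOW ENGINE INNODB STATUS`.
/// Only compiled with `--features mysql`.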
302#[cfg(feature = "mysql")]
304fn execute_table_info(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
305 use mysql_async::prelude::*;
306
307 let database = opts.database.as_deref().ok_or_else(|| {
308 IdbError::Argument("Database name required (-D <database>)".to_string())
309 })?;
310 let table = opts.table.as_deref().ok_or_else(|| {
311 IdbError::Argument("Table name required (-t <table>)".to_string())
312 })?;
313
314 let mut config = crate::util::mysql::MysqlConfig::default();
316
317 if let Some(ref df) = opts.defaults_file {
319 if let Some(parsed) = crate::util::mysql::parse_defaults_file(std::path::Path::new(df)) {
320 config = parsed;
321 }
322 } else if let Some(df) = crate::util::mysql::find_defaults_file() {
323 if let Some(parsed) = crate::util::mysql::parse_defaults_file(&df) {
324 config = parsed;
325 }
326 }
327
328 if let Some(ref h) = opts.host {
330 config.host = h.clone();
331 }
332 if let Some(p) = opts.port {
333 config.port = p;
334 }
335 if let Some(ref u) = opts.user {
336 config.user = u.clone();
337 }
338 if opts.password.is_some() {
339 config.password = opts.password.clone();
340 }
341 config.database = Some(database.to_string());
342
343 let rt = tokio::runtime::Builder::new_current_thread()
344 .enable_all()
345 .build()
346 .map_err(|e| IdbError::Io(format!("Cannot create async runtime: {}", e)))?;
347
348 rt.block_on(async {
349 let pool = mysql_async::Pool::new(config.to_opts());
350 let mut conn = pool
351 .get_conn()
352 .await
353 .map_err(|e| IdbError::Io(format!("MySQL connection failed: {}", e)))?;
354
355 let table_query = format!(
357 "SELECT SPACE, TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}'",
358 database, table
359 );
360 let table_rows: Vec<(u64, u64)> = conn
361 .query(&table_query)
362 .await
363 .unwrap_or_default();
364
365 if table_rows.is_empty() {
366 let sys_query = format!(
368 "SELECT SPACE, TABLE_ID FROM information_schema.innodb_sys_tables WHERE NAME = '{}/{}'",
369 database, table
370 );
371 let sys_rows: Vec<(u64, u64)> = conn
372 .query(&sys_query)
373 .await
374 .unwrap_or_default();
375
376 if sys_rows.is_empty() {
377 wprintln!(writer, "Table {}.{} not found in InnoDB system tables.", database, table)?;
378 pool.disconnect().await.ok();
379 return Ok(());
380 }
381
382 print_table_info(writer, database, table, &sys_rows)?;
383 } else {
384 print_table_info(writer, database, table, &table_rows)?;
385 }
386
387 let idx_query = format!(
389 "SELECT NAME, INDEX_ID, PAGE_NO FROM information_schema.innodb_indexes \
390 WHERE TABLE_ID = (SELECT TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}')",
391 database, table
392 );
393 let idx_rows: Vec<(String, u64, u64)> = conn
394 .query(&idx_query)
395 .await
396 .unwrap_or_default();
397
398 if !idx_rows.is_empty() {
399 wprintln!(writer)?;
400 wprintln!(writer, "{}", "Indexes:".bold())?;
401 for (name, index_id, root_page) in &idx_rows {
402 wprintln!(writer, " {} (index_id={}, root_page={})", name, index_id, root_page)?;
403 }
404 }
405
406 let status_rows: Vec<(String, String, String)> = conn
408 .query("SHOW ENGINE INNODB STATUS")
409 .await
410 .unwrap_or_default();
411
412 if let Some((_type, _name, status)) = status_rows.first() {
413 wprintln!(writer)?;
414 wprintln!(writer, "{}", "InnoDB Status:".bold())?;
415 for line in status.lines() {
416 if line.starts_with("Log sequence number") || line.starts_with("Log flushed up to") {
417 wprintln!(writer, " {}", line.trim())?;
418 }
419 if line.starts_with("Trx id counter") {
420 wprintln!(writer, " {}", line.trim())?;
421 }
422 }
423 }
424
425 pool.disconnect().await.ok();
426 Ok(())
427 })
428}
429
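/// Prints the space ID / table ID pairs resolved for `database`.`table`.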
430#[cfg(feature = "mysql")]
431fn print_table_info(writer: &mut dyn Write, database: &str, table: &str, rows: &[(u64, u64)]) -> Result<(), IdbError> {
432 wprintln!(writer, "{}", format!("Table: {}.{}", database, table).bold())?;
433 for (space_id, table_id) in rows {
434 wprintln!(writer, " Space ID: {}", space_id)?;
435 wprintln!(writer, " Table ID: {}", table_id)?;
436 }
437 Ok(())
438}