1use std::collections::BTreeMap;
2use std::io::Write;
3use std::path::Path;
4
5use byteorder::{BigEndian, ByteOrder};
6use colored::Colorize;
7use serde::Serialize;
8
9use crate::cli::wprintln;
10use crate::innodb::constants::*;
11use crate::innodb::page::FilHeader;
12use crate::util::fs::find_tablespace_files;
13use crate::IdbError;
14
/// Options for the `info` subcommand, mapped directly from CLI flags.
pub struct InfoOptions {
    /// `--ibdata`: read and print the ibdata1 page 0 header.
    pub ibdata: bool,
    /// `--lsn-check`: compare the ibdata1 LSN against the redo checkpoint LSN.
    pub lsn_check: bool,
    /// `-d <datadir>`: MySQL data directory. Falls back to /var/lib/mysql
    /// for --ibdata/--lsn-check; required for --tablespace-map.
    pub datadir: Option<String>,
    /// `-D <database>`: database name for live-server table lookup.
    pub database: Option<String>,
    /// `-t <table>`: table name for live-server table lookup.
    pub table: Option<String>,
    /// MySQL host override (only used with the `mysql` feature).
    pub host: Option<String>,
    /// MySQL port override.
    pub port: Option<u16>,
    /// MySQL user override.
    pub user: Option<String>,
    /// MySQL password override.
    pub password: Option<String>,
    /// Explicit defaults file (my.cnf style) for connection settings.
    pub defaults_file: Option<String>,
    /// `--tablespace-map`: map .ibd files under the datadir to space IDs.
    pub tablespace_map: bool,
    /// Emit JSON instead of human-readable text where supported.
    pub json: bool,
    /// Page size override forwarded to `open_tablespace`; autodetected when
    /// `None` — presumably, confirm against `open_tablespace`.
    pub page_size: Option<u32>,
    /// Forwarded to `open_tablespace`; NOTE(review): looks like it selects
    /// mmap-backed file access — confirm against the tablespace opener.
    pub mmap: bool,
}
46
/// JSON shape for `inno info --ibdata --json`.
///
/// NOTE: serde serializes fields in declaration order; do not reorder.
#[derive(Serialize)]
struct IbdataInfoJson {
    /// Path of the ibdata1 file that was read.
    ibdata_file: String,
    /// Checksum field from the page 0 FIL header.
    page_checksum: u32,
    /// Page number field from the FIL header (expected 0 here).
    page_number: u32,
    /// Raw FIL page type code.
    page_type: u16,
    /// Page LSN from the FIL header.
    lsn: u64,
    /// Flush LSN from the FIL header.
    flush_lsn: u64,
    /// Tablespace ID from the FIL header.
    space_id: u32,
    /// Checkpoint LSN from redo slot 1, omitted when unreadable.
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_1_lsn: Option<u64>,
    /// Checkpoint LSN from redo slot 2, omitted when unreadable.
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_2_lsn: Option<u64>,
}
61
/// JSON shape for `inno info --lsn-check --json`.
#[derive(Serialize)]
struct LsnCheckJson {
    /// LSN read from ibdata1 page 0.
    ibdata_lsn: u64,
    /// Redo log checkpoint LSN it was compared against.
    redo_checkpoint_lsn: u64,
    /// True when the two LSNs are exactly equal.
    in_sync: bool,
}
68
/// JSON shape for `inno info --tablespace-map --json`.
#[derive(Serialize)]
struct TablespaceMapJson {
    /// Data directory that was scanned.
    datadir: String,
    /// One entry per successfully mapped .ibd file.
    tablespaces: Vec<TablespaceMapEntryJson>,
}
74
/// One file-to-space-ID mapping in the tablespace map output.
#[derive(Serialize)]
struct TablespaceMapEntryJson {
    /// Path relative to the datadir (absolute if not under it).
    file: String,
    /// Tablespace (space) ID of the file.
    space_id: u32,
}
80
81pub fn execute(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
107 if opts.tablespace_map {
108 return execute_tablespace_map(opts, writer);
109 }
110
111 if opts.ibdata || opts.lsn_check {
112 let datadir = opts.datadir.as_deref().unwrap_or("/var/lib/mysql");
113 let datadir_path = std::path::Path::new(datadir);
114
115 if !datadir_path.is_dir() {
116 return Err(IdbError::Argument(format!(
117 "Data directory does not exist: {}",
118 datadir
119 )));
120 }
121
122 if opts.ibdata {
123 return execute_ibdata(opts, datadir_path, writer);
124 }
125 if opts.lsn_check {
126 return execute_lsn_check(opts, datadir_path, writer);
127 }
128 }
129
130 #[cfg(feature = "mysql")]
131 {
132 if opts.database.is_some() || opts.table.is_some() {
133 return execute_table_info(opts, writer);
134 }
135 }
136
137 #[cfg(not(feature = "mysql"))]
138 {
139 if opts.database.is_some() || opts.table.is_some() {
140 return Err(IdbError::Argument(
141 "MySQL support not compiled. Rebuild with: cargo build --features mysql"
142 .to_string(),
143 ));
144 }
145 }
146
147 wprintln!(writer, "Usage:")?;
149 wprintln!(
150 writer,
151 " inno info --ibdata -d <datadir> Read ibdata1 page 0 header"
152 )?;
153 wprintln!(
154 writer,
155 " inno info --lsn-check -d <datadir> Compare ibdata1 and redo log LSNs"
156 )?;
157 wprintln!(
158 writer,
159 " inno info --tablespace-map -d <datadir> Map .ibd files to tablespace IDs"
160 )?;
161 wprintln!(writer, " inno info -D <database> -t <table> Show table/index info (requires --features mysql)")?;
162 Ok(())
163}
164
/// Scan the data directory for `.ibd` files and print each file with its
/// tablespace (space) ID, either as an aligned text table or as JSON.
///
/// Files that cannot be opened or whose space ID cannot be determined are
/// silently skipped (best-effort scan).
fn execute_tablespace_map(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    // Unlike the offline modes in `execute`, there is no default datadir
    // here: the flag must be given explicitly.
    let datadir_str = opts.datadir.as_deref().ok_or_else(|| {
        IdbError::Argument("--tablespace-map requires a data directory (-d <datadir>)".to_string())
    })?;
    let datadir = Path::new(datadir_str);
    if !datadir.is_dir() {
        return Err(IdbError::Argument(format!(
            "Data directory does not exist: {}",
            datadir_str
        )));
    }

    let ibd_files = find_tablespace_files(datadir, &["ibd"], None)?;

    if ibd_files.is_empty() {
        if opts.json {
            // Emit a valid, empty JSON document so consumers can still parse it.
            let result = TablespaceMapJson {
                datadir: datadir_str.to_string(),
                tablespaces: Vec::new(),
            };
            let json = serde_json::to_string_pretty(&result)
                .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
            wprintln!(writer, "{}", json)?;
        } else {
            wprintln!(writer, "No .ibd files found in {}", datadir_str)?;
        }
        return Ok(());
    }

    // BTreeMap keeps the output sorted by (relative) file path.
    let mut results: BTreeMap<String, u32> = BTreeMap::new();

    for ibd_path in &ibd_files {
        let path_str = ibd_path.to_string_lossy();
        // Unreadable/corrupt files are skipped rather than failing the scan.
        let mut ts = match crate::cli::open_tablespace(&path_str, opts.page_size, opts.mmap) {
            Ok(t) => t,
            Err(_) => continue,
        };

        let space_id = match ts.fsp_header() {
            Some(fsp) => fsp.space_id,
            None => {
                // Fallback: read page 0 raw and take the 4 bytes at
                // FIL_PAGE_DATA — presumably the FSP header's space_id
                // field; confirm against the constants module.
                match ts.read_page(0) {
                    Ok(page0) => {
                        if page0.len() >= FIL_PAGE_DATA + 4 {
                            BigEndian::read_u32(&page0[FIL_PAGE_DATA..])
                        } else {
                            continue;
                        }
                    }
                    Err(_) => continue,
                }
            }
        };

        // Display paths relative to the datadir when possible.
        let display_path = ibd_path
            .strip_prefix(datadir)
            .unwrap_or(ibd_path)
            .to_string_lossy()
            .to_string();

        results.insert(display_path, space_id);
    }

    if opts.json {
        let tablespaces: Vec<TablespaceMapEntryJson> = results
            .iter()
            .map(|(path, &space_id)| TablespaceMapEntryJson {
                file: path.clone(),
                space_id,
            })
            .collect();

        let result = TablespaceMapJson {
            datadir: datadir_str.to_string(),
            tablespaces,
        };

        let json = serde_json::to_string_pretty(&result)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
    } else {
        // Pad the path column to the widest path (or the header, if wider).
        let max_path_len = results.keys().map(|p| p.len()).max().unwrap_or(4);
        let header_path = "FILE";
        let header_id = "SPACE_ID";
        let path_width = max_path_len.max(header_path.len());

        wprintln!(
            writer,
            "{:<width$} {}",
            header_path,
            header_id,
            width = path_width
        )?;
        wprintln!(
            writer,
            "{:<width$} {}",
            "-".repeat(path_width),
            "-".repeat(header_id.len()),
            width = path_width
        )?;

        for (path, space_id) in &results {
            wprintln!(writer, "{:<width$} {}", path, space_id, width = path_width)?;
        }

        wprintln!(writer)?;
        wprintln!(writer, "Total: {} tablespace(s)", results.len())?;
    }

    Ok(())
}
279
/// Read the FIL header of ibdata1 page 0 and print it, along with the redo
/// checkpoint LSNs when a redo log file is present and readable.
///
/// Returns an I/O error when ibdata1 is missing and a parse error when its
/// page 0 header cannot be decoded.
fn execute_ibdata(
    opts: &InfoOptions,
    datadir: &std::path::Path,
    writer: &mut dyn Write,
) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }

    // Only page 0 is needed; read the default page size worth of bytes.
    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    let header = FilHeader::parse(&page0)
        .ok_or_else(|| IdbError::Parse("Cannot parse ibdata1 page 0 FIL header".to_string()))?;

    // Best-effort: either slot may be None if the redo files are unreadable.
    let (cp1_lsn, cp2_lsn) = read_redo_checkpoint_lsns(datadir);

    if opts.json {
        let info = IbdataInfoJson {
            ibdata_file: ibdata_path.display().to_string(),
            page_checksum: header.checksum,
            page_number: header.page_number,
            page_type: header.page_type.as_u16(),
            lsn: header.lsn,
            flush_lsn: header.flush_lsn,
            space_id: header.space_id,
            redo_checkpoint_1_lsn: cp1_lsn,
            redo_checkpoint_2_lsn: cp2_lsn,
        };
        let json = serde_json::to_string_pretty(&info)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }

    wprintln!(writer, "{}", "ibdata1 Page 0 Header".bold())?;
    wprintln!(writer, " File: {}", ibdata_path.display())?;
    wprintln!(writer, " Checksum: {}", header.checksum)?;
    wprintln!(writer, " Page No: {}", header.page_number)?;
    wprintln!(
        writer,
        " Page Type: {} ({})",
        header.page_type.as_u16(),
        header.page_type.name()
    )?;
    wprintln!(writer, " LSN: {}", header.lsn)?;
    wprintln!(writer, " Flush LSN: {}", header.flush_lsn)?;
    wprintln!(writer, " Space ID: {}", header.space_id)?;
    wprintln!(writer)?;

    // Print whichever checkpoint slots were readable.
    if let Some(lsn) = cp1_lsn {
        wprintln!(writer, "Redo Log Checkpoint 1 LSN: {}", lsn)?;
    }
    if let Some(lsn) = cp2_lsn {
        wprintln!(writer, "Redo Log Checkpoint 2 LSN: {}", lsn)?;
    }

    Ok(())
}
343
344fn execute_lsn_check(
345 opts: &InfoOptions,
346 datadir: &std::path::Path,
347 writer: &mut dyn Write,
348) -> Result<(), IdbError> {
349 let ibdata_path = datadir.join("ibdata1");
350 if !ibdata_path.exists() {
351 return Err(IdbError::Io(format!(
352 "ibdata1 not found in {}",
353 datadir.display()
354 )));
355 }
356
357 let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
359 let ibdata_lsn = BigEndian::read_u64(&page0[FIL_PAGE_LSN..]);
360
361 let (cp1_lsn, _cp2_lsn) = read_redo_checkpoint_lsns(datadir);
363
364 let redo_lsn = cp1_lsn.unwrap_or(0);
365 let in_sync = ibdata_lsn == redo_lsn;
366
367 if opts.json {
368 let check = LsnCheckJson {
369 ibdata_lsn,
370 redo_checkpoint_lsn: redo_lsn,
371 in_sync,
372 };
373 let json = serde_json::to_string_pretty(&check)
374 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
375 wprintln!(writer, "{}", json)?;
376 return Ok(());
377 }
378
379 wprintln!(writer, "{}", "LSN Sync Check".bold())?;
380 wprintln!(writer, " ibdata1 LSN: {}", ibdata_lsn)?;
381 wprintln!(writer, " Redo checkpoint LSN: {}", redo_lsn)?;
382
383 if in_sync {
384 wprintln!(writer, " Status: {}", "IN SYNC".green())?;
385 } else {
386 wprintln!(writer, " Status: {}", "OUT OF SYNC".red())?;
387 wprintln!(
388 writer,
389 " Difference: {} bytes",
390 ibdata_lsn.abs_diff(redo_lsn)
391 )?;
392 }
393
394 Ok(())
395}
396
397fn read_redo_checkpoint_lsns(datadir: &std::path::Path) -> (Option<u64>, Option<u64>) {
402 const CP1_OFFSET: u64 = 512 + 8;
405 const CP2_OFFSET: u64 = 1536 + 8;
406
407 let redo_dir = datadir.join("#innodb_redo");
409 if redo_dir.is_dir() {
410 if let Ok(entries) = std::fs::read_dir(&redo_dir) {
412 let mut redo_files: Vec<_> = entries
413 .filter_map(|e| e.ok())
414 .filter(|e| e.file_name().to_string_lossy().starts_with("#ib_redo"))
415 .collect();
416 redo_files.sort_by_key(|e| e.file_name());
417 if let Some(first) = redo_files.first() {
418 let path = first.path();
419 let cp1 = read_u64_at(&path, CP1_OFFSET);
420 let cp2 = read_u64_at(&path, CP2_OFFSET);
421 return (cp1, cp2);
422 }
423 }
424 }
425
426 let logfile0 = datadir.join("ib_logfile0");
428 if logfile0.exists() {
429 let cp1 = read_u64_at(&logfile0, CP1_OFFSET);
430 let cp2 = read_u64_at(&logfile0, CP2_OFFSET);
431 return (cp1, cp2);
432 }
433
434 (None, None)
435}
436
437fn read_file_bytes(
438 path: &std::path::Path,
439 offset: u64,
440 length: usize,
441) -> Result<Vec<u8>, IdbError> {
442 use std::io::{Read, Seek, SeekFrom};
443
444 let mut file = std::fs::File::open(path)
445 .map_err(|e| IdbError::Io(format!("Cannot open {}: {}", path.display(), e)))?;
446
447 file.seek(SeekFrom::Start(offset))
448 .map_err(|e| IdbError::Io(format!("Cannot seek in {}: {}", path.display(), e)))?;
449
450 let mut buf = vec![0u8; length];
451 file.read_exact(&mut buf)
452 .map_err(|e| IdbError::Io(format!("Cannot read from {}: {}", path.display(), e)))?;
453
454 Ok(buf)
455}
456
457fn read_u64_at(path: &std::path::Path, offset: u64) -> Option<u64> {
458 let bytes = read_file_bytes(path, offset, 8).ok()?;
459 Some(BigEndian::read_u64(&bytes))
460}
461
/// Show table/index metadata for `-D <database> -t <table>` by querying a
/// live MySQL server's information_schema.
///
/// Connection settings come from a defaults file (explicit --defaults-file,
/// else an auto-discovered one), then per-flag CLI overrides on top.
/// Queries use placeholder parameters instead of string interpolation so
/// CLI-supplied database/table names cannot inject SQL.
#[cfg(feature = "mysql")]
fn execute_table_info(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    use mysql_async::prelude::*;

    let database = opts
        .database
        .as_deref()
        .ok_or_else(|| IdbError::Argument("Database name required (-D <database>)".to_string()))?;
    let table = opts
        .table
        .as_deref()
        .ok_or_else(|| IdbError::Argument("Table name required (-t <table>)".to_string()))?;

    // Start from defaults-file settings, if any can be parsed.
    let mut config = crate::util::mysql::MysqlConfig::default();

    if let Some(ref df) = opts.defaults_file {
        if let Some(parsed) = crate::util::mysql::parse_defaults_file(std::path::Path::new(df)) {
            config = parsed;
        }
    } else if let Some(df) = crate::util::mysql::find_defaults_file() {
        if let Some(parsed) = crate::util::mysql::parse_defaults_file(&df) {
            config = parsed;
        }
    }

    // Explicit CLI flags override the defaults file.
    if let Some(ref h) = opts.host {
        config.host = h.clone();
    }
    if let Some(p) = opts.port {
        config.port = p;
    }
    if let Some(ref u) = opts.user {
        config.user = u.clone();
    }
    if opts.password.is_some() {
        config.password = opts.password.clone();
    }
    config.database = Some(database.to_string());

    // Single-threaded runtime: this is a one-shot, sequential CLI query.
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .map_err(|e| IdbError::Io(format!("Cannot create async runtime: {}", e)))?;

    rt.block_on(async {
        let pool = mysql_async::Pool::new(config.to_opts());
        let mut conn = pool
            .get_conn()
            .await
            .map_err(|e| IdbError::Io(format!("MySQL connection failed: {}", e)))?;

        // InnoDB qualifies table names as "<database>/<table>".
        let qualified = format!("{}/{}", database, table);

        // MySQL 8.0 schema first; errors are treated as "no rows".
        let table_rows: Vec<(u64, u64)> = conn
            .exec(
                "SELECT SPACE, TABLE_ID FROM information_schema.innodb_tables WHERE NAME = ?",
                (qualified.as_str(),),
            )
            .await
            .unwrap_or_default();

        if table_rows.is_empty() {
            // Pre-8.0 servers expose the same data via innodb_sys_tables.
            let sys_rows: Vec<(u64, u64)> = conn
                .exec(
                    "SELECT SPACE, TABLE_ID FROM information_schema.innodb_sys_tables WHERE NAME = ?",
                    (qualified.as_str(),),
                )
                .await
                .unwrap_or_default();

            if sys_rows.is_empty() {
                wprintln!(writer, "Table {}.{} not found in InnoDB system tables.", database, table)?;
                pool.disconnect().await.ok();
                return Ok(());
            }

            print_table_info(writer, database, table, &sys_rows)?;
        } else {
            print_table_info(writer, database, table, &table_rows)?;
        }

        // Index list (8.0 schema; silently empty on older servers).
        let idx_rows: Vec<(String, u64, u64)> = conn
            .exec(
                "SELECT NAME, INDEX_ID, PAGE_NO FROM information_schema.innodb_indexes \
                 WHERE TABLE_ID = (SELECT TABLE_ID FROM information_schema.innodb_tables WHERE NAME = ?)",
                (qualified.as_str(),),
            )
            .await
            .unwrap_or_default();

        if !idx_rows.is_empty() {
            wprintln!(writer)?;
            wprintln!(writer, "{}", "Indexes:".bold())?;
            for (name, index_id, root_page) in &idx_rows {
                wprintln!(writer, " {} (index_id={}, root_page={})", name, index_id, root_page)?;
            }
        }

        // Surface LSN and transaction counters from the engine status dump.
        let status_rows: Vec<(String, String, String)> = conn
            .query("SHOW ENGINE INNODB STATUS")
            .await
            .unwrap_or_default();

        if let Some((_type, _name, status)) = status_rows.first() {
            wprintln!(writer)?;
            wprintln!(writer, "{}", "InnoDB Status:".bold())?;
            for line in status.lines() {
                // The three prefixes are mutually exclusive, so a single
                // combined check prints each matching line exactly once.
                if line.starts_with("Log sequence number")
                    || line.starts_with("Log flushed up to")
                    || line.starts_with("Trx id counter")
                {
                    wprintln!(writer, " {}", line.trim())?;
                }
            }
        }

        pool.disconnect().await.ok();
        Ok(())
    })
}
591
/// Print the bold table heading followed by space/table IDs for each row.
#[cfg(feature = "mysql")]
fn print_table_info(
    writer: &mut dyn Write,
    database: &str,
    table: &str,
    rows: &[(u64, u64)],
) -> Result<(), IdbError> {
    let title = format!("Table: {}.{}", database, table);
    wprintln!(writer, "{}", title.bold())?;
    for &(space_id, table_id) in rows {
        wprintln!(writer, " Space ID: {}", space_id)?;
        wprintln!(writer, " Table ID: {}", table_id)?;
    }
    Ok(())
}