use std::io::Write;

use byteorder::{BigEndian, ByteOrder};
use colored::Colorize;
use serde::Serialize;

use crate::cli::wprintln;
use crate::innodb::constants::*;
use crate::innodb::page::FilHeader;
use crate::IdbError;

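/// Options for the `idb info` subcommand, mirroring its CLI flags. The
/// offline modes (`--ibdata`, `--lsn-check`) read files under the data
/// directory; the table lookup connects to a running MySQL server.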
pub struct InfoOptions {
    pub ibdata: bool,
    pub lsn_check: bool,
    pub datadir: Option<String>,
    pub database: Option<String>,
    pub table: Option<String>,
    pub host: Option<String>,
    pub port: Option<u16>,
    pub user: Option<String>,
    pub password: Option<String>,
    pub defaults_file: Option<String>,
    pub json: bool,
    pub page_size: Option<u32>,
}

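/// JSON output of `--ibdata --json`: the FIL header fields of ibdata1 page 0
/// plus whichever redo checkpoint LSNs could be read.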
#[derive(Serialize)]
struct IbdataInfoJson {
    ibdata_file: String,
    page_checksum: u32,
    page_number: u32,
    page_type: u16,
    lsn: u64,
    flush_lsn: u64,
    space_id: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_1_lsn: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    redo_checkpoint_2_lsn: Option<u64>,
}

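/// JSON output of `--lsn-check --json`.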
#[derive(Serialize)]
struct LsnCheckJson {
    ibdata_lsn: u64,
    redo_checkpoint_lsn: u64,
    in_sync: bool,
}

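/// Entry point for `idb info`: dispatches to the offline ibdata/LSN checks
/// when requested, to the MySQL-backed table lookup when a database or table
/// is given, and otherwise prints usage.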
pub fn execute(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    if opts.ibdata || opts.lsn_check {
        let datadir = opts.datadir.as_deref().unwrap_or("/var/lib/mysql");
        let datadir_path = std::path::Path::new(datadir);

        if !datadir_path.is_dir() {
            return Err(IdbError::Argument(format!(
                "Data directory does not exist: {}",
                datadir
            )));
        }

        if opts.ibdata {
            return execute_ibdata(opts, datadir_path, writer);
        }
        if opts.lsn_check {
            return execute_lsn_check(opts, datadir_path, writer);
        }
    }

    #[cfg(feature = "mysql")]
    {
        if opts.database.is_some() || opts.table.is_some() {
            return execute_table_info(opts, writer);
        }
    }

    #[cfg(not(feature = "mysql"))]
    {
        if opts.database.is_some() || opts.table.is_some() {
            return Err(IdbError::Argument(
                "MySQL support not compiled. Rebuild with: cargo build --features mysql".to_string(),
            ));
        }
    }

    wprintln!(writer, "Usage:")?;
    wprintln!(writer, " idb info --ibdata -d <datadir> Read ibdata1 page 0 header")?;
    wprintln!(writer, " idb info --lsn-check -d <datadir> Compare ibdata1 and redo log LSNs")?;
    wprintln!(writer, " idb info -D <database> -t <table> Show table/index info (requires --features mysql)")?;
    Ok(())
}

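/// Read page 0 of ibdata1, parse its FIL header, and report it together with
/// the redo log checkpoint LSNs, as text or JSON.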
fn execute_ibdata(opts: &InfoOptions, datadir: &std::path::Path, writer: &mut dyn Write) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }

    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    let header = FilHeader::parse(&page0).ok_or_else(|| {
        IdbError::Parse("Cannot parse ibdata1 page 0 FIL header".to_string())
    })?;

    let (cp1_lsn, cp2_lsn) = read_redo_checkpoint_lsns(datadir);

    if opts.json {
        let info = IbdataInfoJson {
            ibdata_file: ibdata_path.display().to_string(),
            page_checksum: header.checksum,
            page_number: header.page_number,
            page_type: header.page_type.as_u16(),
            lsn: header.lsn,
            flush_lsn: header.flush_lsn,
            space_id: header.space_id,
            redo_checkpoint_1_lsn: cp1_lsn,
            redo_checkpoint_2_lsn: cp2_lsn,
        };
        let json = serde_json::to_string_pretty(&info)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }

    wprintln!(writer, "{}", "ibdata1 Page 0 Header".bold())?;
    wprintln!(writer, " File: {}", ibdata_path.display())?;
    wprintln!(writer, " Checksum: {}", header.checksum)?;
    wprintln!(writer, " Page No: {}", header.page_number)?;
    wprintln!(writer, " Page Type: {} ({})", header.page_type.as_u16(), header.page_type.name())?;
    wprintln!(writer, " LSN: {}", header.lsn)?;
    wprintln!(writer, " Flush LSN: {}", header.flush_lsn)?;
    wprintln!(writer, " Space ID: {}", header.space_id)?;
    wprintln!(writer)?;

    if let Some(lsn) = cp1_lsn {
        wprintln!(writer, "Redo Log Checkpoint 1 LSN: {}", lsn)?;
    }
    if let Some(lsn) = cp2_lsn {
        wprintln!(writer, "Redo Log Checkpoint 2 LSN: {}", lsn)?;
    }

    Ok(())
}

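/// Compare the LSN stamped in ibdata1 page 0 against the redo log checkpoint
/// LSN and report whether the two are in sync.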
fn execute_lsn_check(opts: &InfoOptions, datadir: &std::path::Path, writer: &mut dyn Write) -> Result<(), IdbError> {
    let ibdata_path = datadir.join("ibdata1");
    if !ibdata_path.exists() {
        return Err(IdbError::Io(format!(
            "ibdata1 not found in {}",
            datadir.display()
        )));
    }

    let page0 = read_file_bytes(&ibdata_path, 0, SIZE_PAGE_DEFAULT as usize)?;
    let ibdata_lsn = BigEndian::read_u64(&page0[FIL_PAGE_LSN..]);

    let (cp1_lsn, _cp2_lsn) = read_redo_checkpoint_lsns(datadir);

    let redo_lsn = cp1_lsn.unwrap_or(0);
    let in_sync = ibdata_lsn == redo_lsn;

    if opts.json {
        let check = LsnCheckJson {
            ibdata_lsn,
            redo_checkpoint_lsn: redo_lsn,
            in_sync,
        };
        let json = serde_json::to_string_pretty(&check)
            .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
        wprintln!(writer, "{}", json)?;
        return Ok(());
    }

    wprintln!(writer, "{}", "LSN Sync Check".bold())?;
    wprintln!(writer, " ibdata1 LSN: {}", ibdata_lsn)?;
    wprintln!(writer, " Redo checkpoint LSN: {}", redo_lsn)?;

    if in_sync {
        wprintln!(writer, " Status: {}", "IN SYNC".green())?;
    } else {
        wprintln!(writer, " Status: {}", "OUT OF SYNC".red())?;
        wprintln!(
            writer,
            " Difference: {} bytes",
            ibdata_lsn.abs_diff(redo_lsn)
        )?;
    }

    Ok(())
}

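/// Best-effort read of the two redo log checkpoint LSNs. A slot that cannot
/// be read comes back as `None`; I/O errors are swallowed so callers can
/// still report partial information.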
fn read_redo_checkpoint_lsns(datadir: &std::path::Path) -> (Option<u64>, Option<u64>) {
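    // The redo log file header keeps two checkpoint blocks at byte offsets
    // 512 and 1536; the checkpoint LSN is stored 8 bytes into each block
    // (assumed InnoDB log header layout, matching the constants below).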
    const CP1_OFFSET: u64 = 512 + 8;
    const CP2_OFFSET: u64 = 1536 + 8;

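    // MySQL 8.0.30+ keeps the redo log as `#ib_redo*` files under `#innodb_redo`.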
    let redo_dir = datadir.join("#innodb_redo");
    if redo_dir.is_dir() {
        if let Ok(entries) = std::fs::read_dir(&redo_dir) {
            let mut redo_files: Vec<_> = entries
                .filter_map(|e| e.ok())
                .filter(|e| {
                    e.file_name()
                        .to_string_lossy()
                        .starts_with("#ib_redo")
                })
                .collect();
            redo_files.sort_by_key(|e| e.file_name());
            if let Some(first) = redo_files.first() {
                let path = first.path();
                let cp1 = read_u64_at(&path, CP1_OFFSET);
                let cp2 = read_u64_at(&path, CP2_OFFSET);
                return (cp1, cp2);
            }
        }
    }

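    // Older layouts (MySQL 5.7 and 8.0 before 8.0.30) use ib_logfile0 in the data directory.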
    let logfile0 = datadir.join("ib_logfile0");
    if logfile0.exists() {
        let cp1 = read_u64_at(&logfile0, CP1_OFFSET);
        let cp2 = read_u64_at(&logfile0, CP2_OFFSET);
        return (cp1, cp2);
    }

    (None, None)
}

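/// Read `length` bytes from `path` starting at byte `offset`.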
fn read_file_bytes(
    path: &std::path::Path,
    offset: u64,
    length: usize,
) -> Result<Vec<u8>, IdbError> {
    use std::io::{Read, Seek, SeekFrom};

    let mut file = std::fs::File::open(path)
        .map_err(|e| IdbError::Io(format!("Cannot open {}: {}", path.display(), e)))?;

    file.seek(SeekFrom::Start(offset))
        .map_err(|e| IdbError::Io(format!("Cannot seek in {}: {}", path.display(), e)))?;

    let mut buf = vec![0u8; length];
    file.read_exact(&mut buf)
        .map_err(|e| IdbError::Io(format!("Cannot read from {}: {}", path.display(), e)))?;

    Ok(buf)
}

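/// Read a big-endian u64 at `offset` in `path`, or `None` on any I/O error.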
fn read_u64_at(path: &std::path::Path, offset: u64) -> Option<u64> {
    let bytes = read_file_bytes(path, offset, 8).ok()?;
    Some(BigEndian::read_u64(&bytes))
}

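/// Query a live MySQL server for the table's tablespace and table ID, list its
/// indexes, and surface a few LSN/transaction lines from
/// `SHOW ENGINE INNODB STATUS`.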
#[cfg(feature = "mysql")]
fn execute_table_info(opts: &InfoOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    use mysql_async::prelude::*;

    let database = opts.database.as_deref().ok_or_else(|| {
        IdbError::Argument("Database name required (-D <database>)".to_string())
    })?;
    let table = opts.table.as_deref().ok_or_else(|| {
        IdbError::Argument("Table name required (-t <table>)".to_string())
    })?;

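    // Connection settings: start from a defaults file (an explicit
    // --defaults-file, or a discovered one such as ~/.my.cnf), then let
    // explicit CLI flags override individual values.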
    let mut config = crate::util::mysql::MysqlConfig::default();

    if let Some(ref df) = opts.defaults_file {
        if let Some(parsed) = crate::util::mysql::parse_defaults_file(std::path::Path::new(df)) {
            config = parsed;
        }
    } else if let Some(df) = crate::util::mysql::find_defaults_file() {
        if let Some(parsed) = crate::util::mysql::parse_defaults_file(&df) {
            config = parsed;
        }
    }

    if let Some(ref h) = opts.host {
        config.host = h.clone();
    }
    if let Some(p) = opts.port {
        config.port = p;
    }
    if let Some(ref u) = opts.user {
        config.user = u.clone();
    }
    if opts.password.is_some() {
        config.password = opts.password.clone();
    }
    config.database = Some(database.to_string());

    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .map_err(|e| IdbError::Io(format!("Cannot create async runtime: {}", e)))?;

    rt.block_on(async {
        let pool = mysql_async::Pool::new(config.to_opts());
        let mut conn = pool
            .get_conn()
            .await
            .map_err(|e| IdbError::Io(format!("MySQL connection failed: {}", e)))?;

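        // MySQL 8.0 exposes InnoDB table metadata via information_schema.innodb_tables.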
        let table_query = format!(
            "SELECT SPACE, TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}'",
            database, table
        );
        let table_rows: Vec<(u64, u64)> = conn
            .query(&table_query)
            .await
            .unwrap_or_default();

        if table_rows.is_empty() {
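            // Fall back to the 5.7-era information_schema.innodb_sys_tables view.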
            let sys_query = format!(
                "SELECT SPACE, TABLE_ID FROM information_schema.innodb_sys_tables WHERE NAME = '{}/{}'",
                database, table
            );
            let sys_rows: Vec<(u64, u64)> = conn
                .query(&sys_query)
                .await
                .unwrap_or_default();

            if sys_rows.is_empty() {
                wprintln!(writer, "Table {}.{} not found in InnoDB system tables.", database, table)?;
                pool.disconnect().await.ok();
                return Ok(());
            }

            print_table_info(writer, database, table, &sys_rows)?;
        } else {
            print_table_info(writer, database, table, &table_rows)?;
        }

        let idx_query = format!(
            "SELECT NAME, INDEX_ID, PAGE_NO FROM information_schema.innodb_indexes \
             WHERE TABLE_ID = (SELECT TABLE_ID FROM information_schema.innodb_tables WHERE NAME = '{}/{}')",
            database, table
        );
        let idx_rows: Vec<(String, u64, u64)> = conn
            .query(&idx_query)
            .await
            .unwrap_or_default();

        if !idx_rows.is_empty() {
            wprintln!(writer)?;
            wprintln!(writer, "{}", "Indexes:".bold())?;
            for (name, index_id, root_page) in &idx_rows {
                wprintln!(writer, " {} (index_id={}, root_page={})", name, index_id, root_page)?;
            }
        }

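        // Pull the LSN and transaction-counter lines out of SHOW ENGINE INNODB STATUS.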
        let status_rows: Vec<(String, String, String)> = conn
            .query("SHOW ENGINE INNODB STATUS")
            .await
            .unwrap_or_default();

        if let Some((_type, _name, status)) = status_rows.first() {
            wprintln!(writer)?;
            wprintln!(writer, "{}", "InnoDB Status:".bold())?;
            for line in status.lines() {
                if line.starts_with("Log sequence number") || line.starts_with("Log flushed up to") {
                    wprintln!(writer, " {}", line.trim())?;
                }
                if line.starts_with("Trx id counter") {
                    wprintln!(writer, " {}", line.trim())?;
                }
            }
        }

        pool.disconnect().await.ok();
        Ok(())
    })
}

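/// Print the tablespace and table IDs returned for a table.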
#[cfg(feature = "mysql")]
fn print_table_info(writer: &mut dyn Write, database: &str, table: &str, rows: &[(u64, u64)]) -> Result<(), IdbError> {
    wprintln!(writer, "{}", format!("Table: {}.{}", database, table).bold())?;
    for (space_id, table_id) in rows {
        wprintln!(writer, " Space ID: {}", space_id)?;
        wprintln!(writer, " Table ID: {}", table_id)?;
    }
    Ok(())
}