//! `idb/cli/log.rs` — implementation of the `inno log` CLI subcommand.
1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8    validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader,
9    MlogRecordType, LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// Options for the `inno log` subcommand.
///
/// Populated by the CLI argument parser and consumed by [`execute`].
pub struct LogOptions {
    /// Path to the redo log file (`ib_logfile0`, `ib_logfile1`, or `#ib_redo*`).
    pub file: String,
    /// Limit output to the first N data blocks. `None` means all data blocks.
    pub blocks: Option<u64>,
    /// Skip blocks that contain no redo log data.
    pub no_empty: bool,
    /// Show MLOG record types within each data block.
    pub verbose: bool,
    /// Emit output as JSON instead of human-readable text.
    pub json: bool,
}
26
/// Top-level JSON document emitted when `--json` is given: file metadata,
/// decoded header, both checkpoint slots, and the per-block details.
#[derive(Serialize)]
struct LogSummaryJson {
    /// Path of the analyzed file, as given on the command line.
    file: String,
    /// Total file size in bytes.
    file_size: u64,
    /// Total number of 512-byte blocks in the file.
    total_blocks: u64,
    /// Number of data blocks (total minus the reserved header blocks).
    data_blocks: u64,
    /// Decoded log file header (block 0).
    header: LogFileHeader,
    /// Checkpoint record from block 1, if present/readable.
    checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
    /// Checkpoint record from block 3, if present/readable.
    checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
    /// Per-data-block details (subject to `--blocks` / `--no-empty` filtering).
    blocks: Vec<BlockJson>,
}
38
/// JSON representation of a single data block's header fields plus the
/// checksum validation result and (with `--verbose`) scanned record types.
#[derive(Serialize)]
struct BlockJson {
    /// Absolute block index within the file (includes the reserved header blocks).
    block_index: u64,
    /// Block number stored in the block header.
    block_no: u32,
    /// Whether the flush flag bit is set on this block.
    flush_flag: bool,
    /// Number of bytes of log data in this block.
    data_len: u16,
    /// Offset of the first record group that starts in this block.
    first_rec_group: u16,
    /// Checkpoint number recorded in the block header.
    checkpoint_no: u32,
    /// Whether the block's CRC-32C checksum validated.
    checksum_valid: bool,
    /// MLOG record type names found in the payload (empty unless `--verbose`).
    record_types: Vec<String>,
}
50
51/// Analyze the structure of an InnoDB redo log file.
52///
53/// InnoDB redo logs are organized as a sequence of 512-byte blocks. The first
54/// four blocks are reserved: block 0 is the **log file header** (group ID,
55/// start LSN, file number, creator string), blocks 1 and 3 are **checkpoint
56/// records** (checkpoint number, LSN, offset, buffer size, archived LSN), and
57/// block 2 is reserved/unused. All remaining blocks are **data blocks**
58/// containing the actual redo log records.
59///
60/// This command reads and displays all three sections. For data blocks, each
61/// block's header is decoded to show the block number, data length,
62/// first-record-group offset, checkpoint number, flush flag, and CRC-32C
63/// checksum validation status.
64///
65/// With `--verbose`, the payload bytes of each non-empty data block are
66/// scanned for MLOG record type bytes (e.g., `MLOG_REC_INSERT`,
67/// `MLOG_UNDO_INSERT`, `MLOG_WRITE_STRING`) and a frequency summary is
68/// printed. Use `--blocks N` to limit output to the first N data blocks,
69/// or `--no-empty` to skip blocks with zero data length.
70pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
71    let mut log = LogFile::open(&opts.file)?;
72
73    let header = log.read_header()?;
74    let cp1 = log.read_checkpoint(0).ok();
75    let cp2 = log.read_checkpoint(1).ok();
76
77    if opts.json {
78        return execute_json(opts, &mut log, header, cp1, cp2, writer);
79    }
80
81    // Print file info
82    wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
83    wprintln!(writer, "  File:       {}", opts.file)?;
84    wprintln!(writer, "  Size:       {} bytes", log.file_size())?;
85    wprintln!(writer, "  Blocks:     {} total ({} data)", log.block_count(), log.data_block_count())?;
86    wprintln!(writer)?;
87
88    // Print header
89    wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
90    wprintln!(writer, "  Group ID:   {}", header.group_id)?;
91    wprintln!(writer, "  Start LSN:  {}", header.start_lsn)?;
92    wprintln!(writer, "  File No:    {}", header.file_no)?;
93    if !header.created_by.is_empty() {
94        wprintln!(writer, "  Created by: {}", header.created_by)?;
95    }
96    wprintln!(writer)?;
97
98    // Print checkpoints
99    print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
100    print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;
101
102    // Iterate data blocks
103    let data_blocks = log.data_block_count();
104    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
105
106    if limit > 0 {
107        wprintln!(writer, "{}", "Data Blocks".bold())?;
108    }
109
110    let mut displayed = 0u64;
111    let mut empty_skipped = 0u64;
112
113    for i in 0..limit {
114        let block_idx = LOG_FILE_HDR_BLOCKS + i;
115        let block_data = log.read_block(block_idx)?;
116
117        let hdr = match LogBlockHeader::parse(&block_data) {
118            Some(h) => h,
119            None => continue,
120        };
121
122        // Skip empty blocks if --no-empty
123        if opts.no_empty && !hdr.has_data() {
124            empty_skipped += 1;
125            continue;
126        }
127
128        let checksum_ok = validate_log_block_checksum(&block_data);
129        let checksum_str = if checksum_ok {
130            "OK".green().to_string()
131        } else {
132            "INVALID".red().to_string()
133        };
134
135        let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };
136
137        wprintln!(
138            writer,
139            "  Block {:>6}  no={:<10} len={:<5} first_rec={:<5} chk_no={:<10} csum={}{}",
140            block_idx, hdr.block_no, hdr.data_len, hdr.first_rec_group, hdr.checkpoint_no,
141            checksum_str, flush_str,
142        )?;
143
144        // Verbose: show MLOG record types
145        if opts.verbose && hdr.has_data() {
146            print_record_types(writer, &block_data, &hdr)?;
147        }
148
149        displayed += 1;
150    }
151
152    if opts.no_empty && empty_skipped > 0 {
153        wprintln!(writer, "  ({} empty blocks skipped)", empty_skipped)?;
154    }
155
156    if displayed > 0 || empty_skipped > 0 {
157        wprintln!(writer)?;
158    }
159
160    wprintln!(
161        writer,
162        "Displayed {} data blocks{}",
163        displayed,
164        if limit < data_blocks {
165            format!(" (of {})", data_blocks)
166        } else {
167            String::new()
168        }
169    )?;
170
171    Ok(())
172}
173
174fn print_checkpoint(writer: &mut dyn Write, label: &str, cp: &Option<crate::innodb::log::LogCheckpoint>) -> Result<(), IdbError> {
175    wprintln!(writer, "{}", label.bold())?;
176    match cp {
177        Some(cp) => {
178            wprintln!(writer, "  Number:       {}", cp.number)?;
179            wprintln!(writer, "  LSN:          {}", cp.lsn)?;
180            wprintln!(writer, "  Offset:       {}", cp.offset)?;
181            wprintln!(writer, "  Buffer size:  {}", cp.buf_size)?;
182            if cp.archived_lsn > 0 {
183                wprintln!(writer, "  Archived LSN: {}", cp.archived_lsn)?;
184            }
185        }
186        None => {
187            wprintln!(writer, "  {}", "(not present or unreadable)".yellow())?;
188        }
189    }
190    wprintln!(writer)?;
191    Ok(())
192}
193
194fn print_record_types(writer: &mut dyn Write, block_data: &[u8], hdr: &LogBlockHeader) -> Result<(), IdbError> {
195    let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
196    if data_end <= LOG_BLOCK_HDR_SIZE {
197        return Ok(());
198    }
199
200    let mut types: Vec<MlogRecordType> = Vec::new();
201    let mut pos = LOG_BLOCK_HDR_SIZE;
202
203    while pos < data_end {
204        let type_byte = block_data[pos];
205        // The single-record flag is bit 7 of the type byte
206        let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
207        types.push(rec_type);
208        // We can't fully decode record lengths without schema info,
209        // so just scan byte-by-byte for type bytes
210        pos += 1;
211    }
212
213    if !types.is_empty() {
214        // Count occurrences
215        let mut counts: std::collections::BTreeMap<String, usize> = std::collections::BTreeMap::new();
216        for t in &types {
217            *counts.entry(t.to_string()).or_insert(0) += 1;
218        }
219        let summary: Vec<String> = counts
220            .iter()
221            .map(|(name, count)| format!("{}({})", name, count))
222            .collect();
223        wprintln!(writer, "    record types: {}", summary.join(", "))?;
224    }
225
226    Ok(())
227}
228
229fn execute_json(
230    opts: &LogOptions,
231    log: &mut LogFile,
232    header: LogFileHeader,
233    cp1: Option<crate::innodb::log::LogCheckpoint>,
234    cp2: Option<crate::innodb::log::LogCheckpoint>,
235    writer: &mut dyn Write,
236) -> Result<(), IdbError> {
237    let data_blocks = log.data_block_count();
238    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
239
240    let mut blocks_json = Vec::new();
241
242    for i in 0..limit {
243        let block_idx = LOG_FILE_HDR_BLOCKS + i;
244        let block_data = log.read_block(block_idx)?;
245
246        let hdr = match LogBlockHeader::parse(&block_data) {
247            Some(h) => h,
248            None => continue,
249        };
250
251        if opts.no_empty && !hdr.has_data() {
252            continue;
253        }
254
255        let checksum_ok = validate_log_block_checksum(&block_data);
256
257        let record_types = if opts.verbose && hdr.has_data() {
258            collect_record_type_names(&block_data, &hdr)
259        } else {
260            Vec::new()
261        };
262
263        blocks_json.push(BlockJson {
264            block_index: block_idx,
265            block_no: hdr.block_no,
266            flush_flag: hdr.flush_flag,
267            data_len: hdr.data_len,
268            first_rec_group: hdr.first_rec_group,
269            checkpoint_no: hdr.checkpoint_no,
270            checksum_valid: checksum_ok,
271            record_types,
272        });
273    }
274
275    let summary = LogSummaryJson {
276        file: opts.file.clone(),
277        file_size: log.file_size(),
278        total_blocks: log.block_count(),
279        data_blocks: log.data_block_count(),
280        header,
281        checkpoint_1: cp1,
282        checkpoint_2: cp2,
283        blocks: blocks_json,
284    };
285
286    let json = serde_json::to_string_pretty(&summary)
287        .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
288    wprintln!(writer, "{}", json)?;
289
290    Ok(())
291}
292
293fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
294    let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
295    if data_end <= LOG_BLOCK_HDR_SIZE {
296        return Vec::new();
297    }
298
299    let mut names = Vec::new();
300    let mut pos = LOG_BLOCK_HDR_SIZE;
301
302    while pos < data_end {
303        let type_byte = block_data[pos];
304        let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
305        names.push(rec_type.to_string());
306        pos += 1;
307    }
308
309    names
310}