1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8 validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader, MlogRecordType,
9 LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// User-supplied options for the redo-log inspection command.
pub struct LogOptions {
    /// Path to the redo log file to open.
    pub file: String,
    /// Maximum number of data blocks to display; `None` means all of them.
    pub blocks: Option<u64>,
    /// Skip blocks whose header reports no data.
    pub no_empty: bool,
    /// Also decode and summarize MLOG record types per block
    /// (ignored for MariaDB-created logs).
    pub verbose: bool,
    /// Emit a machine-readable JSON summary instead of formatted text.
    pub json: bool,
}
26
/// Top-level document emitted by the `--json` output path: file metadata,
/// the parsed header, both checkpoint slots, and per-block details.
#[derive(Serialize)]
struct LogSummaryJson {
    /// Path of the inspected log file.
    file: String,
    /// Total file size in bytes.
    file_size: u64,
    /// Total number of blocks in the file (file-header blocks included).
    total_blocks: u64,
    /// Number of data blocks (after the file-header blocks).
    data_blocks: u64,
    /// Parsed log file header (block 0).
    header: LogFileHeader,
    /// Checkpoint slot 0, `None` if unreadable.
    checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
    /// Checkpoint slot 1, `None` if unreadable.
    checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
    /// Per-block summaries, subject to the `--blocks` / `--no-empty` options.
    blocks: Vec<BlockJson>,
}
38
/// JSON summary of a single redo-log data block.
#[derive(Serialize)]
struct BlockJson {
    /// Absolute block index within the file (file-header blocks included).
    block_index: u64,
    /// Block number from the block header.
    block_no: u32,
    /// Whether the block header's flush flag is set.
    flush_flag: bool,
    /// Payload length in bytes, from the block header.
    data_len: u16,
    /// First-record-group offset, from the block header.
    first_rec_group: u16,
    /// Epoch number from the block header.
    epoch_no: u32,
    /// Whether the block's checksum validated successfully.
    checksum_valid: bool,
    /// Decoded MLOG record-type names; empty unless `--verbose` is set
    /// and the log was not created by MariaDB.
    record_types: Vec<String>,
}
50
/// Entry point for the redo-log inspection command (text output).
///
/// Opens the log file, prints the file header, both checkpoint slots, and a
/// one-line summary per data block, honoring `--blocks`, `--no-empty` and
/// `--verbose`. Hands off to `execute_json` when `--json` was requested.
///
/// Returns `Err(IdbError)` if the file cannot be opened/read or the writer
/// fails; individual unparseable block headers are skipped silently.
pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let mut log = LogFile::open(&opts.file)?;

    let header = log.read_header()?;
    // Checkpoint slots are read best-effort: an unreadable slot becomes
    // `None` and is reported as "(not present or unreadable)" later on.
    let cp1 = log.read_checkpoint(0).ok();
    let cp2 = log.read_checkpoint(1).ok();

    if opts.json {
        return execute_json(opts, &mut log, header, cp1, cp2, writer);
    }

    wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
    wprintln!(writer, " File: {}", opts.file)?;
    wprintln!(writer, " Size: {} bytes", log.file_size())?;
    wprintln!(
        writer,
        " Blocks: {} total ({} data)",
        log.block_count(),
        log.data_block_count()
    )?;
    wprintln!(writer)?;

    // MLOG record decoding below is only done for non-MariaDB logs, so
    // detect the vendor once from the header's "created by" string.
    let vendor = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by);
    let is_mariadb = vendor == crate::innodb::vendor::InnoDbVendor::MariaDB;

    wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
    wprintln!(writer, " Format: {}", header.format_version)?;
    wprintln!(writer, " Start LSN: {}", header.start_lsn)?;
    // Zero-valued / empty header fields are omitted from the listing.
    if header.log_uuid != 0 {
        wprintln!(writer, " Log UUID: 0x{:08X}", header.log_uuid)?;
    }
    if !header.created_by.is_empty() {
        wprintln!(writer, " Created by: {}", header.created_by)?;
    }
    wprintln!(writer, " Vendor: {}", vendor)?;
    if is_mariadb {
        wprintln!(
            writer,
            " {}",
            "Note: MLOG record types are not decoded for MariaDB redo logs".yellow()
        )?;
    }
    wprintln!(writer)?;

    print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
    print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;

    let data_blocks = log.data_block_count();
    // `--blocks` caps how many data blocks are shown, but never more than
    // the file actually contains.
    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);

    if limit > 0 {
        wprintln!(writer, "{}", "Data Blocks".bold())?;
    }

    let mut displayed = 0u64;
    let mut empty_skipped = 0u64;

    for i in 0..limit {
        // Data blocks start right after the fixed file-header blocks.
        let block_idx = LOG_FILE_HDR_BLOCKS + i;
        let block_data = log.read_block(block_idx)?;

        // Skip blocks whose header cannot be parsed at all.
        let hdr = match LogBlockHeader::parse(&block_data) {
            Some(h) => h,
            None => continue,
        };

        if opts.no_empty && !hdr.has_data() {
            empty_skipped += 1;
            continue;
        }

        let checksum_ok = validate_log_block_checksum(&block_data);
        let checksum_str = if checksum_ok {
            "OK".green().to_string()
        } else {
            "INVALID".red().to_string()
        };

        let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };

        wprintln!(
            writer,
            " Block {:>6} no={:<10} len={:<5} first_rec={:<5} epoch={:<10} csum={}{}",
            block_idx,
            hdr.block_no,
            hdr.data_len,
            hdr.first_rec_group,
            hdr.epoch_no,
            checksum_str,
            flush_str,
        )?;

        // Per-block record-type histogram (verbose mode, non-MariaDB only).
        if opts.verbose && hdr.has_data() && !is_mariadb {
            print_record_types(writer, &block_data, &hdr)?;
        }

        displayed += 1;
    }

    if opts.no_empty && empty_skipped > 0 {
        wprintln!(writer, " ({} empty blocks skipped)", empty_skipped)?;
    }

    if displayed > 0 || empty_skipped > 0 {
        wprintln!(writer)?;
    }

    wprintln!(
        writer,
        "Displayed {} data blocks{}",
        displayed,
        if limit < data_blocks {
            format!(" (of {})", data_blocks)
        } else {
            String::new()
        }
    )?;

    Ok(())
}
196
197fn print_checkpoint(
198 writer: &mut dyn Write,
199 label: &str,
200 cp: &Option<crate::innodb::log::LogCheckpoint>,
201) -> Result<(), IdbError> {
202 wprintln!(writer, "{}", label.bold())?;
203 match cp {
204 Some(cp) => {
205 if cp.number > 0 {
206 wprintln!(writer, " Number: {}", cp.number)?;
207 }
208 wprintln!(writer, " LSN: {}", cp.lsn)?;
209 if cp.offset > 0 {
210 wprintln!(writer, " Offset: {}", cp.offset)?;
211 }
212 if cp.buf_size > 0 {
213 wprintln!(writer, " Buffer size: {}", cp.buf_size)?;
214 }
215 if cp.archived_lsn > 0 {
216 wprintln!(writer, " Archived LSN: {}", cp.archived_lsn)?;
217 }
218 }
219 None => {
220 wprintln!(writer, " {}", "(not present or unreadable)".yellow())?;
221 }
222 }
223 wprintln!(writer)?;
224 Ok(())
225}
226
227fn print_record_types(
228 writer: &mut dyn Write,
229 block_data: &[u8],
230 hdr: &LogBlockHeader,
231) -> Result<(), IdbError> {
232 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
233 if data_end <= LOG_BLOCK_HDR_SIZE {
234 return Ok(());
235 }
236
237 let mut types: Vec<MlogRecordType> = Vec::new();
238 let mut pos = LOG_BLOCK_HDR_SIZE;
239
240 while pos < data_end {
241 let type_byte = block_data[pos];
242 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
244 types.push(rec_type);
245 pos += 1;
248 }
249
250 if !types.is_empty() {
251 let mut counts: std::collections::BTreeMap<String, usize> =
253 std::collections::BTreeMap::new();
254 for t in &types {
255 *counts.entry(t.to_string()).or_insert(0) += 1;
256 }
257 let summary: Vec<String> = counts
258 .iter()
259 .map(|(name, count)| format!("{}({})", name, count))
260 .collect();
261 wprintln!(writer, " record types: {}", summary.join(", "))?;
262 }
263
264 Ok(())
265}
266
267fn execute_json(
268 opts: &LogOptions,
269 log: &mut LogFile,
270 header: LogFileHeader,
271 cp1: Option<crate::innodb::log::LogCheckpoint>,
272 cp2: Option<crate::innodb::log::LogCheckpoint>,
273 writer: &mut dyn Write,
274) -> Result<(), IdbError> {
275 let data_blocks = log.data_block_count();
276 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
277
278 let mut blocks_json = Vec::new();
279
280 for i in 0..limit {
281 let block_idx = LOG_FILE_HDR_BLOCKS + i;
282 let block_data = log.read_block(block_idx)?;
283
284 let hdr = match LogBlockHeader::parse(&block_data) {
285 Some(h) => h,
286 None => continue,
287 };
288
289 if opts.no_empty && !hdr.has_data() {
290 continue;
291 }
292
293 let checksum_ok = validate_log_block_checksum(&block_data);
294
295 let is_mariadb = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by)
296 == crate::innodb::vendor::InnoDbVendor::MariaDB;
297 let record_types = if opts.verbose && hdr.has_data() && !is_mariadb {
298 collect_record_type_names(&block_data, &hdr)
299 } else {
300 Vec::new()
301 };
302
303 blocks_json.push(BlockJson {
304 block_index: block_idx,
305 block_no: hdr.block_no,
306 flush_flag: hdr.flush_flag,
307 data_len: hdr.data_len,
308 first_rec_group: hdr.first_rec_group,
309 epoch_no: hdr.epoch_no,
310 checksum_valid: checksum_ok,
311 record_types,
312 });
313 }
314
315 let summary = LogSummaryJson {
316 file: opts.file.clone(),
317 file_size: log.file_size(),
318 total_blocks: log.block_count(),
319 data_blocks: log.data_block_count(),
320 header,
321 checkpoint_1: cp1,
322 checkpoint_2: cp2,
323 blocks: blocks_json,
324 };
325
326 let json = serde_json::to_string_pretty(&summary)
327 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
328 wprintln!(writer, "{}", json)?;
329
330 Ok(())
331}
332
333fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
334 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
335 if data_end <= LOG_BLOCK_HDR_SIZE {
336 return Vec::new();
337 }
338
339 let mut names = Vec::new();
340 let mut pos = LOG_BLOCK_HDR_SIZE;
341
342 while pos < data_end {
343 let type_byte = block_data[pos];
344 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
345 names.push(rec_type.to_string());
346 pos += 1;
347 }
348
349 names
350}