1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8 validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader, MlogRecordType,
9 LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// Options for the `log` subcommand (redo log inspection).
pub struct LogOptions {
    // Path of the redo log file to open.
    pub file: String,
    // Maximum number of data blocks to list; `None` means all of them
    // (clamped to the file's actual data-block count either way).
    pub blocks: Option<u64>,
    // Skip blocks whose header reports no data (`hdr.has_data()` false).
    pub no_empty: bool,
    // Additionally print/collect a per-block MLOG record-type summary.
    pub verbose: bool,
    // Emit machine-readable JSON instead of the formatted text report.
    pub json: bool,
}
26
/// Top-level payload serialized by `execute_json` for `--json` output.
#[derive(Serialize)]
struct LogSummaryJson {
    // Input file path exactly as supplied in `LogOptions::file`.
    file: String,
    // File size in bytes, from `LogFile::file_size()`.
    file_size: u64,
    // Total block count reported by the reader (header + data blocks).
    total_blocks: u64,
    // Count of data blocks (excludes the file-header blocks).
    data_blocks: u64,
    header: LogFileHeader,
    // Checkpoint slots are `None` when the slot was missing or unreadable.
    checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
    checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
    // Per-block details, possibly limited/filtered by the CLI options.
    blocks: Vec<BlockJson>,
}
38
/// One data block's details in the `--json` output.
#[derive(Serialize)]
struct BlockJson {
    // Absolute block index within the file (file-header blocks included).
    block_index: u64,
    // Fields below mirror the parsed `LogBlockHeader` for this block.
    block_no: u32,
    flush_flag: bool,
    data_len: u16,
    first_rec_group: u16,
    checkpoint_no: u32,
    // Result of `validate_log_block_checksum` on the raw block bytes.
    checksum_valid: bool,
    // MLOG type names (verbose mode only; empty for MariaDB logs or when
    // verbose is off).
    record_types: Vec<String>,
}
50
/// Run the `log` subcommand: open the redo log file named in `opts` and
/// write a formatted report to `writer` (or JSON via `execute_json`).
///
/// Report sections: file summary, file header (block 0), both checkpoint
/// slots, then a listing of up to `opts.blocks` data blocks. Returns any
/// I/O or parse error surfaced by the underlying `LogFile` reader.
pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let mut log = LogFile::open(&opts.file)?;

    // Header comes from block 0. Checkpoint slots are read best-effort:
    // an unreadable slot becomes `None` instead of aborting the command.
    let header = log.read_header()?;
    let cp1 = log.read_checkpoint(0).ok();
    let cp2 = log.read_checkpoint(1).ok();

    if opts.json {
        return execute_json(opts, &mut log, header, cp1, cp2, writer);
    }

    wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
    wprintln!(writer, " File: {}", opts.file)?;
    wprintln!(writer, " Size: {} bytes", log.file_size())?;
    wprintln!(
        writer,
        " Blocks: {} total ({} data)",
        log.block_count(),
        log.data_block_count()
    )?;
    wprintln!(writer)?;

    // MariaDB redo logs use a different record format, so MLOG type
    // decoding is suppressed for them further down.
    let vendor = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by);
    let is_mariadb = vendor == crate::innodb::vendor::InnoDbVendor::MariaDB;

    wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
    wprintln!(writer, " Group ID: {}", header.group_id)?;
    wprintln!(writer, " Start LSN: {}", header.start_lsn)?;
    wprintln!(writer, " File No: {}", header.file_no)?;
    if !header.created_by.is_empty() {
        wprintln!(writer, " Created by: {}", header.created_by)?;
    }
    wprintln!(writer, " Vendor: {}", vendor)?;
    if is_mariadb {
        wprintln!(
            writer,
            " {}",
            "Note: MLOG record types are not decoded for MariaDB redo logs"
                .yellow()
        )?;
    }
    wprintln!(writer)?;

    print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
    print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;

    // Clamp the user-requested block count to what the file contains.
    let data_blocks = log.data_block_count();
    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);

    if limit > 0 {
        wprintln!(writer, "{}", "Data Blocks".bold())?;
    }

    let mut displayed = 0u64;
    let mut empty_skipped = 0u64;

    for i in 0..limit {
        // Data blocks start after the fixed file-header blocks.
        let block_idx = LOG_FILE_HDR_BLOCKS + i;
        let block_data = log.read_block(block_idx)?;

        // Blocks with unparseable headers are silently skipped (they are
        // not counted as displayed or as empty-skipped).
        let hdr = match LogBlockHeader::parse(&block_data) {
            Some(h) => h,
            None => continue,
        };

        if opts.no_empty && !hdr.has_data() {
            empty_skipped += 1;
            continue;
        }

        let checksum_ok = validate_log_block_checksum(&block_data);
        let checksum_str = if checksum_ok {
            "OK".green().to_string()
        } else {
            "INVALID".red().to_string()
        };

        let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };

        wprintln!(
            writer,
            " Block {:>6} no={:<10} len={:<5} first_rec={:<5} chk_no={:<10} csum={}{}",
            block_idx,
            hdr.block_no,
            hdr.data_len,
            hdr.first_rec_group,
            hdr.checkpoint_no,
            checksum_str,
            flush_str,
        )?;

        // Record-type summary only in verbose mode, and never for
        // MariaDB (different record format — see note above).
        if opts.verbose && hdr.has_data() && !is_mariadb {
            print_record_types(writer, &block_data, &hdr)?;
        }

        displayed += 1;
    }

    if opts.no_empty && empty_skipped > 0 {
        wprintln!(writer, " ({} empty blocks skipped)", empty_skipped)?;
    }

    if displayed > 0 || empty_skipped > 0 {
        wprintln!(writer)?;
    }

    // Trailing summary; "(of N)" appears only when a --blocks limit
    // actually truncated the listing.
    wprintln!(
        writer,
        "Displayed {} data blocks{}",
        displayed,
        if limit < data_blocks {
            format!(" (of {})", data_blocks)
        } else {
            String::new()
        }
    )?;

    Ok(())
}
196
197fn print_checkpoint(
198 writer: &mut dyn Write,
199 label: &str,
200 cp: &Option<crate::innodb::log::LogCheckpoint>,
201) -> Result<(), IdbError> {
202 wprintln!(writer, "{}", label.bold())?;
203 match cp {
204 Some(cp) => {
205 wprintln!(writer, " Number: {}", cp.number)?;
206 wprintln!(writer, " LSN: {}", cp.lsn)?;
207 wprintln!(writer, " Offset: {}", cp.offset)?;
208 wprintln!(writer, " Buffer size: {}", cp.buf_size)?;
209 if cp.archived_lsn > 0 {
210 wprintln!(writer, " Archived LSN: {}", cp.archived_lsn)?;
211 }
212 }
213 None => {
214 wprintln!(writer, " {}", "(not present or unreadable)".yellow())?;
215 }
216 }
217 wprintln!(writer)?;
218 Ok(())
219}
220
221fn print_record_types(
222 writer: &mut dyn Write,
223 block_data: &[u8],
224 hdr: &LogBlockHeader,
225) -> Result<(), IdbError> {
226 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
227 if data_end <= LOG_BLOCK_HDR_SIZE {
228 return Ok(());
229 }
230
231 let mut types: Vec<MlogRecordType> = Vec::new();
232 let mut pos = LOG_BLOCK_HDR_SIZE;
233
234 while pos < data_end {
235 let type_byte = block_data[pos];
236 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
238 types.push(rec_type);
239 pos += 1;
242 }
243
244 if !types.is_empty() {
245 let mut counts: std::collections::BTreeMap<String, usize> =
247 std::collections::BTreeMap::new();
248 for t in &types {
249 *counts.entry(t.to_string()).or_insert(0) += 1;
250 }
251 let summary: Vec<String> = counts
252 .iter()
253 .map(|(name, count)| format!("{}({})", name, count))
254 .collect();
255 wprintln!(writer, " record types: {}", summary.join(", "))?;
256 }
257
258 Ok(())
259}
260
261fn execute_json(
262 opts: &LogOptions,
263 log: &mut LogFile,
264 header: LogFileHeader,
265 cp1: Option<crate::innodb::log::LogCheckpoint>,
266 cp2: Option<crate::innodb::log::LogCheckpoint>,
267 writer: &mut dyn Write,
268) -> Result<(), IdbError> {
269 let data_blocks = log.data_block_count();
270 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
271
272 let mut blocks_json = Vec::new();
273
274 for i in 0..limit {
275 let block_idx = LOG_FILE_HDR_BLOCKS + i;
276 let block_data = log.read_block(block_idx)?;
277
278 let hdr = match LogBlockHeader::parse(&block_data) {
279 Some(h) => h,
280 None => continue,
281 };
282
283 if opts.no_empty && !hdr.has_data() {
284 continue;
285 }
286
287 let checksum_ok = validate_log_block_checksum(&block_data);
288
289 let is_mariadb = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by)
290 == crate::innodb::vendor::InnoDbVendor::MariaDB;
291 let record_types = if opts.verbose && hdr.has_data() && !is_mariadb {
292 collect_record_type_names(&block_data, &hdr)
293 } else {
294 Vec::new()
295 };
296
297 blocks_json.push(BlockJson {
298 block_index: block_idx,
299 block_no: hdr.block_no,
300 flush_flag: hdr.flush_flag,
301 data_len: hdr.data_len,
302 first_rec_group: hdr.first_rec_group,
303 checkpoint_no: hdr.checkpoint_no,
304 checksum_valid: checksum_ok,
305 record_types,
306 });
307 }
308
309 let summary = LogSummaryJson {
310 file: opts.file.clone(),
311 file_size: log.file_size(),
312 total_blocks: log.block_count(),
313 data_blocks: log.data_block_count(),
314 header,
315 checkpoint_1: cp1,
316 checkpoint_2: cp2,
317 blocks: blocks_json,
318 };
319
320 let json = serde_json::to_string_pretty(&summary)
321 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
322 wprintln!(writer, "{}", json)?;
323
324 Ok(())
325}
326
327fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
328 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
329 if data_end <= LOG_BLOCK_HDR_SIZE {
330 return Vec::new();
331 }
332
333 let mut names = Vec::new();
334 let mut pos = LOG_BLOCK_HDR_SIZE;
335
336 while pos < data_end {
337 let type_byte = block_data[pos];
338 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
339 names.push(rec_type.to_string());
340 pos += 1;
341 }
342
343 names
344}