1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8 validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader, MlogRecordType,
9 LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// Options controlling the `log` subcommand.
pub struct LogOptions {
    /// Path to the redo log file to inspect.
    pub file: String,
    /// Maximum number of data blocks to display; `None` means all.
    pub blocks: Option<u64>,
    /// Skip blocks whose header reports no data (`has_data()` is false).
    pub no_empty: bool,
    /// Also print a per-block summary of MLOG record types (non-MariaDB only).
    pub verbose: bool,
    /// Emit machine-readable JSON instead of formatted text.
    pub json: bool,
}
26
/// Top-level payload serialized by `--json` mode.
#[derive(Serialize)]
struct LogSummaryJson {
    /// Path of the inspected log file.
    file: String,
    /// File size in bytes.
    file_size: u64,
    /// Total number of blocks in the file (header blocks included).
    total_blocks: u64,
    /// Number of data blocks (total minus the file-header blocks).
    data_blocks: u64,
    /// Parsed log file header (block 0).
    header: LogFileHeader,
    /// First checkpoint slot; `None` if absent or unreadable.
    checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
    /// Second checkpoint slot; `None` if absent or unreadable.
    checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
    /// Per-block details for the selected data blocks.
    blocks: Vec<BlockJson>,
}
38
/// JSON representation of a single redo log data block.
#[derive(Serialize)]
struct BlockJson {
    /// Absolute block index within the file (header blocks counted).
    block_index: u64,
    /// Block number from the block header.
    block_no: u32,
    /// Whether the header's flush flag is set.
    flush_flag: bool,
    /// Number of data bytes recorded in the block header.
    data_len: u16,
    /// Offset of the first record group, from the block header.
    first_rec_group: u16,
    /// Checkpoint number recorded in the block header.
    checkpoint_no: u32,
    /// Result of validating the block checksum.
    checksum_valid: bool,
    /// Record-type names collected in verbose mode; empty otherwise.
    record_types: Vec<String>,
}
50
/// Entry point for the `log` subcommand.
///
/// Prints a human-readable summary of an InnoDB redo log file — file
/// header, both checkpoint slots, and a line per data block — or
/// delegates to `execute_json` when `--json` was requested.
pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let mut log = LogFile::open(&opts.file)?;

    // Read the header and both checkpoint slots up front; checkpoint
    // reads are best-effort (errors become `None`).
    let header = log.read_header()?;
    let cp1 = log.read_checkpoint(0).ok();
    let cp2 = log.read_checkpoint(1).ok();

    if opts.json {
        return execute_json(opts, &mut log, header, cp1, cp2, writer);
    }

    wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
    wprintln!(writer, " File: {}", opts.file)?;
    wprintln!(writer, " Size: {} bytes", log.file_size())?;
    wprintln!(
        writer,
        " Blocks: {} total ({} data)",
        log.block_count(),
        log.data_block_count()
    )?;
    wprintln!(writer)?;

    // MariaDB redo logs use a different record format, so MLOG decoding
    // is suppressed for them below.
    let vendor = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by);
    let is_mariadb = vendor == crate::innodb::vendor::InnoDbVendor::MariaDB;

    wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
    wprintln!(writer, " Group ID: {}", header.group_id)?;
    wprintln!(writer, " Start LSN: {}", header.start_lsn)?;
    wprintln!(writer, " File No: {}", header.file_no)?;
    if !header.created_by.is_empty() {
        wprintln!(writer, " Created by: {}", header.created_by)?;
    }
    wprintln!(writer, " Vendor: {}", vendor)?;
    if is_mariadb {
        wprintln!(
            writer,
            " {}",
            "Note: MLOG record types are not decoded for MariaDB redo logs".yellow()
        )?;
    }
    wprintln!(writer)?;

    print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
    print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;

    // Clamp the user-requested block count to what the file contains.
    let data_blocks = log.data_block_count();
    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);

    if limit > 0 {
        wprintln!(writer, "{}", "Data Blocks".bold())?;
    }

    let mut displayed = 0u64;
    let mut empty_skipped = 0u64;

    for i in 0..limit {
        // Data blocks start after the file-header blocks.
        let block_idx = LOG_FILE_HDR_BLOCKS + i;
        let block_data = log.read_block(block_idx)?;

        // Unparseable blocks are silently skipped.
        let hdr = match LogBlockHeader::parse(&block_data) {
            Some(h) => h,
            None => continue,
        };

        if opts.no_empty && !hdr.has_data() {
            empty_skipped += 1;
            continue;
        }

        let checksum_ok = validate_log_block_checksum(&block_data);
        let checksum_str = if checksum_ok {
            "OK".green().to_string()
        } else {
            "INVALID".red().to_string()
        };

        let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };

        wprintln!(
            writer,
            " Block {:>6} no={:<10} len={:<5} first_rec={:<5} chk_no={:<10} csum={}{}",
            block_idx,
            hdr.block_no,
            hdr.data_len,
            hdr.first_rec_group,
            hdr.checkpoint_no,
            checksum_str,
            flush_str,
        )?;

        // Record-type decoding is MySQL-specific; skipped for MariaDB.
        if opts.verbose && hdr.has_data() && !is_mariadb {
            print_record_types(writer, &block_data, &hdr)?;
        }

        displayed += 1;
    }

    if opts.no_empty && empty_skipped > 0 {
        wprintln!(writer, " ({} empty blocks skipped)", empty_skipped)?;
    }

    if displayed > 0 || empty_skipped > 0 {
        wprintln!(writer)?;
    }

    // Note the truncation only when the user's limit cut the listing short.
    wprintln!(
        writer,
        "Displayed {} data blocks{}",
        displayed,
        if limit < data_blocks {
            format!(" (of {})", data_blocks)
        } else {
            String::new()
        }
    )?;

    Ok(())
}
195
196fn print_checkpoint(
197 writer: &mut dyn Write,
198 label: &str,
199 cp: &Option<crate::innodb::log::LogCheckpoint>,
200) -> Result<(), IdbError> {
201 wprintln!(writer, "{}", label.bold())?;
202 match cp {
203 Some(cp) => {
204 wprintln!(writer, " Number: {}", cp.number)?;
205 wprintln!(writer, " LSN: {}", cp.lsn)?;
206 wprintln!(writer, " Offset: {}", cp.offset)?;
207 wprintln!(writer, " Buffer size: {}", cp.buf_size)?;
208 if cp.archived_lsn > 0 {
209 wprintln!(writer, " Archived LSN: {}", cp.archived_lsn)?;
210 }
211 }
212 None => {
213 wprintln!(writer, " {}", "(not present or unreadable)".yellow())?;
214 }
215 }
216 wprintln!(writer)?;
217 Ok(())
218}
219
220fn print_record_types(
221 writer: &mut dyn Write,
222 block_data: &[u8],
223 hdr: &LogBlockHeader,
224) -> Result<(), IdbError> {
225 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
226 if data_end <= LOG_BLOCK_HDR_SIZE {
227 return Ok(());
228 }
229
230 let mut types: Vec<MlogRecordType> = Vec::new();
231 let mut pos = LOG_BLOCK_HDR_SIZE;
232
233 while pos < data_end {
234 let type_byte = block_data[pos];
235 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
237 types.push(rec_type);
238 pos += 1;
241 }
242
243 if !types.is_empty() {
244 let mut counts: std::collections::BTreeMap<String, usize> =
246 std::collections::BTreeMap::new();
247 for t in &types {
248 *counts.entry(t.to_string()).or_insert(0) += 1;
249 }
250 let summary: Vec<String> = counts
251 .iter()
252 .map(|(name, count)| format!("{}({})", name, count))
253 .collect();
254 wprintln!(writer, " record types: {}", summary.join(", "))?;
255 }
256
257 Ok(())
258}
259
260fn execute_json(
261 opts: &LogOptions,
262 log: &mut LogFile,
263 header: LogFileHeader,
264 cp1: Option<crate::innodb::log::LogCheckpoint>,
265 cp2: Option<crate::innodb::log::LogCheckpoint>,
266 writer: &mut dyn Write,
267) -> Result<(), IdbError> {
268 let data_blocks = log.data_block_count();
269 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
270
271 let mut blocks_json = Vec::new();
272
273 for i in 0..limit {
274 let block_idx = LOG_FILE_HDR_BLOCKS + i;
275 let block_data = log.read_block(block_idx)?;
276
277 let hdr = match LogBlockHeader::parse(&block_data) {
278 Some(h) => h,
279 None => continue,
280 };
281
282 if opts.no_empty && !hdr.has_data() {
283 continue;
284 }
285
286 let checksum_ok = validate_log_block_checksum(&block_data);
287
288 let is_mariadb = crate::innodb::vendor::detect_vendor_from_created_by(&header.created_by)
289 == crate::innodb::vendor::InnoDbVendor::MariaDB;
290 let record_types = if opts.verbose && hdr.has_data() && !is_mariadb {
291 collect_record_type_names(&block_data, &hdr)
292 } else {
293 Vec::new()
294 };
295
296 blocks_json.push(BlockJson {
297 block_index: block_idx,
298 block_no: hdr.block_no,
299 flush_flag: hdr.flush_flag,
300 data_len: hdr.data_len,
301 first_rec_group: hdr.first_rec_group,
302 checkpoint_no: hdr.checkpoint_no,
303 checksum_valid: checksum_ok,
304 record_types,
305 });
306 }
307
308 let summary = LogSummaryJson {
309 file: opts.file.clone(),
310 file_size: log.file_size(),
311 total_blocks: log.block_count(),
312 data_blocks: log.data_block_count(),
313 header,
314 checkpoint_1: cp1,
315 checkpoint_2: cp2,
316 blocks: blocks_json,
317 };
318
319 let json = serde_json::to_string_pretty(&summary)
320 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
321 wprintln!(writer, "{}", json)?;
322
323 Ok(())
324}
325
326fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
327 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
328 if data_end <= LOG_BLOCK_HDR_SIZE {
329 return Vec::new();
330 }
331
332 let mut names = Vec::new();
333 let mut pos = LOG_BLOCK_HDR_SIZE;
334
335 while pos < data_end {
336 let type_byte = block_data[pos];
337 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
338 names.push(rec_type.to_string());
339 pos += 1;
340 }
341
342 names
343}