1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8 validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader, MlogRecordType,
9 LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// Options for the redo-log inspection command, typically populated
/// from the CLI layer.
pub struct LogOptions {
    // Path of the redo log file to open (passed to `LogFile::open`).
    pub file: String,
    // Upper bound on how many data blocks to inspect; `None` means all
    // data blocks in the file (always capped at the actual count).
    pub blocks: Option<u64>,
    // When true, blocks whose header reports no data are skipped.
    pub no_empty: bool,
    // When true, also list the record types found in each non-empty block.
    pub verbose: bool,
    // When true, emit a pretty-printed JSON summary instead of text.
    pub json: bool,
}
26
/// Serializable summary of a whole redo log file; this is the top-level
/// document written by `execute_json`.
#[derive(Serialize)]
struct LogSummaryJson {
    // Path of the inspected file (copied from `LogOptions::file`).
    file: String,
    // File size in bytes, as reported by `LogFile::file_size`.
    file_size: u64,
    // Total block count including the header/checkpoint blocks.
    total_blocks: u64,
    // Count of data blocks (blocks after the file-header region).
    data_blocks: u64,
    // Parsed log file header (block 0).
    header: LogFileHeader,
    // Checkpoint slots; `None` when a slot was unreadable.
    checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
    checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
    // Per-block details, subject to the `blocks`/`no_empty` options.
    blocks: Vec<BlockJson>,
}
38
/// Serializable details of a single redo-log data block, mirroring the
/// fields of `LogBlockHeader` plus derived information.
#[derive(Serialize)]
struct BlockJson {
    // Absolute block index within the file (header blocks included).
    block_index: u64,
    // Block number stored in the block header.
    block_no: u32,
    // Flush flag bit from the block header.
    flush_flag: bool,
    // Number of payload bytes the header claims this block holds.
    data_len: u16,
    // Offset of the first record group within the block, per the header.
    first_rec_group: u16,
    // Checkpoint number recorded in the block header.
    checkpoint_no: u32,
    // Result of `validate_log_block_checksum` for this block.
    checksum_valid: bool,
    // Record-type names found in the block; empty unless verbose mode.
    record_types: Vec<String>,
}
50
/// Entry point for redo-log inspection: prints a human-readable summary
/// (file header, both checkpoint slots, and per-block details) to
/// `writer`, or delegates to `execute_json` when `opts.json` is set.
///
/// Errors from opening the file, reading the header, or reading a block
/// are propagated as `IdbError`; unreadable checkpoint slots are
/// reported inline rather than treated as fatal.
pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
    let mut log = LogFile::open(&opts.file)?;

    // Read the header and both checkpoint slots up front; checkpoint
    // read failures degrade to `None` instead of aborting the command.
    let header = log.read_header()?;
    let cp1 = log.read_checkpoint(0).ok();
    let cp2 = log.read_checkpoint(1).ok();

    if opts.json {
        return execute_json(opts, &mut log, header, cp1, cp2, writer);
    }

    wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
    wprintln!(writer, " File: {}", opts.file)?;
    wprintln!(writer, " Size: {} bytes", log.file_size())?;
    wprintln!(
        writer,
        " Blocks: {} total ({} data)",
        log.block_count(),
        log.data_block_count()
    )?;
    wprintln!(writer)?;

    wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
    wprintln!(writer, " Group ID: {}", header.group_id)?;
    wprintln!(writer, " Start LSN: {}", header.start_lsn)?;
    wprintln!(writer, " File No: {}", header.file_no)?;
    // The creator string is optional in the header; omit the line when absent.
    if !header.created_by.is_empty() {
        wprintln!(writer, " Created by: {}", header.created_by)?;
    }
    wprintln!(writer)?;

    print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
    print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;

    let data_blocks = log.data_block_count();
    // Honor the --blocks cap, but never exceed the actual block count.
    let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);

    if limit > 0 {
        wprintln!(writer, "{}", "Data Blocks".bold())?;
    }

    let mut displayed = 0u64;
    let mut empty_skipped = 0u64;

    for i in 0..limit {
        // Data blocks start after the file-header blocks.
        let block_idx = LOG_FILE_HDR_BLOCKS + i;
        let block_data = log.read_block(block_idx)?;

        // A block whose header cannot be parsed is silently skipped
        // (it is neither displayed nor counted as empty).
        let hdr = match LogBlockHeader::parse(&block_data) {
            Some(h) => h,
            None => continue,
        };

        if opts.no_empty && !hdr.has_data() {
            empty_skipped += 1;
            continue;
        }

        let checksum_ok = validate_log_block_checksum(&block_data);
        let checksum_str = if checksum_ok {
            "OK".green().to_string()
        } else {
            "INVALID".red().to_string()
        };

        let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };

        wprintln!(
            writer,
            " Block {:>6} no={:<10} len={:<5} first_rec={:<5} chk_no={:<10} csum={}{}",
            block_idx,
            hdr.block_no,
            hdr.data_len,
            hdr.first_rec_group,
            hdr.checkpoint_no,
            checksum_str,
            flush_str,
        )?;

        // Per-block record-type breakdown only in verbose mode, and only
        // when the block actually carries data.
        if opts.verbose && hdr.has_data() {
            print_record_types(writer, &block_data, &hdr)?;
        }

        displayed += 1;
    }

    if opts.no_empty && empty_skipped > 0 {
        wprintln!(writer, " ({} empty blocks skipped)", empty_skipped)?;
    }

    if displayed > 0 || empty_skipped > 0 {
        wprintln!(writer)?;
    }

    // NOTE(review): the "(of N)" suffix keys off `limit`, not `displayed`,
    // so with --no-empty the shown count can be below `limit` without the
    // suffix appearing — presumably intentional ("of" refers to the cap).
    wprintln!(
        writer,
        "Displayed {} data blocks{}",
        displayed,
        if limit < data_blocks {
            format!(" (of {})", data_blocks)
        } else {
            String::new()
        }
    )?;

    Ok(())
}
183
184fn print_checkpoint(
185 writer: &mut dyn Write,
186 label: &str,
187 cp: &Option<crate::innodb::log::LogCheckpoint>,
188) -> Result<(), IdbError> {
189 wprintln!(writer, "{}", label.bold())?;
190 match cp {
191 Some(cp) => {
192 wprintln!(writer, " Number: {}", cp.number)?;
193 wprintln!(writer, " LSN: {}", cp.lsn)?;
194 wprintln!(writer, " Offset: {}", cp.offset)?;
195 wprintln!(writer, " Buffer size: {}", cp.buf_size)?;
196 if cp.archived_lsn > 0 {
197 wprintln!(writer, " Archived LSN: {}", cp.archived_lsn)?;
198 }
199 }
200 None => {
201 wprintln!(writer, " {}", "(not present or unreadable)".yellow())?;
202 }
203 }
204 wprintln!(writer)?;
205 Ok(())
206}
207
208fn print_record_types(
209 writer: &mut dyn Write,
210 block_data: &[u8],
211 hdr: &LogBlockHeader,
212) -> Result<(), IdbError> {
213 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
214 if data_end <= LOG_BLOCK_HDR_SIZE {
215 return Ok(());
216 }
217
218 let mut types: Vec<MlogRecordType> = Vec::new();
219 let mut pos = LOG_BLOCK_HDR_SIZE;
220
221 while pos < data_end {
222 let type_byte = block_data[pos];
223 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
225 types.push(rec_type);
226 pos += 1;
229 }
230
231 if !types.is_empty() {
232 let mut counts: std::collections::BTreeMap<String, usize> =
234 std::collections::BTreeMap::new();
235 for t in &types {
236 *counts.entry(t.to_string()).or_insert(0) += 1;
237 }
238 let summary: Vec<String> = counts
239 .iter()
240 .map(|(name, count)| format!("{}({})", name, count))
241 .collect();
242 wprintln!(writer, " record types: {}", summary.join(", "))?;
243 }
244
245 Ok(())
246}
247
248fn execute_json(
249 opts: &LogOptions,
250 log: &mut LogFile,
251 header: LogFileHeader,
252 cp1: Option<crate::innodb::log::LogCheckpoint>,
253 cp2: Option<crate::innodb::log::LogCheckpoint>,
254 writer: &mut dyn Write,
255) -> Result<(), IdbError> {
256 let data_blocks = log.data_block_count();
257 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
258
259 let mut blocks_json = Vec::new();
260
261 for i in 0..limit {
262 let block_idx = LOG_FILE_HDR_BLOCKS + i;
263 let block_data = log.read_block(block_idx)?;
264
265 let hdr = match LogBlockHeader::parse(&block_data) {
266 Some(h) => h,
267 None => continue,
268 };
269
270 if opts.no_empty && !hdr.has_data() {
271 continue;
272 }
273
274 let checksum_ok = validate_log_block_checksum(&block_data);
275
276 let record_types = if opts.verbose && hdr.has_data() {
277 collect_record_type_names(&block_data, &hdr)
278 } else {
279 Vec::new()
280 };
281
282 blocks_json.push(BlockJson {
283 block_index: block_idx,
284 block_no: hdr.block_no,
285 flush_flag: hdr.flush_flag,
286 data_len: hdr.data_len,
287 first_rec_group: hdr.first_rec_group,
288 checkpoint_no: hdr.checkpoint_no,
289 checksum_valid: checksum_ok,
290 record_types,
291 });
292 }
293
294 let summary = LogSummaryJson {
295 file: opts.file.clone(),
296 file_size: log.file_size(),
297 total_blocks: log.block_count(),
298 data_blocks: log.data_block_count(),
299 header,
300 checkpoint_1: cp1,
301 checkpoint_2: cp2,
302 blocks: blocks_json,
303 };
304
305 let json = serde_json::to_string_pretty(&summary)
306 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
307 wprintln!(writer, "{}", json)?;
308
309 Ok(())
310}
311
312fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
313 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
314 if data_end <= LOG_BLOCK_HDR_SIZE {
315 return Vec::new();
316 }
317
318 let mut names = Vec::new();
319 let mut pos = LOG_BLOCK_HDR_SIZE;
320
321 while pos < data_end {
322 let type_byte = block_data[pos];
323 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
324 names.push(rec_type.to_string());
325 pos += 1;
326 }
327
328 names
329}