1use std::io::Write;
2
3use colored::Colorize;
4use serde::Serialize;
5
6use crate::cli::wprintln;
7use crate::innodb::log::{
8 validate_log_block_checksum, LogBlockHeader, LogFile, LogFileHeader,
9 MlogRecordType, LOG_BLOCK_HDR_SIZE, LOG_BLOCK_SIZE, LOG_FILE_HDR_BLOCKS,
10};
11use crate::IdbError;
12
/// Options for the redo-log inspection subcommand.
pub struct LogOptions {
 /// Path of the redo log file to open.
 pub file: String,
 /// Maximum number of data blocks to display; `None` means all of them.
 pub blocks: Option<u64>,
 /// When set, skip blocks whose header reports no data.
 pub no_empty: bool,
 /// When set, also print a per-block summary of record types.
 pub verbose: bool,
 /// When set, emit the whole report as pretty-printed JSON instead of text.
 pub json: bool,
}
20
/// Top-level JSON payload produced by the `--json` output mode.
#[derive(Serialize)]
struct LogSummaryJson {
 /// Path of the inspected file (as given on the command line).
 file: String,
 /// File size in bytes.
 file_size: u64,
 /// Total number of 512-byte-style log blocks in the file (header blocks included).
 total_blocks: u64,
 /// Number of data blocks (total minus the file-header blocks).
 data_blocks: u64,
 /// Parsed log file header (block 0).
 header: LogFileHeader,
 /// First checkpoint slot, `None` if absent or unreadable.
 checkpoint_1: Option<crate::innodb::log::LogCheckpoint>,
 /// Second checkpoint slot, `None` if absent or unreadable.
 checkpoint_2: Option<crate::innodb::log::LogCheckpoint>,
 /// Per-block details, in file order (may be filtered by `--no-empty`).
 blocks: Vec<BlockJson>,
}
32
/// JSON representation of one data block's header fields.
#[derive(Serialize)]
struct BlockJson {
 /// Absolute block index within the file (header blocks counted).
 block_index: u64,
 /// Block number from the block header.
 block_no: u32,
 /// Flush flag bit from the block header.
 flush_flag: bool,
 /// Number of bytes of data in the block.
 data_len: u16,
 /// Offset of the first record group in the block (0 if none starts here).
 first_rec_group: u16,
 /// Checkpoint number recorded in the block header.
 checkpoint_no: u32,
 /// Whether the block checksum validated.
 checksum_valid: bool,
 /// Record-type names seen in the block; populated only in verbose mode.
 record_types: Vec<String>,
}
44
/// Render a human-readable report of an InnoDB redo log file: overall file
/// info, the log file header, both checkpoint slots, and a listing of data
/// blocks (optionally limited, filtered, and annotated with record types).
///
/// Dispatches to `execute_json` when `--json` was requested. Propagates any
/// I/O/parse error from the underlying `LogFile` reads or from `writer`.
pub fn execute(opts: &LogOptions, writer: &mut dyn Write) -> Result<(), IdbError> {
 let mut log = LogFile::open(&opts.file)?;

 // Read the header eagerly; checkpoint read failures are demoted to `None`
 // and later reported as "(not present or unreadable)".
 let header = log.read_header()?;
 let cp1 = log.read_checkpoint(0).ok();
 let cp2 = log.read_checkpoint(1).ok();

 if opts.json {
 return execute_json(opts, &mut log, header, cp1, cp2, writer);
 }

 wprintln!(writer, "{}", "InnoDB Redo Log File".bold())?;
 wprintln!(writer, " File: {}", opts.file)?;
 wprintln!(writer, " Size: {} bytes", log.file_size())?;
 wprintln!(writer, " Blocks: {} total ({} data)", log.block_count(), log.data_block_count())?;
 wprintln!(writer)?;

 wprintln!(writer, "{}", "Log File Header (block 0)".bold())?;
 wprintln!(writer, " Group ID: {}", header.group_id)?;
 wprintln!(writer, " Start LSN: {}", header.start_lsn)?;
 wprintln!(writer, " File No: {}", header.file_no)?;
 if !header.created_by.is_empty() {
 wprintln!(writer, " Created by: {}", header.created_by)?;
 }
 wprintln!(writer)?;

 print_checkpoint(writer, "Checkpoint 1 (block 1)", &cp1)?;
 print_checkpoint(writer, "Checkpoint 2 (block 3)", &cp2)?;

 // Cap the requested block count at what the file actually contains.
 let data_blocks = log.data_block_count();
 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);

 if limit > 0 {
 wprintln!(writer, "{}", "Data Blocks".bold())?;
 }

 let mut displayed = 0u64;
 let mut empty_skipped = 0u64;

 for i in 0..limit {
 // Data blocks start after the fixed file-header blocks.
 let block_idx = LOG_FILE_HDR_BLOCKS + i;
 let block_data = log.read_block(block_idx)?;

 // Unparseable block headers are silently skipped (not counted as
 // displayed or as empty).
 let hdr = match LogBlockHeader::parse(&block_data) {
 Some(h) => h,
 None => continue,
 };

 if opts.no_empty && !hdr.has_data() {
 empty_skipped += 1;
 continue;
 }

 let checksum_ok = validate_log_block_checksum(&block_data);
 let checksum_str = if checksum_ok {
 "OK".green().to_string()
 } else {
 "INVALID".red().to_string()
 };

 let flush_str = if hdr.flush_flag { " FLUSH" } else { "" };

 wprintln!(
 writer,
 " Block {:>6} no={:<10} len={:<5} first_rec={:<5} chk_no={:<10} csum={}{}",
 block_idx, hdr.block_no, hdr.data_len, hdr.first_rec_group, hdr.checkpoint_no,
 checksum_str, flush_str,
 )?;

 // Verbose mode adds a record-type summary line under each data block.
 if opts.verbose && hdr.has_data() {
 print_record_types(writer, &block_data, &hdr)?;
 }

 displayed += 1;
 }

 if opts.no_empty && empty_skipped > 0 {
 wprintln!(writer, " ({} empty blocks skipped)", empty_skipped)?;
 }

 if displayed > 0 || empty_skipped > 0 {
 wprintln!(writer)?;
 }

 // Final tally; "(of N)" appears only when the display was limited.
 wprintln!(
 writer,
 "Displayed {} data blocks{}",
 displayed,
 if limit < data_blocks {
 format!(" (of {})", data_blocks)
 } else {
 String::new()
 }
 )?;

 Ok(())
}
148
149fn print_checkpoint(writer: &mut dyn Write, label: &str, cp: &Option<crate::innodb::log::LogCheckpoint>) -> Result<(), IdbError> {
150 wprintln!(writer, "{}", label.bold())?;
151 match cp {
152 Some(cp) => {
153 wprintln!(writer, " Number: {}", cp.number)?;
154 wprintln!(writer, " LSN: {}", cp.lsn)?;
155 wprintln!(writer, " Offset: {}", cp.offset)?;
156 wprintln!(writer, " Buffer size: {}", cp.buf_size)?;
157 if cp.archived_lsn > 0 {
158 wprintln!(writer, " Archived LSN: {}", cp.archived_lsn)?;
159 }
160 }
161 None => {
162 wprintln!(writer, " {}", "(not present or unreadable)".yellow())?;
163 }
164 }
165 wprintln!(writer)?;
166 Ok(())
167}
168
169fn print_record_types(writer: &mut dyn Write, block_data: &[u8], hdr: &LogBlockHeader) -> Result<(), IdbError> {
170 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
171 if data_end <= LOG_BLOCK_HDR_SIZE {
172 return Ok(());
173 }
174
175 let mut types: Vec<MlogRecordType> = Vec::new();
176 let mut pos = LOG_BLOCK_HDR_SIZE;
177
178 while pos < data_end {
179 let type_byte = block_data[pos];
180 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
182 types.push(rec_type);
183 pos += 1;
186 }
187
188 if !types.is_empty() {
189 let mut counts: std::collections::BTreeMap<String, usize> = std::collections::BTreeMap::new();
191 for t in &types {
192 *counts.entry(t.to_string()).or_insert(0) += 1;
193 }
194 let summary: Vec<String> = counts
195 .iter()
196 .map(|(name, count)| format!("{}({})", name, count))
197 .collect();
198 wprintln!(writer, " record types: {}", summary.join(", "))?;
199 }
200
201 Ok(())
202}
203
204fn execute_json(
205 opts: &LogOptions,
206 log: &mut LogFile,
207 header: LogFileHeader,
208 cp1: Option<crate::innodb::log::LogCheckpoint>,
209 cp2: Option<crate::innodb::log::LogCheckpoint>,
210 writer: &mut dyn Write,
211) -> Result<(), IdbError> {
212 let data_blocks = log.data_block_count();
213 let limit = opts.blocks.unwrap_or(data_blocks).min(data_blocks);
214
215 let mut blocks_json = Vec::new();
216
217 for i in 0..limit {
218 let block_idx = LOG_FILE_HDR_BLOCKS + i;
219 let block_data = log.read_block(block_idx)?;
220
221 let hdr = match LogBlockHeader::parse(&block_data) {
222 Some(h) => h,
223 None => continue,
224 };
225
226 if opts.no_empty && !hdr.has_data() {
227 continue;
228 }
229
230 let checksum_ok = validate_log_block_checksum(&block_data);
231
232 let record_types = if opts.verbose && hdr.has_data() {
233 collect_record_type_names(&block_data, &hdr)
234 } else {
235 Vec::new()
236 };
237
238 blocks_json.push(BlockJson {
239 block_index: block_idx,
240 block_no: hdr.block_no,
241 flush_flag: hdr.flush_flag,
242 data_len: hdr.data_len,
243 first_rec_group: hdr.first_rec_group,
244 checkpoint_no: hdr.checkpoint_no,
245 checksum_valid: checksum_ok,
246 record_types,
247 });
248 }
249
250 let summary = LogSummaryJson {
251 file: opts.file.clone(),
252 file_size: log.file_size(),
253 total_blocks: log.block_count(),
254 data_blocks: log.data_block_count(),
255 header,
256 checkpoint_1: cp1,
257 checkpoint_2: cp2,
258 blocks: blocks_json,
259 };
260
261 let json = serde_json::to_string_pretty(&summary)
262 .map_err(|e| IdbError::Parse(format!("JSON serialization error: {}", e)))?;
263 wprintln!(writer, "{}", json)?;
264
265 Ok(())
266}
267
268fn collect_record_type_names(block_data: &[u8], hdr: &LogBlockHeader) -> Vec<String> {
269 let data_end = (hdr.data_len as usize).min(LOG_BLOCK_SIZE - 4);
270 if data_end <= LOG_BLOCK_HDR_SIZE {
271 return Vec::new();
272 }
273
274 let mut names = Vec::new();
275 let mut pos = LOG_BLOCK_HDR_SIZE;
276
277 while pos < data_end {
278 let type_byte = block_data[pos];
279 let rec_type = MlogRecordType::from_u8(type_byte & 0x7F);
280 names.push(rec_type.to_string());
281 pos += 1;
282 }
283
284 names
285}