use crate::{
metadata::tables::{
enclog::EncLogRaw,
types::{RowWritable, TableInfoRef},
},
utils::write_le_at,
Result,
};
impl RowWritable for EncLogRaw {
    /// Serializes one `EncLog` row into `data` at `*offset`, advancing the
    /// offset past the bytes written.
    ///
    /// Both columns — the token value and the function code — are written as
    /// fixed-width 4-byte little-endian values, so neither the row id nor the
    /// table-size information is consulted.
    fn row_write(
        &self,
        data: &mut [u8],
        offset: &mut usize,
        _rid: u32,
        _sizes: &TableInfoRef,
    ) -> Result<()> {
        // Column order is significant: token first, then function code.
        for column in [self.token_value, self.func_code] {
            write_le_at(data, offset, column)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        metadata::tables::types::{RowReadable, TableInfo, TableRow},
        metadata::token::Token,
    };

    /// Builds the minimal table layout these tests need: a single `EncLog`
    /// table with 100 rows and all heap-index sizes small. Hoisted into a
    /// helper so each test (and each loop iteration) does not rebuild it.
    fn test_table_info() -> TableInfoRef {
        std::sync::Arc::new(TableInfo::new_test(
            &[(crate::metadata::tables::TableId::EncLog, 100)],
            false,
            false,
            false,
        ))
    }

    /// Writing a row and reading it back must reproduce both columns and
    /// advance both offsets by exactly one row size.
    #[test]
    fn test_round_trip_serialization() {
        let original_row = EncLogRaw {
            rid: 1,
            token: Token::new(0x1E00_0001),
            offset: 0,
            token_value: 0x0602_0001,
            func_code: 0,
        };
        let table_info = test_table_info();
        let row_size = <EncLogRaw as TableRow>::row_size(&table_info) as usize;
        let mut buffer = vec![0u8; row_size];
        let mut offset = 0;
        original_row
            .row_write(&mut buffer, &mut offset, 1, &table_info)
            .expect("Serialization should succeed");
        let mut read_offset = 0;
        let deserialized_row = EncLogRaw::row_read(&buffer, &mut read_offset, 1, &table_info)
            .expect("Deserialization should succeed");
        assert_eq!(original_row.token_value, deserialized_row.token_value);
        assert_eq!(original_row.func_code, deserialized_row.func_code);
        assert_eq!(offset, row_size, "Offset should match expected row size");
        assert_eq!(
            read_offset, row_size,
            "Read offset should match expected row size"
        );
    }

    /// Pins the on-disk byte layout: two 4-byte little-endian columns
    /// (token first, then function code) for a fixed 8-byte row.
    #[test]
    fn test_known_binary_format() {
        let enclog_entry = EncLogRaw {
            rid: 1,
            token: Token::new(0x1E00_0001),
            offset: 0,
            token_value: 0x0602_0001,
            func_code: 0,
        };
        let table_info = test_table_info();
        let row_size = <EncLogRaw as TableRow>::row_size(&table_info) as usize;
        let mut buffer = vec![0u8; row_size];
        let mut offset = 0;
        enclog_entry
            .row_write(&mut buffer, &mut offset, 1, &table_info)
            .expect("Serialization should succeed");
        assert_eq!(row_size, 8, "Row size should be 8 bytes");
        // token_value 0x0602_0001, little-endian.
        assert_eq!(buffer[0], 0x01);
        assert_eq!(buffer[1], 0x00);
        assert_eq!(buffer[2], 0x02);
        assert_eq!(buffer[3], 0x06);
        // func_code 0, little-endian.
        assert_eq!(buffer[4], 0x00);
        assert_eq!(buffer[5], 0x00);
        assert_eq!(buffer[6], 0x00);
        assert_eq!(buffer[7], 0x00);
    }

    /// Each edit-and-continue operation code must survive a round trip.
    #[test]
    fn test_different_operation_codes() {
        let test_cases = vec![("Create", 0), ("Update", 1), ("Delete", 2)];
        // Invariant across iterations — build once, outside the loop.
        let table_info = test_table_info();
        let row_size = <EncLogRaw as TableRow>::row_size(&table_info) as usize;
        for (operation_name, func_code) in test_cases {
            let enclog_entry = EncLogRaw {
                rid: 1,
                token: Token::new(0x1E00_0001),
                offset: 0,
                token_value: 0x0200_0005,
                func_code,
            };
            let mut buffer = vec![0u8; row_size];
            let mut offset = 0;
            enclog_entry
                .row_write(&mut buffer, &mut offset, 1, &table_info)
                .unwrap_or_else(|_| panic!("Serialization should succeed for {operation_name}"));
            let mut read_offset = 0;
            let deserialized_row = EncLogRaw::row_read(&buffer, &mut read_offset, 1, &table_info)
                .unwrap_or_else(|_| panic!("Deserialization should succeed for {operation_name}"));
            assert_eq!(enclog_entry.token_value, deserialized_row.token_value);
            assert_eq!(
                enclog_entry.func_code, deserialized_row.func_code,
                "Function code mismatch for {operation_name}"
            );
        }
    }

    /// Token values from several metadata table kinds must round-trip intact.
    #[test]
    fn test_various_token_types() {
        let test_cases = vec![
            ("TypeDef", 0x0200_0001),
            ("MethodDef", 0x0600_0010),
            ("Field", 0x0400_0025),
            ("Property", 0x1700_0003),
            ("Event", 0x1400_0007),
        ];
        // Invariant across iterations — build once, outside the loop.
        let table_info = test_table_info();
        let row_size = <EncLogRaw as TableRow>::row_size(&table_info) as usize;
        for (token_type, token_value) in test_cases {
            let enclog_entry = EncLogRaw {
                rid: 1,
                token: Token::new(0x1E00_0001),
                offset: 0,
                token_value,
                func_code: 1,
            };
            let mut buffer = vec![0u8; row_size];
            let mut offset = 0;
            enclog_entry
                .row_write(&mut buffer, &mut offset, 1, &table_info)
                .unwrap_or_else(|_| panic!("Serialization should succeed for {token_type}"));
            let mut read_offset = 0;
            let deserialized_row = EncLogRaw::row_read(&buffer, &mut read_offset, 1, &table_info)
                .unwrap_or_else(|_| panic!("Deserialization should succeed for {token_type}"));
            assert_eq!(
                enclog_entry.token_value, deserialized_row.token_value,
                "Token value mismatch for {token_type}"
            );
            assert_eq!(enclog_entry.func_code, deserialized_row.func_code);
        }
    }

    /// Several rows written back-to-back into one buffer must read back in
    /// order, with the shared offset advancing row by row.
    #[test]
    fn test_multiple_entries() {
        let entries = [
            EncLogRaw {
                rid: 1,
                token: Token::new(0x1E00_0001),
                offset: 0,
                token_value: 0x0600_0001,
                func_code: 0,
            },
            EncLogRaw {
                rid: 2,
                token: Token::new(0x1E00_0002),
                offset: 8,
                token_value: 0x0600_0001,
                func_code: 1,
            },
            EncLogRaw {
                rid: 3,
                token: Token::new(0x1E00_0003),
                offset: 16,
                token_value: 0x0400_0005,
                func_code: 2,
            },
        ];
        let table_info = test_table_info();
        let row_size = <EncLogRaw as TableRow>::row_size(&table_info) as usize;
        let mut buffer = vec![0u8; row_size * entries.len()];
        let mut offset = 0;
        for (i, entry) in entries.iter().enumerate() {
            entry
                .row_write(&mut buffer, &mut offset, (i + 1) as u32, &table_info)
                .expect("Serialization should succeed");
        }
        let mut read_offset = 0;
        for (i, original_entry) in entries.iter().enumerate() {
            let deserialized_row =
                EncLogRaw::row_read(&buffer, &mut read_offset, (i + 1) as u32, &table_info)
                    .expect("Deserialization should succeed");
            assert_eq!(original_entry.token_value, deserialized_row.token_value);
            assert_eq!(original_entry.func_code, deserialized_row.func_code);
        }
    }
}