use crate::checksum::jenkins_lookup3;
use crate::error::{Error, Result};
use crate::io::Cursor;
use crate::storage::Storage;
/// Signature bytes opening a fractal heap header ("FRHP").
const FRHP_SIGNATURE: [u8; 4] = *b"FRHP";
/// Signature bytes opening a fractal heap direct block ("FHDB"); currently unused.
const _FHDB_SIGNATURE: [u8; 4] = *b"FHDB";
/// Signature bytes opening a fractal heap indirect block ("FHIB").
const FHIB_SIGNATURE: [u8; 4] = *b"FHIB";
/// Parsed HDF5 fractal heap header (signature "FRHP").
///
/// A fractal heap stores "managed" objects inside a doubling table of
/// direct/indirect blocks, "huge" objects outside the heap (tracked via a
/// v2 B-tree), and "tiny" objects embedded directly in the heap ID bytes.
#[derive(Debug, Clone)]
pub struct FractalHeap {
    // Length in bytes of heap IDs issued by this heap.
    pub heap_id_len: u16,
    // Encoded length of the I/O filter information block (0 = no filters).
    pub io_filters_len: u16,
    // Heap status flags; bit 1 (0x02) marks direct blocks as checksummed.
    pub flags: u8,
    // Largest object size stored as "managed" (read from a 4-byte field).
    pub max_managed_object_size: u64,
    // Next huge object ID to be issued.
    pub next_huge_id: u64,
    // Address of the v2 B-tree tracking huge objects.
    pub btree_huge_objects_address: u64,
    // NOTE(review): parsed with read_length, but the name suggests the
    // free-space manager *address* — confirm against the HDF5 spec whether
    // this field is the free-space amount or the manager address.
    pub free_space_managed_address: u64,
    // Total amount of managed space in the heap.
    pub managed_space_amount: u64,
    // Amount of allocated managed space.
    pub managed_alloc_amount: u64,
    // Offset of the direct block allocation iterator.
    pub managed_iter_offset: u64,
    // Number of managed objects in the heap.
    pub managed_objects_count: u64,
    // Total size of huge objects.
    pub huge_objects_size: u64,
    // Number of huge objects.
    pub huge_objects_count: u64,
    // Total size of tiny objects.
    pub tiny_objects_size: u64,
    // Number of tiny objects.
    pub tiny_objects_count: u64,
    // Number of columns in the doubling table.
    pub table_width: u16,
    // Block size of the doubling table's first rows.
    pub starting_block_size: u64,
    // Largest direct block size; bigger table entries are indirect blocks.
    pub max_direct_block_size: u64,
    // Maximum heap size, expressed in bits (sizes heap-offset fields).
    pub max_heap_size: u16,
    // Starting number of rows in the root indirect block.
    pub starting_row_root_indirect: u16,
    // Address of the root block (direct if current rows == 0).
    pub root_block_address: u64,
    // Current number of rows in the root indirect block; 0 means the root
    // block is a single direct block.
    pub current_rows_in_root_indirect: u16,
    // Size of the filtered root direct block (present only with filters).
    pub io_filter_size: Option<u64>,
    // Filter mask for the root direct block (present only with filters).
    pub io_filter_mask: Option<u32>,
    // Encoded I/O filter pipeline information.
    pub io_filter_info: Vec<u8>,
}
impl FractalHeap {
/// Parse a fractal heap header (FRHP) from `cursor`.
///
/// `offset_size` and `length_size` come from the superblock ("size of
/// offsets" / "size of lengths" in bytes). The trailing Jenkins lookup3
/// checksum is verified over the header bytes before returning.
pub fn parse(cursor: &mut Cursor, offset_size: u8, length_size: u8) -> Result<Self> {
    let header_start = cursor.position();
    let signature = cursor.read_bytes(4)?;
    if signature != FRHP_SIGNATURE {
        return Err(Error::InvalidFractalHeapSignature);
    }
    let version = cursor.read_u8()?;
    if version != 0 {
        return Err(Error::UnsupportedFractalHeapVersion(version));
    }
    let heap_id_len = cursor.read_u16_le()?;
    let io_filters_len = cursor.read_u16_le()?;
    let flags = cursor.read_u8()?;
    let max_managed_object_size = u64::from(cursor.read_u32_le()?);
    let next_huge_id = cursor.read_length(length_size)?;
    let btree_huge_objects_address = cursor.read_offset(offset_size)?;
    let free_space_managed_address = cursor.read_length(length_size)?;
    let managed_space_amount = cursor.read_length(length_size)?;
    let managed_alloc_amount = cursor.read_length(length_size)?;
    let managed_iter_offset = cursor.read_length(length_size)?;
    let managed_objects_count = cursor.read_length(length_size)?;
    let huge_objects_size = cursor.read_length(length_size)?;
    let huge_objects_count = cursor.read_length(length_size)?;
    let tiny_objects_size = cursor.read_length(length_size)?;
    let tiny_objects_count = cursor.read_length(length_size)?;
    let table_width = cursor.read_u16_le()?;
    let starting_block_size = cursor.read_length(length_size)?;
    let max_direct_block_size = cursor.read_length(length_size)?;
    let max_heap_size = cursor.read_u16_le()?;
    let starting_row_root_indirect = cursor.read_u16_le()?;
    let root_block_address = cursor.read_offset(offset_size)?;
    let current_rows_in_root_indirect = cursor.read_u16_le()?;
    // Filter fields exist only when the heap declares I/O filters.
    let (io_filter_size, io_filter_mask, io_filter_info) = if io_filters_len == 0 {
        (None, None, Vec::new())
    } else {
        let size = cursor.read_length(length_size)?;
        let mask = cursor.read_u32_le()?;
        let info = cursor.read_bytes(usize::from(io_filters_len))?.to_vec();
        (Some(size), Some(mask), info)
    };
    // The checksum covers everything from the signature up to, but not
    // including, the checksum field itself.
    let header_end = cursor.position();
    let stored_checksum = cursor.read_u32_le()?;
    let computed = jenkins_lookup3(&cursor.data()[header_start as usize..header_end as usize]);
    if computed != stored_checksum {
        return Err(Error::ChecksumMismatch {
            expected: stored_checksum,
            actual: computed,
        });
    }
    Ok(FractalHeap {
        heap_id_len,
        io_filters_len,
        flags,
        max_managed_object_size,
        next_huge_id,
        btree_huge_objects_address,
        free_space_managed_address,
        managed_space_amount,
        managed_alloc_amount,
        managed_iter_offset,
        managed_objects_count,
        huge_objects_size,
        huge_objects_count,
        tiny_objects_size,
        tiny_objects_count,
        table_width,
        starting_block_size,
        max_direct_block_size,
        max_heap_size,
        starting_row_root_indirect,
        root_block_address,
        current_rows_in_root_indirect,
        io_filter_size,
        io_filter_mask,
        io_filter_info,
    })
}
/// Read and parse a fractal heap header located at `address` in `storage`.
///
/// Reads at most 256 bytes — a generous upper bound on the header size —
/// clamped to what the storage actually holds past `address`.
pub fn parse_at_storage(
    storage: &dyn Storage,
    address: u64,
    offset_size: u8,
    length_size: u8,
) -> Result<Self> {
    const MAX_HEADER_LEN: u64 = 256;
    let available = storage.len().saturating_sub(address);
    let read_len = usize::try_from(available.min(MAX_HEADER_LEN)).map_err(|_| {
        Error::InvalidData("fractal heap header exceeds platform usize capacity".into())
    })?;
    let bytes = storage.read_range(address, read_len)?;
    let mut cursor = Cursor::new(bytes.as_ref());
    Self::parse(&mut cursor, offset_size, length_size)
}
/// Retrieve the object identified by `heap_id` from an in-memory file image.
///
/// Dispatches on the ID's type bits: managed objects are read out of the
/// heap's direct blocks, huge objects are resolved (possibly through the
/// huge-object B-tree), and tiny objects are decoded straight from the ID.
pub fn get_object(
    &self,
    heap_id: &[u8],
    file_data: &[u8],
    offset_size: u8,
    length_size: u8,
) -> Result<Vec<u8>> {
    let kind = self.heap_id_kind(heap_id)?;
    match kind {
        HeapIdKind::Tiny => self.decode_tiny_object(heap_id),
        HeapIdKind::Managed => self.get_managed_object_impl(heap_id, file_data, offset_size),
        HeapIdKind::Huge => self.get_huge_object(heap_id, file_data, offset_size, length_size),
    }
}
/// Retrieve the object identified by `heap_id`, reading through `storage`.
///
/// Storage-backed counterpart of [`FractalHeap::get_object`]; the tiny
/// path never touches storage since the payload lives in the ID itself.
pub fn get_object_storage(
    &self,
    heap_id: &[u8],
    storage: &dyn Storage,
    offset_size: u8,
    length_size: u8,
) -> Result<Vec<u8>> {
    let kind = self.heap_id_kind(heap_id)?;
    match kind {
        HeapIdKind::Tiny => self.decode_tiny_object(heap_id),
        HeapIdKind::Managed => {
            self.get_managed_object_storage_impl(heap_id, storage, offset_size)
        }
        HeapIdKind::Huge => {
            self.get_huge_object_storage(heap_id, storage, offset_size, length_size)
        }
    }
}
/// Backwards-compatible alias for [`FractalHeap::get_object`].
///
/// Despite the name, this accepts any heap ID kind (managed, huge, or
/// tiny) and simply delegates.
pub fn get_managed_object(
    &self,
    heap_id: &[u8],
    file_data: &[u8],
    offset_size: u8,
    length_size: u8,
) -> Result<Vec<u8>> {
    self.get_object(heap_id, file_data, offset_size, length_size)
}
/// Backwards-compatible alias for [`FractalHeap::get_object_storage`].
///
/// Despite the name, this accepts any heap ID kind (managed, huge, or
/// tiny) and simply delegates.
pub fn get_managed_object_storage(
    &self,
    heap_id: &[u8],
    storage: &dyn Storage,
    offset_size: u8,
    length_size: u8,
) -> Result<Vec<u8>> {
    self.get_object_storage(heap_id, storage, offset_size, length_size)
}
fn get_managed_object_impl(
&self,
heap_id: &[u8],
file_data: &[u8],
offset_size: u8,
) -> Result<Vec<u8>> {
if self.io_filters_len > 0 {
return Err(Error::Other(
"filtered fractal heap managed objects are not supported".to_string(),
));
}
let (heap_offset, obj_length) = self.decode_managed_heap_id(heap_id)?;
if obj_length == 0 {
return Ok(Vec::new());
}
let (block_address, block_offset_in_heap, _block_size) =
self.find_direct_block(heap_offset, file_data, offset_size)?;
let db_header_size = self.direct_block_header_size(offset_size);
let offset_in_block = heap_offset - block_offset_in_heap;
let data_start = block_address as usize + db_header_size + offset_in_block as usize;
let data_end = data_start + obj_length as usize;
if data_end > file_data.len() {
return Err(Error::UnexpectedEof {
offset: data_start as u64,
needed: obj_length,
available: file_data.len().saturating_sub(data_start) as u64,
});
}
Ok(file_data[data_start..data_end].to_vec())
}
fn get_managed_object_storage_impl(
&self,
heap_id: &[u8],
storage: &dyn Storage,
offset_size: u8,
) -> Result<Vec<u8>> {
if self.io_filters_len > 0 {
return Err(Error::Other(
"filtered fractal heap managed objects are not supported".to_string(),
));
}
let (heap_offset, obj_length) = self.decode_managed_heap_id(heap_id)?;
if obj_length == 0 {
return Ok(Vec::new());
}
let (block_address, block_offset_in_heap, _block_size) =
self.find_direct_block_storage(heap_offset, storage, offset_size)?;
let db_header_size = self.direct_block_header_size(offset_size);
let offset_in_block = heap_offset - block_offset_in_heap;
let data_start = block_address
.checked_add(u64::try_from(db_header_size).map_err(|_| {
Error::InvalidData("fractal heap direct block header too large".into())
})?)
.and_then(|value| value.checked_add(offset_in_block))
.ok_or(Error::OffsetOutOfBounds(block_address))?;
let len = usize::try_from(obj_length).map_err(|_| {
Error::InvalidData("fractal heap object exceeds platform usize capacity".into())
})?;
Ok(storage.read_range(data_start, len)?.to_vec())
}
fn get_huge_object(
&self,
heap_id: &[u8],
file_data: &[u8],
offset_size: u8,
length_size: u8,
) -> Result<Vec<u8>> {
let (address, length) = self.resolve_huge_object_location(
heap_id,
Some(file_data),
None,
offset_size,
length_size,
)?;
let start = usize::try_from(address).map_err(|_| Error::OffsetOutOfBounds(address))?;
let len = usize::try_from(length).map_err(|_| {
Error::InvalidData("huge fractal heap object exceeds platform usize capacity".into())
})?;
let end = start
.checked_add(len)
.ok_or(Error::OffsetOutOfBounds(address))?;
if end > file_data.len() {
return Err(Error::UnexpectedEof {
offset: address,
needed: length,
available: file_data.len().saturating_sub(start) as u64,
});
}
Ok(file_data[start..end].to_vec())
}
/// Fetch a huge object through the storage backend.
///
/// Resolves the ID to `(address, length)` and reads that range directly;
/// bounds validation is delegated to the storage implementation.
fn get_huge_object_storage(
    &self,
    heap_id: &[u8],
    storage: &dyn Storage,
    offset_size: u8,
    length_size: u8,
) -> Result<Vec<u8>> {
    let (address, length) = self.resolve_huge_object_location(
        heap_id,
        None,
        Some(storage),
        offset_size,
        length_size,
    )?;
    let read_len = usize::try_from(length).map_err(|_| {
        Error::InvalidData("huge fractal heap object exceeds platform usize capacity".into())
    })?;
    Ok(storage.read_range(address, read_len)?.to_vec())
}
/// Resolve a huge-object heap ID to the object's `(address, length)`.
///
/// Exactly one of `file_data` / `storage` is expected to be `Some`; the
/// indirect path looks the object up in the huge-object v2 B-tree through
/// whichever backend was supplied.
///
/// NOTE(review): whether an ID is "direct" (address + length encoded
/// inline) or "indirect" (B-tree object ID) is inferred here from the
/// ID's byte length — confirm this matches how writers size huge heap IDs.
fn resolve_huge_object_location(
    &self,
    heap_id: &[u8],
    file_data: Option<&[u8]>,
    storage: Option<&dyn Storage>,
    offset_size: u8,
    length_size: u8,
) -> Result<(u64, u64)> {
    // Byte lengths of "direct" huge IDs: flag byte + address + length,
    // plus filter mask (4) + memory size for the filtered variant.
    let direct_unfiltered_len = 1 + usize::from(offset_size) + usize::from(length_size);
    let direct_filtered_len = direct_unfiltered_len + 4 + usize::from(length_size);
    if self.io_filters_len > 0 && heap_id.len() >= direct_filtered_len {
        return Err(Error::Other(
            "filtered fractal heap huge objects are not supported".to_string(),
        ));
    }
    // Direct, unfiltered ID: the location is encoded in the ID itself.
    if self.io_filters_len == 0 && heap_id.len() >= direct_unfiltered_len {
        let mut cursor = Cursor::new(&heap_id[1..]);
        let address = cursor.read_offset(offset_size)?;
        let length = cursor.read_length(length_size)?;
        return Ok((address, length));
    }
    // Indirect ID: flag byte followed by a B-tree object ID.
    if heap_id.len() < 1 + usize::from(length_size) {
        return Err(Error::InvalidData(
            "huge fractal heap ID is too short".into(),
        ));
    }
    if Cursor::is_undefined_offset(self.btree_huge_objects_address, offset_size) {
        return Err(Error::UndefinedAddress);
    }
    let mut key_cursor = Cursor::new(&heap_id[1..]);
    let object_id = key_cursor.read_length(length_size)?;
    // Parse the huge-object v2 B-tree header from whichever backend exists.
    let header = if let Some(storage) = storage {
        crate::btree_v2::BTreeV2Header::parse_at_storage(
            storage,
            self.btree_huge_objects_address,
            offset_size,
            length_size,
        )?
    } else {
        let data = file_data.expect("file_data must exist when storage is None");
        let mut cursor = Cursor::new(data);
        cursor.set_position(self.btree_huge_objects_address);
        crate::btree_v2::BTreeV2Header::parse(&mut cursor, offset_size, length_size)?
    };
    // Collect all records, then linearly scan for the matching object ID.
    let records = if let Some(storage) = storage {
        crate::btree_v2::collect_btree_v2_records_storage(
            storage,
            &header,
            offset_size,
            length_size,
            None,
            &[],
            None,
        )?
    } else {
        crate::btree_v2::collect_btree_v2_records(
            file_data.expect("file_data must exist when storage is None"),
            &header,
            offset_size,
            length_size,
            None,
            &[],
            None,
        )?
    };
    for record in records {
        match record {
            crate::btree_v2::BTreeV2Record::HugeIndirectNonFiltered {
                address,
                length,
                object_id: record_id,
            } if record_id == object_id => return Ok((address, length)),
            crate::btree_v2::BTreeV2Record::HugeIndirectFiltered {
                object_id: record_id,
                ..
            } if record_id == object_id => {
                return Err(Error::Other(
                    "filtered fractal heap huge objects are not supported".to_string(),
                ));
            }
            _ => {}
        }
    }
    Err(Error::InvalidData(format!(
        "huge fractal heap object ID {} not found",
        object_id
    )))
}
/// Decode a tiny object, whose payload is embedded in the heap ID bytes.
///
/// Heaps issuing IDs longer than 18 bytes use the extended tiny layout:
/// the length occupies 12 bits spread over the first two bytes. Otherwise
/// the low nibble of the flag byte holds the length. Both encodings store
/// `length - 1`.
fn decode_tiny_object(&self, heap_id: &[u8]) -> Result<Vec<u8>> {
    let extended = self.heap_id_len > 18;
    let data_start = if extended { 2usize } else { 1usize };
    let len = if extended {
        if heap_id.len() < 2 {
            return Err(Error::InvalidData(
                "extended tiny heap ID is too short".into(),
            ));
        }
        let encoded = (u16::from(heap_id[0] & 0x0F) << 8) | u16::from(heap_id[1]);
        usize::from(encoded) + 1
    } else {
        usize::from(heap_id[0] & 0x0F) + 1
    };
    let data_end = data_start
        .checked_add(len)
        .ok_or_else(|| Error::InvalidData("tiny heap object length overflows".into()))?;
    if data_end > heap_id.len() {
        return Err(Error::InvalidData(format!(
            "tiny heap object needs {} bytes, heap ID has {}",
            data_end,
            heap_id.len()
        )));
    }
    Ok(heap_id[data_start..data_end].to_vec())
}
/// Classify a heap ID by the version and type bits of its flag byte.
///
/// Bits 6-7 carry the ID version (only 0 is defined); bits 4-5 carry the
/// type: 0 = managed, 1 = huge, 2 = tiny.
fn heap_id_kind(&self, heap_id: &[u8]) -> Result<HeapIdKind> {
    let Some(&flag_byte) = heap_id.first() else {
        return Err(Error::InvalidData("empty fractal heap ID".into()));
    };
    let version = flag_byte >> 6;
    if version != 0 {
        return Err(Error::InvalidData(format!(
            "unsupported fractal heap ID version {}",
            version
        )));
    }
    match (flag_byte >> 4) & 0x03 {
        0 => Ok(HeapIdKind::Managed),
        1 => Ok(HeapIdKind::Huge),
        2 => Ok(HeapIdKind::Tiny),
        other => Err(Error::InvalidData(format!(
            "unknown fractal heap ID type {}",
            other
        ))),
    }
}
/// Split a managed heap ID into `(offset within heap, object length)`.
///
/// The field widths depend on the heap's parameters; see
/// [`FractalHeap::managed_id_widths`].
fn decode_managed_heap_id(&self, heap_id: &[u8]) -> Result<(u64, u64)> {
    let (offset_bytes, length_bytes) = self.managed_id_widths();
    let id_len = 1 + offset_bytes + length_bytes;
    if heap_id.len() < id_len {
        return Err(Error::InvalidData(format!(
            "managed fractal heap ID too short: need {} bytes, have {}",
            id_len,
            heap_id.len()
        )));
    }
    // Skip the flag byte, then read the two variable-width fields.
    let mut fields = Cursor::new(&heap_id[1..id_len]);
    let heap_offset = fields.read_uvar(offset_bytes)?;
    let obj_length = fields.read_uvar(length_bytes)?;
    Ok((heap_offset, obj_length))
}
/// Byte widths of the offset and length fields inside a managed heap ID.
fn managed_id_widths(&self) -> (usize, usize) {
    // Offsets are sized by the maximum heap size, which is given in bits.
    let offset_bytes = usize::from(self.max_heap_size).div_ceil(8).max(1);
    // Lengths are sized by the largest object a direct block can hold.
    let largest_object = self.max_direct_block_size.min(self.max_managed_object_size);
    let length_bytes = bytes_needed_to_encode(largest_object).max(1);
    (offset_bytes, length_bytes)
}
/// Locate the direct block containing `heap_offset` in an in-memory image.
///
/// Returns `(block address, block offset within the heap, block size)`.
fn find_direct_block(
    &self,
    heap_offset: u64,
    file_data: &[u8],
    offset_size: u8,
) -> Result<(u64, u64, u64)> {
    if Cursor::is_undefined_offset(self.root_block_address, offset_size) {
        return Err(Error::UndefinedAddress);
    }
    // Zero rows means the root block is itself a single direct block.
    if self.current_rows_in_root_indirect == 0 {
        return Ok((self.root_block_address, 0, self.starting_block_size));
    }
    self.find_direct_block_via_indirect(
        self.root_block_address,
        heap_offset,
        file_data,
        offset_size,
        self.current_rows_in_root_indirect,
    )
}
/// Storage-backed variant of [`FractalHeap::find_direct_block`].
fn find_direct_block_storage(
    &self,
    heap_offset: u64,
    storage: &dyn Storage,
    offset_size: u8,
) -> Result<(u64, u64, u64)> {
    if Cursor::is_undefined_offset(self.root_block_address, offset_size) {
        return Err(Error::UndefinedAddress);
    }
    // Zero rows means the root block is itself a single direct block.
    if self.current_rows_in_root_indirect == 0 {
        return Ok((self.root_block_address, 0, self.starting_block_size));
    }
    self.find_direct_block_via_indirect_storage(
        self.root_block_address,
        heap_offset,
        storage,
        offset_size,
        self.current_rows_in_root_indirect,
    )
}
/// Walk an indirect block's doubling table to find the direct block that
/// covers `heap_offset` (relative to the space this indirect block spans).
///
/// Returns `(direct block address, offset of that block within the space
/// covered by this indirect block, block size)`.
///
/// Fix: when recursing into a nested indirect block, the child's result is
/// expressed relative to the child's own space. Previously it was returned
/// unchanged, so callers computed the object's offset within the direct
/// block against the wrong base; the child offset is now translated back
/// into this block's frame by adding `running_offset`.
fn find_direct_block_via_indirect(
    &self,
    indirect_address: u64,
    heap_offset: u64,
    file_data: &[u8],
    offset_size: u8,
    nrows: u16,
) -> Result<(u64, u64, u64)> {
    let addr = indirect_address as usize;
    if addr + 4 > file_data.len() {
        return Err(Error::OffsetOutOfBounds(indirect_address));
    }
    if file_data[addr..addr + 4] != FHIB_SIGNATURE {
        return Err(Error::InvalidData(format!(
            "expected FHIB signature at offset {:#x}, got {:?}",
            indirect_address,
            &file_data[addr..addr + 4]
        )));
    }
    let width = self.table_width as u64;
    let mut running_offset: u64 = 0;
    for row in 0..nrows as u64 {
        let block_size = self.block_size_for_row(row);
        let is_direct = block_size <= self.max_direct_block_size;
        for col in 0..width {
            let block_end = running_offset + block_size;
            if heap_offset >= running_offset && heap_offset < block_end {
                let entry_index = row * width + col;
                // FHIB header: signature (4) + version (1) + heap header
                // address + block offset field (max_heap_size bits).
                let iblock_header_size =
                    4 + 1 + offset_size as u64 + (self.max_heap_size as u64).div_ceil(8);
                let entry_addr_pos =
                    indirect_address + iblock_header_size + entry_index * offset_size as u64;
                if entry_addr_pos as usize + offset_size as usize > file_data.len() {
                    return Err(Error::OffsetOutOfBounds(entry_addr_pos));
                }
                let mut cursor = Cursor::new(file_data);
                cursor.set_position(entry_addr_pos);
                let block_address = cursor.read_offset(offset_size)?;
                if Cursor::is_undefined_offset(block_address, offset_size) {
                    return Err(Error::UndefinedAddress);
                }
                if is_direct {
                    return Ok((block_address, running_offset, block_size));
                }
                // Recurse into the child indirect block: translate the
                // offset into the child's frame on the way in and back
                // into this block's frame on the way out.
                let sub_rows = self.rows_for_block_size(block_size);
                let (child_address, child_offset, child_size) = self
                    .find_direct_block_via_indirect(
                        block_address,
                        heap_offset - running_offset,
                        file_data,
                        offset_size,
                        sub_rows,
                    )?;
                return Ok((child_address, running_offset + child_offset, child_size));
            }
            running_offset = block_end;
        }
    }
    Err(Error::InvalidData(format!(
        "fractal heap offset {} not found in doubling table",
        heap_offset
    )))
}
/// Storage-backed variant of [`FractalHeap::find_direct_block_via_indirect`].
///
/// `heap_offset` is relative to the space this indirect block covers, and
/// the returned block offset is expressed in the same frame.
///
/// Fix: results from nested indirect blocks are translated back into this
/// block's frame by adding `running_offset`; previously the child-relative
/// offset was returned unchanged, giving callers the wrong base for the
/// object's position within the direct block.
fn find_direct_block_via_indirect_storage(
    &self,
    indirect_address: u64,
    heap_offset: u64,
    storage: &dyn Storage,
    offset_size: u8,
    nrows: u16,
) -> Result<(u64, u64, u64)> {
    let sig = storage.read_range(indirect_address, 4)?;
    if sig.as_ref() != FHIB_SIGNATURE {
        return Err(Error::InvalidData(format!(
            "expected FHIB signature at offset {:#x}, got {:?}",
            indirect_address,
            sig.as_ref()
        )));
    }
    let width = u64::from(self.table_width);
    let mut running_offset = 0u64;
    for row in 0..u64::from(nrows) {
        let block_size = self.block_size_for_row(row);
        let is_direct = block_size <= self.max_direct_block_size;
        for col in 0..width {
            let block_end = running_offset + block_size;
            if heap_offset >= running_offset && heap_offset < block_end {
                let entry_index = row * width + col;
                // FHIB header: signature (4) + version (1) + heap header
                // address + block offset field (max_heap_size bits).
                let iblock_header_size = 4
                    + 1
                    + u64::from(offset_size)
                    + (u64::from(self.max_heap_size)).div_ceil(8);
                let entry_addr_pos = indirect_address
                    + iblock_header_size
                    + entry_index * u64::from(offset_size);
                let entry = storage.read_range(entry_addr_pos, usize::from(offset_size))?;
                let mut cursor = Cursor::new(entry.as_ref());
                let block_address = cursor.read_offset(offset_size)?;
                if Cursor::is_undefined_offset(block_address, offset_size) {
                    return Err(Error::UndefinedAddress);
                }
                if is_direct {
                    return Ok((block_address, running_offset, block_size));
                }
                // Recurse into the child indirect block: translate the
                // offset into the child's frame on the way in and back
                // into this block's frame on the way out.
                let sub_rows = self.rows_for_block_size(block_size);
                let (child_address, child_offset, child_size) = self
                    .find_direct_block_via_indirect_storage(
                        block_address,
                        heap_offset - running_offset,
                        storage,
                        offset_size,
                        sub_rows,
                    )?;
                return Ok((child_address, running_offset + child_offset, child_size));
            }
            running_offset = block_end;
        }
    }
    Err(Error::InvalidData(format!(
        "fractal heap offset {} not found in doubling table",
        heap_offset
    )))
}
/// Block size for `row` of the doubling table: rows 0 and 1 both use the
/// starting block size, and each subsequent row doubles.
///
/// Fix: the shift is now checked. The old `1u64 << (row - 1)` panics in
/// debug builds (and wraps in release) once `row - 1 >= 64`, which is
/// reachable with doubling-table parameters from a corrupt file. On
/// overflow we saturate to `u64::MAX`; such an absurd size then fails
/// cleanly in downstream bounds checks.
fn block_size_for_row(&self, row: u64) -> u64 {
    if row == 0 {
        return self.starting_block_size;
    }
    u32::try_from(row - 1)
        .ok()
        .and_then(|shift| 1u64.checked_shl(shift))
        .and_then(|factor| self.starting_block_size.checked_mul(factor))
        .unwrap_or(u64::MAX)
}
/// Number of doubling-table rows spanned by an indirect block that covers
/// `total_size` bytes of heap space.
///
/// Accumulates whole rows (`block size * table width`) until adding the
/// next row would exceed `total_size`.
///
/// Fix: the per-row multiply and running sum now use checked arithmetic —
/// block sizes double every row, so with pathological heap parameters the
/// old unchecked math could overflow-panic in debug builds. Overflow now
/// simply terminates the loop, matching the defensive 1000-row cap.
fn rows_for_block_size(&self, total_size: u64) -> u16 {
    let width = u64::from(self.table_width);
    let mut rows: u16 = 0;
    let mut accum: u64 = 0;
    loop {
        let row_total = match self.block_size_for_row(u64::from(rows)).checked_mul(width) {
            Some(value) => value,
            None => break,
        };
        let next = match accum.checked_add(row_total) {
            Some(value) => value,
            None => break,
        };
        if next > total_size {
            break;
        }
        accum = next;
        rows += 1;
        if rows > 1000 {
            // Defensive cap against malformed doubling-table parameters.
            break;
        }
    }
    rows
}
/// Byte size of a direct block (FHDB) header: signature (4) + version (1)
/// + heap header address + block offset field, plus a 4-byte checksum when
/// the "checksum direct blocks" flag (bit 1) is set.
fn direct_block_header_size(&self, offset_size: u8) -> usize {
    let block_offset_bytes = usize::from(self.max_heap_size).div_ceil(8);
    let checksum_bytes = if self.flags & 0x02 != 0 { 4 } else { 0 };
    4 + 1 + usize::from(offset_size) + block_offset_bytes + checksum_bytes
}
}
/// Classification of a fractal heap ID, taken from bits 4-5 of its first
/// (flag) byte.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum HeapIdKind {
    // Object lives in a managed direct block of the doubling table.
    Managed,
    // Object lives outside the heap, tracked via the huge-object B-tree.
    Huge,
    // Object payload is embedded in the heap ID itself.
    Tiny,
}
/// Minimum number of bytes required to encode `value`, always at least 1.
///
/// Computed from the count of significant bits instead of a cascade of
/// threshold comparisons; `value == 0` still needs one byte.
fn bytes_needed_to_encode(value: u64) -> usize {
    let significant_bits = (u64::BITS - value.leading_zeros()) as usize;
    significant_bits.div_ceil(8).max(1)
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Baseline heap header shared by the tests; individual tests override
    /// only the fields they care about via struct-update syntax.
    fn base_heap() -> FractalHeap {
        FractalHeap {
            heap_id_len: 8,
            io_filters_len: 0,
            flags: 0x02,
            max_managed_object_size: 128,
            next_huge_id: 0,
            btree_huge_objects_address: u64::MAX,
            free_space_managed_address: 0,
            managed_space_amount: 0,
            managed_alloc_amount: 0,
            managed_iter_offset: 0,
            managed_objects_count: 0,
            huge_objects_size: 0,
            huge_objects_count: 0,
            tiny_objects_size: 0,
            tiny_objects_count: 0,
            table_width: 4,
            starting_block_size: 256,
            max_direct_block_size: 4096,
            max_heap_size: 16,
            starting_row_root_indirect: 0,
            root_block_address: 0,
            current_rows_in_root_indirect: 0,
            io_filter_size: None,
            io_filter_mask: None,
            io_filter_info: Vec::new(),
        }
    }
    #[test]
    fn test_block_size_for_row() {
        let heap = FractalHeap {
            max_managed_object_size: 0,
            btree_huge_objects_address: 0,
            ..base_heap()
        };
        // Rows 0 and 1 share the starting block size; later rows double.
        assert_eq!(heap.block_size_for_row(0), 256);
        assert_eq!(heap.block_size_for_row(1), 256);
        assert_eq!(heap.block_size_for_row(2), 512);
        assert_eq!(heap.block_size_for_row(3), 1024);
    }
    #[test]
    fn test_get_tiny_object() {
        let heap = base_heap();
        // Flag byte: version 0, type 2 (tiny), low nibble = length - 1.
        let heap_id = [0x20 | 3, b't', b'i', b'n', b'y'];
        let result = heap.get_object(&heap_id, &[], 8, 8).unwrap();
        assert_eq!(result, b"tiny");
    }
    #[test]
    fn test_get_huge_direct_object() {
        let heap = base_heap();
        let mut file_data = vec![0u8; 128];
        file_data[64..68].copy_from_slice(b"huge");
        // Directly-accessed huge ID: flag byte (type 1), address, length.
        let mut heap_id = vec![0x10];
        heap_id.extend_from_slice(&64u64.to_le_bytes());
        heap_id.extend_from_slice(&4u64.to_le_bytes());
        let result = heap.get_object(&heap_id, &file_data, 8, 8).unwrap();
        assert_eq!(result, b"huge");
    }
    #[test]
    fn test_direct_block_header_size() {
        let heap = FractalHeap {
            max_managed_object_size: 0,
            btree_huge_objects_address: 0,
            ..base_heap()
        };
        // 4 (sig) + 1 (version) + offset_size + 2 (16-bit heap offset) + 4 (checksum).
        assert_eq!(heap.direct_block_header_size(8), 19);
        assert_eq!(heap.direct_block_header_size(4), 15);
    }
    #[test]
    fn test_get_managed_object_direct_root() {
        let offset_size: u8 = 8;
        let starting_block_size: u64 = 256;
        let db_header_size = 19usize;
        let block_address: u64 = 1000;
        let heap = FractalHeap {
            managed_space_amount: starting_block_size,
            managed_alloc_amount: starting_block_size,
            managed_objects_count: 1,
            root_block_address: block_address,
            ..base_heap()
        };
        let file_size = block_address as usize + starting_block_size as usize + 100;
        let mut file_data = vec![0u8; file_size];
        let ba = block_address as usize;
        file_data[ba..ba + 4].copy_from_slice(b"FHDB");
        file_data[ba + 4] = 0;
        let obj_data = b"test object data";
        let obj_start = ba + db_header_size;
        file_data[obj_start..obj_start + obj_data.len()].copy_from_slice(obj_data);
        // Managed ID: flag byte 0, 16-bit heap offset 0, 1-byte length 0x10.
        let heap_id = [0x00, 0x00, 0x00, 0x10];
        let result = heap
            .get_managed_object(&heap_id, &file_data, offset_size, 8)
            .unwrap();
        assert_eq!(result, obj_data);
    }
}