/// File magic number; its bytes 0x47 0x52 0x4D 0x4C spell "GRML".
pub const MAGIC: u32 = 0x47524D4C;
/// Current (v2) format version written by this code.
pub const VERSION: u32 = 2;
/// Oldest format version this code can still read.
pub const MIN_READABLE_VERSION: u32 = 1;
/// On-disk size of the v2 `FileHeader`, in bytes.
pub const HEADER_SIZE: usize = 192;
/// On-disk size of the legacy v1 `FileHeaderV1`, in bytes.
pub const HEADER_SIZE_V1: usize = 136;
/// Value of `FileHeader::endianness` for little-endian files.
pub const ENDIAN_LITTLE: u8 = 1;
/// Value of `FileHeader::endianness` for big-endian files.
pub const ENDIAN_BIG: u8 = 2;
/// Page size recorded in freshly created headers.
pub const DEFAULT_PAGE_SIZE: u32 = 4096;
/// On-disk size of one `NodeRecord`, in bytes.
pub const NODE_RECORD_SIZE: usize = 48;
/// On-disk size of one `EdgeRecord`, in bytes.
pub const EDGE_RECORD_SIZE: usize = 56;

/// Legacy v1 on-disk file header (`HEADER_SIZE_V1` = 136 bytes).
///
/// Retained only so that old files can be read and upgraded in memory via
/// `FileHeader::from_v1`.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct FileHeaderV1 {
    pub magic: u32,
    pub version: u32,
    pub node_count: u64,
    pub node_capacity: u64,
    pub edge_count: u64,
    pub edge_capacity: u64,
    pub string_table_offset: u64,
    pub string_table_end: u64,
    pub property_arena_offset: u64,
    pub arena_next_offset: u64,
    pub free_node_head: u64,
    pub free_edge_head: u64,
    pub next_node_id: u64,
    pub next_edge_id: u64,
    pub schema_offset: u64,
    pub schema_size: u64,
    pub schema_version: u32,
    pub _schema_reserved: [u8; 12],
}

impl FileHeaderV1 {
    /// Deserializes a header from the first `HEADER_SIZE_V1` bytes of `bytes`.
    ///
    /// # Panics
    /// Panics if `bytes` is shorter than `HEADER_SIZE_V1`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= HEADER_SIZE_V1, "Buffer too small for FileHeaderV1");
        // SAFETY: the length check above guarantees HEADER_SIZE_V1 readable
        // bytes, `read_unaligned` imposes no alignment requirement, and every
        // bit pattern is a valid FileHeaderV1 (all fields are plain integers).
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; HEADER_SIZE_V1] {
        let mut out = [0u8; HEADER_SIZE_V1];
        // SAFETY: `Self` is `repr(C, packed)` with size HEADER_SIZE_V1, so it
        // consists of exactly that many initialized bytes with no padding.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                HEADER_SIZE_V1,
            );
        }
        out
    }
}
/// Current (v2) on-disk file header: `HEADER_SIZE` = 192 bytes, `repr(C, packed)`.
///
/// `to_bytes` copies the struct's raw bytes, so multi-byte fields land in the
/// host's native byte order; `endianness` records which order was used
/// (new headers record `ENDIAN_LITTLE`).
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct FileHeader {
    /// Must equal `MAGIC` for a valid file.
    pub magic: u32,
    /// Format version of this file (2 for headers produced by `new`).
    pub version: u32,
    /// Oldest reader version allowed to open this file.
    pub min_reader_version: u32,
    /// Page size for the file; `DEFAULT_PAGE_SIZE` in new headers.
    pub page_size: u32,
    /// Feature flags; no bits are defined in this file (always 0 here).
    pub flags: u32,
    /// Byte-order tag: `ENDIAN_LITTLE` or `ENDIAN_BIG`.
    pub endianness: u8,
    /// Explicit padding so the following u64 fields start at offset 24.
    pub _padding1: [u8; 3],
    /// Number of live node records.
    pub node_count: u64,
    /// Number of node slots allocated on disk.
    pub node_capacity: u64,
    /// Number of live edge records.
    pub edge_count: u64,
    /// Number of edge slots allocated on disk.
    pub edge_capacity: u64,
    /// File offset where the string table begins.
    pub string_table_offset: u64,
    /// File offset just past the string table.
    pub string_table_end: u64,
    /// File offset where the property arena begins.
    pub property_arena_offset: u64,
    /// Next free position inside the property arena.
    pub arena_next_offset: u64,
    /// Head of the free-node list; `u64::MAX` when empty (see `new`).
    pub free_node_head: u64,
    /// Head of the free-edge list; `u64::MAX` when empty (see `new`).
    pub free_edge_head: u64,
    /// Next node id to hand out.
    pub next_node_id: u64,
    /// Next edge id to hand out.
    pub next_edge_id: u64,
    /// File offset of the schema blob (0 = none).
    pub schema_offset: u64,
    /// Size in bytes of the schema blob.
    pub schema_size: u64,
    /// Version counter for the schema blob.
    pub schema_version: u32,
    /// Reserved to keep the schema block 8-byte aligned at 16 bytes.
    pub _schema_reserved: [u8; 12],
    /// CRC-32 over header bytes 0..152 (everything before this field);
    /// see `compute_crc32`. Not meaningful for version < 2 headers.
    pub header_crc32: u32,
    /// Spare space at offsets 156..192. The first 24 bytes are repurposed by
    /// the query-store accessors (`query_store_offset` etc.); note this area
    /// lies OUTSIDE the CRC-protected 0..152 range.
    pub _reserved: [u8; 36],
}
impl FileHeader {
    /// Number of leading header bytes covered by `header_crc32`: every field
    /// up to, but not including, the CRC field itself (offsets 0..152).
    /// The trailing `_reserved` area — including the query-store fields kept
    /// there — is deliberately outside the checksum.
    const CRC_COVERED_LEN: usize = 152;

    /// Creates a fresh v2 header: empty tables, `u64::MAX` sentinel free-list
    /// heads, and a valid `header_crc32`.
    pub fn new() -> Self {
        let mut header = Self {
            magic: MAGIC,
            version: VERSION,
            min_reader_version: VERSION,
            page_size: DEFAULT_PAGE_SIZE,
            flags: 0,
            endianness: ENDIAN_LITTLE,
            _padding1: [0u8; 3],
            node_count: 0,
            node_capacity: 0,
            edge_count: 0,
            edge_capacity: 0,
            string_table_offset: 0,
            string_table_end: 0,
            property_arena_offset: 0,
            arena_next_offset: 0,
            free_node_head: u64::MAX,
            free_edge_head: u64::MAX,
            next_node_id: 0,
            next_edge_id: 0,
            schema_offset: 0,
            schema_size: 0,
            schema_version: 0,
            _schema_reserved: [0u8; 12],
            header_crc32: 0,
            _reserved: [0u8; 36],
        };
        // Seal the header through the one canonical path (was a direct
        // `compute_crc32` assignment; same result, kept consistent).
        header.update_crc32();
        header
    }

    /// Upgrades a legacy v1 header in memory.
    ///
    /// `version`/`min_reader_version` stay at 1 and `header_crc32` stays 0:
    /// `validate_crc32` skips checking for versions below 2, so the upgraded
    /// header validates until `update_crc32` is explicitly called.
    pub fn from_v1(v1: &FileHeaderV1) -> Self {
        Self {
            magic: v1.magic,
            version: 1,
            min_reader_version: 1,
            page_size: DEFAULT_PAGE_SIZE,
            flags: 0,
            endianness: ENDIAN_LITTLE,
            _padding1: [0u8; 3],
            node_count: v1.node_count,
            node_capacity: v1.node_capacity,
            edge_count: v1.edge_count,
            edge_capacity: v1.edge_capacity,
            string_table_offset: v1.string_table_offset,
            string_table_end: v1.string_table_end,
            property_arena_offset: v1.property_arena_offset,
            arena_next_offset: v1.arena_next_offset,
            free_node_head: v1.free_node_head,
            free_edge_head: v1.free_edge_head,
            next_node_id: v1.next_node_id,
            next_edge_id: v1.next_edge_id,
            schema_offset: v1.schema_offset,
            schema_size: v1.schema_size,
            schema_version: v1.schema_version,
            _schema_reserved: v1._schema_reserved,
            header_crc32: 0,
            _reserved: [0u8; 36],
        }
    }

    /// Deserializes a header from the first `HEADER_SIZE` bytes of `bytes`.
    /// No magic or CRC validation is performed here.
    ///
    /// # Panics
    /// Panics if `bytes` is shorter than `HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(
            bytes.len() >= HEADER_SIZE,
            "Buffer too small for FileHeader"
        );
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any HEADER_SIZE bytes form a valid FileHeader.
        unsafe {
            let ptr = bytes.as_ptr() as *const FileHeader;
            ptr.read_unaligned()
        }
    }

    /// Serializes the header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; HEADER_SIZE] {
        // SAFETY: `Self` is `repr(C, packed)` with size HEADER_SIZE, so it is
        // exactly HEADER_SIZE initialized bytes with no padding.
        unsafe {
            let ptr = self as *const FileHeader as *const u8;
            let slice = std::slice::from_raw_parts(ptr, HEADER_SIZE);
            let mut result = [0u8; HEADER_SIZE];
            result.copy_from_slice(slice);
            result
        }
    }

    /// CRC-32 over the protected prefix (`CRC_COVERED_LEN` bytes); the CRC
    /// field itself and `_reserved` are excluded so updating them does not
    /// invalidate the checksum.
    pub fn compute_crc32(&self) -> u32 {
        let bytes = self.to_bytes();
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&bytes[..Self::CRC_COVERED_LEN]);
        hasher.finalize()
    }

    /// Recomputes and stores `header_crc32`.
    pub fn update_crc32(&mut self) {
        self.header_crc32 = self.compute_crc32();
    }

    /// Checks the stored CRC. Headers with version < 2 predate the checksum
    /// and always validate.
    pub fn validate_crc32(&self) -> bool {
        if self.version < 2 {
            return true;
        }
        self.header_crc32 == self.compute_crc32()
    }

    /// File offset of the query-store region, stored little-endian in
    /// `_reserved[0..8]`; 0 means no region exists (see `has_query_region`).
    #[inline]
    pub fn query_store_offset(&self) -> u64 {
        u64::from_le_bytes(self._reserved[0..8].try_into().unwrap())
    }

    /// Stores the query-store region offset. NOTE: `_reserved` is outside the
    /// CRC range, so this does not invalidate `header_crc32`.
    #[inline]
    pub fn set_query_store_offset(&mut self, offset: u64) {
        self._reserved[0..8].copy_from_slice(&offset.to_le_bytes());
    }

    /// File offset just past the query-store region (`_reserved[8..16]`).
    #[inline]
    pub fn query_store_end(&self) -> u64 {
        u64::from_le_bytes(self._reserved[8..16].try_into().unwrap())
    }

    /// Stores the end offset of the query-store region.
    #[inline]
    pub fn set_query_store_end(&mut self, end: u64) {
        self._reserved[8..16].copy_from_slice(&end.to_le_bytes());
    }

    /// Number of stored queries (`_reserved[16..20]`).
    #[inline]
    pub fn query_count(&self) -> u32 {
        u32::from_le_bytes(self._reserved[16..20].try_into().unwrap())
    }

    /// Stores the stored-query count.
    #[inline]
    pub fn set_query_count(&mut self, count: u32) {
        self._reserved[16..20].copy_from_slice(&count.to_le_bytes());
    }

    /// Next query id to hand out (`_reserved[20..24]`).
    #[inline]
    pub fn next_query_id(&self) -> u32 {
        u32::from_le_bytes(self._reserved[20..24].try_into().unwrap())
    }

    /// Stores the next query id.
    #[inline]
    pub fn set_next_query_id(&mut self, id: u32) {
        self._reserved[20..24].copy_from_slice(&id.to_le_bytes());
    }

    /// True when a query-store region has been allocated (nonzero offset).
    #[inline]
    pub fn has_query_region(&self) -> bool {
        self.query_store_offset() != 0
    }
}
impl Default for FileHeader {
fn default() -> Self {
Self::new()
}
}
/// Bit in `NodeRecord::flags` marking the slot as tombstoned.
pub const NODE_FLAG_DELETED: u32 = 0x0001;
/// Bit in `NodeRecord::flags` marking the node as indexed.
pub const NODE_FLAG_INDEXED: u32 = 0x0002;

/// Fixed-size on-disk node record (`NODE_RECORD_SIZE` = 48 bytes).
///
/// The edge/property list heads are initialized to the sentinel `u64::MAX`
/// by `new`.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct NodeRecord {
    pub id: u64,
    pub label_id: u32,
    pub flags: u32,
    pub first_out_edge: u64,
    pub first_in_edge: u64,
    pub prop_head: u64,
    _padding: u64,
}

impl NodeRecord {
    /// Builds a live record with all list heads set to the sentinel value.
    pub fn new(id: u64, label_id: u32) -> Self {
        Self {
            id,
            label_id,
            flags: 0,
            first_out_edge: u64::MAX,
            first_in_edge: u64::MAX,
            prop_head: u64::MAX,
            _padding: 0,
        }
    }

    /// True when the deleted bit is set.
    pub fn is_deleted(&self) -> bool {
        (self.flags & NODE_FLAG_DELETED) != 0
    }

    /// Sets the deleted bit; all other flag bits are preserved.
    pub fn mark_deleted(&mut self) {
        self.flags |= NODE_FLAG_DELETED;
    }

    /// Reads a record from the first `NODE_RECORD_SIZE` bytes of `bytes`.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `NODE_RECORD_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= NODE_RECORD_SIZE, "Buffer too small for NodeRecord");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any NODE_RECORD_SIZE bytes are a valid NodeRecord.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the record into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; NODE_RECORD_SIZE] {
        let mut out = [0u8; NODE_RECORD_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size NODE_RECORD_SIZE, so
        // it is exactly that many initialized bytes with no padding.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                NODE_RECORD_SIZE,
            );
        }
        out
    }
}
/// Bit in `EdgeRecord::flags` marking the slot as tombstoned.
pub const EDGE_FLAG_DELETED: u32 = 0x0001;

/// Fixed-size on-disk edge record (`EDGE_RECORD_SIZE` = 56 bytes).
///
/// `next_out`/`next_in` chain the source node's outgoing and the destination
/// node's incoming adjacency lists; `u64::MAX` is the sentinel set by `new`.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct EdgeRecord {
    pub id: u64,
    pub label_id: u32,
    pub flags: u32,
    pub src: u64,
    pub dst: u64,
    pub next_out: u64,
    pub next_in: u64,
    pub prop_head: u64,
}

impl EdgeRecord {
    /// Builds a live edge from `src` to `dst` with sentinel list pointers.
    pub fn new(id: u64, label_id: u32, src: u64, dst: u64) -> Self {
        Self {
            id,
            label_id,
            flags: 0,
            src,
            dst,
            next_out: u64::MAX,
            next_in: u64::MAX,
            prop_head: u64::MAX,
        }
    }

    /// True when the deleted bit is set.
    pub fn is_deleted(&self) -> bool {
        (self.flags & EDGE_FLAG_DELETED) != 0
    }

    /// Sets the deleted bit; all other flag bits are preserved.
    pub fn mark_deleted(&mut self) {
        self.flags |= EDGE_FLAG_DELETED;
    }

    /// Reads a record from the first `EDGE_RECORD_SIZE` bytes of `bytes`.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `EDGE_RECORD_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= EDGE_RECORD_SIZE, "Buffer too small for EdgeRecord");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any EDGE_RECORD_SIZE bytes are a valid EdgeRecord.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the record into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; EDGE_RECORD_SIZE] {
        let mut out = [0u8; EDGE_RECORD_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size EDGE_RECORD_SIZE, so
        // it is exactly that many initialized bytes with no padding.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                EDGE_RECORD_SIZE,
            );
        }
        out
    }
}
/// On-disk size of a `PropertyEntry` header (4 + 1 + 4 + 8 bytes, packed).
pub const PROPERTY_ENTRY_HEADER_SIZE: usize = 17;

/// Header of one entry in a property list.
///
/// `next` chains to the following entry; `u64::MAX` terminates the list.
/// `value_len` presumably counts value bytes stored after this header —
/// the payload layout is not visible in this file.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct PropertyEntry {
    pub key_id: u32,
    pub value_type: u8,
    pub value_len: u32,
    pub next: u64,
}

impl PropertyEntry {
    /// Builds an entry from its four raw fields.
    pub fn new(key_id: u32, value_type: u8, value_len: u32, next: u64) -> Self {
        Self { key_id, value_type, value_len, next }
    }

    /// Reads an entry from the first `PROPERTY_ENTRY_HEADER_SIZE` bytes.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `PROPERTY_ENTRY_HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= PROPERTY_ENTRY_HEADER_SIZE, "Buffer too small for PropertyEntry");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any such bytes form a valid PropertyEntry.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the entry header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; PROPERTY_ENTRY_HEADER_SIZE] {
        let mut out = [0u8; PROPERTY_ENTRY_HEADER_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size
        // PROPERTY_ENTRY_HEADER_SIZE: that many initialized, padding-free bytes.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                PROPERTY_ENTRY_HEADER_SIZE,
            );
        }
        out
    }
}
/// On-disk size of a `StringEntry` header (two u32 fields).
pub const STRING_ENTRY_HEADER_SIZE: usize = 8;

/// Header of one string-table entry: interned id plus length.
/// `len` presumably counts string bytes stored after this header — the
/// payload layout is not visible in this file.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct StringEntry {
    pub id: u32,
    pub len: u32,
}

impl StringEntry {
    /// Builds an entry from its raw fields.
    pub fn new(id: u32, len: u32) -> Self {
        Self { id, len }
    }

    /// Reads an entry from the first `STRING_ENTRY_HEADER_SIZE` bytes.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `STRING_ENTRY_HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= STRING_ENTRY_HEADER_SIZE, "Buffer too small for StringEntry");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any such bytes form a valid StringEntry.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the entry header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; STRING_ENTRY_HEADER_SIZE] {
        let mut out = [0u8; STRING_ENTRY_HEADER_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size
        // STRING_ENTRY_HEADER_SIZE: that many initialized, padding-free bytes.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                STRING_ENTRY_HEADER_SIZE,
            );
        }
        out
    }
}
/// Query-region magic; its bytes 0x51 0x52 0x59 0x53 spell "QRYS".
pub const QUERY_REGION_MAGIC: u32 = 0x51525953;
/// Current query-region format version.
pub const QUERY_REGION_VERSION: u32 = 1;
/// On-disk size of `QueryRegionHeader`, in bytes.
pub const QUERY_REGION_HEADER_SIZE: usize = 16;
/// On-disk size of the fixed `QueryRecord` header, in bytes.
pub const QUERY_RECORD_HEADER_SIZE: usize = 36;
/// On-disk size of the fixed `ParameterEntry` header, in bytes.
pub const PARAMETER_ENTRY_HEADER_SIZE: usize = 4;
/// Bit in `QueryRecord::flags` marking a stored query as deleted.
pub const QUERY_FLAG_DELETED: u16 = 0x0001;
/// Mask isolating the query-type bits inside `QueryRecord::flags`.
pub const QUERY_TYPE_MASK: u16 = 0x0006;
/// Left-shift applied to a query type before masking into `flags`.
pub const QUERY_TYPE_SHIFT: u16 = 1;
/// Query-type value for Gremlin queries.
pub const QUERY_TYPE_GREMLIN: u16 = 1;
/// Query-type value for GQL queries.
pub const QUERY_TYPE_GQL: u16 = 2;

/// Header at the start of the query-store region (`QUERY_REGION_HEADER_SIZE`
/// bytes). `first_query` points at the first stored query; `u64::MAX` means
/// the list is empty.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct QueryRegionHeader {
    pub magic: u32,
    pub version: u32,
    pub first_query: u64,
}

impl QueryRegionHeader {
    /// Builds a header for an empty query region.
    pub fn new() -> Self {
        Self {
            magic: QUERY_REGION_MAGIC,
            version: QUERY_REGION_VERSION,
            first_query: u64::MAX,
        }
    }

    /// Reads a header from the first `QUERY_REGION_HEADER_SIZE` bytes.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `QUERY_REGION_HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= QUERY_REGION_HEADER_SIZE, "Buffer too small for QueryRegionHeader");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any such bytes form a valid QueryRegionHeader.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; QUERY_REGION_HEADER_SIZE] {
        let mut out = [0u8; QUERY_REGION_HEADER_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size
        // QUERY_REGION_HEADER_SIZE: that many initialized, padding-free bytes.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                QUERY_REGION_HEADER_SIZE,
            );
        }
        out
    }

    /// True when the magic matches; note the version field is not checked.
    pub fn is_valid(&self) -> bool {
        self.magic == QUERY_REGION_MAGIC
    }
}

impl Default for QueryRegionHeader {
    /// Equivalent to [`QueryRegionHeader::new`].
    fn default() -> Self {
        QueryRegionHeader::new()
    }
}
/// Fixed header of a stored query (`QUERY_RECORD_HEADER_SIZE` bytes).
///
/// Records form a doubly linked list via `next`/`prev` (`u64::MAX` = none).
/// The name/description/query text and parameter entries presumably follow
/// the header on disk, with `record_size` as the full footprint — the
/// payload layout is not visible in this file.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct QueryRecord {
    pub id: u32,
    pub flags: u16,
    pub param_count: u16,
    pub name_len: u16,
    pub description_len: u16,
    pub query_len: u32,
    pub record_size: u32,
    pub next: u64,
    pub prev: u64,
}

impl QueryRecord {
    /// Builds an unlinked, live record.
    ///
    /// `query_type` is packed into the flags word; bits outside
    /// `QUERY_TYPE_MASK` are discarded, so types above 3 cannot be encoded.
    pub fn new(
        id: u32,
        query_type: u16,
        param_count: u16,
        name_len: u16,
        description_len: u16,
        query_len: u32,
        record_size: u32,
    ) -> Self {
        let flags = (query_type << QUERY_TYPE_SHIFT) & QUERY_TYPE_MASK;
        Self {
            id,
            flags,
            param_count,
            name_len,
            description_len,
            query_len,
            record_size,
            next: u64::MAX,
            prev: u64::MAX,
        }
    }

    /// True when the deleted bit is set.
    pub fn is_deleted(&self) -> bool {
        (self.flags & QUERY_FLAG_DELETED) != 0
    }

    /// Sets the deleted bit; all other flag bits are preserved.
    pub fn mark_deleted(&mut self) {
        self.flags |= QUERY_FLAG_DELETED;
    }

    /// Extracts the query type (e.g. `QUERY_TYPE_GREMLIN`) from the flags.
    pub fn query_type(&self) -> u16 {
        (self.flags & QUERY_TYPE_MASK) >> QUERY_TYPE_SHIFT
    }

    /// Reads a record from the first `QUERY_RECORD_HEADER_SIZE` bytes.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `QUERY_RECORD_HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= QUERY_RECORD_HEADER_SIZE, "Buffer too small for QueryRecord");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any such bytes form a valid QueryRecord.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the record header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; QUERY_RECORD_HEADER_SIZE] {
        let mut out = [0u8; QUERY_RECORD_HEADER_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size
        // QUERY_RECORD_HEADER_SIZE: that many initialized, padding-free bytes.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                QUERY_RECORD_HEADER_SIZE,
            );
        }
        out
    }
}
/// Fixed header of one stored-query parameter (`PARAMETER_ENTRY_HEADER_SIZE`
/// bytes). `name_len` bytes of parameter name are counted by `total_size`
/// as trailing payload.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct ParameterEntry {
    pub name_len: u16,
    pub value_type: u8,
    pub _reserved: u8,
}

/// `value_type` wildcard meaning "any type accepted".
pub const PARAMETER_TYPE_ANY: u8 = 0xFF;

impl ParameterEntry {
    /// Builds an entry with an explicit value type.
    pub fn new(name_len: u16, value_type: u8) -> Self {
        Self {
            name_len,
            value_type,
            _reserved: 0,
        }
    }

    /// Builds an entry whose value type is the `PARAMETER_TYPE_ANY` wildcard.
    pub fn new_any(name_len: u16) -> Self {
        Self::new(name_len, PARAMETER_TYPE_ANY)
    }

    /// Reads an entry from the first `PARAMETER_ENTRY_HEADER_SIZE` bytes.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than `PARAMETER_ENTRY_HEADER_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> Self {
        assert!(bytes.len() >= PARAMETER_ENTRY_HEADER_SIZE, "Buffer too small for ParameterEntry");
        // SAFETY: length checked above; `read_unaligned` has no alignment
        // requirement and any such bytes form a valid ParameterEntry.
        unsafe { (bytes.as_ptr() as *const Self).read_unaligned() }
    }

    /// Serializes the entry header into a fixed-size byte array.
    pub fn to_bytes(&self) -> [u8; PARAMETER_ENTRY_HEADER_SIZE] {
        let mut out = [0u8; PARAMETER_ENTRY_HEADER_SIZE];
        // SAFETY: `Self` is `repr(C, packed)` with size
        // PARAMETER_ENTRY_HEADER_SIZE: that many initialized, padding-free bytes.
        unsafe {
            std::ptr::copy_nonoverlapping(
                self as *const Self as *const u8,
                out.as_mut_ptr(),
                PARAMETER_ENTRY_HEADER_SIZE,
            );
        }
        out
    }

    /// Total on-disk footprint: fixed header plus `name_len` name bytes.
    pub fn total_size(&self) -> usize {
        PARAMETER_ENTRY_HEADER_SIZE + self.name_len as usize
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_file_header_v1_size() {
assert_eq!(
std::mem::size_of::<FileHeaderV1>(),
HEADER_SIZE_V1,
"FileHeaderV1 size must be exactly 136 bytes"
);
}
#[test]
fn test_file_header_v1_alignment() {
assert_eq!(
std::mem::size_of::<FileHeaderV1>(),
4 + 4 + (14 * 8) + 4 + 12,
"FileHeaderV1 fields should sum to 136 bytes"
);
}
#[test]
fn test_file_header_size() {
assert_eq!(
std::mem::size_of::<FileHeader>(),
HEADER_SIZE,
"FileHeader size must be exactly 192 bytes"
);
}
#[test]
fn test_file_header_alignment() {
assert_eq!(
std::mem::size_of::<FileHeader>(),
12 + 8 + 4 + 32 + 64 + 32 + 4 + 36,
"FileHeader fields should sum to 192 bytes"
);
}
#[test]
fn test_file_header_default_values() {
let header = FileHeader::new();
let magic = header.magic;
let version = header.version;
let min_reader_version = header.min_reader_version;
let page_size = header.page_size;
let flags = header.flags;
let endianness = header.endianness;
let node_count = header.node_count;
let node_capacity = header.node_capacity;
let edge_count = header.edge_count;
let edge_capacity = header.edge_capacity;
let string_table_offset = header.string_table_offset;
let string_table_end = header.string_table_end;
let property_arena_offset = header.property_arena_offset;
let arena_next_offset = header.arena_next_offset;
let free_node_head = header.free_node_head;
let free_edge_head = header.free_edge_head;
let next_node_id = header.next_node_id;
let next_edge_id = header.next_edge_id;
let schema_offset = header.schema_offset;
let schema_size = header.schema_size;
let schema_version = header.schema_version;
assert_eq!(magic, MAGIC);
assert_eq!(version, VERSION);
assert_eq!(min_reader_version, VERSION);
assert_eq!(page_size, DEFAULT_PAGE_SIZE);
assert_eq!(flags, 0);
assert_eq!(endianness, ENDIAN_LITTLE);
assert_eq!(node_count, 0);
assert_eq!(node_capacity, 0);
assert_eq!(edge_count, 0);
assert_eq!(edge_capacity, 0);
assert_eq!(string_table_offset, 0);
assert_eq!(string_table_end, 0);
assert_eq!(property_arena_offset, 0);
assert_eq!(arena_next_offset, 0);
assert_eq!(free_node_head, u64::MAX);
assert_eq!(free_edge_head, u64::MAX);
assert_eq!(next_node_id, 0);
assert_eq!(next_edge_id, 0);
assert_eq!(schema_offset, 0);
assert_eq!(schema_size, 0);
assert_eq!(schema_version, 0);
}
#[test]
fn test_file_header_roundtrip() {
let mut header = FileHeader::new();
header.node_count = 100;
header.node_capacity = 1000;
header.edge_count = 500;
header.edge_capacity = 5000;
header.string_table_offset = 123456;
header.string_table_end = 124000;
header.property_arena_offset = 789012;
header.arena_next_offset = 800000;
header.free_node_head = 42;
header.free_edge_head = 99;
header.next_node_id = 150;
header.next_edge_id = 600;
header.schema_offset = 900000;
header.schema_size = 2048;
header.schema_version = 1;
header.update_crc32();
let orig_magic = header.magic;
let orig_version = header.version;
let orig_min_reader_version = header.min_reader_version;
let orig_page_size = header.page_size;
let orig_flags = header.flags;
let orig_endianness = header.endianness;
let orig_node_count = header.node_count;
let orig_node_capacity = header.node_capacity;
let orig_edge_count = header.edge_count;
let orig_edge_capacity = header.edge_capacity;
let orig_string_table_offset = header.string_table_offset;
let orig_string_table_end = header.string_table_end;
let orig_property_arena_offset = header.property_arena_offset;
let orig_arena_next_offset = header.arena_next_offset;
let orig_free_node_head = header.free_node_head;
let orig_free_edge_head = header.free_edge_head;
let orig_next_node_id = header.next_node_id;
let orig_next_edge_id = header.next_edge_id;
let orig_schema_offset = header.schema_offset;
let orig_schema_size = header.schema_size;
let orig_schema_version = header.schema_version;
let bytes = header.to_bytes();
assert_eq!(bytes.len(), HEADER_SIZE);
let recovered = FileHeader::from_bytes(&bytes);
let rec_magic = recovered.magic;
let rec_version = recovered.version;
let rec_min_reader_version = recovered.min_reader_version;
let rec_page_size = recovered.page_size;
let rec_flags = recovered.flags;
let rec_endianness = recovered.endianness;
let rec_node_count = recovered.node_count;
let rec_node_capacity = recovered.node_capacity;
let rec_edge_count = recovered.edge_count;
let rec_edge_capacity = recovered.edge_capacity;
let rec_string_table_offset = recovered.string_table_offset;
let rec_string_table_end = recovered.string_table_end;
let rec_property_arena_offset = recovered.property_arena_offset;
let rec_arena_next_offset = recovered.arena_next_offset;
let rec_free_node_head = recovered.free_node_head;
let rec_free_edge_head = recovered.free_edge_head;
let rec_next_node_id = recovered.next_node_id;
let rec_next_edge_id = recovered.next_edge_id;
let rec_schema_offset = recovered.schema_offset;
let rec_schema_size = recovered.schema_size;
let rec_schema_version = recovered.schema_version;
assert_eq!(rec_magic, orig_magic);
assert_eq!(rec_version, orig_version);
assert_eq!(rec_min_reader_version, orig_min_reader_version);
assert_eq!(rec_page_size, orig_page_size);
assert_eq!(rec_flags, orig_flags);
assert_eq!(rec_endianness, orig_endianness);
assert_eq!(rec_node_count, orig_node_count);
assert_eq!(rec_node_capacity, orig_node_capacity);
assert_eq!(rec_edge_count, orig_edge_count);
assert_eq!(rec_edge_capacity, orig_edge_capacity);
assert_eq!(rec_string_table_offset, orig_string_table_offset);
assert_eq!(rec_string_table_end, orig_string_table_end);
assert_eq!(rec_property_arena_offset, orig_property_arena_offset);
assert_eq!(rec_arena_next_offset, orig_arena_next_offset);
assert_eq!(rec_free_node_head, orig_free_node_head);
assert_eq!(rec_free_edge_head, orig_free_edge_head);
assert_eq!(rec_next_node_id, orig_next_node_id);
assert_eq!(rec_next_edge_id, orig_next_edge_id);
assert_eq!(rec_schema_offset, orig_schema_offset);
assert_eq!(rec_schema_size, orig_schema_size);
assert_eq!(rec_schema_version, orig_schema_version);
}
#[test]
fn test_file_header_transmute_safety() {
let header = FileHeader::new();
let bytes = header.to_bytes();
let _ = FileHeader::from_bytes(&bytes);
let magic_bytes: [u8; 4] = [bytes[0], bytes[1], bytes[2], bytes[3]];
let magic = u32::from_le_bytes(magic_bytes);
assert_eq!(magic, MAGIC);
let version_bytes: [u8; 4] = [bytes[4], bytes[5], bytes[6], bytes[7]];
let version = u32::from_le_bytes(version_bytes);
assert_eq!(version, VERSION);
}
#[test]
fn test_file_header_byte_order() {
let mut header = FileHeader::new();
header.node_count = 0x0102030405060708u64;
header.update_crc32();
let bytes = header.to_bytes();
let node_count_bytes: [u8; 8] = [
bytes[24], bytes[25], bytes[26], bytes[27], bytes[28], bytes[29], bytes[30], bytes[31],
];
assert_eq!(node_count_bytes[0], 0x08);
assert_eq!(node_count_bytes[7], 0x01);
}
#[test]
fn test_constants() {
assert_eq!(MAGIC, 0x47524D4C); assert_eq!(VERSION, 2);
assert_eq!(MIN_READABLE_VERSION, 1);
assert_eq!(HEADER_SIZE, 192);
assert_eq!(HEADER_SIZE_V1, 136);
assert_eq!(NODE_RECORD_SIZE, 48);
assert_eq!(EDGE_RECORD_SIZE, 56);
assert_eq!(DEFAULT_PAGE_SIZE, 4096);
assert_eq!(ENDIAN_LITTLE, 1);
assert_eq!(ENDIAN_BIG, 2);
}
#[test]
fn test_file_header_from_v1() {
let v1 = FileHeaderV1 {
magic: MAGIC,
version: 1,
node_count: 100,
node_capacity: 1000,
edge_count: 50,
edge_capacity: 500,
string_table_offset: 10000,
string_table_end: 12000,
property_arena_offset: 5000,
arena_next_offset: 6000,
free_node_head: 42,
free_edge_head: 99,
next_node_id: 101,
next_edge_id: 51,
schema_offset: 0,
schema_size: 0,
schema_version: 0,
_schema_reserved: [0u8; 12],
};
let v1_magic = v1.magic;
let v1_node_count = v1.node_count;
let v1_node_capacity = v1.node_capacity;
let v1_edge_count = v1.edge_count;
let v1_edge_capacity = v1.edge_capacity;
let v1_string_table_offset = v1.string_table_offset;
let v1_string_table_end = v1.string_table_end;
let v1_property_arena_offset = v1.property_arena_offset;
let v1_arena_next_offset = v1.arena_next_offset;
let v1_free_node_head = v1.free_node_head;
let v1_free_edge_head = v1.free_edge_head;
let v1_next_node_id = v1.next_node_id;
let v1_next_edge_id = v1.next_edge_id;
let v2 = FileHeader::from_v1(&v1);
let v2_magic = v2.magic;
let v2_version = v2.version;
let v2_node_count = v2.node_count;
let v2_node_capacity = v2.node_capacity;
let v2_edge_count = v2.edge_count;
let v2_edge_capacity = v2.edge_capacity;
let v2_string_table_offset = v2.string_table_offset;
let v2_string_table_end = v2.string_table_end;
let v2_property_arena_offset = v2.property_arena_offset;
let v2_arena_next_offset = v2.arena_next_offset;
let v2_free_node_head = v2.free_node_head;
let v2_free_edge_head = v2.free_edge_head;
let v2_next_node_id = v2.next_node_id;
let v2_next_edge_id = v2.next_edge_id;
let v2_min_reader_version = v2.min_reader_version;
let v2_page_size = v2.page_size;
let v2_flags = v2.flags;
let v2_endianness = v2.endianness;
assert_eq!(v2_magic, v1_magic);
assert_eq!(v2_version, 1); assert_eq!(v2_node_count, v1_node_count);
assert_eq!(v2_node_capacity, v1_node_capacity);
assert_eq!(v2_edge_count, v1_edge_count);
assert_eq!(v2_edge_capacity, v1_edge_capacity);
assert_eq!(v2_string_table_offset, v1_string_table_offset);
assert_eq!(v2_string_table_end, v1_string_table_end);
assert_eq!(v2_property_arena_offset, v1_property_arena_offset);
assert_eq!(v2_arena_next_offset, v1_arena_next_offset);
assert_eq!(v2_free_node_head, v1_free_node_head);
assert_eq!(v2_free_edge_head, v1_free_edge_head);
assert_eq!(v2_next_node_id, v1_next_node_id);
assert_eq!(v2_next_edge_id, v1_next_edge_id);
assert_eq!(v2_min_reader_version, 1);
assert_eq!(v2_page_size, DEFAULT_PAGE_SIZE);
assert_eq!(v2_flags, 0);
assert_eq!(v2_endianness, ENDIAN_LITTLE);
}
#[test]
fn test_file_header_crc32_validation() {
let header = FileHeader::new();
assert!(header.validate_crc32());
}
#[test]
fn test_file_header_crc32_detects_corruption() {
let mut header = FileHeader::new();
header.node_count = 12345;
assert!(!header.validate_crc32());
header.update_crc32();
assert!(header.validate_crc32());
}
#[test]
fn test_file_header_crc32_roundtrip() {
let mut header = FileHeader::new();
header.node_count = 100;
header.edge_count = 200;
header.update_crc32();
let bytes = header.to_bytes();
let recovered = FileHeader::from_bytes(&bytes);
assert!(recovered.validate_crc32());
}
#[test]
fn test_file_header_v1_skips_crc_validation() {
let v1 = FileHeaderV1 {
magic: MAGIC,
version: 1,
node_count: 0,
node_capacity: 0,
edge_count: 0,
edge_capacity: 0,
string_table_offset: 0,
string_table_end: 0,
property_arena_offset: 0,
arena_next_offset: 0,
free_node_head: u64::MAX,
free_edge_head: u64::MAX,
next_node_id: 0,
next_edge_id: 0,
schema_offset: 0,
schema_size: 0,
schema_version: 0,
_schema_reserved: [0u8; 12],
};
let v2 = FileHeader::from_v1(&v1);
assert!(v2.validate_crc32());
}
#[test]
fn test_node_record_size() {
assert_eq!(
std::mem::size_of::<NodeRecord>(),
NODE_RECORD_SIZE,
"NodeRecord size must be exactly 48 bytes"
);
}
#[test]
fn test_node_record_alignment() {
assert_eq!(
std::mem::size_of::<NodeRecord>(),
8 + 4 + 4 + 8 + 8 + 8 + 8,
"NodeRecord fields should sum to 48 bytes"
);
}
#[test]
fn test_node_record_new() {
let record = NodeRecord::new(42, 7);
let id = record.id;
let label_id = record.label_id;
let flags = record.flags;
let first_out_edge = record.first_out_edge;
let first_in_edge = record.first_in_edge;
let prop_head = record.prop_head;
assert_eq!(id, 42);
assert_eq!(label_id, 7);
assert_eq!(flags, 0);
assert_eq!(first_out_edge, u64::MAX);
assert_eq!(first_in_edge, u64::MAX);
assert_eq!(prop_head, u64::MAX);
assert!(!record.is_deleted());
}
#[test]
fn test_node_record_deleted_flag() {
let mut record = NodeRecord::new(0, 0);
assert!(!record.is_deleted());
record.mark_deleted();
assert!(record.is_deleted());
let flags = record.flags;
assert_eq!(flags & NODE_FLAG_DELETED, NODE_FLAG_DELETED);
}
#[test]
fn test_node_record_roundtrip() {
let mut record = NodeRecord::new(123, 456);
record.flags = 0x0003; record.first_out_edge = 789;
record.first_in_edge = 101112;
record.prop_head = 131415;
let orig_id = record.id;
let orig_label_id = record.label_id;
let orig_flags = record.flags;
let orig_first_out_edge = record.first_out_edge;
let orig_first_in_edge = record.first_in_edge;
let orig_prop_head = record.prop_head;
let bytes = record.to_bytes();
assert_eq!(bytes.len(), NODE_RECORD_SIZE);
let recovered = NodeRecord::from_bytes(&bytes);
let rec_id = recovered.id;
let rec_label_id = recovered.label_id;
let rec_flags = recovered.flags;
let rec_first_out_edge = recovered.first_out_edge;
let rec_first_in_edge = recovered.first_in_edge;
let rec_prop_head = recovered.prop_head;
assert_eq!(rec_id, orig_id);
assert_eq!(rec_label_id, orig_label_id);
assert_eq!(rec_flags, orig_flags);
assert_eq!(rec_first_out_edge, orig_first_out_edge);
assert_eq!(rec_first_in_edge, orig_first_in_edge);
assert_eq!(rec_prop_head, orig_prop_head);
}
#[test]
fn test_node_record_byte_order() {
let record = NodeRecord::new(0x0102030405060708u64, 0x090A0B0Cu32);
let bytes = record.to_bytes();
let id_bytes: [u8; 8] = [
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
];
assert_eq!(id_bytes[0], 0x08); assert_eq!(id_bytes[7], 0x01);
let label_id_bytes: [u8; 4] = [bytes[8], bytes[9], bytes[10], bytes[11]];
assert_eq!(label_id_bytes[0], 0x0C); assert_eq!(label_id_bytes[3], 0x09);
}
#[test]
fn test_node_record_no_unexpected_padding() {
let record = NodeRecord::new(0, 0);
let bytes = record.to_bytes();
assert_eq!(bytes.len(), 48);
}
#[test]
fn test_edge_record_size() {
assert_eq!(
std::mem::size_of::<EdgeRecord>(),
EDGE_RECORD_SIZE,
"EdgeRecord size must be exactly 56 bytes"
);
}
#[test]
fn test_edge_record_alignment() {
assert_eq!(
std::mem::size_of::<EdgeRecord>(),
8 + 4 + 4 + 8 + 8 + 8 + 8 + 8,
"EdgeRecord fields should sum to 56 bytes"
);
}
#[test]
fn test_edge_record_new() {
let record = EdgeRecord::new(42, 7, 100, 200);
let id = record.id;
let label_id = record.label_id;
let flags = record.flags;
let src = record.src;
let dst = record.dst;
let next_out = record.next_out;
let next_in = record.next_in;
let prop_head = record.prop_head;
assert_eq!(id, 42);
assert_eq!(label_id, 7);
assert_eq!(flags, 0);
assert_eq!(src, 100);
assert_eq!(dst, 200);
assert_eq!(next_out, u64::MAX);
assert_eq!(next_in, u64::MAX);
assert_eq!(prop_head, u64::MAX);
assert!(!record.is_deleted());
}
#[test]
fn test_edge_record_deleted_flag() {
let mut record = EdgeRecord::new(0, 0, 0, 0);
assert!(!record.is_deleted());
record.mark_deleted();
assert!(record.is_deleted());
let flags = record.flags;
assert_eq!(flags & EDGE_FLAG_DELETED, EDGE_FLAG_DELETED);
}
#[test]
fn test_edge_record_roundtrip() {
let mut record = EdgeRecord::new(123, 456, 789, 101112);
record.flags = 0x0001; record.next_out = 131415;
record.next_in = 161718;
record.prop_head = 192021;
let orig_id = record.id;
let orig_label_id = record.label_id;
let orig_flags = record.flags;
let orig_src = record.src;
let orig_dst = record.dst;
let orig_next_out = record.next_out;
let orig_next_in = record.next_in;
let orig_prop_head = record.prop_head;
let bytes = record.to_bytes();
assert_eq!(bytes.len(), EDGE_RECORD_SIZE);
let recovered = EdgeRecord::from_bytes(&bytes);
let rec_id = recovered.id;
let rec_label_id = recovered.label_id;
let rec_flags = recovered.flags;
let rec_src = recovered.src;
let rec_dst = recovered.dst;
let rec_next_out = recovered.next_out;
let rec_next_in = recovered.next_in;
let rec_prop_head = recovered.prop_head;
assert_eq!(rec_id, orig_id);
assert_eq!(rec_label_id, orig_label_id);
assert_eq!(rec_flags, orig_flags);
assert_eq!(rec_src, orig_src);
assert_eq!(rec_dst, orig_dst);
assert_eq!(rec_next_out, orig_next_out);
assert_eq!(rec_next_in, orig_next_in);
assert_eq!(rec_prop_head, orig_prop_head);
assert!(recovered.is_deleted());
}
#[test]
fn test_edge_record_byte_order() {
let record = EdgeRecord::new(
0x0102030405060708u64,
0x090A0B0Cu32,
0x0D0E0F1011121314u64,
0x1516171819202122u64,
);
let bytes = record.to_bytes();
let id_bytes: [u8; 8] = [
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
];
assert_eq!(id_bytes[0], 0x08); assert_eq!(id_bytes[7], 0x01);
let label_id_bytes: [u8; 4] = [bytes[8], bytes[9], bytes[10], bytes[11]];
assert_eq!(label_id_bytes[0], 0x0C); assert_eq!(label_id_bytes[3], 0x09);
let src_bytes: [u8; 8] = [
bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22], bytes[23],
];
assert_eq!(src_bytes[0], 0x14); assert_eq!(src_bytes[7], 0x0D);
}
#[test]
fn test_edge_record_no_unexpected_padding() {
let record = EdgeRecord::new(0, 0, 0, 0);
let bytes = record.to_bytes();
assert_eq!(bytes.len(), 56);
}
#[test]
fn test_edge_record_linked_list_pointers() {
let mut edge1 = EdgeRecord::new(0, 0, 10, 20);
let mut edge2 = EdgeRecord::new(1, 0, 10, 30);
let mut edge3 = EdgeRecord::new(2, 0, 10, 40);
edge1.next_out = 1; edge2.next_out = 2; edge3.next_out = u64::MAX;
let next_out_1 = edge1.next_out;
let next_out_2 = edge2.next_out;
let next_out_3 = edge3.next_out;
assert_eq!(next_out_1, 1);
assert_eq!(next_out_2, 2);
assert_eq!(next_out_3, u64::MAX);
let bytes1 = edge1.to_bytes();
let recovered1 = EdgeRecord::from_bytes(&bytes1);
let recovered_next_out = recovered1.next_out;
assert_eq!(recovered_next_out, 1);
}
#[test]
fn test_flag_constants() {
assert_eq!(NODE_FLAG_DELETED, 0x0001);
assert_eq!(NODE_FLAG_INDEXED, 0x0002);
assert_eq!(EDGE_FLAG_DELETED, 0x0001);
let combined = NODE_FLAG_DELETED | NODE_FLAG_INDEXED;
assert_eq!(combined, 0x0003);
assert_ne!(combined & NODE_FLAG_DELETED, 0);
assert_ne!(combined & NODE_FLAG_INDEXED, 0);
}
#[test]
fn test_property_entry_size() {
assert_eq!(
std::mem::size_of::<PropertyEntry>(),
PROPERTY_ENTRY_HEADER_SIZE,
"PropertyEntry size must be exactly 17 bytes"
);
}
#[test]
fn test_property_entry_alignment() {
assert_eq!(
std::mem::size_of::<PropertyEntry>(),
4 + 1 + 4 + 8,
"PropertyEntry fields should sum to 17 bytes"
);
}
#[test]
fn test_property_entry_new() {
let entry = PropertyEntry::new(42, 5, 128, 1024);
let key_id = entry.key_id;
let value_type = entry.value_type;
let value_len = entry.value_len;
let next = entry.next;
assert_eq!(key_id, 42);
assert_eq!(value_type, 5);
assert_eq!(value_len, 128);
assert_eq!(next, 1024);
}
#[test]
fn test_property_entry_roundtrip() {
let entry = PropertyEntry::new(123, 7, 256, 4096);
let orig_key_id = entry.key_id;
let orig_value_type = entry.value_type;
let orig_value_len = entry.value_len;
let orig_next = entry.next;
let bytes = entry.to_bytes();
assert_eq!(bytes.len(), PROPERTY_ENTRY_HEADER_SIZE);
let recovered = PropertyEntry::from_bytes(&bytes);
let rec_key_id = recovered.key_id;
let rec_value_type = recovered.value_type;
let rec_value_len = recovered.value_len;
let rec_next = recovered.next;
assert_eq!(rec_key_id, orig_key_id);
assert_eq!(rec_value_type, orig_value_type);
assert_eq!(rec_value_len, orig_value_len);
assert_eq!(rec_next, orig_next);
}
#[test]
/// Serialized `PropertyEntry` bytes are little-endian at fixed offsets:
/// key_id @ 0..4, value_type @ 4, value_len @ 5..9, next @ 9..17.
fn test_property_entry_byte_order() {
    let entry = PropertyEntry::new(0x01020304u32, 0xAAu8, 0x05060708u32, 0x090A0B0C0D0E0F10u64);
    let bytes = entry.to_bytes();
    // key_id: least-significant byte first.
    assert_eq!(bytes[0], 0x04);
    assert_eq!(bytes[3], 0x01);
    // value_type is a single byte, stored verbatim.
    assert_eq!(bytes[4], 0xAA);
    // value_len immediately follows with no padding.
    assert_eq!(bytes[5], 0x08);
    assert_eq!(bytes[8], 0x05);
    // next: 8 bytes, little-endian.
    assert_eq!(bytes[9], 0x10);
    assert_eq!(bytes[16], 0x09);
}
#[test]
/// `u64::MAX` in `next` (the end-of-list sentinel) survives a round trip.
fn test_property_entry_end_of_list() {
    let bytes = PropertyEntry::new(1, 2, 3, u64::MAX).to_bytes();
    let decoded = PropertyEntry::from_bytes(&bytes);
    let next = decoded.next;
    assert_eq!(next, u64::MAX, "End-of-list marker should be u64::MAX");
}
#[test]
/// An all-zero entry still serializes to exactly 17 bytes (no hidden padding).
fn test_property_entry_no_unexpected_padding() {
    let serialized = PropertyEntry::new(0, 0, 0, 0).to_bytes();
    assert_eq!(serialized.len(), 17);
}
#[test]
/// `StringEntry`'s in-memory size must equal its serialized size constant.
fn test_string_entry_size() {
    let actual = std::mem::size_of::<StringEntry>();
    assert_eq!(
        actual, STRING_ENTRY_HEADER_SIZE,
        "StringEntry size must be exactly 8 bytes"
    );
}
#[test]
/// Field sizes (u32 + u32) sum to the struct size: no padding inserted.
fn test_string_entry_alignment() {
    let field_sum = 4 + 4;
    assert_eq!(
        std::mem::size_of::<StringEntry>(),
        field_sum,
        "StringEntry fields should sum to 8 bytes"
    );
}
#[test]
/// `StringEntry::new` stores its arguments verbatim.
fn test_string_entry_new() {
    let entry = StringEntry::new(42, 128);
    // Copy packed fields to locals before asserting.
    let (id, len) = (entry.id, entry.len);
    assert_eq!(id, 42);
    assert_eq!(len, 128);
}
#[test]
/// Serializing then deserializing a `StringEntry` preserves both fields.
fn test_string_entry_roundtrip() {
    let original = StringEntry::new(123, 456);
    let bytes = original.to_bytes();
    assert_eq!(bytes.len(), STRING_ENTRY_HEADER_SIZE);
    let decoded = StringEntry::from_bytes(&bytes);
    // Copy packed fields to locals, then compare pairwise.
    let before = (original.id, original.len);
    let after = (decoded.id, decoded.len);
    assert_eq!(after, before);
}
#[test]
/// Serialized `StringEntry` bytes are little-endian: id @ 0..4, len @ 4..8.
fn test_string_entry_byte_order() {
    let bytes = StringEntry::new(0x01020304u32, 0x05060708u32).to_bytes();
    // id: least-significant byte first.
    assert_eq!(bytes[0], 0x04);
    assert_eq!(bytes[3], 0x01);
    // len follows immediately, also little-endian.
    assert_eq!(bytes[4], 0x08);
    assert_eq!(bytes[7], 0x05);
}
#[test]
/// A zero-length string entry is valid and round-trips unchanged.
fn test_string_entry_zero_length() {
    let decoded = StringEntry::from_bytes(&StringEntry::new(1, 0).to_bytes());
    let len = decoded.len;
    assert_eq!(len, 0, "Zero-length string should be valid");
}
#[test]
/// The maximum representable length (`u32::MAX`) survives a round trip.
fn test_string_entry_large_length() {
    let decoded = StringEntry::from_bytes(&StringEntry::new(1, u32::MAX).to_bytes());
    let len = decoded.len;
    assert_eq!(len, u32::MAX, "Large string length should be preserved");
}
#[test]
/// An all-zero entry still serializes to exactly 8 bytes (no hidden padding).
fn test_string_entry_no_unexpected_padding() {
    let serialized = StringEntry::new(0, 0).to_bytes();
    assert_eq!(serialized.len(), 8);
}
#[test]
/// Every on-disk record type's in-memory size matches its size constant.
fn test_all_record_sizes() {
    use std::mem::size_of;
    assert_eq!(size_of::<FileHeader>(), HEADER_SIZE);
    assert_eq!(size_of::<FileHeaderV1>(), HEADER_SIZE_V1);
    assert_eq!(size_of::<NodeRecord>(), NODE_RECORD_SIZE);
    assert_eq!(size_of::<EdgeRecord>(), EDGE_RECORD_SIZE);
    assert_eq!(size_of::<PropertyEntry>(), PROPERTY_ENTRY_HEADER_SIZE);
    assert_eq!(size_of::<StringEntry>(), STRING_ENTRY_HEADER_SIZE);
}
#[test]
/// File-format constants are a compatibility contract; pin every value.
fn test_constant_values() {
    assert_eq!(MAGIC, 0x47524D4C); // ASCII "GRML"
    assert_eq!(VERSION, 2);
    assert_eq!(MIN_READABLE_VERSION, 1);
    // Header and record sizes fix the on-disk layout.
    assert_eq!(HEADER_SIZE, 192);
    assert_eq!(HEADER_SIZE_V1, 136);
    assert_eq!(NODE_RECORD_SIZE, 48);
    assert_eq!(EDGE_RECORD_SIZE, 56);
    assert_eq!(PROPERTY_ENTRY_HEADER_SIZE, 17);
    assert_eq!(STRING_ENTRY_HEADER_SIZE, 8);
    assert_eq!(DEFAULT_PAGE_SIZE, 4096);
    assert_eq!(ENDIAN_LITTLE, 1);
    assert_eq!(ENDIAN_BIG, 2);
}
#[test]
/// `QueryRegionHeader`'s in-memory size must equal its serialized size.
fn test_query_region_header_size() {
    let actual = std::mem::size_of::<QueryRegionHeader>();
    assert_eq!(
        actual, QUERY_REGION_HEADER_SIZE,
        "QueryRegionHeader size must be exactly 16 bytes"
    );
}
#[test]
/// Field sizes (u32 + u32 + u64) sum to the struct size: no padding inserted.
fn test_query_region_header_alignment() {
    let field_sum = 4 + 4 + 8;
    assert_eq!(
        std::mem::size_of::<QueryRegionHeader>(),
        field_sum,
        "QueryRegionHeader fields should sum to 16 bytes"
    );
}
#[test]
/// A freshly constructed header carries the region magic, the current
/// version, an empty query list (u64::MAX sentinel), and validates.
fn test_query_region_header_new() {
    let header = QueryRegionHeader::new();
    // Copy packed fields to locals before asserting.
    let (magic, version, first) = (header.magic, header.version, header.first_query);
    assert_eq!(magic, QUERY_REGION_MAGIC);
    assert_eq!(version, QUERY_REGION_VERSION);
    assert_eq!(first, u64::MAX);
    assert!(header.is_valid());
}
#[test]
/// A header with a non-default `first_query` survives serialization.
fn test_query_region_header_roundtrip() {
    let mut header = QueryRegionHeader::new();
    header.first_query = 1024;
    let bytes = header.to_bytes();
    assert_eq!(bytes.len(), QUERY_REGION_HEADER_SIZE);
    let decoded = QueryRegionHeader::from_bytes(&bytes);
    // Copy packed fields to locals, then compare pairwise.
    let before = (header.magic, header.version, header.first_query);
    let after = (decoded.magic, decoded.version, decoded.first_query);
    assert_eq!(after.0, before.0);
    assert_eq!(after.1, before.1);
    assert_eq!(after.2, before.2);
}
#[test]
/// Corrupting the magic number must make validation fail.
fn test_query_region_header_invalid_magic() {
    let mut header = QueryRegionHeader::new();
    header.magic = 0x12345678; // anything other than QUERY_REGION_MAGIC
    assert!(!header.is_valid());
}
#[test]
/// `QueryRecord`'s in-memory size must equal its serialized header size.
fn test_query_record_size() {
    let actual = std::mem::size_of::<QueryRecord>();
    assert_eq!(
        actual, QUERY_RECORD_HEADER_SIZE,
        "QueryRecord size must be exactly 36 bytes"
    );
}
#[test]
/// Field sizes (u32 id + u16 flags/counts/lengths + u32 lengths + two u64
/// links) sum to the struct size: no padding inserted.
fn test_query_record_alignment() {
    let field_sum = 4 + 2 + 2 + 2 + 2 + 4 + 4 + 8 + 8;
    assert_eq!(
        std::mem::size_of::<QueryRecord>(),
        field_sum,
        "QueryRecord fields should sum to 36 bytes"
    );
}
#[test]
/// `QueryRecord::new` stores every argument, encodes the query type in the
/// flags word, and initializes both list links to the u64::MAX sentinel.
fn test_query_record_new() {
    let record = QueryRecord::new(42, QUERY_TYPE_GREMLIN, 2, 10, 50, 100, 200);
    // Copy packed fields to locals before asserting.
    let id = record.id;
    assert_eq!(id, 42);
    assert_eq!(record.query_type(), QUERY_TYPE_GREMLIN);
    let param_count = record.param_count;
    assert_eq!(param_count, 2);
    let name_len = record.name_len;
    assert_eq!(name_len, 10);
    let description_len = record.description_len;
    assert_eq!(description_len, 50);
    let query_len = record.query_len;
    assert_eq!(query_len, 100);
    let record_size = record.record_size;
    assert_eq!(record_size, 200);
    // A new record is detached: sentinel links on both sides, not deleted.
    let (next, prev) = (record.next, record.prev);
    assert_eq!(next, u64::MAX);
    assert_eq!(prev, u64::MAX);
    assert!(!record.is_deleted());
}
#[test]
/// Both query languages round-trip through the flags-word encoding.
fn test_query_record_query_types() {
    for (id, qtype) in [(1, QUERY_TYPE_GREMLIN), (2, QUERY_TYPE_GQL)] {
        let record = QueryRecord::new(id, qtype, 0, 0, 0, 0, 0);
        assert_eq!(record.query_type(), qtype);
    }
}
#[test]
/// `mark_deleted` sets the deleted bit without clobbering the query type.
fn test_query_record_deleted_flag() {
    let mut record = QueryRecord::new(1, QUERY_TYPE_GREMLIN, 0, 0, 0, 0, 0);
    assert!(!record.is_deleted());
    record.mark_deleted();
    assert!(record.is_deleted());
    // The type bits share the flags word; deletion must leave them intact.
    assert_eq!(record.query_type(), QUERY_TYPE_GREMLIN);
}
#[test]
/// A fully populated record (including patched list links) survives
/// serialization and deserialization with every field intact.
fn test_query_record_roundtrip() {
    let mut record = QueryRecord::new(123, QUERY_TYPE_GQL, 3, 20, 100, 500, 700);
    record.next = 1000;
    record.prev = 500;
    let bytes = record.to_bytes();
    assert_eq!(bytes.len(), QUERY_RECORD_HEADER_SIZE);
    let decoded = QueryRecord::from_bytes(&bytes);
    // Copy packed fields into tuples of locals, then compare in one shot.
    let before = (
        record.id,
        record.flags,
        record.param_count,
        record.name_len,
        record.description_len,
        record.query_len,
        record.record_size,
        record.next,
        record.prev,
    );
    let after = (
        decoded.id,
        decoded.flags,
        decoded.param_count,
        decoded.name_len,
        decoded.description_len,
        decoded.query_len,
        decoded.record_size,
        decoded.next,
        decoded.prev,
    );
    assert_eq!(after, before);
}
#[test]
/// Serialized `QueryRecord` fields are little-endian at their packed offsets:
/// id @ 0..4, flags @ 4..6, param_count @ 6..8, name_len @ 8..10,
/// description_len @ 10..12, query_len @ 12..16, record_size @ 16..20,
/// next @ 20..28, prev @ 28..36.
///
/// The original test built distinctive byte patterns for every field but
/// only asserted the id bytes; this verifies the remaining fields too.
fn test_query_record_byte_order() {
    let record = QueryRecord::new(
        0x01020304u32,
        QUERY_TYPE_GREMLIN,
        0x0506u16,
        0x0708u16,
        0x090Au16,
        0x0B0C0D0Eu32,
        0x0F101112u32,
    );
    let bytes = record.to_bytes();
    // id: least-significant byte first.
    assert_eq!(bytes[0], 0x04);
    assert_eq!(bytes[3], 0x01);
    // param_count / name_len / description_len: u16, little-endian.
    assert_eq!(bytes[6], 0x06);
    assert_eq!(bytes[7], 0x05);
    assert_eq!(bytes[8], 0x08);
    assert_eq!(bytes[9], 0x07);
    assert_eq!(bytes[10], 0x0A);
    assert_eq!(bytes[11], 0x09);
    // query_len and record_size: u32, little-endian.
    assert_eq!(bytes[12], 0x0E);
    assert_eq!(bytes[15], 0x0B);
    assert_eq!(bytes[16], 0x12);
    assert_eq!(bytes[19], 0x0F);
    // next/prev start as the u64::MAX sentinel, so those 16 bytes are 0xFF.
    assert!(bytes[20..36].iter().all(|&b| b == 0xFF));
}
#[test]
/// `ParameterEntry`'s in-memory size must equal its serialized header size.
fn test_parameter_entry_size() {
    let actual = std::mem::size_of::<ParameterEntry>();
    assert_eq!(
        actual, PARAMETER_ENTRY_HEADER_SIZE,
        "ParameterEntry size must be exactly 4 bytes"
    );
}
#[test]
/// Field sizes (u16 + u8 + reserved u8) sum to the struct size: no padding.
fn test_parameter_entry_alignment() {
    let field_sum = 2 + 1 + 1;
    assert_eq!(
        std::mem::size_of::<ParameterEntry>(),
        field_sum,
        "ParameterEntry fields should sum to 4 bytes"
    );
}
#[test]
/// `ParameterEntry::new` stores its arguments and zeroes the reserved byte.
fn test_parameter_entry_new() {
    let entry = ParameterEntry::new(10, 0x02);
    // Copy packed fields to locals before asserting.
    let (name_len, value_type, reserved) = (entry.name_len, entry.value_type, entry._reserved);
    assert_eq!(name_len, 10);
    assert_eq!(value_type, 0x02);
    assert_eq!(reserved, 0);
}
#[test]
/// `new_any` is shorthand for a parameter of the wildcard value type.
fn test_parameter_entry_new_any() {
    let entry = ParameterEntry::new_any(15);
    let (name_len, value_type) = (entry.name_len, entry.value_type);
    assert_eq!(name_len, 15);
    assert_eq!(value_type, PARAMETER_TYPE_ANY);
}
#[test]
/// Serializing then deserializing a `ParameterEntry` preserves both fields.
fn test_parameter_entry_roundtrip() {
    let original = ParameterEntry::new(25, 0x04);
    let bytes = original.to_bytes();
    assert_eq!(bytes.len(), PARAMETER_ENTRY_HEADER_SIZE);
    let decoded = ParameterEntry::from_bytes(&bytes);
    // Copy packed fields to locals, then compare pairwise.
    let before = (original.name_len, original.value_type);
    let after = (decoded.name_len, decoded.value_type);
    assert_eq!(after, before);
}
#[test]
/// `total_size` is the fixed 4-byte header plus the variable-length name.
fn test_parameter_entry_total_size() {
    let name_len = 20;
    let entry = ParameterEntry::new(name_len, 0x02);
    assert_eq!(entry.total_size(), PARAMETER_ENTRY_HEADER_SIZE + name_len as usize);
}
#[test]
/// Serialized `ParameterEntry` layout: name_len (little-endian u16) @ 0..2,
/// value_type @ 2, reserved zero byte @ 3.
fn test_parameter_entry_byte_order() {
    let bytes = ParameterEntry::new(0x0102u16, 0xAB).to_bytes();
    assert_eq!(bytes[0], 0x02); // low byte of name_len
    assert_eq!(bytes[1], 0x01); // high byte of name_len
    assert_eq!(bytes[2], 0xAB);
    assert_eq!(bytes[3], 0x00); // reserved byte is always zero
}
#[test]
/// Query-storage constants are part of the on-disk format; pin every value.
fn test_query_storage_constants() {
    assert_eq!(QUERY_REGION_MAGIC, 0x51525953); // ASCII "QRYS"
    assert_eq!(QUERY_REGION_VERSION, 1);
    assert_eq!(QUERY_REGION_HEADER_SIZE, 16);
    assert_eq!(QUERY_RECORD_HEADER_SIZE, 36);
    assert_eq!(PARAMETER_ENTRY_HEADER_SIZE, 4);
    // Flags word layout: bit 0 = deleted, bits 1-2 = query type.
    assert_eq!(QUERY_FLAG_DELETED, 0x0001);
    assert_eq!(QUERY_TYPE_MASK, 0x0006);
    assert_eq!(QUERY_TYPE_SHIFT, 1);
    assert_eq!(QUERY_TYPE_GREMLIN, 1);
    assert_eq!(QUERY_TYPE_GQL, 2);
    assert_eq!(PARAMETER_TYPE_ANY, 0xFF);
}
#[test]
/// A new `FileHeader` has no query region: every query-metadata accessor
/// reports zero.
fn test_file_header_query_metadata_default() {
    let header = FileHeader::new();
    assert!(!header.has_query_region());
    assert_eq!(header.query_store_offset(), 0);
    assert_eq!(header.query_store_end(), 0);
    assert_eq!(header.query_count(), 0);
    assert_eq!(header.next_query_id(), 0);
}
#[test]
/// Each query-metadata setter is observable through its getter, and a
/// nonzero store offset flips `has_query_region` on.
fn test_file_header_query_metadata_setters() {
    let mut header = FileHeader::new();
    header.set_query_store_offset(1000);
    header.set_query_store_end(2000);
    header.set_query_count(5);
    header.set_next_query_id(10);
    assert!(header.has_query_region());
    assert_eq!(header.query_store_offset(), 1000);
    assert_eq!(header.query_store_end(), 2000);
    assert_eq!(header.query_count(), 5);
    assert_eq!(header.next_query_id(), 10);
}
#[test]
/// Query metadata with high-entropy bit patterns survives a full header
/// serialization round trip (CRC refreshed before writing).
fn test_file_header_query_metadata_roundtrip() {
    let mut header = FileHeader::new();
    header.set_query_store_offset(0x123456789ABCDEF0);
    header.set_query_store_end(0xFEDCBA9876543210);
    header.set_query_count(0x12345678);
    header.set_next_query_id(0x87654321);
    header.update_crc32();
    let decoded = FileHeader::from_bytes(&header.to_bytes());
    assert_eq!(decoded.query_store_offset(), 0x123456789ABCDEF0);
    assert_eq!(decoded.query_store_end(), 0xFEDCBA9876543210);
    assert_eq!(decoded.query_count(), 0x12345678);
    assert_eq!(decoded.next_query_id(), 0x87654321);
    assert!(decoded.has_query_region());
}
#[test]
/// The stored CRC must still validate after a query-metadata setter runs.
fn test_file_header_query_metadata_crc_after_change() {
    let mut header = FileHeader::new();
    assert!(header.validate_crc32());
    header.set_query_store_offset(1000);
    // NOTE(review): this passing implies the setter keeps the checksum in
    // sync (or the CRC excludes this field) — confirm against FileHeader.
    assert!(header.validate_crc32());
}
#[test]
/// Query metadata occupies fixed little-endian offsets in the serialized
/// header: store offset @ 156..164, store end @ 164..172, count @ 172..176,
/// next id @ 176..180. Check the first and last byte of each field.
fn test_file_header_query_metadata_byte_layout() {
    let mut header = FileHeader::new();
    header.set_query_store_offset(0x0807060504030201);
    header.set_query_store_end(0x100F0E0D0C0B0A09);
    header.set_query_count(0x14131211);
    header.set_next_query_id(0x18171615);
    let bytes = header.to_bytes();
    // query_store_offset: u64, little-endian.
    assert_eq!(bytes[156], 0x01);
    assert_eq!(bytes[163], 0x08);
    // query_store_end: u64, little-endian.
    assert_eq!(bytes[164], 0x09);
    assert_eq!(bytes[171], 0x10);
    // query_count: four bytes.
    assert_eq!(bytes[172], 0x11);
    assert_eq!(bytes[175], 0x14);
    // next_query_id: four bytes.
    assert_eq!(bytes[176], 0x15);
    assert_eq!(bytes[179], 0x18);
}
#[test]
/// `has_query_region` tracks the store offset in both directions:
/// nonzero means present, zero means absent.
fn test_file_header_has_query_region() {
    let mut header = FileHeader::new();
    assert!(!header.has_query_region());
    header.set_query_store_offset(1);
    assert!(header.has_query_region());
    header.set_query_store_offset(0);
    assert!(!header.has_query_region());
}
}