use crate::backend::native::NativeBackendError;
use crate::backend::native::NativeResult;
use crate::backend::native::types::NodeFlags;
/// Byte-level layout constants for the V3 node record wire format.
pub mod constants {
/// Total size in bytes of the fixed-width metadata header.
pub const FIXED_METADATA_SIZE: usize = 44;
/// Largest payload (bytes) stored inline after the header; anything larger
/// is stored externally and referenced by an 8-byte offset.
pub const MAX_INLINE_DATA: usize = 64;
// ---- Declared field offsets within the fixed header ----
pub const ID_OFFSET: usize = 0;
pub const FLAGS_OFFSET: usize = 8;
pub const KIND_OFFSET: usize = 12;
pub const NAME_OFFSET: usize = 14;
pub const DATA_LEN_OFFSET: usize = 16;
// NOTE(review): `serialize` writes 2 reserved/padding bytes immediately
// after the data_len field (header bytes 18-19) and `deserialize` skips
// them, so on the wire the four fields below actually start at 20/28/32/40.
// The declared values 18/26/30/38 do not account for that padding and only
// add up to 42 of the 44 header bytes — confirm which layout is intended
// before using these offsets for direct field access.
pub const OUTGOING_CLUSTER_OFFSET: usize = 18;
pub const OUTGOING_COUNT_OFFSET: usize = 26;
pub const INCOMING_CLUSTER_OFFSET: usize = 30;
pub const INCOMING_COUNT_OFFSET: usize = 38;
/// High bit of the on-disk data_len field: set when the payload is external.
pub const EXTERNAL_DATA_FLAG: u16 = 0x8000;
/// Mask for the logical payload length (low 15 bits of the data_len field).
pub const MAX_DATA_LEN: u16 = 0x7FFF;
// ---- Field widths in bytes (used by serialize/deserialize) ----
pub const ID_SIZE: usize = 8;
pub const FLAGS_SIZE: usize = 4;
pub const KIND_OFFSET_SIZE: usize = 2;
pub const NAME_OFFSET_SIZE: usize = 2;
pub const DATA_LEN_SIZE: usize = 2;
pub const OUTGOING_CLUSTER_SIZE: usize = 8;
pub const OUTGOING_COUNT_SIZE: usize = 4;
pub const INCOMING_CLUSTER_SIZE: usize = 8;
pub const INCOMING_COUNT_SIZE: usize = 4;
}
pub const FIXED_METADATA_SIZE: usize = constants::FIXED_METADATA_SIZE;
pub const MAX_INLINE_DATA: usize = constants::MAX_INLINE_DATA;
/// In-memory form of a version-3 node record: a 44-byte fixed metadata header
/// plus either an inline payload or an 8-byte external payload offset.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NodeRecordV3 {
// Node identifier (full signed 64-bit range is round-tripped).
pub id: i64,
// Node flag bits (NodeFlags wraps a u32 serialized big-endian).
pub flags: NodeFlags,
// Offset of the node's kind — presumably into a string table (see
// `test_string_table_offsets`); confirm against the table's writer.
pub kind_offset: u16,
// Offset of the node's name — same presumed string-table semantics.
pub name_offset: u16,
// Logical payload length; on disk the high bit of this field is the
// external-storage marker (constants::EXTERNAL_DATA_FLAG).
pub data_len: u16,
// Some(payload) when the data is stored inline (<= MAX_INLINE_DATA bytes).
pub data_inline: Option<Vec<u8>>,
// Some(offset) when the payload is stored externally.
pub data_external_offset: Option<u64>,
// Location of the outgoing-edge cluster.
pub outgoing_cluster_offset: u64,
// Number of outgoing edges in that cluster.
pub outgoing_edge_count: u32,
// Location of the incoming-edge cluster.
pub incoming_cluster_offset: u64,
// Number of incoming edges in that cluster.
pub incoming_edge_count: u32,
}
impl NodeRecordV3 {
pub fn new_inline(
id: i64,
flags: NodeFlags,
kind_offset: u16,
name_offset: u16,
data: Vec<u8>,
outgoing_cluster_offset: u64,
outgoing_edge_count: u32,
incoming_cluster_offset: u64,
incoming_edge_count: u32,
) -> Self {
let data_len = data.len() as u16;
assert!(
data_len <= MAX_INLINE_DATA as u16,
"Inline data exceeds MAX_INLINE_DATA"
);
NodeRecordV3 {
id,
flags,
kind_offset,
name_offset,
data_len,
data_inline: Some(data),
data_external_offset: None,
outgoing_cluster_offset,
outgoing_edge_count,
incoming_cluster_offset,
incoming_edge_count,
}
}
pub fn new_external(
id: i64,
flags: NodeFlags,
kind_offset: u16,
name_offset: u16,
data_external_offset: u64,
data_len: u16,
outgoing_cluster_offset: u64,
outgoing_edge_count: u32,
incoming_cluster_offset: u64,
incoming_edge_count: u32,
) -> Self {
assert!(
data_len > MAX_INLINE_DATA as u16,
"External data must exceed MAX_INLINE_DATA"
);
NodeRecordV3 {
id,
flags,
kind_offset,
name_offset,
data_len,
data_inline: None,
data_external_offset: Some(data_external_offset),
outgoing_cluster_offset,
outgoing_edge_count,
incoming_cluster_offset,
incoming_edge_count,
}
}
pub fn id(&self) -> i64 {
self.id
}
pub fn is_inline(&self) -> bool {
self.data_inline.is_some()
}
pub fn is_external(&self) -> bool {
self.data_external_offset.is_some() || (self.data_len & constants::EXTERNAL_DATA_FLAG) != 0
}
pub fn data_len(&self) -> u16 {
self.data_len & constants::MAX_DATA_LEN
}
pub fn serialized_size(&self) -> usize {
let mut size = FIXED_METADATA_SIZE;
if self.is_inline() {
size += self.data_inline.as_ref().map(|d| d.len()).unwrap_or(0);
} else if self.is_external() {
size += 8;
}
size
}
pub fn serialize(&self) -> NativeResult<Vec<u8>> {
let mut buffer = Vec::with_capacity(self.serialized_size());
buffer.extend_from_slice(&self.id.to_be_bytes());
buffer.extend_from_slice(&self.flags.0.to_be_bytes());
buffer.extend_from_slice(&self.kind_offset.to_be_bytes());
buffer.extend_from_slice(&self.name_offset.to_be_bytes());
let encoded_data_len = if self.is_external() {
self.data_len | constants::EXTERNAL_DATA_FLAG
} else {
self.data_len
};
buffer.extend_from_slice(&encoded_data_len.to_be_bytes());
buffer.extend_from_slice(&[0u8; 2]);
buffer.extend_from_slice(&self.outgoing_cluster_offset.to_be_bytes());
buffer.extend_from_slice(&self.outgoing_edge_count.to_be_bytes());
buffer.extend_from_slice(&self.incoming_cluster_offset.to_be_bytes());
buffer.extend_from_slice(&self.incoming_edge_count.to_be_bytes());
assert_eq!(
buffer.len(),
FIXED_METADATA_SIZE,
"Fixed metadata must be exactly {} bytes",
FIXED_METADATA_SIZE
);
if let Some(ref data) = self.data_inline {
buffer.extend_from_slice(data);
} else if let Some(offset) = self.data_external_offset {
buffer.extend_from_slice(&offset.to_be_bytes());
}
Ok(buffer)
}
pub fn deserialize(bytes: &[u8]) -> NativeResult<Self> {
if bytes.len() < FIXED_METADATA_SIZE {
return Err(NativeBackendError::InvalidHeader {
field: "node_record".to_string(),
reason: format!(
"insufficient bytes: expected at least {}, found {}",
FIXED_METADATA_SIZE,
bytes.len()
),
});
}
let mut offset = 0;
let id = i64::from_be_bytes(
bytes[offset..offset + constants::ID_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.id".to_string(),
reason: "invalid ID bytes".to_string(),
})?,
);
offset += constants::ID_SIZE;
let flags = NodeFlags(u32::from_be_bytes(
bytes[offset..offset + constants::FLAGS_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.flags".to_string(),
reason: "invalid flags bytes".to_string(),
})?,
));
offset += constants::FLAGS_SIZE;
let kind_offset = u16::from_be_bytes(
bytes[offset..offset + constants::KIND_OFFSET_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.kind_offset".to_string(),
reason: "invalid kind_offset bytes".to_string(),
})?,
);
offset += constants::KIND_OFFSET_SIZE;
let name_offset = u16::from_be_bytes(
bytes[offset..offset + constants::NAME_OFFSET_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.name_offset".to_string(),
reason: "invalid name_offset bytes".to_string(),
})?,
);
offset += constants::NAME_OFFSET_SIZE;
let encoded_data_len = u16::from_be_bytes(
bytes[offset..offset + constants::DATA_LEN_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.data_len".to_string(),
reason: "invalid data_len bytes".to_string(),
})?,
);
offset += constants::DATA_LEN_SIZE;
let is_external = (encoded_data_len & constants::EXTERNAL_DATA_FLAG) != 0;
let _data_len = encoded_data_len & constants::MAX_DATA_LEN;
offset += 2;
let outgoing_cluster_offset = u64::from_be_bytes(
bytes[offset..offset + constants::OUTGOING_CLUSTER_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.outgoing_cluster_offset".to_string(),
reason: "invalid outgoing_cluster_offset bytes".to_string(),
})?,
);
offset += constants::OUTGOING_CLUSTER_SIZE;
let outgoing_edge_count = u32::from_be_bytes(
bytes[offset..offset + constants::OUTGOING_COUNT_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.outgoing_edge_count".to_string(),
reason: "invalid outgoing_edge_count bytes".to_string(),
})?,
);
offset += constants::OUTGOING_COUNT_SIZE;
let incoming_cluster_offset = u64::from_be_bytes(
bytes[offset..offset + constants::INCOMING_CLUSTER_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.incoming_cluster_offset".to_string(),
reason: "invalid incoming_cluster_offset bytes".to_string(),
})?,
);
offset += constants::INCOMING_CLUSTER_SIZE;
let incoming_edge_count = u32::from_be_bytes(
bytes[offset..offset + constants::INCOMING_COUNT_SIZE]
.try_into()
.map_err(|_| NativeBackendError::InvalidHeader {
field: "node_record.incoming_edge_count".to_string(),
reason: "invalid incoming_edge_count bytes".to_string(),
})?,
);
offset += constants::INCOMING_COUNT_SIZE;
assert_eq!(
offset, FIXED_METADATA_SIZE,
"Offset should be at end of fixed metadata"
);
let (data_inline, data_external_offset) = if is_external {
let external_offset = if bytes.len() > offset {
let ext_offset =
u64::from_be_bytes(bytes[offset..offset + 8].try_into().unwrap_or([0u8; 8]));
Some(ext_offset)
} else {
None
};
(None, external_offset)
} else {
let inline_data = bytes[offset..].to_vec();
(Some(inline_data), None)
};
Ok(NodeRecordV3 {
id,
flags,
kind_offset,
name_offset,
data_len: encoded_data_len,
data_inline,
data_external_offset,
outgoing_cluster_offset,
outgoing_edge_count,
incoming_cluster_offset,
incoming_edge_count,
})
}
pub fn size_estimate() -> usize {
FIXED_METADATA_SIZE + MAX_INLINE_DATA / 2
}
}
#[cfg(test)]
mod tests {
use super::*;
// NOTE(review): this test pins the *declared* offset constants.  `serialize`
// pads 2 bytes after data_len (header bytes 18-19), so the actual wire
// positions of the cluster/count fields are 20/28/32/40, not 18/26/30/38 —
// confirm which layout is intended before relying on these constants for
// direct field access.
#[test]
fn test_constants() {
assert_eq!(FIXED_METADATA_SIZE, 44);
assert_eq!(MAX_INLINE_DATA, 64);
assert_eq!(constants::ID_OFFSET, 0);
assert_eq!(constants::FLAGS_OFFSET, 8);
assert_eq!(constants::KIND_OFFSET, 12);
assert_eq!(constants::NAME_OFFSET, 14);
assert_eq!(constants::DATA_LEN_OFFSET, 16);
assert_eq!(constants::OUTGOING_CLUSTER_OFFSET, 18);
assert_eq!(constants::OUTGOING_COUNT_OFFSET, 26);
assert_eq!(constants::INCOMING_CLUSTER_OFFSET, 30);
assert_eq!(constants::INCOMING_COUNT_OFFSET, 38);
}
// Inline constructor: accessors report inline storage and the payload length.
#[test]
fn test_new_inline_node() {
let node = NodeRecordV3::new_inline(
12345,
NodeFlags::empty(),
100,
200,
b"test data".to_vec(),
1000,
5,
2000,
3,
);
assert_eq!(node.id(), 12345);
assert!(node.is_inline());
assert!(!node.is_external());
assert_eq!(node.data_len(), 9);
}
// External constructor: accessors report external storage.
#[test]
fn test_new_external_node() {
let node = NodeRecordV3::new_external(
12345,
NodeFlags::empty(),
100,
200,
5000,
100,
0, 5, 0, 3, );
assert_eq!(node.id(), 12345);
assert!(!node.is_inline());
assert!(node.is_external());
assert_eq!(node.data_len(), 100);
}
// Boundary: exactly MAX_INLINE_DATA bytes is still inline.
#[test]
fn test_inline_data_max_size() {
let max_data = vec![0xFFu8; MAX_INLINE_DATA];
let node =
NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, max_data.clone(), 0, 0, 0, 0);
assert!(node.is_inline());
assert_eq!(node.data_len(), MAX_INLINE_DATA as u16);
}
// One byte over the inline limit must panic with the documented message.
#[test]
#[should_panic(expected = "Inline data exceeds MAX_INLINE_DATA")]
fn test_inline_data_too_large_panics() {
let too_large = vec![0xFFu8; MAX_INLINE_DATA + 1];
let _ = NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, too_large, 0, 0, 0, 0);
}
// Inline record serializes to header + payload bytes, nothing more.
#[test]
fn test_serialize_inline_node() {
let node = NodeRecordV3::new_inline(
-12345, NodeFlags::empty(),
100,
200,
b"Hello, V3!".to_vec(),
1000,
5,
2000,
3,
);
let serialized = node.serialize().unwrap();
assert_eq!(serialized.len(), FIXED_METADATA_SIZE + "Hello, V3!".len());
}
// External record serializes to header + 8-byte payload offset.
#[test]
fn test_serialize_external_node() {
let node = NodeRecordV3::new_external(
12345,
NodeFlags::empty(),
100,
200,
5000,
100,
0, 5, 0, 3, );
let serialized = node.serialize().unwrap();
assert_eq!(serialized.len(), FIXED_METADATA_SIZE + 8);
}
// Field-by-field round-trip of an inline record (flags, offsets, counts).
#[test]
fn test_round_trip_inline() {
let original = NodeRecordV3::new_inline(
999999,
NodeFlags::DELETED,
42,
84,
b"Test node data for round-trip".to_vec(),
1111,
10,
2222,
20,
);
let serialized = original.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(restored.id(), original.id());
assert_eq!(restored.flags, original.flags);
assert_eq!(restored.kind_offset, original.kind_offset);
assert_eq!(restored.name_offset, original.name_offset);
assert_eq!(restored.data_len(), original.data_len());
assert_eq!(restored.data_inline, original.data_inline);
assert_eq!(
restored.outgoing_cluster_offset,
original.outgoing_cluster_offset
);
assert_eq!(restored.outgoing_edge_count, original.outgoing_edge_count);
assert_eq!(
restored.incoming_cluster_offset,
original.incoming_cluster_offset
);
assert_eq!(restored.incoming_edge_count, original.incoming_edge_count);
}
// Round-trip of an external record; compares via data_len() (masked), not the
// raw data_len field, so the encoded flag bit is tolerated.
#[test]
fn test_round_trip_external() {
let original = NodeRecordV3::new_external(
888888,
NodeFlags::empty(),
10,
20,
7777,
200,
0, 15, 0, 25, );
let serialized = original.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(restored.id(), original.id());
assert_eq!(restored.flags, original.flags);
assert_eq!(restored.kind_offset, original.kind_offset);
assert_eq!(restored.name_offset, original.name_offset);
assert_eq!(restored.data_len(), original.data_len());
assert!(restored.is_external());
}
// IDs across the full signed 64-bit range (including extremes) must survive
// the big-endian round-trip.
#[test]
fn test_full_id_encoding() {
let test_ids = vec![0, 1, -1, 1000000, -1000000, i64::MAX, i64::MIN];
for id in test_ids {
let node = NodeRecordV3::new_inline(id, NodeFlags::empty(), 0, 0, vec![], 0, 0, 0, 0);
let serialized = node.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(
restored.id(),
id,
"ID {} should be preserved through round-trip",
id
);
}
}
// serialized_size must equal header size plus inline payload length.
#[test]
fn test_serialized_size_calculation() {
let empty_data = vec![];
let small_data = vec![1u8; 10];
let max_inline = vec![2u8; MAX_INLINE_DATA];
let empty = NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, empty_data, 0, 0, 0, 0);
assert_eq!(empty.serialized_size(), FIXED_METADATA_SIZE);
let small =
NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, small_data.clone(), 0, 0, 0, 0);
assert_eq!(small.serialized_size(), FIXED_METADATA_SIZE + 10);
let max =
NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, max_inline.clone(), 0, 0, 0, 0);
assert_eq!(max.serialized_size(), FIXED_METADATA_SIZE + MAX_INLINE_DATA);
}
// Full-width u64 cluster offsets and u32 counts survive the round-trip.
#[test]
fn test_edge_cluster_offsets_preserved() {
let node = NodeRecordV3::new_inline(
1,
NodeFlags::empty(),
0,
0,
vec![],
0x123456789ABCDEF0, 42, 0xFEDCBA9876543210, 99, );
let serialized = node.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(restored.outgoing_cluster_offset, 0x123456789ABCDEF0);
assert_eq!(restored.outgoing_edge_count, 42);
assert_eq!(restored.incoming_cluster_offset, 0xFEDCBA9876543210);
assert_eq!(restored.incoming_edge_count, 99);
}
// A buffer shorter than the fixed header is rejected, not panicked on.
#[test]
fn test_deserialize_insufficient_bytes() {
let short_data = vec![0u8; 10]; let result = NodeRecordV3::deserialize(&short_data);
assert!(result.is_err());
}
// Flag bits (here DELETED) survive the round-trip intact.
#[test]
fn test_flags_encoding() {
let flags = NodeFlags::DELETED;
let node = NodeRecordV3::new_inline(1, flags, 0, 0, vec![], 0, 0, 0, 0);
let serialized = node.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(restored.flags, flags);
assert!(restored.flags.contains(NodeFlags::DELETED));
}
// kind/name string-table offsets are round-tripped verbatim.
#[test]
fn test_string_table_offsets() {
let node = NodeRecordV3::new_inline(
1,
NodeFlags::empty(),
0x1234, 0x5678, vec![],
0,
0,
0,
0,
);
let serialized = node.serialize().unwrap();
let restored = NodeRecordV3::deserialize(&serialized).unwrap();
assert_eq!(restored.kind_offset, 0x1234);
assert_eq!(restored.name_offset, 0x5678);
}
}