// ruvector_temporal_tensor/segment.rs
//! Segment binary format: encode and decode.
//!
//! Format (little-endian):
//!
//! ```text
//! [magic:4][version:1][bits:1][group_len:4][tensor_len:4][frames:4]
//! [scale_count:4][scales:2*S][data_len:4][data:D]
//! ```
//!
//! Magic: `0x43545154` (bytes `"TQTC"` on the wire). The fixed fields before
//! the scale table total 22 bytes; `HEADER_SIZE` (26) additionally counts the
//! 4-byte `data_len` field that follows the scales.
11
use crate::quantizer;
13
/// Segment magic number: the bytes `"TQTC"` when written little-endian.
pub const MAGIC: u32 = 0x4354_5154;
/// Current segment format version.
pub const VERSION: u8 = 1;
/// Minimum valid segment size in bytes (fixed header fields plus `data_len`,
/// with zero scales and zero data bytes).
pub const HEADER_SIZE: usize = 26;

/// Serialize a segment from metadata, per-group scales, and packed data.
///
/// `out` is cleared first; on return it holds the complete binary segment
/// in the little-endian layout described in the module docs.
pub fn encode(
    bits: u8,
    group_len: u32,
    tensor_len: u32,
    frame_count: u32,
    scales: &[u16],
    data: &[u8],
    out: &mut Vec<u8>,
) {
    out.clear();
    // Exact final size: fixed fields + data_len (HEADER_SIZE) + scales + data.
    out.reserve(HEADER_SIZE + 2 * scales.len() + data.len());

    // Fixed header fields.
    out.extend_from_slice(&MAGIC.to_le_bytes());
    out.extend_from_slice(&[VERSION, bits]);
    for field in [group_len, tensor_len, frame_count] {
        out.extend_from_slice(&field.to_le_bytes());
    }

    // Scale table, prefixed by its entry count.
    out.extend_from_slice(&(scales.len() as u32).to_le_bytes());
    out.extend(scales.iter().flat_map(|s| s.to_le_bytes()));

    // Packed data, prefixed by its byte length.
    out.extend_from_slice(&(data.len() as u32).to_le_bytes());
    out.extend_from_slice(data);
}
56
/// Decoded segment header.
///
/// Mirrors the fixed fields at the front of the binary format (everything
/// before the scale table); see the module docs for the byte layout.
#[derive(Debug, Clone)]
pub struct SegmentHeader {
    /// Quantization bit width per value (used to size packed frames).
    pub bits: u8,
    /// Group length passed through to the quantizer (values per scale group
    /// — presumably; confirm against `quantizer::dequantize_f32`).
    pub group_len: u32,
    /// Number of f32 values per frame.
    pub tensor_len: u32,
    /// Number of frames packed in the segment.
    pub frame_count: u32,
    /// Number of u16 scale entries following the fixed header fields.
    pub scale_count: u32,
}
66
67/// Decode a segment, returning all frames as f32 values.
68pub fn decode(segment: &[u8], out: &mut Vec<f32>) {
69    out.clear();
70    if segment.len() < HEADER_SIZE {
71        return;
72    }
73
74    let mut off = 0;
75
76    let magic = read_u32_le(segment, &mut off);
77    if magic != MAGIC {
78        return;
79    }
80
81    let version = segment[off];
82    off += 1;
83    if version != VERSION {
84        return;
85    }
86
87    let bits = segment[off];
88    off += 1;
89
90    let group_len = read_u32_le(segment, &mut off);
91    let tensor_len = read_u32_le(segment, &mut off);
92    let frame_count = read_u32_le(segment, &mut off);
93    let scale_count = read_u32_le(segment, &mut off);
94
95    // Read scales
96    let scales_end = off + (scale_count as usize) * 2;
97    if scales_end > segment.len() {
98        return;
99    }
100    let mut scales = Vec::with_capacity(scale_count as usize);
101    for _ in 0..scale_count {
102        scales.push(read_u16_le(segment, &mut off));
103    }
104
105    // Read data
106    if off + 4 > segment.len() {
107        return;
108    }
109    let data_len = read_u32_le(segment, &mut off) as usize;
110    if off + data_len > segment.len() {
111        return;
112    }
113    let data = &segment[off..off + data_len];
114
115    // Convert scales to f32 once, then dequantize via the optimized path
116    let scales_f32 = quantizer::scales_to_f32(&scales);
117    quantizer::dequantize_f32(
118        data,
119        &scales_f32,
120        group_len as usize,
121        bits,
122        tensor_len as usize,
123        frame_count as usize,
124        out,
125    );
126}
127
128/// Parse only the segment header (no data decoding).
129pub fn parse_header(segment: &[u8]) -> Option<SegmentHeader> {
130    if segment.len() < HEADER_SIZE {
131        return None;
132    }
133    let mut off = 0;
134    let magic = read_u32_le(segment, &mut off);
135    if magic != MAGIC {
136        return None;
137    }
138    let version = segment[off];
139    off += 1;
140    if version != VERSION {
141        return None;
142    }
143    let bits = segment[off];
144    off += 1;
145    let group_len = read_u32_le(segment, &mut off);
146    let tensor_len = read_u32_le(segment, &mut off);
147    let frame_count = read_u32_le(segment, &mut off);
148    let scale_count = read_u32_le(segment, &mut off);
149
150    Some(SegmentHeader {
151        bits,
152        group_len,
153        tensor_len,
154        frame_count,
155        scale_count,
156    })
157}
158
159/// Compute the compression ratio for a segment: raw f32 bytes / segment bytes.
160///
161/// Returns `0.0` if the segment is empty or has no frames.
162pub fn compression_ratio(segment: &[u8]) -> f32 {
163    match parse_header(segment) {
164        Some(h) if h.frame_count > 0 => {
165            let raw = h.tensor_len as usize * h.frame_count as usize * 4;
166            raw as f32 / segment.len() as f32
167        }
168        _ => 0.0,
169    }
170}
171
172/// Decode a single frame by index from a segment.
173///
174/// Returns `None` if the segment is invalid or `frame_idx` is out of range.
175pub fn decode_single_frame(segment: &[u8], frame_idx: usize) -> Option<Vec<f32>> {
176    let header = parse_header(segment)?;
177    if frame_idx >= header.frame_count as usize {
178        return None;
179    }
180
181    // Skip past the fixed header fields (magic + version + bits + group_len +
182    // tensor_len + frame_count + scale_count = 4+1+1+4+4+4+4 = 22 bytes).
183    let mut off = 22usize;
184    let scale_count = header.scale_count as usize;
185
186    // Read scales
187    let scales_end = off + scale_count * 2;
188    if scales_end > segment.len() {
189        return None;
190    }
191    let mut scales_f16 = Vec::with_capacity(scale_count);
192    for _ in 0..scale_count {
193        scales_f16.push(read_u16_le(segment, &mut off));
194    }
195    let scales_f32 = quantizer::scales_to_f32(&scales_f16);
196
197    // Read data section
198    if off + 4 > segment.len() {
199        return None;
200    }
201    let data_len = read_u32_le(segment, &mut off) as usize;
202    if off + data_len > segment.len() {
203        return None;
204    }
205    let data = &segment[off..off + data_len];
206
207    // Compute byte offset for the requested frame
208    let tensor_len = header.tensor_len as usize;
209    let bits = header.bits;
210    let bits_per_frame = tensor_len * bits as usize;
211    let bytes_per_frame = bits_per_frame.div_ceil(8);
212
213    let frame_start = frame_idx * bytes_per_frame;
214    if frame_start + bytes_per_frame > data.len() {
215        return None;
216    }
217    let frame_data = &data[frame_start..frame_start + bytes_per_frame];
218
219    let mut out = Vec::new();
220    quantizer::dequantize_f32(
221        frame_data,
222        &scales_f32,
223        header.group_len as usize,
224        bits,
225        tensor_len,
226        1,
227        &mut out,
228    );
229    Some(out)
230}
231
/// Read a little-endian `u32` at `*offset`, advancing the offset by 4.
///
/// Panics if fewer than 4 bytes remain after `*offset`; callers perform
/// their own bounds checks first.
#[inline]
fn read_u32_le(bytes: &[u8], offset: &mut usize) -> u32 {
    let start = *offset;
    *offset = start + 4;
    let mut word = [0u8; 4];
    word.copy_from_slice(&bytes[start..start + 4]);
    u32::from_le_bytes(word)
}
239
/// Read a little-endian `u16` at `*offset`, advancing the offset by 2.
///
/// Panics if fewer than 2 bytes remain after `*offset`; callers perform
/// their own bounds checks first.
// `#[inline]` added for consistency with `read_u32_le`; both run per-field
// in the hot decode path.
#[inline]
fn read_u16_le(bytes: &[u8], offset: &mut usize) -> u16 {
    let o = *offset;
    let arr = [bytes[o], bytes[o + 1]];
    *offset = o + 2;
    u16::from_le_bytes(arr)
}
246
#[cfg(test)]
mod tests {
    use super::*;
    use crate::quantizer;

    #[test]
    fn test_encode_decode_roundtrip() {
        // Ramp of 128 values straddling zero; 8-bit quantization, groups of 64.
        let mut frame = Vec::with_capacity(128);
        for i in 0..128 {
            frame.push((i as f32 - 64.0) * 0.1);
        }
        let group_len = 64usize;
        let bits = 8u8;

        let scales = quantizer::compute_scales(&frame, group_len, bits);
        let mut packed = Vec::new();
        quantizer::quantize_and_pack(&frame, &scales, group_len, bits, &mut packed);

        let mut seg = Vec::new();
        encode(bits, group_len as u32, frame.len() as u32, 1, &scales, &packed, &mut seg);

        let mut decoded = Vec::new();
        decode(&seg, &mut decoded);
        assert_eq!(decoded.len(), frame.len());

        // At 8 bits every value should round-trip within a small tolerance.
        for (i, (&orig, &dec)) in frame.iter().zip(decoded.iter()).enumerate() {
            let err = (orig - dec).abs();
            assert!(err < 0.1, "i={i} orig={orig} dec={dec} err={err}");
        }
    }

    #[test]
    fn test_magic_validation() {
        // Four zero bytes: shorter than HEADER_SIZE and wrong magic either
        // way, so decode must leave the output empty.
        let mut decoded = Vec::new();
        decode(&[0u8; 4], &mut decoded);
        assert!(decoded.is_empty());
    }

    #[test]
    fn test_parse_header() {
        // Single 64-value frame at an odd bit width to exercise the header.
        let frame = vec![1.0f32; 64];
        let scales = quantizer::compute_scales(&frame, 64, 7);
        let mut packed = Vec::new();
        quantizer::quantize_and_pack(&frame, &scales, 64, 7, &mut packed);

        let mut seg = Vec::new();
        encode(7, 64, 64, 1, &scales, &packed, &mut seg);

        let h = parse_header(&seg).unwrap();
        assert_eq!(h.bits, 7);
        assert_eq!(h.group_len, 64);
        assert_eq!(h.tensor_len, 64);
        assert_eq!(h.frame_count, 1);
    }

    #[test]
    fn test_multi_frame_roundtrip() {
        let group_len = 32usize;
        let bits = 5u8;
        let tensor_len = 64;

        // Two similar ramps packed back-to-back, both using frame1's scales.
        let frame1: Vec<f32> = (0..tensor_len).map(|i| (i as f32) * 0.1).collect();
        let frame2: Vec<f32> = (0..tensor_len).map(|i| (i as f32) * 0.09).collect();

        let scales = quantizer::compute_scales(&frame1, group_len, bits);
        let mut packed = Vec::new();
        quantizer::quantize_and_pack(&frame1, &scales, group_len, bits, &mut packed);
        quantizer::quantize_and_pack(&frame2, &scales, group_len, bits, &mut packed);

        let mut seg = Vec::new();
        encode(bits, group_len as u32, tensor_len as u32, 2, &scales, &packed, &mut seg);

        let mut decoded = Vec::new();
        decode(&seg, &mut decoded);
        assert_eq!(decoded.len(), tensor_len * 2);
    }
}