//! Inline time encodings: order-preserving big-endian TAI intervals
//! (`NsTAIInterval`) and signed nanosecond durations (`NsDuration`).
1use crate::inline::Encodes;
2use crate::id::ExclusiveId;
3use crate::id::Id;
4use crate::id_hex;
5use crate::macros::entity;
6use crate::metadata;
7use crate::metadata::MetaDescribe;
8use crate::trible::Fragment;
9use crate::inline::IntoInline;
10use crate::inline::TryFromInline;
11use crate::inline::TryToInline;
12use crate::inline::Inline;
13use crate::inline::InlineEncoding;
14use std::convert::Infallible;
15
16use std::convert::TryInto;
17
18use hifitime::prelude::*;
19
/// An inline encoding for a TAI interval (order-preserving big-endian).
///
/// A TAI interval is a pair of TAI epochs stored as two 128-bit signed
/// integers (lower, upper) in **order-preserving big-endian** byte order.
/// Both bounds are inclusive.
///
/// Each i128 is XOR'd with the sign bit (mapping i128::MIN to 0, 0 to 2^127,
/// i128::MAX to u128::MAX) then written big-endian. Byte-lexicographic order
/// matches numeric order across the full i128 range, enabling efficient range
/// scans on the trie.
pub struct NsTAIInterval;
31
impl MetaDescribe for NsTAIInterval {
    /// Builds the self-describing metadata fragment for this encoding:
    /// a stable entity id plus name, human-readable description, and the
    /// inline-encoding kind tag.
    fn describe() -> Fragment {
        // Hard-coded stable identity so the metadata entity never changes
        // across releases.
        let id: Id = id_hex!("2170014368272A2B1B18B86B1F1F1CB5");
        entity! {
            ExclusiveId::force_ref(&id) @
                metadata::name: "nstai_interval_be",
                metadata::description: "Inclusive TAI interval encoded as two offset-big-endian i128 nanosecond bounds. Each i128 is XOR'd with i128::MIN then stored big-endian, so byte-lexicographic order matches numeric order. This enables efficient range scans on ordered indexes.\n\nSemantically identical to the legacy LE encoding — same inclusive bounds, same TAI monotonic time.",
                metadata::tag: metadata::KIND_INLINE_ENCODING,
        }
    }
}
43
const SIGN_BIT: u128 = 1u128 << 127;

/// Encode an `i128` as order-preserving big-endian bytes.
///
/// Flipping the sign bit biases the value so that `i128::MIN` maps to 0,
/// 0 maps to 2^127, and `i128::MAX` maps to `u128::MAX`; after the
/// big-endian write, byte-lexicographic order equals numeric order.
pub(crate) fn i128_to_ordered_be(v: i128) -> [u8; 16] {
    let biased = (v as u128) ^ SIGN_BIT;
    biased.to_be_bytes()
}

/// Inverse of [`i128_to_ordered_be`]: read big-endian, then undo the
/// sign-bit flip.
pub(crate) fn i128_from_ordered_be(bytes: [u8; 16]) -> i128 {
    let biased = u128::from_be_bytes(bytes);
    (biased ^ SIGN_BIT) as i128
}
56
57impl InlineEncoding for NsTAIInterval {
58    type ValidationError = InvertedIntervalError;
59    type Encoding = Self;
60
61    fn validate(value: Inline<Self>) -> Result<Inline<Self>, Self::ValidationError> {
62        let lower = i128_from_ordered_be(value.raw[0..16].try_into().unwrap());
63        let upper = i128_from_ordered_be(value.raw[16..32].try_into().unwrap());
64        if lower > upper {
65            Err(InvertedIntervalError { lower, upper })
66        } else {
67            Ok(value)
68        }
69    }
70}
71
72impl TryToInline<NsTAIInterval> for (Epoch, Epoch) {
73    type Error = InvertedIntervalError;
74    fn try_to_inline(self) -> Result<Inline<NsTAIInterval>, InvertedIntervalError> {
75        let lower = self.0.to_tai_duration().total_nanoseconds();
76        let upper = self.1.to_tai_duration().total_nanoseconds();
77        if lower > upper {
78            return Err(InvertedIntervalError { lower, upper });
79        }
80        let mut value = [0; 32];
81        value[0..16].copy_from_slice(&i128_to_ordered_be(lower));
82        value[16..32].copy_from_slice(&i128_to_ordered_be(upper));
83        Ok(Inline::new(value))
84    }
85}
86
87impl TryFromInline<'_, NsTAIInterval> for (Epoch, Epoch) {
88    type Error = InvertedIntervalError;
89    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, InvertedIntervalError> {
90        let lower = i128_from_ordered_be(v.raw[0..16].try_into().unwrap());
91        let upper = i128_from_ordered_be(v.raw[16..32].try_into().unwrap());
92        if lower > upper {
93            return Err(InvertedIntervalError { lower, upper });
94        }
95        Ok((
96            Epoch::from_tai_duration(Duration::from_total_nanoseconds(lower)),
97            Epoch::from_tai_duration(Duration::from_total_nanoseconds(upper)),
98        ))
99    }
100}
101
102impl TryFromInline<'_, NsTAIInterval> for (i128, i128) {
103    type Error = InvertedIntervalError;
104    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, InvertedIntervalError> {
105        let lower = i128_from_ordered_be(v.raw[0..16].try_into().unwrap());
106        let upper = i128_from_ordered_be(v.raw[16..32].try_into().unwrap());
107        if lower > upper {
108            return Err(InvertedIntervalError { lower, upper });
109        }
110        Ok((lower, upper))
111    }
112}
113
/// The lower bound of a TAI interval in nanoseconds.
/// Use this when you want to sort or compare by interval start time.
///
/// ```rust,ignore
/// find!(t: Lower, pattern!(&space, [{ entity @ attr: ?t }]))
///     .max_by_key(|t| *t)  // latest start time
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Lower(pub i128);

/// The upper bound of a TAI interval in nanoseconds.
/// Use this when you want to sort or compare by interval end time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Upper(pub i128);

/// The midpoint of a TAI interval in nanoseconds (floor of the average of
/// the bounds, i.e. rounded toward the lower bound for odd widths).
/// Use this when you want to sort or compare by interval center.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Midpoint(pub i128);

/// The width of a TAI interval in nanoseconds (upper − lower; zero for a
/// degenerate single-instant interval).
/// Use this when you want to sort or compare by interval duration.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Width(pub i128);
138
139impl TryFromInline<'_, NsTAIInterval> for Lower {
140    type Error = Infallible;
141    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, Infallible> {
142        Ok(Lower(i128_from_ordered_be(
143            v.raw[0..16].try_into().unwrap(),
144        )))
145    }
146}
147
148impl TryFromInline<'_, NsTAIInterval> for Upper {
149    type Error = Infallible;
150    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, Infallible> {
151        Ok(Upper(i128_from_ordered_be(
152            v.raw[16..32].try_into().unwrap(),
153        )))
154    }
155}
156
157impl TryFromInline<'_, NsTAIInterval> for Midpoint {
158    type Error = InvertedIntervalError;
159    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, InvertedIntervalError> {
160        let lower = i128_from_ordered_be(v.raw[0..16].try_into().unwrap());
161        let upper = i128_from_ordered_be(v.raw[16..32].try_into().unwrap());
162        if lower > upper {
163            return Err(InvertedIntervalError { lower, upper });
164        }
165        Ok(Midpoint(lower + (upper - lower) / 2))
166    }
167}
168
169impl TryFromInline<'_, NsTAIInterval> for Width {
170    type Error = InvertedIntervalError;
171    fn try_from_inline(v: &Inline<NsTAIInterval>) -> Result<Self, InvertedIntervalError> {
172        let lower = i128_from_ordered_be(v.raw[0..16].try_into().unwrap());
173        let upper = i128_from_ordered_be(v.raw[16..32].try_into().unwrap());
174        if lower > upper {
175            return Err(InvertedIntervalError { lower, upper });
176        }
177        Ok(Width(upper - lower))
178    }
179}
180
/// The lower bound exceeds the upper bound.
///
/// Returned by interval validation and decoding when the stored or supplied
/// bounds are out of order.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvertedIntervalError {
    /// The lower bound that was greater than `upper`.
    pub lower: i128,
    /// The upper bound that was less than `lower`.
    pub upper: i128,
}
189
/// An inline encoding for a signed nanosecond duration delta.
///
/// log2(ns / Planck second) ≈ 113.8, so a 128-bit ns count comfortably
/// holds every duration physics has an opinion about. We use the upper
/// 16 bytes for the i128 nanosecond magnitude (sign-bit-XOR'd big-
/// endian, same trick as [`NsTAIInterval`]) and reserve the lower 16
/// bytes as zero. Today's readers/writers ignore the lower half;
/// future implementations can use those bits to carry sub-nanosecond
/// precision (picoseconds, femtoseconds, eventually Planck seconds)
/// without breaking byte-lex ordering or trie compatibility — older
/// values just sort before any future-precision values that share the
/// same ns count.
pub struct NsDuration;
203
impl MetaDescribe for NsDuration {
    /// Builds the self-describing metadata fragment for this encoding:
    /// a stable entity id plus name, human-readable description, and the
    /// inline-encoding kind tag.
    fn describe() -> Fragment {
        // Hard-coded stable identity so the metadata entity never changes
        // across releases.
        let id: Id = id_hex!("951D5249DB193D3B3F208B994B1072C4");
        entity! {
            ExclusiveId::force_ref(&id) @
                metadata::name: "ns_duration",
                metadata::description: "Signed nanosecond duration delta encoded as an offset-big-endian i128 in the upper 16 bytes; the lower 16 bytes are reserved (zero today, sub-nanosecond precision in the future). XOR'ing the i128 with i128::MIN before big-endian write makes byte-lexicographic order match numeric order across the full i128 range, so range scans on a sorted trie work natively.",
                metadata::tag: metadata::KIND_INLINE_ENCODING,
        }
    }
}
215
216impl InlineEncoding for NsDuration {
217    type ValidationError = ReservedBitsNonZero;
218    type Encoding = Self;
219
220    fn validate(value: Inline<Self>) -> Result<Inline<Self>, Self::ValidationError> {
221        if value.raw[16..32] != [0u8; 16] {
222            return Err(ReservedBitsNonZero);
223        }
224        Ok(value)
225    }
226}
227
/// Error: the reserved lower 16 bytes of an [`NsDuration`] value were not
/// all zero. Future precision-extending readers must accept such values;
/// today they are rejected at validation so a round-trip cannot silently
/// drop precision.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ReservedBitsNonZero;

impl std::fmt::Display for ReservedBitsNonZero {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(
            "NsDuration reserved bits (bytes 16..32) are non-zero — \
             this value carries sub-nanosecond precision that the \
             current reader does not understand",
        )
    }
}
245
246impl Encodes<i128> for NsDuration
247{
248    type Output = Inline<NsDuration>;
249    fn encode(source: i128) -> Inline<NsDuration> {
250        let mut raw = [0u8; 32];
251        raw[0..16].copy_from_slice(&i128_to_ordered_be(source));
252        Inline::new(raw)
253    }
254}
255
256impl TryFromInline<'_, NsDuration> for i128 {
257    type Error = ReservedBitsNonZero;
258
259    fn try_from_inline(v: &Inline<NsDuration>) -> Result<Self, Self::Error> {
260        if v.raw[16..32] != [0u8; 16] {
261            return Err(ReservedBitsNonZero);
262        }
263        Ok(i128_from_ordered_be(v.raw[0..16].try_into().unwrap()))
264    }
265}
266
267impl Encodes<Duration> for NsDuration
268{
269    type Output = Inline<NsDuration>;
270    fn encode(source: Duration) -> Inline<NsDuration> {
271        source.total_nanoseconds().to_inline()
272    }
273}
274
275impl TryFromInline<'_, NsDuration> for Duration {
276    type Error = ReservedBitsNonZero;
277
278    fn try_from_inline(v: &Inline<NsDuration>) -> Result<Self, Self::Error> {
279        let ns: i128 = v.try_from_inline()?;
280        Ok(Duration::from_total_nanoseconds(ns))
281    }
282}
283
284impl std::fmt::Display for InvertedIntervalError {
285    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
286        write!(
287            f,
288            "inverted interval: lower {} > upper {}",
289            self.lower, self.upper
290        )
291    }
292}
293
#[cfg(test)]
mod tests {
    use super::*;

    // A zero-width (epoch, epoch) interval survives an encode/decode
    // round-trip through the hifitime conversions.
    #[test]
    fn hifitime_conversion() {
        let epoch = Epoch::from_tai_duration(Duration::from_total_nanoseconds(0));
        let time_in: (Epoch, Epoch) = (epoch, epoch);
        let interval: Inline<NsTAIInterval> = time_in.try_to_inline().unwrap();
        let time_out: (Epoch, Epoch) = interval.try_from_inline().unwrap();

        assert_eq!(time_in, time_out);
    }

    // Each projection newtype extracts the expected scalar from a
    // [1s, 3s] interval.
    #[test]
    fn projection_types() {
        let lower_ns: i128 = 1_000_000_000;
        let upper_ns: i128 = 3_000_000_000;
        let lower = Epoch::from_tai_duration(Duration::from_total_nanoseconds(lower_ns));
        let upper = Epoch::from_tai_duration(Duration::from_total_nanoseconds(upper_ns));
        let interval: Inline<NsTAIInterval> = (lower, upper).try_to_inline().unwrap();

        let l: Lower = interval.from_inline();
        let u: Upper = interval.from_inline();
        let m: Midpoint = interval.try_from_inline().unwrap();
        let w: Width = interval.try_from_inline().unwrap();

        assert_eq!(l.0, lower_ns);
        assert_eq!(u.0, upper_ns);
        assert_eq!(m.0, 2_000_000_000); // midpoint
        assert_eq!(w.0, 2_000_000_000); // width
        assert!(l < Lower(upper_ns)); // Ord works
    }

    // Encoding refuses a pair whose start is after its end.
    #[test]
    fn try_to_value_rejects_inverted() {
        let lower = Epoch::from_tai_duration(Duration::from_total_nanoseconds(2_000_000_000));
        let upper = Epoch::from_tai_duration(Duration::from_total_nanoseconds(1_000_000_000));
        let result: Result<Inline<NsTAIInterval>, _> = (lower, upper).try_to_inline();
        assert!(result.is_err());
    }

    // Bounds are inclusive, so lower == upper is a valid interval.
    #[test]
    fn validate_accepts_equal() {
        let t = Epoch::from_tai_duration(Duration::from_total_nanoseconds(1_000_000_000));
        let interval: Inline<NsTAIInterval> = (t, t).try_to_inline().unwrap();
        assert!(NsTAIInterval::validate(interval).is_ok());
    }

    // Decoding to raw (i128, i128) bounds returns the exact ns values.
    #[test]
    fn nanosecond_conversion() {
        let lower_ns: i128 = 1_000_000_000;
        let upper_ns: i128 = 2_000_000_000;
        let lower = Epoch::from_tai_duration(Duration::from_total_nanoseconds(lower_ns));
        let upper = Epoch::from_tai_duration(Duration::from_total_nanoseconds(upper_ns));
        let interval: Inline<NsTAIInterval> = (lower, upper).try_to_inline().unwrap();

        let (out_lower, out_upper): (i128, i128) = interval.try_from_inline().unwrap();
        assert_eq!(out_lower, lower_ns);
        assert_eq!(out_upper, upper_ns);
    }

    #[test]
    fn byte_order_matches_numeric_order() {
        // Order-preserving BE: byte order = i128 numeric order.
        let times = [
            i128::MIN,
            -1_000_000_000,
            -1,
            0,
            1,
            1_000_000_000,
            i128::MAX,
        ];
        for pair in times.windows(2) {
            let a = i128_to_ordered_be(pair[0]);
            let b = i128_to_ordered_be(pair[1]);
            assert!(a < b, "{} should sort before {} in bytes", pair[0], pair[1]);
        }
    }

    // encode/decode are exact inverses at the extremes and around zero.
    #[test]
    fn roundtrip_edge_cases() {
        for v in [i128::MIN, -1, 0, 1, i128::MAX] {
            assert_eq!(i128_from_ordered_be(i128_to_ordered_be(v)), v);
        }
    }

    // NsDuration round-trips an i128 ns count and keeps the reserved
    // lower half zeroed.
    #[test]
    fn ns_duration_roundtrip_i128() {
        for ns in [
            i128::MIN,
            -1_000_000_000_000,
            -1,
            0,
            1,
            42,
            1_000_000_000,
            i128::MAX,
        ] {
            let v: Inline<NsDuration> = ns.to_inline();
            // Reserved lower 16 bytes are zero today.
            assert_eq!(v.raw[16..32], [0u8; 16], "lower bits must be reserved=0");
            let back: i128 = v.try_from_inline().unwrap();
            assert_eq!(ns, back);
        }
    }

    #[test]
    fn ns_duration_byte_order_matches_numeric_order() {
        // Sorting by byte-lex on the upper 16 bytes must match numeric order.
        let mut values: Vec<(i128, Inline<NsDuration>)> = vec![
            i128::MIN,
            -1_000_000_000,
            -1,
            0,
            1,
            1_000_000_000,
            i128::MAX,
        ]
        .into_iter()
        .map(|n| (n, n.to_inline()))
        .collect();
        values.sort_by(|a, b| a.1.raw.cmp(&b.1.raw));
        let sorted_ns: Vec<i128> = values.iter().map(|(n, _)| *n).collect();
        let mut expected = sorted_ns.clone();
        expected.sort();
        assert_eq!(sorted_ns, expected);
    }

    // A hifitime Duration survives the NsDuration round-trip exactly.
    #[test]
    fn ns_duration_hifitime_duration_roundtrips() {
        let d_in = Duration::from_total_nanoseconds(1_234_567_890_123);
        let v: Inline<NsDuration> = d_in.to_inline();
        let d_out: Duration = v.try_from_inline().unwrap();
        assert_eq!(d_in.total_nanoseconds(), d_out.total_nanoseconds());
    }

    // Validation rejects any non-zero byte in the reserved half.
    #[test]
    fn ns_duration_validate_rejects_dirty_reserved_bits() {
        let mut raw = [0u8; 32];
        raw[0..16].copy_from_slice(&i128_to_ordered_be(0));
        raw[20] = 1; // dirty reserved byte
        let v: Inline<NsDuration> = Inline::new(raw);
        assert!(NsDuration::validate(v).is_err());
    }
}