use crate::{CalibratedClock, Clock};
use std::time::Instant;

/// A raw timestamp-counter reading.
///
/// Stored as `i64` so that wrapping subtraction of two readings yields a
/// signed cycle delta, which keeps comparisons correct across counter
/// wraparound.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct TscInstant(i64);

impl PartialOrd for TscInstant {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Delegate to `Ord` so the two orderings cannot disagree.
        Some(self.cmp(other))
    }
}

impl Ord for TscInstant {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Compare via the wrapping difference rather than the raw values, so
        // that instants straddling a counter wrap still order correctly.
        self.0.wrapping_sub(other.0).cmp(&0)
    }
}

/// The raw, uncalibrated timestamp counter.
///
/// Constructed only through the `try_new_*` checks below, so holding a value
/// of this type implies a TSC was detected.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct Tsc(());

impl Clock for Tsc {
    type Instant = TscInstant;
    #[inline(always)]
    fn now(self) -> Self::Instant {
        // SAFETY: `_rdtsc` has no preconditions beyond running on x86_64.
        TscInstant(unsafe { core::arch::x86_64::_rdtsc() } as i64)
    }
}

/// A `Tsc` paired with a measured cycle-to-nanosecond conversion factor.
#[derive(Copy, Clone)]
pub struct CalibratedTsc {
    ns_per_cycle: f64,
    tsc: Tsc,
}

/// Error returned when no stable TSC could be detected.
#[derive(Debug)]
#[non_exhaustive]
pub struct TscUnavailable;

impl core::fmt::Display for TscUnavailable {
    fn fmt(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result {
        formatter.write_str("No stable TSC available")
    }
}

impl std::error::Error for TscUnavailable {}

impl Tsc {
    /// Checks only that the CPU has a TSC (CPUID leaf 1, EDX bit 4) and
    /// assumes the counter is stable; it does not verify the invariant-TSC
    /// flag, hence "assume" in the name.
    pub fn try_new_assume_stable() -> Result<Self, TscUnavailable> {
        let edx = unsafe { core::arch::x86_64::__cpuid(1).edx };
        if (edx & (1 << 4)) != 0 {
            Ok(Tsc(()))
        } else {
            Err(TscUnavailable)
        }
    }

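    // A stricter gate than the plain feature bit is the invariant-TSC flag
    // (CPUID leaf 0x8000_0007, EDX bit 8), which guarantees a constant-rate
    // counter across power-state transitions. A minimal sketch of such a
    // constructor; `try_new_invariant` is illustrative and not part of the
    // original API:
    #[allow(dead_code)]
    fn try_new_invariant() -> Result<Self, TscUnavailable> {
        // Confirm the extended leaf exists before querying it.
        let max_extended = unsafe { core::arch::x86_64::__cpuid(0x8000_0000).eax };
        if max_extended >= 0x8000_0007 {
            let edx = unsafe { core::arch::x86_64::__cpuid(0x8000_0007).edx };
            if (edx & (1 << 8)) != 0 {
                return Ok(Tsc(()));
            }
        }
        Err(TscUnavailable)
    }
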
    /// Linux-specific check: the kernel drops `tsc` from the advertised
    /// clocksource list once it has judged the counter unstable, so its
    /// presence there is a reasonable stability signal.
    #[cfg(target_os = "linux")]
    pub fn try_new_linux_sys() -> Result<Self, TscUnavailable> {
        let stable_tsc_detected = std::fs::read_to_string(
            "/sys/devices/system/clocksource/clocksource0/available_clocksource",
        )
        .is_ok_and(|x| x.contains("tsc"));
        if stable_tsc_detected {
            Ok(Tsc(()))
        } else {
            Err(TscUnavailable)
        }
    }

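    // A stricter Linux variant would require that the kernel has actually
    // selected the TSC as its current clocksource, not merely advertised it.
    // A minimal sketch; `try_new_linux_current` is illustrative and not part
    // of the original API:
    #[cfg(target_os = "linux")]
    #[allow(dead_code)]
    fn try_new_linux_current() -> Result<Self, TscUnavailable> {
        let current = std::fs::read_to_string(
            "/sys/devices/system/clocksource/clocksource0/current_clocksource",
        );
        // The file holds a single clocksource name followed by a newline.
        if current.is_ok_and(|x| x.trim() == "tsc") {
            Ok(Tsc(()))
        } else {
            Err(TscUnavailable)
        }
    }
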
    /// Measures the TSC frequency against `Instant` and returns a calibrated
    /// clock. Each pass samples both clocks over at least 10 ms; calibration
    /// finishes once two consecutive estimates agree to within 0.001 %, so at
    /// least two passes always run.
    pub fn calibrate(self) -> CalibratedTsc {
        let mut old_cycles_per_ns = 0.0;
        loop {
            let t1 = Instant::now();
            let tsc1 = self.now();
            let mut t2;
            let mut tsc2;
            let cycles_per_ns = loop {
                t2 = Instant::now();
                tsc2 = self.now();
                let elapsed_nanos = (t2 - t1).as_nanos();
                let elapsed_cycles = tsc2.0.wrapping_sub(tsc1.0);
                if elapsed_nanos > 10_000_000 && elapsed_cycles > 0 {
                    break elapsed_cycles as f64 / elapsed_nanos as f64;
                }
            };
            let delta = f64::abs(cycles_per_ns - old_cycles_per_ns);
            if delta / cycles_per_ns < 0.00001 {
                let ns_per_cycle = cycles_per_ns.recip();
                debug_assert!(ns_per_cycle > 0.0);
                return CalibratedTsc {
                    ns_per_cycle,
                    tsc: self,
                };
            } else {
                old_cycles_per_ns = cycles_per_ns;
            }
        }
    }
}

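// End-to-end usage, as a sketch: detect the TSC, calibrate once, then time a
// closure. Assumes an x86_64 host; `measure_ns` is illustrative and not part
// of the original API.
#[allow(dead_code)]
fn measure_ns(f: impl FnOnce()) -> Result<u64, TscUnavailable> {
    let clock = Tsc::try_new_assume_stable()?.calibrate();
    let start = clock.now();
    f();
    let end = clock.now();
    Ok(clock.between_u64_ns(end, start))
}
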
impl From<CalibratedTsc> for Tsc {
    fn from(value: CalibratedTsc) -> Self {
        value.tsc
    }
}

impl CalibratedClock for CalibratedTsc {
    fn between_u64_ns(self, later: Self::Instant, earlier: Self::Instant) -> u64 {
        // Wrapping subtraction gives a signed cycle delta even across a
        // counter wrap; the caller must pass the instants in order.
        let d = later.0.wrapping_sub(earlier.0);
        debug_assert!(d >= 0);
        (d as f64 * self.ns_per_cycle).round() as u64
    }

    fn add_u64_ns(self, base: Self::Instant, offset: u64) -> Self::Instant {
        // Convert the nanosecond offset into cycles before shifting the base.
        let offset = offset as f64 / self.ns_per_cycle;
        TscInstant(base.0.wrapping_add(offset as i64))
    }

    fn sub_u64_ns(self, base: Self::Instant, offset: u64) -> Self::Instant {
        let offset = offset as f64 / self.ns_per_cycle;
        TscInstant(base.0.wrapping_sub(offset as i64))
    }
}

impl Clock for CalibratedTsc {
    type Instant = TscInstant;
    fn now(self) -> Self::Instant {
        self.tsc.now()
    }
}
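
// A minimal test sketch, assuming the harness runs on an x86_64 host with a
// usable TSC; it exercises wrapping-safe ordering and the cycle/nanosecond
// round trip rather than asserting exact timings.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn wrapped_instants_still_order() {
        // A reading just before an i64 wrap must compare less than one just
        // after it, which comparing the raw values would get backwards.
        let before = TscInstant(i64::MAX - 1);
        let after = TscInstant(i64::MAX.wrapping_add(2));
        assert!(before < after);
    }

    #[test]
    fn add_then_between_round_trips() {
        let clock = Tsc::try_new_assume_stable().unwrap().calibrate();
        let base = clock.now();
        let later = clock.add_u64_ns(base, 1_000_000);
        let ns = clock.between_u64_ns(later, base);
        // Conversion through f64 may round by a cycle, so allow slack.
        assert!((999_000..=1_001_000).contains(&ns));
    }
}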