perovskite_core/
util.rs

1use std::{
2    fmt::Debug,
3    sync::{atomic::AtomicUsize, Arc},
4    time::Instant,
5};
6
7use rand::Rng;
8// TODO: Conditionally replace with a ZST based on a feature flag
9
/// Denominator controlling trace sampling: roughly one in every N
/// calls to `TraceBuffer::new` produces a real (filled) buffer.
/// Read/written with Relaxed ordering; the rate needs no
/// synchronization with the sampled work itself.
static TRACE_RATE_DENOMINATOR: AtomicUsize = AtomicUsize::new(1);

/// Sets the trace sampling denominator so that roughly 1/`val` of
/// trace buffers are actually filled.
///
/// `val` is clamped to at least 1: a zero denominator would make the
/// sampling probability `1.0 / 0.0` (infinite), which panics inside
/// `rand::Rng::gen_bool` (probabilities must lie in [0, 1]).
pub fn set_trace_rate_denominator(val: usize) {
    TRACE_RATE_DENOMINATOR.store(val.max(1), std::sync::atomic::Ordering::Relaxed);
}
15
/// Shared interior of a filled `TraceBuffer`.
///
/// Entries are pushed through the bounded `SyncSender` and drained
/// from the matching `Receiver` by the `Drop` impl below, which
/// prints the collected trace when the last handle goes away.
struct TraceBufferInner {
    // Timestamp taken at construction; logged instants are printed as
    // nanosecond offsets from this point.
    created: Instant,
    // Sender side of the bounded channel (4096 slots — see
    // `new_filled`). `log` uses `try_send`, so entries are silently
    // dropped once the channel is full.
    buf: std::sync::mpsc::SyncSender<(Instant, &'static str)>,
    // Receiver kept behind a Mutex so this struct can be shared via
    // Arc across threads (a bare mpsc Receiver is not Sync).
    buf_recv: std::sync::Mutex<std::sync::mpsc::Receiver<(Instant, &'static str)>>,
}
21
/// A cheap, cloneable handle for collecting timestamped trace points.
///
/// `inner` is `None` for a no-op buffer (the common, unsampled case),
/// in which `log` does nothing. When `Some`, all clones share one
/// `TraceBufferInner`, and the trace is printed once the last clone
/// is dropped.
pub struct TraceBuffer {
    inner: Option<Arc<TraceBufferInner>>,
}
25impl Clone for TraceBuffer {
26    fn clone(&self) -> Self {
27        self.log("Cloning trace buffer");
28        Self {
29            inner: self.inner.clone(),
30        }
31    }
32}
33impl Debug for TraceBuffer {
34    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
35        if self.inner.is_some() {
36            f.write_str("TraceBuffer")
37        } else {
38            f.write_str("Empty TraceBuffer")
39        }
40    }
41}
42
43impl TraceBuffer {
44    pub fn new(force_print: bool) -> TraceBuffer {
45        if force_print
46            || rand::thread_rng().gen_bool(
47                1.0 / TRACE_RATE_DENOMINATOR.load(std::sync::atomic::Ordering::Relaxed) as f64,
48            )
49        {
50            Self::new_filled()
51        } else {
52            Self::empty()
53        }
54    }
55    pub fn log(&self, msg: &'static str) {
56        if let Some(inner) = self.inner.as_ref() {
57            let _ = inner.buf.try_send((Instant::now(), msg));
58        }
59    }
60    fn new_filled() -> TraceBuffer {
61        let (tx, rx) = std::sync::mpsc::sync_channel(4096);
62        let inner = TraceBufferInner {
63            created: Instant::now(),
64            buf: tx,
65            buf_recv: std::sync::Mutex::new(rx),
66        };
67        TraceBuffer {
68            inner: Some(Arc::new(inner)),
69        }
70    }
71    pub fn empty() -> TraceBuffer {
72        TraceBuffer { inner: None }
73    }
74}
75impl Drop for TraceBufferInner {
76    fn drop(&mut self) {
77        println!("+-----TRACE-----");
78        let mut prev_nanos = 0;
79        for (i, (when, msg)) in self.buf_recv.lock().unwrap().try_iter().enumerate() {
80            let nanos = (when - self.created).as_nanos();
81            let diff = nanos - prev_nanos;
82            prev_nanos = nanos;
83            println!("| {: >4} {: >12} (+{: >12}): {}", i, nanos, diff, msg);
84        }
85    }
86}
87
/// Minimal logging interface for types that can accept trace points.
///
/// Messages are `&'static str`, so implementations can log without
/// allocating (mirrors `TraceBuffer::log`).
pub trait TraceLog {
    // Records `msg` as a trace point.
    fn log(&self, msg: &'static str);
}
91
/// Extension trait that lets any value be traced mid-expression,
/// e.g. `compute().trace_point(&tracer, "computed")`.
pub trait LogInspect: Sized {
    // Logs `message` to `tracer` and passes `self` through unchanged,
    // so it can be chained inside expressions.
    fn trace_point(self, tracer: &TraceBuffer, message: &'static str) -> Self {
        tracer.log(message);
        self
    }
}
// Blanket impl: every sized type gets `trace_point` for free.
impl<T> LogInspect for T {}
99
/// An atomic variation of std::time::Instant, able to count
/// about 584 years from when it is constructed with new.
/// It cannot represent times before when it was constructed.
///
/// Internally stores a base `Instant` captured at construction plus
/// an atomic nanosecond offset from that base.
///
/// TODO: On machines that do not support 64-bit atomics,
/// provide a fallback that uses a mutex instead.
///
/// Note: This is tailored for some specific uses in Perovskite
/// (both client and server, hence in the core crate), and isn't
/// intended to be used by general outside usages.
pub struct AtomicInstant {
    // Base time; all stored values are non-negative offsets from this.
    initial: std::time::Instant,
    // Nanoseconds elapsed since `initial` as of the last update.
    offset: std::sync::atomic::AtomicU64,
}

impl Default for AtomicInstant {
    /// Equivalent to [`AtomicInstant::new`] (clippy: `new_without_default`).
    fn default() -> Self {
        Self::new()
    }
}

impl AtomicInstant {
    /// Creates an instant representing "now"; this is also the
    /// earliest time the value can ever hold.
    pub fn new() -> AtomicInstant {
        AtomicInstant {
            initial: std::time::Instant::now(),
            offset: std::sync::atomic::AtomicU64::new(0),
        }
    }
    /// Reads the stored instant with Acquire ordering (pairs with the
    /// `_release` writers).
    pub fn get_acquire(&self) -> std::time::Instant {
        let offset = self.offset.load(std::sync::atomic::Ordering::Acquire);
        self.initial + std::time::Duration::from_nanos(offset)
    }
    /// Stores the current time with Release ordering.
    pub fn update_now_release(&self) {
        self.update_to_release(std::time::Instant::now());
    }
    /// Reads the stored instant with Relaxed ordering.
    pub fn get_relaxed(&self) -> std::time::Instant {
        let offset = self.offset.load(std::sync::atomic::Ordering::Relaxed);
        self.initial + std::time::Duration::from_nanos(offset)
    }
    /// Stores the current time with Relaxed ordering.
    pub fn update_now_relaxed(&self) {
        self.update_to_relaxed(std::time::Instant::now());
    }
    /// Stores `when` with Release ordering.
    ///
    /// # Panics
    /// Panics if `when` is before the construction time, or more than
    /// u64::MAX nanoseconds (~584 years) after it.
    pub fn update_to_release(&self, when: std::time::Instant) {
        self.offset
            .store(self.offset_nanos(when), std::sync::atomic::Ordering::Release);
    }
    /// Stores `when` with Relaxed ordering.
    ///
    /// # Panics
    /// Panics if `when` is before the construction time, or more than
    /// u64::MAX nanoseconds (~584 years) after it.
    pub fn update_to_relaxed(&self, when: std::time::Instant) {
        self.offset
            .store(self.offset_nanos(when), std::sync::atomic::Ordering::Relaxed);
    }
    /// Converts `when` to a nanosecond offset from `self.initial`,
    /// panicking on out-of-range values. Shared by both update paths,
    /// which previously duplicated this logic verbatim.
    fn offset_nanos(&self, when: std::time::Instant) -> u64 {
        if when < self.initial {
            panic!(
                "Attempted to set an instant ({:?}) before AtomicInstant was constructed ({:?})",
                when, self.initial
            );
        }
        when.duration_since(self.initial)
            .as_nanos()
            .try_into()
            .expect("instant offset exceeds u64 nanoseconds (~584 years)")
    }
}
165}