1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
use measureme::TimingGuard;
use std::{
error::Error,
path::PathBuf,
sync::atomic::{AtomicBool, Ordering},
thread::ThreadId,
};
// Serialization backend used by `measureme` for writing trace data.
// NOTE(review): presumably the mmap-backed sink is unavailable or
// undesirable on Windows in this measureme version, hence the
// file-based fallback — confirm against the measureme crate docs.
#[cfg(not(windows))]
type Sink = measureme::MmapSerializationSink;
#[cfg(windows)]
type Sink = measureme::FileSerializationSink;
/// RAII handle returned by [`Profiler::trace`].
///
/// While held, an interval event is being recorded; dropping the guard
/// ends the interval (via the inner `measureme` timing guard's `Drop`).
pub struct Guard<'guard> {
    // `None` when the profiler was disabled at the time of the
    // `trace` call — the guard is then a no-op.
    _inner: Option<TimingGuard<'guard, Sink>>,
}
/// Thin wrapper around [`measureme::Profiler`] adding a runtime
/// on/off switch for event recording.
pub struct Profiler {
    // Underlying measureme profiler that serializes events to `Sink`.
    profiler: measureme::Profiler<Sink>,
    // Recording flag; starts `false` (see `AtomicBool::default()` in
    // the constructor), so `trace` records nothing until `enable()`.
    enabled: AtomicBool,
}
impl Profiler {
    /// Creates a profiler that writes its trace data to `path`.
    ///
    /// Missing parent directories are created first. Recording starts
    /// disabled; call [`Profiler::enable`] to begin capturing events.
    ///
    /// # Errors
    /// Returns an error if directory creation fails or the underlying
    /// `measureme` profiler cannot be constructed.
    pub fn from_path(path: impl Into<PathBuf>) -> Result<Self, Box<dyn Error>> {
        let path = path.into();
        // Only create directories when the target does not already
        // exist and actually has a parent component.
        if !path.exists() {
            if let Some(dir) = path.parent() {
                std::fs::create_dir_all(dir)?;
            }
        }
        let profiler = measureme::Profiler::new(path.as_ref())?;
        Ok(Self {
            profiler,
            enabled: AtomicBool::default(),
        })
    }

    /// Creates a profiler writing to `./trace/<name>-<pid>`.
    ///
    /// # Errors
    /// Propagates any error from [`Profiler::from_path`].
    pub fn from_name(name: impl AsRef<str>) -> Result<Self, Box<dyn Error>> {
        let file = format!("./trace/{}-{}", name.as_ref(), std::process::id());
        Self::from_path(file)
    }

    /// Turns event recording on.
    pub fn enable(&self) {
        self.enabled.store(true, Ordering::SeqCst);
    }

    /// Turns event recording off.
    pub fn disable(&self) {
        self.enabled.store(false, Ordering::SeqCst);
    }

    /// Begins recording an interval event under `category`/`label`;
    /// the interval ends when the returned [`Guard`] is dropped.
    /// Returns a no-op guard while the profiler is disabled.
    #[allow(clippy::cast_possible_truncation)]
    pub fn trace(&self, category: &str, label: &str) -> Guard<'_> {
        if !self.enabled.load(Ordering::SeqCst) {
            return Guard { _inner: None };
        }
        let event_kind = self.profiler.alloc_string(category);
        let event_id = measureme::EventId::from_label(self.profiler.alloc_string(label));
        let recording = self.profiler.start_recording_interval_event(
            event_kind,
            event_id,
            current_thread_id() as u32,
        );
        Guard {
            _inner: Some(recording),
        }
    }
}
/// Returns a `u64` that uniquely and stably identifies the calling thread.
///
/// The previous implementation transmuted `std::thread::ThreadId` to
/// `u64`, which is unsound: `ThreadId` is an opaque type with no
/// guaranteed layout, so the transmute can break (or silently produce
/// garbage) on any toolchain change. Instead, hand out ids from a
/// global counter, memoized per thread via a `thread_local`, which
/// gives the same contract the profiler needs: a deterministic,
/// unique-per-thread integer.
fn current_thread_id() -> u64 {
    use std::sync::atomic::{AtomicU64, Ordering};
    // Next id to hand out; starts at 1 so 0 never appears as a thread id.
    static NEXT_ID: AtomicU64 = AtomicU64::new(1);
    thread_local! {
        // Lazily claimed once per thread, then reused for its lifetime.
        static THIS_THREAD: u64 = NEXT_ID.fetch_add(1, Ordering::Relaxed);
    }
    THIS_THREAD.with(|id| *id)
}