// linux_perf_event_reader/perf_event.rs
1use crate::constants::*;
2use crate::types::*;
3use byteorder::{ByteOrder, ReadBytesExt};
4use std::io;
5use std::io::Read;
6use std::num::NonZeroU64;
7
/// `perf_event_header`
///
/// The fixed-size header that precedes every record in a perf event stream.
#[derive(Debug, Clone, Copy)]
pub struct PerfEventHeader {
    /// The record type.
    pub type_: u32,
    /// Miscellaneous flag bits.
    pub misc: u16,
    /// The self-reported total size of the record, in bytes.
    /// (Per perf_event_open(2) this includes the header itself.)
    pub size: u16,
}
15
16impl PerfEventHeader {
17 pub const STRUCT_SIZE: usize = 4 + 2 + 2;
18
19 pub fn parse<R: Read, T: ByteOrder>(mut reader: R) -> Result<Self, std::io::Error> {
20 let type_ = reader.read_u32::<T>()?;
21 let misc = reader.read_u16::<T>()?;
22 let size = reader.read_u16::<T>()?;
23 Ok(Self { type_, misc, size })
24 }
25}
26
/// `perf_event_attr`
///
/// The parsed representation of the kernel's `perf_event_attr` struct, which
/// describes how a perf event was configured.
#[derive(Debug, Clone, Copy)]
pub struct PerfEventAttr {
    /// The type of the perf event.
    pub type_: PerfEventType,

    /// The sampling policy.
    pub sampling_policy: SamplingPolicy,

    /// Specifies values included in sample. (original name `sample_type`)
    pub sample_format: SampleFormat,

    /// Specifies the structure values returned by read() on a perf event fd,
    /// see [`ReadFormat`].
    pub read_format: ReadFormat,

    /// Bitset of flags, see [`AttrFlags`] for the individual bits.
    pub flags: AttrFlags,

    /// The wake-up policy.
    pub wakeup_policy: WakeupPolicy,

    /// Branch-sample specific flags.
    pub branch_sample_format: BranchSampleFormat,

    /// Defines set of user regs to dump on samples.
    /// See asm/perf_regs.h for details.
    pub sample_regs_user: u64,

    /// Defines size of the user stack to dump on samples.
    pub sample_stack_user: u32,

    /// The clock ID.
    pub clock: PerfClock,

    /// Defines set of regs to dump for each sample
    /// state captured on:
    /// - precise = 0: PMU interrupt
    /// - precise > 0: sampled instruction
    ///
    /// See asm/perf_regs.h for details.
    pub sample_regs_intr: u64,

    /// Wakeup watermark for AUX area
    pub aux_watermark: u32,

    /// When collecting stacks, this is the maximum number of stack frames
    /// (user + kernel) to collect.
    pub sample_max_stack: u16,

    /// When sampling AUX events, this is the size of the AUX sample.
    pub aux_sample_size: u32,

    /// User provided data if sigtrap=1, passed back to user via
    /// siginfo_t::si_perf_data, e.g. to permit user to identify the event.
    /// Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
    /// truncated accordingly on 32 bit architectures.
    pub sig_data: u64,
}
86
87impl PerfEventAttr {
88 /// Parse from a reader. On success, this returns the parsed attribute and
89 /// the number of bytes that were read from the reader. This matches the self-reported
90 /// size in the attribute.
91 pub fn parse<R: Read, T: ByteOrder>(mut reader: R) -> Result<(Self, u64), std::io::Error> {
92 let type_ = reader.read_u32::<T>()?;
93 let size = reader.read_u32::<T>()?;
94 let config = reader.read_u64::<T>()?;
95
96 if size < PERF_ATTR_SIZE_VER0 {
97 return Err(io::ErrorKind::InvalidInput.into());
98 }
99
100 let sampling_period_or_frequency = reader.read_u64::<T>()?;
101 let sample_type = reader.read_u64::<T>()?;
102 let read_format = reader.read_u64::<T>()?;
103 let flags = reader.read_u64::<T>()?;
104 let wakeup_events_or_watermark = reader.read_u32::<T>()?;
105 let bp_type = reader.read_u32::<T>()?;
106 let bp_addr_or_kprobe_func_or_uprobe_func_or_config1 = reader.read_u64::<T>()?;
107
108 let bp_len_or_kprobe_addr_or_probe_offset_or_config2 = if size >= PERF_ATTR_SIZE_VER1 {
109 reader.read_u64::<T>()?
110 } else {
111 0
112 };
113
114 let branch_sample_type = if size >= PERF_ATTR_SIZE_VER2 {
115 reader.read_u64::<T>()?
116 } else {
117 0
118 };
119
120 let (sample_regs_user, sample_stack_user, clockid) = if size >= PERF_ATTR_SIZE_VER3 {
121 let sample_regs_user = reader.read_u64::<T>()?;
122 let sample_stack_user = reader.read_u32::<T>()?;
123 let clockid = reader.read_u32::<T>()?;
124
125 (sample_regs_user, sample_stack_user, clockid)
126 } else {
127 (0, 0, 0)
128 };
129
130 let sample_regs_intr = if size >= PERF_ATTR_SIZE_VER4 {
131 reader.read_u64::<T>()?
132 } else {
133 0
134 };
135
136 let (aux_watermark, sample_max_stack) = if size >= PERF_ATTR_SIZE_VER5 {
137 let aux_watermark = reader.read_u32::<T>()?;
138 let sample_max_stack = reader.read_u16::<T>()?;
139 let __reserved_2 = reader.read_u16::<T>()?;
140 (aux_watermark, sample_max_stack)
141 } else {
142 (0, 0)
143 };
144
145 let aux_sample_size = if size >= PERF_ATTR_SIZE_VER6 {
146 let aux_sample_size = reader.read_u32::<T>()?;
147 let __reserved_3 = reader.read_u32::<T>()?;
148 aux_sample_size
149 } else {
150 0
151 };
152
153 let sig_data = if size >= PERF_ATTR_SIZE_VER7 {
154 reader.read_u64::<T>()?
155 } else {
156 0
157 };
158
159 // Consume any remaining bytes.
160 if size > PERF_ATTR_SIZE_VER7 {
161 let remaining = size - PERF_ATTR_SIZE_VER7;
162 io::copy(&mut reader.by_ref().take(remaining.into()), &mut io::sink())?;
163 }
164
165 let flags = AttrFlags::from_bits_truncate(flags);
166 let type_ = PerfEventType::parse(
167 type_,
168 bp_type,
169 config,
170 bp_addr_or_kprobe_func_or_uprobe_func_or_config1,
171 bp_len_or_kprobe_addr_or_probe_offset_or_config2,
172 )
173 .ok_or(io::ErrorKind::InvalidInput)?;
174
175 // If AttrFlags::FREQ is set in `flags`, this is the sample frequency,
176 // otherwise it is the sample period.
177 //
178 // ```c
179 // union {
180 // /// Period of sampling
181 // __u64 sample_period;
182 // /// Frequency of sampling
183 // __u64 sample_freq;
184 // };
185 // ```
186 let sampling_policy = if flags.contains(AttrFlags::FREQ) {
187 SamplingPolicy::Frequency(sampling_period_or_frequency)
188 } else if let Some(period) = NonZeroU64::new(sampling_period_or_frequency) {
189 SamplingPolicy::Period(period)
190 } else {
191 SamplingPolicy::NoSampling
192 };
193
194 let wakeup_policy = if flags.contains(AttrFlags::WATERMARK) {
195 WakeupPolicy::Watermark(wakeup_events_or_watermark)
196 } else {
197 WakeupPolicy::EventCount(wakeup_events_or_watermark)
198 };
199
200 let clock = if flags.contains(AttrFlags::USE_CLOCKID) {
201 let clockid = ClockId::from_u32(clockid).ok_or(io::ErrorKind::InvalidInput)?;
202 PerfClock::ClockId(clockid)
203 } else {
204 PerfClock::Default
205 };
206
207 let attr = Self {
208 type_,
209 sampling_policy,
210 sample_format: SampleFormat::from_bits_truncate(sample_type),
211 read_format: ReadFormat::from_bits_truncate(read_format),
212 flags,
213 wakeup_policy,
214 branch_sample_format: BranchSampleFormat::from_bits_truncate(branch_sample_type),
215 sample_regs_user,
216 sample_stack_user,
217 clock,
218 sample_regs_intr,
219 aux_watermark,
220 sample_max_stack,
221 aux_sample_size,
222 sig_data,
223 };
224
225 Ok((attr, size.into()))
226 }
227}
228
/// The type of perf event
#[derive(Debug, Clone, Copy)]
pub enum PerfEventType {
    /// A hardware perf event. (`PERF_TYPE_HARDWARE`)
    Hardware(HardwareEventId, PmuTypeId),
    /// A software perf event. (`PERF_TYPE_SOFTWARE`)
    ///
    /// Special "software" events provided by the kernel, even if the hardware
    /// does not support performance events. These events measure various
    /// physical and sw events of the kernel (and allow the profiling of them as
    /// well).
    Software(SoftwareCounterType),
    /// A tracepoint perf event. (`PERF_TYPE_TRACEPOINT`)
    ///
    /// The wrapped value is the raw `config` field of the attr.
    Tracepoint(u64),
    /// A hardware cache perf event. (`PERF_TYPE_HW_CACHE`)
    ///
    /// Selects a certain combination of CacheId, CacheOp, CacheOpResult, PMU type ID.
    ///
    /// ```plain
    /// { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
    /// { read, write, prefetch } x
    /// { accesses, misses }
    /// ```
    HwCache(
        HardwareCacheId,
        HardwareCacheOp,
        HardwareCacheOpResult,
        PmuTypeId,
    ),
    /// A hardware breakpoint perf event. (`PERF_TYPE_BREAKPOINT`)
    ///
    /// Breakpoints can be read/write accesses to an address as well as
    /// execution of an instruction address.
    Breakpoint(HwBreakpointType, HwBreakpointAddr, HwBreakpointLen),
    /// Dynamic PMU
    ///
    /// `(pmu, config, config1, config2)`
    ///
    /// Acceptable values for each of `config`, `config1` and `config2`
    /// parameters are defined by corresponding entries in
    /// `/sys/bus/event_source/devices/<pmu>/format/*`.
    ///
    /// From the `perf_event_open` man page:
    /// > Since Linux 2.6.38, perf_event_open() can support multiple PMUs. To
    /// > enable this, a value exported by the kernel can be used in the type
    /// > field to indicate which PMU to use. The value to use can be found in
    /// > the sysfs filesystem: there is a subdirectory per PMU instance under
    /// > /sys/bus/event_source/devices. In each subdirectory there is a type
    /// > file whose content is an integer that can be used in the type field.
    /// > For instance, /sys/bus/event_source/devices/cpu/type contains the
    /// > value for the core CPU PMU, which is usually 4.
    ///
    /// (I don't fully understand this - the value 4 also means `PERF_TYPE_RAW`.
    /// Maybe the type `Raw` is just one of those dynamic PMUs, usually "core"?)
    ///
    /// Among the "dynamic PMU" values, there are two special values for
    /// kprobes and uprobes:
    ///
    /// > kprobe and uprobe (since Linux 4.17)
    /// > These two dynamic PMUs create a kprobe/uprobe and attach it to the
    /// > file descriptor generated by perf_event_open. The kprobe/uprobe will
    /// > be destroyed on the destruction of the file descriptor. See fields
    /// > kprobe_func, uprobe_path, kprobe_addr, and probe_offset for more details.
    ///
    /// ```c
    /// union {
    ///     __u64 kprobe_func; /* for perf_kprobe */
    ///     __u64 uprobe_path; /* for perf_uprobe */
    ///     __u64 config1; /* extension of config */
    /// };
    ///
    /// union {
    ///     __u64 kprobe_addr; /* when kprobe_func == NULL */
    ///     __u64 probe_offset; /* for perf_[k,u]probe */
    ///     __u64 config2; /* extension of config1 */
    /// };
    /// ```
    DynamicPmu(u32, u64, u64, u64),
}
308
/// PMU type ID
///
/// The PMU type ID allows selecting whether to observe only "atom", only "core",
/// or both. If the PMU type ID is zero, both "atom" and "core" are observed.
/// To observe just one of them, the PMU type ID needs to be set to the value of
/// `/sys/devices/cpu_atom/type` or of `/sys/devices/cpu_core/type`.
#[derive(Debug, Clone, Copy)]
pub struct PmuTypeId(pub u32);
317
/// The address of the breakpoint.
///
/// For execution breakpoints, this is the memory address of the instruction
/// of interest; for read and write breakpoints, it is the memory address of
/// the memory location of interest.
#[derive(Debug, Clone, Copy)]
pub struct HwBreakpointAddr(pub u64);
325
/// The length of the breakpoint being measured.
///
/// Options are `HW_BREAKPOINT_LEN_1`, `HW_BREAKPOINT_LEN_2`,
/// `HW_BREAKPOINT_LEN_4`, and `HW_BREAKPOINT_LEN_8`. For an
/// execution breakpoint, set this to sizeof(long).
#[derive(Debug, Clone, Copy)]
pub struct HwBreakpointLen(pub u64);
333
334impl PerfEventType {
335 pub fn parse(
336 type_: u32,
337 bp_type: u32,
338 config: u64,
339 config1: u64,
340 config2: u64,
341 ) -> Option<Self> {
342 let t = match type_ {
343 PERF_TYPE_HARDWARE => {
344 // Config format: 0xEEEEEEEE000000AA
345 //
346 // - AA: hardware event ID
347 // - EEEEEEEE: PMU type ID
348 let hardware_event_id = (config & 0xff) as u8;
349 let pmu_type = PmuTypeId((config >> 32) as u32);
350 Self::Hardware(HardwareEventId::parse(hardware_event_id)?, pmu_type)
351 }
352 PERF_TYPE_SOFTWARE => Self::Software(SoftwareCounterType::parse(config)?),
353 PERF_TYPE_TRACEPOINT => Self::Tracepoint(config),
354 PERF_TYPE_HW_CACHE => {
355 // Config format: 0xEEEEEEEE00DDCCBB
356 //
357 // - BB: hardware cache ID
358 // - CC: hardware cache op ID
359 // - DD: hardware cache op result ID
360 // - EEEEEEEE: PMU type ID
361 let cache_id = config as u8;
362 let cache_op_id = (config >> 8) as u8;
363 let cache_op_result = (config >> 16) as u8;
364 let pmu_type = PmuTypeId((config >> 32) as u32);
365 Self::HwCache(
366 HardwareCacheId::parse(cache_id)?,
367 HardwareCacheOp::parse(cache_op_id)?,
368 HardwareCacheOpResult::parse(cache_op_result)?,
369 pmu_type,
370 )
371 }
372 PERF_TYPE_BREAKPOINT => {
373 let bp_type = HwBreakpointType::from_bits_truncate(bp_type);
374 Self::Breakpoint(bp_type, HwBreakpointAddr(config1), HwBreakpointLen(config2))
375 }
376 _ => Self::DynamicPmu(type_, config, config1, config2),
377 // PERF_TYPE_RAW is handled as part of DynamicPmu.
378 };
379 Some(t)
380 }
381}
382
/// A hardware event ID, used with `PerfEventType::Hardware`.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum HardwareEventId {
    /// `PERF_COUNT_HW_CPU_CYCLES`
    CpuCycles,
    /// `PERF_COUNT_HW_INSTRUCTIONS`
    Instructions,
    /// `PERF_COUNT_HW_CACHE_REFERENCES`
    CacheReferences,
    /// `PERF_COUNT_HW_CACHE_MISSES`
    CacheMisses,
    /// `PERF_COUNT_HW_BRANCH_INSTRUCTIONS`
    BranchInstructions,
    /// `PERF_COUNT_HW_BRANCH_MISSES`
    BranchMisses,
    /// `PERF_COUNT_HW_BUS_CYCLES`
    BusCycles,
    /// `PERF_COUNT_HW_STALLED_CYCLES_FRONTEND`
    StalledCyclesFrontend,
    /// `PERF_COUNT_HW_STALLED_CYCLES_BACKEND`
    StalledCyclesBackend,
    /// `PERF_COUNT_HW_REF_CPU_CYCLES`
    RefCpuCycles,
}
407
408impl HardwareEventId {
409 pub fn parse(hardware_event_id: u8) -> Option<Self> {
410 let t = match hardware_event_id {
411 PERF_COUNT_HW_CPU_CYCLES => Self::CpuCycles,
412 PERF_COUNT_HW_INSTRUCTIONS => Self::Instructions,
413 PERF_COUNT_HW_CACHE_REFERENCES => Self::CacheReferences,
414 PERF_COUNT_HW_CACHE_MISSES => Self::CacheMisses,
415 PERF_COUNT_HW_BRANCH_INSTRUCTIONS => Self::BranchInstructions,
416 PERF_COUNT_HW_BRANCH_MISSES => Self::BranchMisses,
417 PERF_COUNT_HW_BUS_CYCLES => Self::BusCycles,
418 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND => Self::StalledCyclesFrontend,
419 PERF_COUNT_HW_STALLED_CYCLES_BACKEND => Self::StalledCyclesBackend,
420 PERF_COUNT_HW_REF_CPU_CYCLES => Self::RefCpuCycles,
421 _ => return None,
422 };
423 Some(t)
424 }
425}
426
/// A software counter type, used with `PerfEventType::Software`.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum SoftwareCounterType {
    /// `PERF_COUNT_SW_CPU_CLOCK`
    CpuClock,
    /// `PERF_COUNT_SW_TASK_CLOCK`
    TaskClock,
    /// `PERF_COUNT_SW_PAGE_FAULTS`
    PageFaults,
    /// `PERF_COUNT_SW_CONTEXT_SWITCHES`
    ContextSwitches,
    /// `PERF_COUNT_SW_CPU_MIGRATIONS`
    CpuMigrations,
    /// `PERF_COUNT_SW_PAGE_FAULTS_MIN`
    PageFaultsMin,
    /// `PERF_COUNT_SW_PAGE_FAULTS_MAJ`
    PageFaultsMaj,
    /// `PERF_COUNT_SW_ALIGNMENT_FAULTS`
    AlignmentFaults,
    /// `PERF_COUNT_SW_EMULATION_FAULTS`
    EmulationFaults,
    /// `PERF_COUNT_SW_DUMMY`
    Dummy,
    /// `PERF_COUNT_SW_BPF_OUTPUT`
    BpfOutput,
    /// `PERF_COUNT_SW_CGROUP_SWITCHES`
    CgroupSwitches,
}
455
456impl SoftwareCounterType {
457 pub fn parse(config: u64) -> Option<Self> {
458 let t = match config {
459 PERF_COUNT_SW_CPU_CLOCK => Self::CpuClock,
460 PERF_COUNT_SW_TASK_CLOCK => Self::TaskClock,
461 PERF_COUNT_SW_PAGE_FAULTS => Self::PageFaults,
462 PERF_COUNT_SW_CONTEXT_SWITCHES => Self::ContextSwitches,
463 PERF_COUNT_SW_CPU_MIGRATIONS => Self::CpuMigrations,
464 PERF_COUNT_SW_PAGE_FAULTS_MIN => Self::PageFaultsMin,
465 PERF_COUNT_SW_PAGE_FAULTS_MAJ => Self::PageFaultsMaj,
466 PERF_COUNT_SW_ALIGNMENT_FAULTS => Self::AlignmentFaults,
467 PERF_COUNT_SW_EMULATION_FAULTS => Self::EmulationFaults,
468 PERF_COUNT_SW_DUMMY => Self::Dummy,
469 PERF_COUNT_SW_BPF_OUTPUT => Self::BpfOutput,
470 PERF_COUNT_SW_CGROUP_SWITCHES => Self::CgroupSwitches,
471 _ => return None,
472 };
473 Some(t)
474 }
475}
476
/// A hardware cache ID, used with `PerfEventType::HwCache`.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum HardwareCacheId {
    /// `PERF_COUNT_HW_CACHE_L1D`
    L1d,
    /// `PERF_COUNT_HW_CACHE_L1I`
    L1i,
    /// `PERF_COUNT_HW_CACHE_LL`
    Ll,
    /// `PERF_COUNT_HW_CACHE_DTLB`
    Dtlb,
    /// `PERF_COUNT_HW_CACHE_ITLB`
    Itlb,
    /// `PERF_COUNT_HW_CACHE_BPU`
    Bpu,
    /// `PERF_COUNT_HW_CACHE_NODE`
    Node,
}
495
496impl HardwareCacheId {
497 pub fn parse(cache_id: u8) -> Option<Self> {
498 let rv = match cache_id {
499 PERF_COUNT_HW_CACHE_L1D => Self::L1d,
500 PERF_COUNT_HW_CACHE_L1I => Self::L1i,
501 PERF_COUNT_HW_CACHE_LL => Self::Ll,
502 PERF_COUNT_HW_CACHE_DTLB => Self::Dtlb,
503 PERF_COUNT_HW_CACHE_ITLB => Self::Itlb,
504 PERF_COUNT_HW_CACHE_BPU => Self::Bpu,
505 PERF_COUNT_HW_CACHE_NODE => Self::Node,
506 _ => return None,
507 };
508 Some(rv)
509 }
510}
511
/// A hardware cache operation, used with `PerfEventType::HwCache`.
#[derive(Debug, Clone, Copy)]
pub enum HardwareCacheOp {
    /// `PERF_COUNT_HW_CACHE_OP_READ`
    Read,
    /// `PERF_COUNT_HW_CACHE_OP_WRITE`
    Write,
    /// `PERF_COUNT_HW_CACHE_OP_PREFETCH`
    Prefetch,
}
521
522impl HardwareCacheOp {
523 pub fn parse(cache_op: u8) -> Option<Self> {
524 match cache_op {
525 PERF_COUNT_HW_CACHE_OP_READ => Some(Self::Read),
526 PERF_COUNT_HW_CACHE_OP_WRITE => Some(Self::Write),
527 PERF_COUNT_HW_CACHE_OP_PREFETCH => Some(Self::Prefetch),
528 _ => None,
529 }
530 }
531}
532
/// The result of a hardware cache operation, used with `PerfEventType::HwCache`.
#[derive(Debug, Clone, Copy)]
pub enum HardwareCacheOpResult {
    /// `PERF_COUNT_HW_CACHE_RESULT_ACCESS`
    Access,
    /// `PERF_COUNT_HW_CACHE_RESULT_MISS`
    Miss,
}
540
541impl HardwareCacheOpResult {
542 pub fn parse(cache_op_result: u8) -> Option<Self> {
543 match cache_op_result {
544 PERF_COUNT_HW_CACHE_RESULT_ACCESS => Some(Self::Access),
545 PERF_COUNT_HW_CACHE_RESULT_MISS => Some(Self::Miss),
546 _ => None,
547 }
548 }
549}
550
/// Sampling Policy
///
/// > Events can be set to notify when a threshold is crossed,
/// > indicating an overflow. [...]
/// >
/// > Overflows are generated only by sampling events (sample_period
/// > must have a nonzero value).
#[derive(Debug, Clone, Copy)]
pub enum SamplingPolicy {
    /// `NoSampling` means that the event is a count and not a sampling event.
    NoSampling,
    /// Sets a fixed sampling period for a sampling event, in the unit of the
    /// observed count / event.
    ///
    /// A "sampling" event is one that generates an overflow notification every
    /// N events, where N is given by the sampling period. A sampling event has
    /// a sampling period greater than zero.
    ///
    /// When an overflow occurs, requested data is recorded in the mmap buffer.
    /// The `SampleFormat` bitfield controls what data is recorded on each overflow.
    Period(NonZeroU64),
    /// Sets a frequency for a sampling event, in "samples per (wall-clock) second".
    ///
    /// This uses a dynamic period which is adjusted by the kernel to hit the
    /// desired frequency. The rate of adjustment is a timer tick.
    ///
    /// If `SampleFormat::PERIOD` is requested, the current period at the time of
    /// the sample is stored in the sample.
    Frequency(u64),
}
581
/// Wakeup policy for "overflow notifications". This controls the point at
/// which the `read` call completes. (TODO: double check this)
///
/// > There are two ways to generate overflow notifications.
/// >
/// > The first is to set a `WakeupPolicy`
/// > that will trigger if a certain number of samples or bytes have
/// > been written to the mmap ring buffer.
/// >
/// > The other way is by use of the PERF_EVENT_IOC_REFRESH ioctl.
/// > This ioctl adds to a counter that decrements each time the event
/// > overflows. When nonzero, POLLIN is indicated, but once the
/// > counter reaches 0 POLLHUP is indicated and the underlying event
/// > is disabled.
#[derive(Debug, Clone, Copy)]
pub enum WakeupPolicy {
    /// Wake up every time N records of type `RecordType::SAMPLE` have been
    /// written to the mmap ring buffer.
    EventCount(u32),
    /// Wake up after N bytes of any record type have been written to the mmap
    /// ring buffer.
    ///
    /// To receive a wakeup after every single record, choose `Watermark(1)`.
    /// `Watermark(0)` is treated the same as `Watermark(1)`.
    Watermark(u32),
}
608
/// This allows selecting which internal Linux clock to use when generating
/// timestamps.
///
/// Setting a specific ClockId can make it easier to correlate perf sample
/// times with timestamps generated by other tools. For example, when sampling
/// applications which emit JITDUMP information, you'll usually select the
/// monotonic clock. This makes it possible to correctly order perf event
/// records and JITDUMP records - those also usually use the monotonic clock.
#[derive(Debug, Clone, Copy)]
pub enum PerfClock {
    /// The default clock. If this is used, the timestamps in event records
    /// are obtained with `local_clock()` which is a hardware timestamp if
    /// available and the jiffies value if not.
    ///
    /// In practice, on x86_64 this seems to use ktime_get_ns() which is the
    /// number of nanoseconds since boot.
    Default,

    /// A specific clock.
    ClockId(ClockId),
}