// linux_perf_event_reader/constants.rs
1// Constants from perf_event.h
2
// Values for `perf_event_attr::type` (`enum perf_type_id` in perf_event.h).

/// A hardware perf event.
///
/// Config format: 0xEEEEEEEE000000AA
///
/// - AA: hardware event ID
/// - EEEEEEEE: PMU type ID
pub const PERF_TYPE_HARDWARE: u32 = 0;

/// A software perf event.
///
/// Special "software" events provided by the kernel, even if the hardware
/// does not support performance events. These events measure various
/// physical and sw events of the kernel (and allow the profiling of them as
/// well).
pub const PERF_TYPE_SOFTWARE: u32 = 1;

/// A tracepoint perf event.
pub const PERF_TYPE_TRACEPOINT: u32 = 2;

/// A hardware cache perf event.
///
/// The corresponding `attr.config` chooses the cache, the cache op,
/// and the cache op result.
///
/// Config format: 0xEEEEEEEE00DDCCBB
///
/// - BB: hardware cache ID
/// - CC: hardware cache op ID
/// - DD: hardware cache op result ID
/// - EEEEEEEE: PMU type ID
///
/// ```plain
/// { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
/// { read, write, prefetch } x
/// { accesses, misses }
/// ```
pub const PERF_TYPE_HW_CACHE: u32 = 3;

/// A raw perf event.
pub const PERF_TYPE_RAW: u32 = 4;

/// A breakpoint perf event.
pub const PERF_TYPE_BREAKPOINT: u32 = 5;
46
// `attr.config` values for events of type `PERF_TYPE_HARDWARE`
// (`enum perf_hw_id` in perf_event.h).

/// Total cycles. Be wary of what happens during CPU frequency scaling.
pub const PERF_COUNT_HW_CPU_CYCLES: u8 = 0;
/// Retired instructions.
pub const PERF_COUNT_HW_INSTRUCTIONS: u8 = 1;
/// Cache accesses (usually the last-level cache).
pub const PERF_COUNT_HW_CACHE_REFERENCES: u8 = 2;
/// Cache misses (usually the last-level cache).
pub const PERF_COUNT_HW_CACHE_MISSES: u8 = 3;
/// Retired branch instructions.
pub const PERF_COUNT_HW_BRANCH_INSTRUCTIONS: u8 = 4;
/// Mispredicted branch instructions.
pub const PERF_COUNT_HW_BRANCH_MISSES: u8 = 5;
/// Bus cycles.
pub const PERF_COUNT_HW_BUS_CYCLES: u8 = 6;
/// Stalled cycles during issue.
pub const PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: u8 = 7;
/// Stalled cycles during retirement.
pub const PERF_COUNT_HW_STALLED_CYCLES_BACKEND: u8 = 8;
/// Total cycles, not affected by CPU frequency scaling.
pub const PERF_COUNT_HW_REF_CPU_CYCLES: u8 = 9;
57
// `attr.config` values for events of type `PERF_TYPE_SOFTWARE`
// (`enum perf_sw_ids` in perf_event.h).

/// The CPU clock, a high-resolution per-CPU timer.
pub const PERF_COUNT_SW_CPU_CLOCK: u64 = 0;
/// A clock count specific to the task that is running.
pub const PERF_COUNT_SW_TASK_CLOCK: u64 = 1;
/// Page faults, minor and major combined.
pub const PERF_COUNT_SW_PAGE_FAULTS: u64 = 2;
/// Context switches.
pub const PERF_COUNT_SW_CONTEXT_SWITCHES: u64 = 3;
/// Migrations of the process to another CPU.
pub const PERF_COUNT_SW_CPU_MIGRATIONS: u64 = 4;
/// Minor page faults (no disk I/O required).
pub const PERF_COUNT_SW_PAGE_FAULTS_MIN: u64 = 5;
/// Major page faults (disk I/O required).
pub const PERF_COUNT_SW_PAGE_FAULTS_MAJ: u64 = 6;
/// Unaligned memory accesses that the kernel had to fix up.
pub const PERF_COUNT_SW_ALIGNMENT_FAULTS: u64 = 7;
/// Instructions that the kernel had to emulate.
pub const PERF_COUNT_SW_EMULATION_FAULTS: u64 = 8;
/// A placeholder event that counts nothing; useful for collecting only
/// side-band records.
pub const PERF_COUNT_SW_DUMMY: u64 = 9;
/// Samples emitted by `bpf_perf_event_output()`.
pub const PERF_COUNT_SW_BPF_OUTPUT: u64 = 10;
/// Context switches to a task in a different cgroup.
pub const PERF_COUNT_SW_CGROUP_SWITCHES: u64 = 11;
70
// `attr.config` components for `PERF_TYPE_HW_CACHE` events: the cache
// being targeted (byte BB of the config format documented on
// PERF_TYPE_HW_CACHE).
pub const PERF_COUNT_HW_CACHE_L1D: u8 = 0;
pub const PERF_COUNT_HW_CACHE_L1I: u8 = 1;
pub const PERF_COUNT_HW_CACHE_LL: u8 = 2;
pub const PERF_COUNT_HW_CACHE_DTLB: u8 = 3;
pub const PERF_COUNT_HW_CACHE_ITLB: u8 = 4;
pub const PERF_COUNT_HW_CACHE_BPU: u8 = 5;
pub const PERF_COUNT_HW_CACHE_NODE: u8 = 6;

// The cache operation (byte CC of the config format).
pub const PERF_COUNT_HW_CACHE_OP_READ: u8 = 0;
pub const PERF_COUNT_HW_CACHE_OP_WRITE: u8 = 1;
pub const PERF_COUNT_HW_CACHE_OP_PREFETCH: u8 = 2;

// The cache operation result (byte DD of the config format).
pub const PERF_COUNT_HW_CACHE_RESULT_ACCESS: u8 = 0;
pub const PERF_COUNT_HW_CACHE_RESULT_MISS: u8 = 1;
85
// Breakpoint type bits for `PERF_TYPE_BREAKPOINT` events
// (uapi/linux/hw_breakpoint.h). R and W may be combined.
pub const HW_BREAKPOINT_EMPTY: u8 = 0;
pub const HW_BREAKPOINT_R: u8 = 1;
pub const HW_BREAKPOINT_W: u8 = 2;
pub const HW_BREAKPOINT_RW: u8 = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
pub const HW_BREAKPOINT_X: u8 = 4;
/// R/W combined with X (value 7) is not a valid breakpoint type.
pub const HW_BREAKPOINT_INVALID: u8 = HW_BREAKPOINT_RW | HW_BREAKPOINT_X;
92
// Valid values of `perf_event_attr::size`; each published revision of the
// struct has its own size, with the doc comment noting the fields that the
// revision added.
/// sizeof first published struct
pub const PERF_ATTR_SIZE_VER0: u32 = 64;
/// add: config2
pub const PERF_ATTR_SIZE_VER1: u32 = 72;
/// add: branch_sample_type
pub const PERF_ATTR_SIZE_VER2: u32 = 80;
/// add: sample_regs_user, sample_stack_user, clockid
pub const PERF_ATTR_SIZE_VER3: u32 = 96;
/// add: sample_regs_intr
pub const PERF_ATTR_SIZE_VER4: u32 = 104;
/// add: aux_watermark
pub const PERF_ATTR_SIZE_VER5: u32 = 112;
/// add: aux_sample_size
pub const PERF_ATTR_SIZE_VER6: u32 = 120;
/// add: sig_data
pub const PERF_ATTR_SIZE_VER7: u32 = 128;
109
// Bit positions within the `perf_event_attr` flags bitfield (the u64 of
// one-bit fields that starts with `disabled` in the C struct).
/// off by default
pub const ATTR_FLAG_BIT_DISABLED: u64 = 1 << 0;
/// children inherit it
pub const ATTR_FLAG_BIT_INHERIT: u64 = 1 << 1;
/// must always be on PMU
pub const ATTR_FLAG_BIT_PINNED: u64 = 1 << 2;
/// only group on PMU
pub const ATTR_FLAG_BIT_EXCLUSIVE: u64 = 1 << 3;
/// don't count user
pub const ATTR_FLAG_BIT_EXCLUDE_USER: u64 = 1 << 4;
/// don't count kernel
pub const ATTR_FLAG_BIT_EXCLUDE_KERNEL: u64 = 1 << 5;
/// don't count hypervisor
pub const ATTR_FLAG_BIT_EXCLUDE_HV: u64 = 1 << 6;
/// don't count when idle
pub const ATTR_FLAG_BIT_EXCLUDE_IDLE: u64 = 1 << 7;
/// include mmap data
pub const ATTR_FLAG_BIT_MMAP: u64 = 1 << 8;
/// include comm data
pub const ATTR_FLAG_BIT_COMM: u64 = 1 << 9;
/// use freq, not period
pub const ATTR_FLAG_BIT_FREQ: u64 = 1 << 10;
/// per task counts
pub const ATTR_FLAG_BIT_INHERIT_STAT: u64 = 1 << 11;
/// next exec enables
pub const ATTR_FLAG_BIT_ENABLE_ON_EXEC: u64 = 1 << 12;
/// trace fork/exit
pub const ATTR_FLAG_BIT_TASK: u64 = 1 << 13;
/// wakeup_watermark
pub const ATTR_FLAG_BIT_WATERMARK: u64 = 1 << 14;
/// skid constraint
/// Specifies how precise the instruction address should be.
///
/// From the perf-list man page:
///
/// > 0 - SAMPLE_IP can have arbitrary skid
/// > 1 - SAMPLE_IP must have constant skid
/// > 2 - SAMPLE_IP requested to have 0 skid
/// > 3 - SAMPLE_IP must have 0 skid, or uses randomization to avoid
/// > sample shadowing effects.
/// >
/// > For Intel systems precise event sampling is implemented with PEBS
/// > which supports up to precise-level 2, and precise level 3 for
/// > some special cases.
/// >
/// > On AMD systems it is implemented using IBS (up to precise-level
/// > 2). The precise modifier works with event types 0x76 (cpu-cycles,
/// > CPU clocks not halted) and 0xC1 (micro-ops retired). Both events
/// > map to IBS execution sampling (IBS op) with the IBS Op Counter
/// > Control bit (IbsOpCntCtl) set respectively (see AMD64
/// > Architecture Programmer’s Manual Volume 2: System Programming,
/// > 13.3 Instruction-Based Sampling). Examples to use IBS:
/// >
/// > perf record -a -e cpu-cycles:p ... # use ibs op counting cycles
/// > perf record -a -e r076:p ... # same as -e cpu-cycles:p
/// > perf record -a -e r0C1:p ... # use ibs op counting micro-ops
///
/// From Brendan Gregg's perf page:
///
/// > perf can use precise sampling by adding a :p modifier to the PMC event
/// > name, eg, "-e instructions:p". The more p's, the more accurate.
///
/// Unlike the single-bit `ATTR_FLAG_BIT_*` constants, this is a mask for
/// a two-bit field occupying bits 15-16; the precision level is
/// `(flags >> 15) & 0b11`.
pub const ATTR_FLAG_BITMASK_PRECISE_IP: u64 = (1 << 15) | (1 << 16);
/// non-exec mmap data
pub const ATTR_FLAG_BIT_MMAP_DATA: u64 = 1 << 17;
/// sample_type all events (attach the sample_id trailer to non-sample records)
pub const ATTR_FLAG_BIT_SAMPLE_ID_ALL: u64 = 1 << 18;
/// don't count in host
pub const ATTR_FLAG_BIT_EXCLUDE_HOST: u64 = 1 << 19;
/// don't count in guest
pub const ATTR_FLAG_BIT_EXCLUDE_GUEST: u64 = 1 << 20;
/// exclude kernel callchains
pub const ATTR_FLAG_BIT_EXCLUDE_CALLCHAIN_KERNEL: u64 = 1 << 21;
/// exclude user callchains
pub const ATTR_FLAG_BIT_EXCLUDE_CALLCHAIN_USER: u64 = 1 << 22;
/// include mmap with inode data
pub const ATTR_FLAG_BIT_MMAP2: u64 = 1 << 23;
/// flag comm events that are due to exec
pub const ATTR_FLAG_BIT_COMM_EXEC: u64 = 1 << 24;
/// use @clockid for time fields
pub const ATTR_FLAG_BIT_USE_CLOCKID: u64 = 1 << 25;
/// context switch data
pub const ATTR_FLAG_BIT_CONTEXT_SWITCH: u64 = 1 << 26;
/// Write ring buffer from end to beginning
pub const ATTR_FLAG_BIT_WRITE_BACKWARD: u64 = 1 << 27;
/// include namespaces data
pub const ATTR_FLAG_BIT_NAMESPACES: u64 = 1 << 28;
/// include ksymbol events
pub const ATTR_FLAG_BIT_KSYMBOL: u64 = 1 << 29;
/// include bpf events
pub const ATTR_FLAG_BIT_BPF_EVENT: u64 = 1 << 30;
/// generate AUX records instead of events
pub const ATTR_FLAG_BIT_AUX_OUTPUT: u64 = 1 << 31;
/// include cgroup events
pub const ATTR_FLAG_BIT_CGROUP: u64 = 1 << 32;
/// include text poke events
pub const ATTR_FLAG_BIT_TEXT_POKE: u64 = 1 << 33;
/// use build id in mmap2 events
pub const ATTR_FLAG_BIT_BUILD_ID: u64 = 1 << 34;
/// children only inherit if cloned with CLONE_THREAD
pub const ATTR_FLAG_BIT_INHERIT_THREAD: u64 = 1 << 35;
/// event is removed from task on exec
pub const ATTR_FLAG_BIT_REMOVE_ON_EXEC: u64 = 1 << 36;
/// send synchronous SIGTRAP on event
pub const ATTR_FLAG_BIT_SIGTRAP: u64 = 1 << 37;
215
216/*
217 * If perf_event_attr.sample_id_all is set then all event types will
218 * have the sample_type selected fields related to where/when
219 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
220 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
221 * just after the perf_event_header and the fields already present for
222 * the existing fields, i.e. at the end of the payload. That way a newer
223 * perf.data file will be supported by older perf tools, with these new
224 * optional fields being ignored.
225 *
226 * struct sample_id {
227 * { u32 pid, tid; } && PERF_SAMPLE_TID
228 * { u64 time; } && PERF_SAMPLE_TIME
229 * { u64 id; } && PERF_SAMPLE_ID
230 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
231 * { u32 cpu, res; } && PERF_SAMPLE_CPU
232 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
233 * } && perf_event_attr::sample_id_all
234 *
235 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
236 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
237 * relative to header.size.
238 */
239
240/*
241 * The MMAP events record the PROT_EXEC mappings so that we can
242 * correlate userspace IPs to code. They have the following structure:
243 *
244 * struct {
245 * struct perf_event_header header;
246 *
247 * u32 pid, tid;
248 * u64 addr;
249 * u64 len;
250 * u64 pgoff;
251 * char filename[];
252 * struct sample_id sample_id;
253 * };
254 */
/// A PROT_EXEC memory mapping was created (struct layout in the comment above).
pub const PERF_RECORD_MMAP: u32 = 1;
256
257/*
258 * struct {
259 * struct perf_event_header header;
260 * u64 id;
261 * u64 lost;
262 * struct sample_id sample_id;
263 * };
264 */
/// Events were lost; the record carries the count in its `lost` field.
pub const PERF_RECORD_LOST: u32 = 2;
266
267/*
268 * struct {
269 * struct perf_event_header header;
270 *
271 * u32 pid, tid;
272 * char comm[];
273 * struct sample_id sample_id;
274 * };
275 */
/// Carries a thread's command name (`comm`); struct layout in the comment above.
pub const PERF_RECORD_COMM: u32 = 3;
277
278/*
279 * struct {
280 * struct perf_event_header header;
281 * u32 pid, ppid;
282 * u32 tid, ptid;
283 * u64 time;
284 * struct sample_id sample_id;
285 * };
286 */
/// A process or thread exited; carries pid/ppid, tid/ptid and a timestamp.
pub const PERF_RECORD_EXIT: u32 = 4;
288
289/*
290 * struct {
291 * struct perf_event_header header;
292 * u64 time;
293 * u64 id;
294 * u64 stream_id;
295 * struct sample_id sample_id;
296 * };
297 */
/// The kernel throttled the event's sample rate.
pub const PERF_RECORD_THROTTLE: u32 = 5;
/// The kernel resumed (unthrottled) the event's sample rate.
pub const PERF_RECORD_UNTHROTTLE: u32 = 6;
300
301/*
302 * struct {
303 * struct perf_event_header header;
304 * u32 pid, ppid;
305 * u32 tid, ptid;
306 * u64 time;
307 * struct sample_id sample_id;
308 * };
309 */
/// A process or thread was forked; carries pid/ppid, tid/ptid and a timestamp.
pub const PERF_RECORD_FORK: u32 = 7;
311
312/*
313 * struct {
314 * struct perf_event_header header;
315 * u32 pid, tid;
316 *
317 * struct read_format values;
318 * struct sample_id sample_id;
319 * };
320 */
/// A counter read; carries a `read_format` value for the given pid/tid.
pub const PERF_RECORD_READ: u32 = 8;
322
323/*
324 * struct {
325 * struct perf_event_header header;
326 *
327 * #
328 * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
329 * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
330 * # is fixed relative to header.
331 * #
332 *
333 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
334 * { u64 ip; } && PERF_SAMPLE_IP
335 * { u32 pid, tid; } && PERF_SAMPLE_TID
336 * { u64 time; } && PERF_SAMPLE_TIME
337 * { u64 addr; } && PERF_SAMPLE_ADDR
338 * { u64 id; } && PERF_SAMPLE_ID
339 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
340 * { u32 cpu, res; } && PERF_SAMPLE_CPU
341 * { u64 period; } && PERF_SAMPLE_PERIOD
342 *
343 * { struct read_format values; } && PERF_SAMPLE_READ
344 *
345 * #
346 * # The callchain includes both regular addresses, and special "context"
347 * # frames. The context frames are >= PERF_CONTEXT_MAX and annotate the
348 * # subsequent addresses as user / kernel / hypervisor / guest addresses.
349 * #
350 *
351 * { u64 nr,
352 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
353 *
354 * #
355 * # The RAW record below is opaque data wrt the ABI
356 * #
357 * # That is, the ABI doesn't make any promises wrt to
358 * # the stability of its content, it may vary depending
359 * # on event, hardware, kernel version and phase of
360 * # the moon.
361 * #
362 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
363 * #
364 *
365 * { u32 size;
366 * char data[size];}&& PERF_SAMPLE_RAW
367 *
368 * { u64 nr;
369 * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
370 * { u64 from, to, flags } lbr[nr];
371 * } && PERF_SAMPLE_BRANCH_STACK
372 *
373 * { u64 abi; # enum perf_sample_regs_abi
374 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
375 *
376 * { u64 size;
377 * char data[size];
378 * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
379 *
380 * { union perf_sample_weight
381 * {
382 * u64 full; && PERF_SAMPLE_WEIGHT
383 * #if defined(__LITTLE_ENDIAN_BITFIELD)
384 * struct {
385 * u32 var1_dw;
386 * u16 var2_w;
387 * u16 var3_w;
388 * } && PERF_SAMPLE_WEIGHT_STRUCT
389 * #elif defined(__BIG_ENDIAN_BITFIELD)
390 * struct {
391 * u16 var3_w;
392 * u16 var2_w;
393 * u32 var1_dw;
394 * } && PERF_SAMPLE_WEIGHT_STRUCT
395 * #endif
396 * }
397 * }
398 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
399 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
400 * { u64 abi; # enum perf_sample_regs_abi
401 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
402 * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
403 * { u64 size;
404 * char data[size]; } && PERF_SAMPLE_AUX
405 * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
406 * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
407 * };
408 */
/// A sample. The payload is described field-by-field in the comment above;
/// which fields are present is selected by the event's `sample_type` bits.
pub const PERF_RECORD_SAMPLE: u32 = 9;
410
411/*
412 * The MMAP2 records are an augmented version of MMAP, they add
413 * maj, min, ino numbers to be used to uniquely identify each mapping
414 *
415 * struct {
416 * struct perf_event_header header;
417 *
418 * u32 pid, tid;
419 * u64 addr;
420 * u64 len;
421 * u64 pgoff;
422 * union {
423 * struct {
424 * u32 maj;
425 * u32 min;
426 * u64 ino;
427 * u64 ino_generation;
428 * };
429 * struct {
430 * u8 build_id_size;
431 * u8 __reserved_1;
432 * u16 __reserved_2;
433 * u8 build_id[20];
434 * };
435 * };
436 * u32 prot, flags;
437 * char filename[];
438 * struct sample_id sample_id;
439 * };
440 */
/// Like PERF_RECORD_MMAP, but also carries maj/min/ino (or a build id)
/// plus the full prot/flags of the mapping.
pub const PERF_RECORD_MMAP2: u32 = 10;
442
443/*
444 * Records that new data landed in the AUX buffer part.
445 *
446 * struct {
447 * struct perf_event_header header;
448 *
449 * u64 aux_offset;
450 * u64 aux_size;
451 * u64 flags;
452 * struct sample_id sample_id;
453 * };
454 */
/// New data landed in the AUX buffer part (offset/size/flags above).
pub const PERF_RECORD_AUX: u32 = 11;
456
457/*
458 * Indicates that instruction trace has started
459 *
460 * struct {
461 * struct perf_event_header header;
462 * u32 pid;
463 * u32 tid;
464 * struct sample_id sample_id;
465 * };
466 */
/// Instruction trace has started for the given pid/tid.
pub const PERF_RECORD_ITRACE_START: u32 = 12;
468
469/*
470 * Records the dropped/lost sample number.
471 *
472 * struct {
473 * struct perf_event_header header;
474 *
475 * u64 lost;
476 * struct sample_id sample_id;
477 * };
478 */
/// Carries the number of dropped/lost samples in its `lost` field.
pub const PERF_RECORD_LOST_SAMPLES: u32 = 13;
480
481/*
482 * Records a context switch in or out (flagged by
483 * PERF_RECORD_MISC_SWITCH_OUT). See also
484 * PERF_RECORD_SWITCH_CPU_WIDE.
485 *
486 * struct {
487 * struct perf_event_header header;
488 * struct sample_id sample_id;
489 * };
490 */
/// A context switch in or out (direction flagged by PERF_RECORD_MISC_SWITCH_OUT).
pub const PERF_RECORD_SWITCH: u32 = 14;
492
493/*
494 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
495 * next_prev_tid that are the next (switching out) or previous
496 * (switching in) pid/tid.
497 *
498 * struct {
499 * struct perf_event_header header;
500 * u32 next_prev_pid;
501 * u32 next_prev_tid;
502 * struct sample_id sample_id;
503 * };
504 */
/// CPU-wide context switch; additionally carries the next/previous pid and tid.
pub const PERF_RECORD_SWITCH_CPU_WIDE: u32 = 15;
506
507/*
508 * struct {
509 * struct perf_event_header header;
510 * u32 pid;
511 * u32 tid;
512 * u64 nr_namespaces;
513 * { u64 dev, inode; } [nr_namespaces];
514 * struct sample_id sample_id;
515 * };
516 */
/// Carries a task's namespace (dev, inode) identifiers.
pub const PERF_RECORD_NAMESPACES: u32 = 16;
518
519/*
520 * Record ksymbol register/unregister events:
521 *
522 * struct {
523 * struct perf_event_header header;
524 * u64 addr;
525 * u32 len;
526 * u16 ksym_type;
527 * u16 flags;
528 * char name[];
529 * struct sample_id sample_id;
530 * };
531 */
/// A kernel symbol (ksymbol) was registered or unregistered.
pub const PERF_RECORD_KSYMBOL: u32 = 17;
533
534/*
535 * Record bpf events:
536 * enum perf_bpf_event_type {
537 * PERF_BPF_EVENT_UNKNOWN = 0,
538 * PERF_BPF_EVENT_PROG_LOAD = 1,
539 * PERF_BPF_EVENT_PROG_UNLOAD = 2,
540 * };
541 *
542 * struct {
543 * struct perf_event_header header;
544 * u16 type;
545 * u16 flags;
546 * u32 id;
547 * u8 tag[BPF_TAG_SIZE];
548 * struct sample_id sample_id;
549 * };
550 */
/// A BPF program was loaded or unloaded (see the event types listed above).
pub const PERF_RECORD_BPF_EVENT: u32 = 18;
552
553/*
554 * struct {
555 * struct perf_event_header header;
556 * u64 id;
557 * char path[];
558 * struct sample_id sample_id;
559 * };
560 */
/// Carries a cgroup id and its path.
pub const PERF_RECORD_CGROUP: u32 = 19;
562
563/*
564 * Records changes to kernel text i.e. self-modified code. 'old_len' is
565 * the number of old bytes, 'new_len' is the number of new bytes. Either
566 * 'old_len' or 'new_len' may be zero to indicate, for example, the
567 * addition or removal of a trampoline. 'bytes' contains the old bytes
568 * followed immediately by the new bytes.
569 *
570 * struct {
571 * struct perf_event_header header;
572 * u64 addr;
573 * u16 old_len;
574 * u16 new_len;
575 * u8 bytes[];
576 * struct sample_id sample_id;
577 * };
578 */
/// Kernel text was modified (self-modifying code); carries the old and new bytes.
pub const PERF_RECORD_TEXT_POKE: u32 = 20;
580
581/*
582 * Data written to the AUX area by hardware due to aux_output, may need
583 * to be matched to the event by an architecture-specific hardware ID.
584 * This records the hardware ID, but requires sample_id to provide the
585 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
586 * records from multiple events.
587 *
588 * struct {
589 * struct perf_event_header header;
590 * u64 hw_id;
591 * struct sample_id sample_id;
592 * };
593 */
/// Hardware ID for matching aux_output data to its event (e.g. PEBS-via-PT).
pub const PERF_RECORD_AUX_OUTPUT_HW_ID: u32 = 21;
595
/// Record types at or above this value are synthesized in user space
/// (e.g. by the perf tool) rather than produced by the kernel.
pub const PERF_RECORD_USER_TYPE_START: u32 = 64;
597
// Bits for `perf_event_attr::sample_type`. Each bit selects a field that is
// included in PERF_RECORD_SAMPLE records (and, with `sample_id_all` set, in
// the sample_id trailer of other record types; see the comment further up).
pub const PERF_SAMPLE_IP: u64 = 1 << 0;
pub const PERF_SAMPLE_TID: u64 = 1 << 1;
pub const PERF_SAMPLE_TIME: u64 = 1 << 2;
pub const PERF_SAMPLE_ADDR: u64 = 1 << 3;
pub const PERF_SAMPLE_READ: u64 = 1 << 4;
pub const PERF_SAMPLE_CALLCHAIN: u64 = 1 << 5;
pub const PERF_SAMPLE_ID: u64 = 1 << 6;
pub const PERF_SAMPLE_CPU: u64 = 1 << 7;
pub const PERF_SAMPLE_PERIOD: u64 = 1 << 8;
pub const PERF_SAMPLE_STREAM_ID: u64 = 1 << 9;
pub const PERF_SAMPLE_RAW: u64 = 1 << 10;
pub const PERF_SAMPLE_BRANCH_STACK: u64 = 1 << 11;
pub const PERF_SAMPLE_REGS_USER: u64 = 1 << 12;
pub const PERF_SAMPLE_STACK_USER: u64 = 1 << 13;
pub const PERF_SAMPLE_WEIGHT: u64 = 1 << 14;
pub const PERF_SAMPLE_DATA_SRC: u64 = 1 << 15;
pub const PERF_SAMPLE_IDENTIFIER: u64 = 1 << 16;
pub const PERF_SAMPLE_TRANSACTION: u64 = 1 << 17;
pub const PERF_SAMPLE_REGS_INTR: u64 = 1 << 18;
pub const PERF_SAMPLE_PHYS_ADDR: u64 = 1 << 19;
pub const PERF_SAMPLE_AUX: u64 = 1 << 20;
pub const PERF_SAMPLE_CGROUP: u64 = 1 << 21;
pub const PERF_SAMPLE_DATA_PAGE_SIZE: u64 = 1 << 22;
pub const PERF_SAMPLE_CODE_PAGE_SIZE: u64 = 1 << 23;
pub const PERF_SAMPLE_WEIGHT_STRUCT: u64 = 1 << 24;
623
// Register indices for x86 registers in `sample_regs_user` /
// `sample_regs_intr` bitmasks (arch/x86/include/uapi/asm/perf_regs.h).
pub const PERF_REG_X86_AX: u64 = 0;
pub const PERF_REG_X86_BX: u64 = 1;
pub const PERF_REG_X86_CX: u64 = 2;
pub const PERF_REG_X86_DX: u64 = 3;
pub const PERF_REG_X86_SI: u64 = 4;
pub const PERF_REG_X86_DI: u64 = 5;
pub const PERF_REG_X86_BP: u64 = 6;
pub const PERF_REG_X86_SP: u64 = 7;
pub const PERF_REG_X86_IP: u64 = 8;
pub const PERF_REG_X86_FLAGS: u64 = 9;
pub const PERF_REG_X86_CS: u64 = 10;
pub const PERF_REG_X86_SS: u64 = 11;
pub const PERF_REG_X86_DS: u64 = 12;
pub const PERF_REG_X86_ES: u64 = 13;
pub const PERF_REG_X86_FS: u64 = 14;
pub const PERF_REG_X86_GS: u64 = 15;
pub const PERF_REG_X86_R8: u64 = 16;
pub const PERF_REG_X86_R9: u64 = 17;
pub const PERF_REG_X86_R10: u64 = 18;
pub const PERF_REG_X86_R11: u64 = 19;
pub const PERF_REG_X86_R12: u64 = 20;
pub const PERF_REG_X86_R13: u64 = 21;
pub const PERF_REG_X86_R14: u64 = 22;
pub const PERF_REG_X86_R15: u64 = 23;

/// One past the highest register index valid on 32-bit x86 (R8..R15 are 64-bit only).
pub const PERF_REG_X86_32_MAX: u64 = PERF_REG_X86_GS + 1;
/// One past the highest register index valid on x86-64.
pub const PERF_REG_X86_64_MAX: u64 = PERF_REG_X86_R15 + 1;
651
// Register indices for ARM (AArch32) registers in `sample_regs_user` /
// `sample_regs_intr` bitmasks (arch/arm/include/uapi/asm/perf_regs.h).
pub const PERF_REG_ARM_R0: u64 = 0;
pub const PERF_REG_ARM_R1: u64 = 1;
pub const PERF_REG_ARM_R2: u64 = 2;
pub const PERF_REG_ARM_R3: u64 = 3;
pub const PERF_REG_ARM_R4: u64 = 4;
pub const PERF_REG_ARM_R5: u64 = 5;
pub const PERF_REG_ARM_R6: u64 = 6;
pub const PERF_REG_ARM_R7: u64 = 7;
pub const PERF_REG_ARM_R8: u64 = 8;
pub const PERF_REG_ARM_R9: u64 = 9;
pub const PERF_REG_ARM_R10: u64 = 10;
pub const PERF_REG_ARM_FP: u64 = 11;
pub const PERF_REG_ARM_IP: u64 = 12;
pub const PERF_REG_ARM_SP: u64 = 13;
pub const PERF_REG_ARM_LR: u64 = 14;
pub const PERF_REG_ARM_PC: u64 = 15;
/// One past the highest ARM register index.
///
/// Derived from `PERF_REG_ARM_PC` for consistency with the x86 and MIPS
/// `*_MAX` constants in this file; the value is unchanged (16).
pub const PERF_REG_ARM_MAX: u64 = PERF_REG_ARM_PC + 1;
669
// Register indices for MIPS registers in `sample_regs_user` bitmasks
// (arch/mips/include/uapi/asm/perf_regs.h). Note that the kernel's enum
// has no entries for R26 and R27, so R28..R31 map to indices 26..29.
pub const PERF_REG_MIPS_PC: u64 = 0;
pub const PERF_REG_MIPS_R1: u64 = 1;
pub const PERF_REG_MIPS_R2: u64 = 2;
pub const PERF_REG_MIPS_R3: u64 = 3;
pub const PERF_REG_MIPS_R4: u64 = 4;
pub const PERF_REG_MIPS_R5: u64 = 5;
pub const PERF_REG_MIPS_R6: u64 = 6;
pub const PERF_REG_MIPS_R7: u64 = 7;
pub const PERF_REG_MIPS_R8: u64 = 8;
pub const PERF_REG_MIPS_R9: u64 = 9;
pub const PERF_REG_MIPS_R10: u64 = 10;
pub const PERF_REG_MIPS_R11: u64 = 11;
pub const PERF_REG_MIPS_R12: u64 = 12;
pub const PERF_REG_MIPS_R13: u64 = 13;
pub const PERF_REG_MIPS_R14: u64 = 14;
pub const PERF_REG_MIPS_R15: u64 = 15;
pub const PERF_REG_MIPS_R16: u64 = 16;
pub const PERF_REG_MIPS_R17: u64 = 17;
pub const PERF_REG_MIPS_R18: u64 = 18;
pub const PERF_REG_MIPS_R19: u64 = 19;
pub const PERF_REG_MIPS_R20: u64 = 20;
pub const PERF_REG_MIPS_R21: u64 = 21;
pub const PERF_REG_MIPS_R22: u64 = 22;
pub const PERF_REG_MIPS_R23: u64 = 23;
pub const PERF_REG_MIPS_R24: u64 = 24;
pub const PERF_REG_MIPS_R25: u64 = 25;
pub const PERF_REG_MIPS_R28: u64 = 26;
pub const PERF_REG_MIPS_R29: u64 = 27;
pub const PERF_REG_MIPS_R30: u64 = 28;
pub const PERF_REG_MIPS_R31: u64 = 29;
/// One past the highest MIPS register index.
pub const PERF_REG_MIPS_MAX: u64 = PERF_REG_MIPS_R31 + 1;
701
// Register indices for AArch64 registers in `sample_regs_user` /
// `sample_regs_intr` bitmasks (arch/arm64/include/uapi/asm/perf_regs.h).
pub const PERF_REG_ARM64_X0: u64 = 0;
pub const PERF_REG_ARM64_X1: u64 = 1;
pub const PERF_REG_ARM64_X2: u64 = 2;
pub const PERF_REG_ARM64_X3: u64 = 3;
pub const PERF_REG_ARM64_X4: u64 = 4;
pub const PERF_REG_ARM64_X5: u64 = 5;
pub const PERF_REG_ARM64_X6: u64 = 6;
pub const PERF_REG_ARM64_X7: u64 = 7;
pub const PERF_REG_ARM64_X8: u64 = 8;
pub const PERF_REG_ARM64_X9: u64 = 9;
pub const PERF_REG_ARM64_X10: u64 = 10;
pub const PERF_REG_ARM64_X11: u64 = 11;
pub const PERF_REG_ARM64_X12: u64 = 12;
pub const PERF_REG_ARM64_X13: u64 = 13;
pub const PERF_REG_ARM64_X14: u64 = 14;
pub const PERF_REG_ARM64_X15: u64 = 15;
pub const PERF_REG_ARM64_X16: u64 = 16;
pub const PERF_REG_ARM64_X17: u64 = 17;
pub const PERF_REG_ARM64_X18: u64 = 18;
pub const PERF_REG_ARM64_X19: u64 = 19;
pub const PERF_REG_ARM64_X20: u64 = 20;
pub const PERF_REG_ARM64_X21: u64 = 21;
pub const PERF_REG_ARM64_X22: u64 = 22;
pub const PERF_REG_ARM64_X23: u64 = 23;
pub const PERF_REG_ARM64_X24: u64 = 24;
pub const PERF_REG_ARM64_X25: u64 = 25;
pub const PERF_REG_ARM64_X26: u64 = 26;
pub const PERF_REG_ARM64_X27: u64 = 27;
pub const PERF_REG_ARM64_X28: u64 = 28;
pub const PERF_REG_ARM64_X29: u64 = 29;
pub const PERF_REG_ARM64_LR: u64 = 30;
pub const PERF_REG_ARM64_SP: u64 = 31;
pub const PERF_REG_ARM64_PC: u64 = 32;
/// One past the highest AArch64 register index.
///
/// Derived from `PERF_REG_ARM64_PC` for consistency with the x86 and MIPS
/// `*_MAX` constants in this file; the value is unchanged (33).
pub const PERF_REG_ARM64_MAX: u64 = PERF_REG_ARM64_PC + 1;
736
// Values of the `abi` field preceding PERF_SAMPLE_REGS_USER /
// PERF_SAMPLE_REGS_INTR register dumps (`enum perf_sample_regs_abi`).
/// No registers were captured.
pub const PERF_SAMPLE_REGS_ABI_NONE: u64 = 0;
/// The registers were captured from a 32-bit context.
pub const PERF_SAMPLE_REGS_ABI_32: u64 = 1;
/// The registers were captured from a 64-bit context.
pub const PERF_SAMPLE_REGS_ABI_64: u64 = 2;
739
// Bits for `perf_event_attr::read_format`, selecting which fields a
// counter read returns.
/// Include the total time the event was enabled.
pub const PERF_FORMAT_TOTAL_TIME_ENABLED: u64 = 1 << 0;
/// Include the total time the event was actually running (can be less
/// than the enabled time when events are multiplexed).
pub const PERF_FORMAT_TOTAL_TIME_RUNNING: u64 = 1 << 1;
/// Include the event's unique ID.
pub const PERF_FORMAT_ID: u64 = 1 << 2;
/// Read all counters in the event's group at once.
pub const PERF_FORMAT_GROUP: u64 = 1 << 3;
744
/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
// Bit shifts for `perf_event_attr::branch_sample_type`; the corresponding
// flag values are defined below as `1 << shift`.
/// user branches
pub const PERF_SAMPLE_BRANCH_USER_SHIFT: u32 = 0;
/// kernel branches
pub const PERF_SAMPLE_BRANCH_KERNEL_SHIFT: u32 = 1;
/// hypervisor branches
pub const PERF_SAMPLE_BRANCH_HV_SHIFT: u32 = 2;
/// any branch types
pub const PERF_SAMPLE_BRANCH_ANY_SHIFT: u32 = 3;
/// any call branch
pub const PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT: u32 = 4;
/// any return branch
pub const PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT: u32 = 5;
/// indirect calls
pub const PERF_SAMPLE_BRANCH_IND_CALL_SHIFT: u32 = 6;
/// transaction aborts
pub const PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT: u32 = 7;
/// in transaction
pub const PERF_SAMPLE_BRANCH_IN_TX_SHIFT: u32 = 8;
/// not in transaction
pub const PERF_SAMPLE_BRANCH_NO_TX_SHIFT: u32 = 9;
/// conditional branches
pub const PERF_SAMPLE_BRANCH_COND_SHIFT: u32 = 10;
/// call/ret stack
pub const PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT: u32 = 11;
/// indirect jumps
pub const PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT: u32 = 12;
/// direct call
pub const PERF_SAMPLE_BRANCH_CALL_SHIFT: u32 = 13;
/// no flags
pub const PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT: u32 = 14;
/// no cycles
pub const PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT: u32 = 15;
/// save branch type
pub const PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT: u32 = 16;
/// save low level index of raw branch records
pub const PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT: u32 = 17;
791
// Flag values for `perf_event_attr::branch_sample_type`, one per *_SHIFT
// constant above; see the comment there for each flag's meaning.
pub const PERF_SAMPLE_BRANCH_USER: u64 = 1 << PERF_SAMPLE_BRANCH_USER_SHIFT;
pub const PERF_SAMPLE_BRANCH_KERNEL: u64 = 1 << PERF_SAMPLE_BRANCH_KERNEL_SHIFT;
pub const PERF_SAMPLE_BRANCH_HV: u64 = 1 << PERF_SAMPLE_BRANCH_HV_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY_RETURN: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT;
pub const PERF_SAMPLE_BRANCH_IND_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_ABORT_TX: u64 = 1 << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_IN_TX: u64 = 1 << PERF_SAMPLE_BRANCH_IN_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_TX: u64 = 1 << PERF_SAMPLE_BRANCH_NO_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_COND: u64 = 1 << PERF_SAMPLE_BRANCH_COND_SHIFT;
pub const PERF_SAMPLE_BRANCH_CALL_STACK: u64 = 1 << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT;
pub const PERF_SAMPLE_BRANCH_IND_JUMP: u64 = 1 << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT;
pub const PERF_SAMPLE_BRANCH_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_FLAGS: u64 = 1 << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_CYCLES: u64 = 1 << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT;
pub const PERF_SAMPLE_BRANCH_TYPE_SAVE: u64 = 1 << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT;
pub const PERF_SAMPLE_BRANCH_HW_INDEX: u64 = 1 << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT;
810
// The current state of perf_event_header::misc bits usage:
// ('|' used bit, '-' unused bit)
//
// 012 CDEF
// |||---------||||
//
// Where:
// 0-2 CPUMODE_MASK
//
// C PROC_MAP_PARSE_TIMEOUT
// D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
// E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
// F BUILD_ID_SIZE (on the synthesized HEADER_BUILD_ID event)
/// Mask for the CPU mode bits (bits 0-2) of `header.misc`.
pub const PERF_RECORD_MISC_CPUMODE_MASK: u16 = 0b111;
/// CPU mode: unknown.
pub const PERF_RECORD_MISC_CPUMODE_UNKNOWN: u16 = 0;
/// CPU mode: (host) kernel.
pub const PERF_RECORD_MISC_KERNEL: u16 = 1;
/// CPU mode: (host) user space.
pub const PERF_RECORD_MISC_USER: u16 = 2;
/// CPU mode: hypervisor.
pub const PERF_RECORD_MISC_HYPERVISOR: u16 = 3;
/// CPU mode: guest kernel.
pub const PERF_RECORD_MISC_GUEST_KERNEL: u16 = 4;
/// CPU mode: guest user space.
pub const PERF_RECORD_MISC_GUEST_USER: u16 = 5;
/// Indicates that /proc/PID/maps parsing are truncated by time out.
pub const PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT: u16 = 1 << 12;
// The following PERF_RECORD_MISC_* are used on different
// events, so can reuse the same bit position.
/// Used on PERF_RECORD_MMAP events to indicate mappings which are not executable.
/// Not used on PERF_RECORD_MMAP2 events - those have the full protection bitset.
pub const PERF_RECORD_MISC_MMAP_DATA: u16 = 1 << 13;
/// Used on PERF_RECORD_COMM event.
pub const PERF_RECORD_MISC_COMM_EXEC: u16 = 1 << 13;
/// Used on PERF_RECORD_FORK events (perf internal).
pub const PERF_RECORD_MISC_FORK_EXEC: u16 = 1 << 13;
/// Used on PERF_RECORD_SWITCH* events.
pub const PERF_RECORD_MISC_SWITCH_OUT: u16 = 1 << 13;
/// Indicates that the content of PERF_SAMPLE_IP points to
/// the actual instruction that triggered the event. See also
/// perf_event_attr::precise_ip.
/// Used on PERF_RECORD_SAMPLE of precise events.
pub const PERF_RECORD_MISC_EXACT_IP: u16 = 1 << 14;
/// Indicates that thread was preempted in TASK_RUNNING state.
/// Used on PERF_RECORD_SWITCH* events.
///
/// This helps understanding whether a workload is CPU or IO bound.
pub const PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: u16 = 1 << 14;
/// Indicates that mmap2 event carries build id data.
/// Used on PERF_RECORD_MMAP2 events.
pub const PERF_RECORD_MISC_MMAP_BUILD_ID: u16 = 1 << 14;
/// Used in header.misc of the HEADER_BUILD_ID event. If set, the length
/// of the buildid is specified in the event (no more than 20).
pub const PERF_RECORD_MISC_BUILD_ID_SIZE: u16 = 1 << 15;
860
// These PERF_CONTEXT addresses are inserted into callchain to mark the
// "context" of the call chain addresses that follow. The special frames
// can be differentiated from real addresses by the fact that they are
// >= PERF_CONTEXT_MAX.
//
// The values are small negative i64 constants reinterpreted as u64, so
// they sit at the very top of the 64-bit address space.
/// The callchain frames following this context marker frame are "hypervisor" frames.
pub const PERF_CONTEXT_HV: u64 = -32i64 as u64;
/// The callchain frames following this context marker frame are "kernel" frames.
pub const PERF_CONTEXT_KERNEL: u64 = -128i64 as u64;
/// The callchain frames following this context marker frame are "user" frames.
pub const PERF_CONTEXT_USER: u64 = -512i64 as u64;
/// The callchain frames following this context marker frame are "guest" frames.
pub const PERF_CONTEXT_GUEST: u64 = -2048i64 as u64;
/// The callchain frames following this context marker frame are "guest kernel" frames.
pub const PERF_CONTEXT_GUEST_KERNEL: u64 = -2176i64 as u64;
/// The callchain frames following this context marker frame are "guest user" frames.
pub const PERF_CONTEXT_GUEST_USER: u64 = -2560i64 as u64;
/// Any callchain frames which are >= PERF_CONTEXT_MAX are not real addresses;
/// instead, they mark the context of the subsequent callchain frames.
pub const PERF_CONTEXT_MAX: u64 = -4095i64 as u64;