1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
// Constants mirrored from the Linux UAPI header `perf_event.h`.

// `PERF_ATTR_SIZE_VER*`: `sizeof(struct perf_event_attr)` at each published
// revision of the ABI; the `size` field of the attr struct is set to one of
// these so the kernel knows which revision userspace was built against.

/// sizeof first published struct
pub const PERF_ATTR_SIZE_VER0: u32 = 64;
/// add: config2
pub const PERF_ATTR_SIZE_VER1: u32 = 72;
/// add: branch_sample_type
pub const PERF_ATTR_SIZE_VER2: u32 = 80;
/// add: sample_regs_user, sample_stack_user, clockid
pub const PERF_ATTR_SIZE_VER3: u32 = 96;
/// add: sample_regs_intr
pub const PERF_ATTR_SIZE_VER4: u32 = 104;
/// add: aux_watermark
pub const PERF_ATTR_SIZE_VER5: u32 = 112;
/// add: aux_sample_size
pub const PERF_ATTR_SIZE_VER6: u32 = 120;
/// add: sig_data
pub const PERF_ATTR_SIZE_VER7: u32 = 128;

// `ATTR_FLAG_BIT_*`: single-bit flags corresponding to the one-bit bitfield
// members of `struct perf_event_attr` (disabled, inherit, pinned, ...), in
// the header's declaration order starting at bit 0.

/// off by default
pub const ATTR_FLAG_BIT_DISABLED: u64 = 1 << 0;
/// children inherit it
pub const ATTR_FLAG_BIT_INHERIT: u64 = 1 << 1;
/// must always be on PMU
pub const ATTR_FLAG_BIT_PINNED: u64 = 1 << 2;
/// only group on PMU
pub const ATTR_FLAG_BIT_EXCLUSIVE: u64 = 1 << 3;
/// don't count user
pub const ATTR_FLAG_BIT_EXCLUDE_USER: u64 = 1 << 4;
/// don't count kernel
pub const ATTR_FLAG_BIT_EXCLUDE_KERNEL: u64 = 1 << 5;
/// don't count hypervisor
pub const ATTR_FLAG_BIT_EXCLUDE_HV: u64 = 1 << 6;
/// don't count when idle
pub const ATTR_FLAG_BIT_EXCLUDE_IDLE: u64 = 1 << 7;
/// include mmap data
pub const ATTR_FLAG_BIT_MMAP: u64 = 1 << 8;
/// include comm data
pub const ATTR_FLAG_BIT_COMM: u64 = 1 << 9;
/// use freq, not period
pub const ATTR_FLAG_BIT_FREQ: u64 = 1 << 10;
/// per task counts
pub const ATTR_FLAG_BIT_INHERIT_STAT: u64 = 1 << 11;
/// next exec enables
pub const ATTR_FLAG_BIT_ENABLE_ON_EXEC: u64 = 1 << 12;
/// trace fork/exit
pub const ATTR_FLAG_BIT_TASK: u64 = 1 << 13;
/// wakeup_watermark
pub const ATTR_FLAG_BIT_WATERMARK: u64 = 1 << 14;
/// skid constraint
/// Specifies how precise the instruction address should be.
///
/// From the perf-list man page:
///
/// > 0 - SAMPLE_IP can have arbitrary skid
/// > 1 - SAMPLE_IP must have constant skid
/// > 2 - SAMPLE_IP requested to have 0 skid
/// > 3 - SAMPLE_IP must have 0 skid, or uses randomization to avoid
/// >     sample shadowing effects.
/// >
/// > For Intel systems precise event sampling is implemented with PEBS
/// > which supports up to precise-level 2, and precise level 3 for
/// > some special cases.
/// >
/// > On AMD systems it is implemented using IBS (up to precise-level
/// > 2). The precise modifier works with event types 0x76 (cpu-cycles,
/// > CPU clocks not halted) and 0xC1 (micro-ops retired). Both events
/// > map to IBS execution sampling (IBS op) with the IBS Op Counter
/// > Control bit (IbsOpCntCtl) set respectively (see AMD64
/// > Architecture Programmer’s Manual Volume 2: System Programming,
/// > 13.3 Instruction-Based Sampling). Examples to use IBS:
/// >
/// > perf record -a -e cpu-cycles:p ...    # use ibs op counting cycles
/// > perf record -a -e r076:p ...          # same as -e cpu-cycles:p
/// > perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
///
/// From Brendan Gregg's perf page:
///
/// > perf can use precise sampling by adding a :p modifier to the PMC event
/// > name, eg, "-e instructions:p". The more p's, the more accurate.
///
/// Unlike the `ATTR_FLAG_BIT_*` constants above, this is a MASK covering the
/// two-bit `precise_ip` field at bits 15-16 (field values 0-3), not a flag.
pub const ATTR_FLAG_BITMASK_PRECISE_IP: u64 = (1 << 15) | (1 << 16);
// The two-bit `precise_ip` field occupies bits 15-16, so single-bit flags
// resume at bit 17.

/// non-exec mmap data
pub const ATTR_FLAG_BIT_MMAP_DATA: u64 = 1 << 17;
/// sample_type all events
pub const ATTR_FLAG_BIT_SAMPLE_ID_ALL: u64 = 1 << 18;
/// don't count in host
pub const ATTR_FLAG_BIT_EXCLUDE_HOST: u64 = 1 << 19;
/// don't count in guest
pub const ATTR_FLAG_BIT_EXCLUDE_GUEST: u64 = 1 << 20;
/// exclude kernel callchains
pub const ATTR_FLAG_BIT_EXCLUDE_CALLCHAIN_KERNEL: u64 = 1 << 21;
/// exclude user callchains
pub const ATTR_FLAG_BIT_EXCLUDE_CALLCHAIN_USER: u64 = 1 << 22;
/// include mmap with inode data
pub const ATTR_FLAG_BIT_MMAP2: u64 = 1 << 23;
/// flag comm events that are due to exec
pub const ATTR_FLAG_BIT_COMM_EXEC: u64 = 1 << 24;
/// use @clockid for time fields
pub const ATTR_FLAG_BIT_USE_CLOCKID: u64 = 1 << 25;
/// context switch data
pub const ATTR_FLAG_BIT_CONTEXT_SWITCH: u64 = 1 << 26;
/// Write ring buffer from end to beginning
pub const ATTR_FLAG_BIT_WRITE_BACKWARD: u64 = 1 << 27;
/// include namespaces data
pub const ATTR_FLAG_BIT_NAMESPACES: u64 = 1 << 28;
/// include ksymbol events
pub const ATTR_FLAG_BIT_KSYMBOL: u64 = 1 << 29;
/// include bpf events
pub const ATTR_FLAG_BIT_BPF_EVENT: u64 = 1 << 30;
/// generate AUX records instead of events
pub const ATTR_FLAG_BIT_AUX_OUTPUT: u64 = 1 << 31;
/// include cgroup events
pub const ATTR_FLAG_BIT_CGROUP: u64 = 1 << 32;
/// include text poke events
pub const ATTR_FLAG_BIT_TEXT_POKE: u64 = 1 << 33;
/// use build id in mmap2 events
pub const ATTR_FLAG_BIT_BUILD_ID: u64 = 1 << 34;
/// children only inherit if cloned with CLONE_THREAD
pub const ATTR_FLAG_BIT_INHERIT_THREAD: u64 = 1 << 35;
/// event is removed from task on exec
pub const ATTR_FLAG_BIT_REMOVE_ON_EXEC: u64 = 1 << 36;
/// send synchronous SIGTRAP on event
pub const ATTR_FLAG_BIT_SIGTRAP: u64 = 1 << 37;

/*
 * If perf_event_attr.sample_id_all is set then all event types will
 * have the sample_type selected fields related to where/when
 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
 * just after the perf_event_header and the fields already present for
 * the existing fields, i.e. at the end of the payload. That way a newer
 * perf.data file will be supported by older perf tools, with these new
 * optional fields being ignored.
 *
 * struct sample_id {
 * 	{ u32			pid, tid; } && PERF_SAMPLE_TID
 * 	{ u64			time;     } && PERF_SAMPLE_TIME
 * 	{ u64			id;       } && PERF_SAMPLE_ID
 * 	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 * 	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
 * } && perf_event_attr::sample_id_all
 *
 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
 * relative to header.size.
 */

/*
 * The MMAP events record the PROT_EXEC mappings so that we can
 * correlate userspace IPs to code. They have the following structure:
 *
 * struct {
 *	struct perf_event_header	header;
 *
 *	u32				pid, tid;
 *	u64				addr;
 *	u64				len;
 *	u64				pgoff;
 *	char				filename[];
 * 	struct sample_id		sample_id;
 * };
 */
/// Record of a PROT_EXEC memory mapping (layout in the comment above).
pub const PERF_RECORD_MMAP: u32 = 1;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u64				id;
 *	u64				lost;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record indicating events were lost; carries the lost count (see above).
pub const PERF_RECORD_LOST: u32 = 2;

/*
 * struct {
 *	struct perf_event_header	header;
 *
 *	u32				pid, tid;
 *	char				comm[];
 * 	struct sample_id		sample_id;
 * };
 */
/// Record carrying a process's command name (comm) for a pid/tid.
pub const PERF_RECORD_COMM: u32 = 3;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u32				pid, ppid;
 *	u32				tid, ptid;
 *	u64				time;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record of a task exit (pid/ppid, tid/ptid, time).
pub const PERF_RECORD_EXIT: u32 = 4;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u64				time;
 *	u64				id;
 *	u64				stream_id;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record emitted when sampling for this event is throttled.
pub const PERF_RECORD_THROTTLE: u32 = 5;
/// Record emitted when sampling for this event is unthrottled again.
pub const PERF_RECORD_UNTHROTTLE: u32 = 6;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u32				pid, ppid;
 *	u32				tid, ptid;
 *	u64				time;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record of a task fork (pid/ppid, tid/ptid, time).
pub const PERF_RECORD_FORK: u32 = 7;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u32				pid, tid;
 *
 *	struct read_format		values;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record carrying a `read_format` value block for a pid/tid.
pub const PERF_RECORD_READ: u32 = 8;

/*
 * struct {
 *	struct perf_event_header	header;
 *
 *	#
 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
 *	# is fixed relative to header.
 *	#
 *
 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
 *	{ u64			ip;	  } && PERF_SAMPLE_IP
 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
 *	{ u64			time;     } && PERF_SAMPLE_TIME
 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
 *	{ u64			id;	  } && PERF_SAMPLE_ID
 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 *
 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
 *
 *  #
 *  # The callchain includes both regular addresses, and special "context"
 *  # frames. The context frames are >= PERF_CONTEXT_MAX and annotate the
 *  # subsequent addresses as user / kernel / hypervisor / guest addresses.
 *  #
 *
 *	{ u64			nr,
 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 *
 *	#
 *	# The RAW record below is opaque data wrt the ABI
 *	#
 *	# That is, the ABI doesn't make any promises wrt to
 *	# the stability of its content, it may vary depending
 *	# on event, hardware, kernel version and phase of
 *	# the moon.
 *	#
 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
 *	#
 *
 *	{ u32			size;
 *	  char                  data[size];}&& PERF_SAMPLE_RAW
 *
 *	{ u64                   nr;
 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
 *        { u64 from, to, flags } lbr[nr];
 *      } && PERF_SAMPLE_BRANCH_STACK
 *
 * 	{ u64			abi; # enum perf_sample_regs_abi
 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
 *
 * 	{ u64			size;
 * 	  char			data[size];
 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
 *
 *	{ union perf_sample_weight
 *	 {
 *		u64		full; && PERF_SAMPLE_WEIGHT
 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
 *		struct {
 *			u32	var1_dw;
 *			u16	var2_w;
 *			u16	var3_w;
 *		} && PERF_SAMPLE_WEIGHT_STRUCT
 *	#elif defined(__BIG_ENDIAN_BITFIELD)
 *		struct {
 *			u16	var3_w;
 *			u16	var2_w;
 *			u32	var1_dw;
 *		} && PERF_SAMPLE_WEIGHT_STRUCT
 *	#endif
 *	 }
 *	}
 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
 *	{ u64			abi; # enum perf_sample_regs_abi
 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
 *	{ u64			size;
 *	  char			data[size]; } && PERF_SAMPLE_AUX
 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
 * };
 */
/// A sample record; which fields it contains is selected by the event's
/// `sample_type` bits (full layout in the comment above).
pub const PERF_RECORD_SAMPLE: u32 = 9;

/*
 * The MMAP2 records are an augmented version of MMAP, they add
 * maj, min, ino numbers to be used to uniquely identify each mapping
 *
 * struct {
 *	struct perf_event_header	header;
 *
 *	u32				pid, tid;
 *	u64				addr;
 *	u64				len;
 *	u64				pgoff;
 *	union {
 *		struct {
 *			u32		maj;
 *			u32		min;
 *			u64		ino;
 *			u64		ino_generation;
 *		};
 *		struct {
 *			u8		build_id_size;
 *			u8		__reserved_1;
 *			u16		__reserved_2;
 *			u8		build_id[20];
 *		};
 *	};
 *	u32				prot, flags;
 *	char				filename[];
 * 	struct sample_id		sample_id;
 * };
 */
/// Augmented mmap record with maj/min/ino (or build-id) plus prot/flags.
pub const PERF_RECORD_MMAP2: u32 = 10;

/*
 * Records that new data landed in the AUX buffer part.
 *
 * struct {
 * 	struct perf_event_header	header;
 *
 * 	u64				aux_offset;
 * 	u64				aux_size;
 *	u64				flags;
 * 	struct sample_id		sample_id;
 * };
 */
/// Record that new data landed in the AUX buffer (offset, size, flags).
pub const PERF_RECORD_AUX: u32 = 11;

/*
 * Indicates that instruction trace has started
 *
 * struct {
 *	struct perf_event_header	header;
 *	u32				pid;
 *	u32				tid;
 *	struct sample_id		sample_id;
 * };
 */
/// Record that instruction tracing has started for a pid/tid.
pub const PERF_RECORD_ITRACE_START: u32 = 12;

/*
 * Records the dropped/lost sample number.
 *
 * struct {
 *	struct perf_event_header	header;
 *
 *	u64				lost;
 *	struct sample_id		sample_id;
 * };
 */
/// Record of the number of dropped/lost samples.
pub const PERF_RECORD_LOST_SAMPLES: u32 = 13;

/*
 * Records a context switch in or out (flagged by
 * PERF_RECORD_MISC_SWITCH_OUT). See also
 * PERF_RECORD_SWITCH_CPU_WIDE.
 *
 * struct {
 *	struct perf_event_header	header;
 *	struct sample_id		sample_id;
 * };
 */
/// Context-switch record; in/out is flagged via `PERF_RECORD_MISC_SWITCH_OUT`.
pub const PERF_RECORD_SWITCH: u32 = 14;

/*
 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
 * next_prev_tid that are the next (switching out) or previous
 * (switching in) pid/tid.
 *
 * struct {
 *	struct perf_event_header	header;
 *	u32				next_prev_pid;
 *	u32				next_prev_tid;
 *	struct sample_id		sample_id;
 * };
 */
/// CPU-wide context-switch record, additionally carrying next/prev pid and tid.
pub const PERF_RECORD_SWITCH_CPU_WIDE: u32 = 15;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u32				pid;
 *	u32				tid;
 *	u64				nr_namespaces;
 *	{ u64				dev, inode; } [nr_namespaces];
 *	struct sample_id		sample_id;
 * };
 */
/// Record of a task's namespaces as `nr_namespaces` (dev, inode) pairs.
pub const PERF_RECORD_NAMESPACES: u32 = 16;

/*
 * Record ksymbol register/unregister events:
 *
 * struct {
 *	struct perf_event_header	header;
 *	u64				addr;
 *	u32				len;
 *	u16				ksym_type;
 *	u16				flags;
 *	char				name[];
 *	struct sample_id		sample_id;
 * };
 */
/// Ksymbol register/unregister record (addr, len, type, flags, name).
pub const PERF_RECORD_KSYMBOL: u32 = 17;

/*
 * Record bpf events:
 *  enum perf_bpf_event_type {
 *	PERF_BPF_EVENT_UNKNOWN		= 0,
 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
 *  };
 *
 * struct {
 *	struct perf_event_header	header;
 *	u16				type;
 *	u16				flags;
 *	u32				id;
 *	u8				tag[BPF_TAG_SIZE];
 *	struct sample_id		sample_id;
 * };
 */
/// BPF program load/unload record (see `perf_bpf_event_type` above).
pub const PERF_RECORD_BPF_EVENT: u32 = 18;

/*
 * struct {
 *	struct perf_event_header	header;
 *	u64				id;
 *	char				path[];
 *	struct sample_id		sample_id;
 * };
 */
/// Cgroup record carrying an id and path.
pub const PERF_RECORD_CGROUP: u32 = 19;

/*
 * Records changes to kernel text i.e. self-modified code. 'old_len' is
 * the number of old bytes, 'new_len' is the number of new bytes. Either
 * 'old_len' or 'new_len' may be zero to indicate, for example, the
 * addition or removal of a trampoline. 'bytes' contains the old bytes
 * followed immediately by the new bytes.
 *
 * struct {
 *	struct perf_event_header	header;
 *	u64				addr;
 *	u16				old_len;
 *	u16				new_len;
 *	u8				bytes[];
 *	struct sample_id		sample_id;
 * };
 */
/// Kernel text modification (self-modifying code) record: old and new bytes.
pub const PERF_RECORD_TEXT_POKE: u32 = 20;

/*
 * Data written to the AUX area by hardware due to aux_output, may need
 * to be matched to the event by an architecture-specific hardware ID.
 * This records the hardware ID, but requires sample_id to provide the
 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
 * records from multiple events.
 *
 * struct {
 *	struct perf_event_header	header;
 *	u64				hw_id;
 *	struct sample_id		sample_id;
 * };
 */
/// Architecture-specific hardware ID used to match aux_output data to its event.
pub const PERF_RECORD_AUX_OUTPUT_HW_ID: u32 = 21;

// Record types >= PERF_RECORD_USER_TYPE_START are not kernel records; they
// are synthesized by the perf tool itself (e.g. inside perf.data files).
pub const PERF_RECORD_USER_TYPE_START: u32 = 64;
pub const PERF_RECORD_HEADER_ATTR: u32 = 64;
pub const PERF_RECORD_HEADER_EVENT_TYPE: u32 = 65; /* deprecated */
pub const PERF_RECORD_HEADER_TRACING_DATA: u32 = 66;
pub const PERF_RECORD_HEADER_BUILD_ID: u32 = 67;
pub const PERF_RECORD_FINISHED_ROUND: u32 = 68;
pub const PERF_RECORD_ID_INDEX: u32 = 69;
pub const PERF_RECORD_AUXTRACE_INFO: u32 = 70;
pub const PERF_RECORD_AUXTRACE: u32 = 71;
pub const PERF_RECORD_AUXTRACE_ERROR: u32 = 72;
pub const PERF_RECORD_THREAD_MAP: u32 = 73;
pub const PERF_RECORD_CPU_MAP: u32 = 74;
pub const PERF_RECORD_STAT_CONFIG: u32 = 75;
pub const PERF_RECORD_STAT: u32 = 76;
pub const PERF_RECORD_STAT_ROUND: u32 = 77;
pub const PERF_RECORD_EVENT_UPDATE: u32 = 78;
pub const PERF_RECORD_TIME_CONV: u32 = 79;
pub const PERF_RECORD_HEADER_FEATURE: u32 = 80;
pub const PERF_RECORD_COMPRESSED: u32 = 81;

// `PERF_SAMPLE_*`: bits for `perf_event_attr::sample_type`, selecting which
// fields are recorded in each PERF_RECORD_SAMPLE (and in sample_id trailers).
pub const PERF_SAMPLE_IP: u64 = 1 << 0;
pub const PERF_SAMPLE_TID: u64 = 1 << 1;
pub const PERF_SAMPLE_TIME: u64 = 1 << 2;
pub const PERF_SAMPLE_ADDR: u64 = 1 << 3;
pub const PERF_SAMPLE_READ: u64 = 1 << 4;
pub const PERF_SAMPLE_CALLCHAIN: u64 = 1 << 5;
pub const PERF_SAMPLE_ID: u64 = 1 << 6;
pub const PERF_SAMPLE_CPU: u64 = 1 << 7;
pub const PERF_SAMPLE_PERIOD: u64 = 1 << 8;
pub const PERF_SAMPLE_STREAM_ID: u64 = 1 << 9;
pub const PERF_SAMPLE_RAW: u64 = 1 << 10;
pub const PERF_SAMPLE_BRANCH_STACK: u64 = 1 << 11;
pub const PERF_SAMPLE_REGS_USER: u64 = 1 << 12;
pub const PERF_SAMPLE_STACK_USER: u64 = 1 << 13;
pub const PERF_SAMPLE_WEIGHT: u64 = 1 << 14;
pub const PERF_SAMPLE_DATA_SRC: u64 = 1 << 15;
pub const PERF_SAMPLE_IDENTIFIER: u64 = 1 << 16;
pub const PERF_SAMPLE_TRANSACTION: u64 = 1 << 17;
pub const PERF_SAMPLE_REGS_INTR: u64 = 1 << 18;
pub const PERF_SAMPLE_PHYS_ADDR: u64 = 1 << 19;
pub const PERF_SAMPLE_AUX: u64 = 1 << 20;
pub const PERF_SAMPLE_CGROUP: u64 = 1 << 21;
pub const PERF_SAMPLE_DATA_PAGE_SIZE: u64 = 1 << 22;
pub const PERF_SAMPLE_CODE_PAGE_SIZE: u64 = 1 << 23;
pub const PERF_SAMPLE_WEIGHT_STRUCT: u64 = 1 << 24;

// x86 perf register indices (from the arch's `perf_regs.h`). These index
// bit positions in `sample_regs_user`/`sample_regs_intr` and positions in
// the resulting `regs[]` array.
pub const PERF_REG_X86_AX: u64 = 0;
pub const PERF_REG_X86_BX: u64 = 1;
pub const PERF_REG_X86_CX: u64 = 2;
pub const PERF_REG_X86_DX: u64 = 3;
pub const PERF_REG_X86_SI: u64 = 4;
pub const PERF_REG_X86_DI: u64 = 5;
pub const PERF_REG_X86_BP: u64 = 6;
pub const PERF_REG_X86_SP: u64 = 7;
pub const PERF_REG_X86_IP: u64 = 8;
pub const PERF_REG_X86_FLAGS: u64 = 9;
pub const PERF_REG_X86_CS: u64 = 10;
pub const PERF_REG_X86_SS: u64 = 11;
pub const PERF_REG_X86_DS: u64 = 12;
pub const PERF_REG_X86_ES: u64 = 13;
pub const PERF_REG_X86_FS: u64 = 14;
pub const PERF_REG_X86_GS: u64 = 15;
// R8-R15 exist only in 64-bit mode, hence the two different MAX values below.
pub const PERF_REG_X86_R8: u64 = 16;
pub const PERF_REG_X86_R9: u64 = 17;
pub const PERF_REG_X86_R10: u64 = 18;
pub const PERF_REG_X86_R11: u64 = 19;
pub const PERF_REG_X86_R12: u64 = 20;
pub const PERF_REG_X86_R13: u64 = 21;
pub const PERF_REG_X86_R14: u64 = 22;
pub const PERF_REG_X86_R15: u64 = 23;

pub const PERF_REG_X86_32_MAX: u64 = PERF_REG_X86_GS + 1;
pub const PERF_REG_X86_64_MAX: u64 = PERF_REG_X86_R15 + 1;

// 32-bit ARM perf register indices (from the arch's `perf_regs.h`).
pub const PERF_REG_ARM_R0: u64 = 0;
pub const PERF_REG_ARM_R1: u64 = 1;
pub const PERF_REG_ARM_R2: u64 = 2;
pub const PERF_REG_ARM_R3: u64 = 3;
pub const PERF_REG_ARM_R4: u64 = 4;
pub const PERF_REG_ARM_R5: u64 = 5;
pub const PERF_REG_ARM_R6: u64 = 6;
pub const PERF_REG_ARM_R7: u64 = 7;
pub const PERF_REG_ARM_R8: u64 = 8;
pub const PERF_REG_ARM_R9: u64 = 9;
pub const PERF_REG_ARM_R10: u64 = 10;
pub const PERF_REG_ARM_FP: u64 = 11;
pub const PERF_REG_ARM_IP: u64 = 12;
pub const PERF_REG_ARM_SP: u64 = 13;
pub const PERF_REG_ARM_LR: u64 = 14;
pub const PERF_REG_ARM_PC: u64 = 15;
pub const PERF_REG_ARM_MAX: u64 = 16;

// MIPS perf register indices (from the arch's `perf_regs.h`).
pub const PERF_REG_MIPS_PC: u64 = 0;
pub const PERF_REG_MIPS_R1: u64 = 1;
pub const PERF_REG_MIPS_R2: u64 = 2;
pub const PERF_REG_MIPS_R3: u64 = 3;
pub const PERF_REG_MIPS_R4: u64 = 4;
pub const PERF_REG_MIPS_R5: u64 = 5;
pub const PERF_REG_MIPS_R6: u64 = 6;
pub const PERF_REG_MIPS_R7: u64 = 7;
pub const PERF_REG_MIPS_R8: u64 = 8;
pub const PERF_REG_MIPS_R9: u64 = 9;
pub const PERF_REG_MIPS_R10: u64 = 10;
pub const PERF_REG_MIPS_R11: u64 = 11;
pub const PERF_REG_MIPS_R12: u64 = 12;
pub const PERF_REG_MIPS_R13: u64 = 13;
pub const PERF_REG_MIPS_R14: u64 = 14;
pub const PERF_REG_MIPS_R15: u64 = 15;
pub const PERF_REG_MIPS_R16: u64 = 16;
pub const PERF_REG_MIPS_R17: u64 = 17;
pub const PERF_REG_MIPS_R18: u64 = 18;
pub const PERF_REG_MIPS_R19: u64 = 19;
pub const PERF_REG_MIPS_R20: u64 = 20;
pub const PERF_REG_MIPS_R21: u64 = 21;
pub const PERF_REG_MIPS_R22: u64 = 22;
pub const PERF_REG_MIPS_R23: u64 = 23;
pub const PERF_REG_MIPS_R24: u64 = 24;
pub const PERF_REG_MIPS_R25: u64 = 25;
// NOTE: r26/r27 have no entries (matching the kernel's enum), so the
// numbering is compressed here: R28-R31 map to indices 26-29.
pub const PERF_REG_MIPS_R28: u64 = 26;
pub const PERF_REG_MIPS_R29: u64 = 27;
pub const PERF_REG_MIPS_R30: u64 = 28;
pub const PERF_REG_MIPS_R31: u64 = 29;
pub const PERF_REG_MIPS_MAX: u64 = PERF_REG_MIPS_R31 + 1;

// AArch64 perf register indices (from the arch's `perf_regs.h`):
// x0-x29, then LR (x30), SP, PC.
pub const PERF_REG_ARM64_X0: u64 = 0;
pub const PERF_REG_ARM64_X1: u64 = 1;
pub const PERF_REG_ARM64_X2: u64 = 2;
pub const PERF_REG_ARM64_X3: u64 = 3;
pub const PERF_REG_ARM64_X4: u64 = 4;
pub const PERF_REG_ARM64_X5: u64 = 5;
pub const PERF_REG_ARM64_X6: u64 = 6;
pub const PERF_REG_ARM64_X7: u64 = 7;
pub const PERF_REG_ARM64_X8: u64 = 8;
pub const PERF_REG_ARM64_X9: u64 = 9;
pub const PERF_REG_ARM64_X10: u64 = 10;
pub const PERF_REG_ARM64_X11: u64 = 11;
pub const PERF_REG_ARM64_X12: u64 = 12;
pub const PERF_REG_ARM64_X13: u64 = 13;
pub const PERF_REG_ARM64_X14: u64 = 14;
pub const PERF_REG_ARM64_X15: u64 = 15;
pub const PERF_REG_ARM64_X16: u64 = 16;
pub const PERF_REG_ARM64_X17: u64 = 17;
pub const PERF_REG_ARM64_X18: u64 = 18;
pub const PERF_REG_ARM64_X19: u64 = 19;
pub const PERF_REG_ARM64_X20: u64 = 20;
pub const PERF_REG_ARM64_X21: u64 = 21;
pub const PERF_REG_ARM64_X22: u64 = 22;
pub const PERF_REG_ARM64_X23: u64 = 23;
pub const PERF_REG_ARM64_X24: u64 = 24;
pub const PERF_REG_ARM64_X25: u64 = 25;
pub const PERF_REG_ARM64_X26: u64 = 26;
pub const PERF_REG_ARM64_X27: u64 = 27;
pub const PERF_REG_ARM64_X28: u64 = 28;
pub const PERF_REG_ARM64_X29: u64 = 29;
pub const PERF_REG_ARM64_LR: u64 = 30;
pub const PERF_REG_ARM64_SP: u64 = 31;
pub const PERF_REG_ARM64_PC: u64 = 32;
pub const PERF_REG_ARM64_MAX: u64 = 33;

// `enum perf_sample_regs_abi`: the `abi` word preceding `regs[]` in
// PERF_SAMPLE_REGS_USER / PERF_SAMPLE_REGS_INTR payloads.

/// Register sampling was not available for this sample; no registers follow.
pub const PERF_SAMPLE_REGS_ABI_NONE: u64 = 0;
/// The sampled registers follow the 32-bit ABI layout.
pub const PERF_SAMPLE_REGS_ABI_32: u64 = 1;
/// The sampled registers follow the 64-bit ABI layout.
pub const PERF_SAMPLE_REGS_ABI_64: u64 = 2;

// `read_format` bits: select which fields appear in the value(s) returned
// when reading a perf event fd (see `struct read_format` in perf_event.h).
pub const PERF_FORMAT_TOTAL_TIME_ENABLED: u64 = 1 << 0;
pub const PERF_FORMAT_TOTAL_TIME_RUNNING: u64 = 1 << 1;
pub const PERF_FORMAT_ID: u64 = 1 << 2;
pub const PERF_FORMAT_GROUP: u64 = 1 << 3;
/// Include the number of lost samples (newer kernels; see perf_event_open(2)).
pub const PERF_FORMAT_LOST: u64 = 1 << 4;

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
// Bit positions within `branch_sample_type`; the corresponding one-bit masks
// are derived from these.

/// user branches
pub const PERF_SAMPLE_BRANCH_USER_SHIFT: u32 = 0;
/// kernel branches
pub const PERF_SAMPLE_BRANCH_KERNEL_SHIFT: u32 = 1;
/// hypervisor branches
pub const PERF_SAMPLE_BRANCH_HV_SHIFT: u32 = 2;
/// any branch types
pub const PERF_SAMPLE_BRANCH_ANY_SHIFT: u32 = 3;
/// any call branch
pub const PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT: u32 = 4;
/// any return branch
pub const PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT: u32 = 5;
/// indirect calls
pub const PERF_SAMPLE_BRANCH_IND_CALL_SHIFT: u32 = 6;
/// transaction aborts
pub const PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT: u32 = 7;
/// in transaction
pub const PERF_SAMPLE_BRANCH_IN_TX_SHIFT: u32 = 8;
/// not in transaction
pub const PERF_SAMPLE_BRANCH_NO_TX_SHIFT: u32 = 9;
/// conditional branches
pub const PERF_SAMPLE_BRANCH_COND_SHIFT: u32 = 10;
/// call/ret stack
pub const PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT: u32 = 11;
/// indirect jumps
pub const PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT: u32 = 12;
/// direct call
pub const PERF_SAMPLE_BRANCH_CALL_SHIFT: u32 = 13;
/// no flags
pub const PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT: u32 = 14;
/// no cycles
pub const PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT: u32 = 15;
/// save branch type
pub const PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT: u32 = 16;
/// save low level index of raw branch records
pub const PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT: u32 = 17;

// Mask forms of the shift constants above, for direct use in
// `perf_event_attr::branch_sample_type`.
pub const PERF_SAMPLE_BRANCH_USER: u64 = 1 << PERF_SAMPLE_BRANCH_USER_SHIFT;
pub const PERF_SAMPLE_BRANCH_KERNEL: u64 = 1 << PERF_SAMPLE_BRANCH_KERNEL_SHIFT;
pub const PERF_SAMPLE_BRANCH_HV: u64 = 1 << PERF_SAMPLE_BRANCH_HV_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_ANY_RETURN: u64 = 1 << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT;
pub const PERF_SAMPLE_BRANCH_IND_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_ABORT_TX: u64 = 1 << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_IN_TX: u64 = 1 << PERF_SAMPLE_BRANCH_IN_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_TX: u64 = 1 << PERF_SAMPLE_BRANCH_NO_TX_SHIFT;
pub const PERF_SAMPLE_BRANCH_COND: u64 = 1 << PERF_SAMPLE_BRANCH_COND_SHIFT;
pub const PERF_SAMPLE_BRANCH_CALL_STACK: u64 = 1 << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT;
pub const PERF_SAMPLE_BRANCH_IND_JUMP: u64 = 1 << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT;
pub const PERF_SAMPLE_BRANCH_CALL: u64 = 1 << PERF_SAMPLE_BRANCH_CALL_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_FLAGS: u64 = 1 << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT;
pub const PERF_SAMPLE_BRANCH_NO_CYCLES: u64 = 1 << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT;
pub const PERF_SAMPLE_BRANCH_TYPE_SAVE: u64 = 1 << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT;
pub const PERF_SAMPLE_BRANCH_HW_INDEX: u64 = 1 << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT;

// The current state of perf_event_header::misc bits usage:
// ('|' used bit, '-' unused bit)
//
//  012         CDEF
//  |||---------||||
//
//  Where:
//    0-2     CPUMODE_MASK
//
//    C       PROC_MAP_PARSE_TIMEOUT
//    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
//    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
//    F       (reserved)
pub const PERF_RECORD_MISC_CPUMODE_MASK: u16 = 0b111;
// The constants 0-5 below are *values* of the 3-bit CPUMODE field
// (misc & PERF_RECORD_MISC_CPUMODE_MASK), not independent bit flags.
pub const PERF_RECORD_MISC_CPUMODE_UNKNOWN: u16 = 0;
pub const PERF_RECORD_MISC_KERNEL: u16 = 1;
pub const PERF_RECORD_MISC_USER: u16 = 2;
pub const PERF_RECORD_MISC_HYPERVISOR: u16 = 3;
pub const PERF_RECORD_MISC_GUEST_KERNEL: u16 = 4;
pub const PERF_RECORD_MISC_GUEST_USER: u16 = 5;
/// Indicates that /proc/PID/maps parsing are truncated by time out.
pub const PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT: u16 = 1 << 12;
// The following PERF_RECORD_MISC_* are used on different
// events, so can reuse the same bit position.
/// Used on PERF_RECORD_MMAP events to indicate mappings which are not executable.
/// Not used on PERF_RECORD_MMAP2 events - those have the full protection bitset.
pub const PERF_RECORD_MISC_MMAP_DATA: u16 = 1 << 13;
/// Used on PERF_RECORD_COMM event.
pub const PERF_RECORD_MISC_COMM_EXEC: u16 = 1 << 13;
/// Used on PERF_RECORD_FORK events (perf internal).
pub const PERF_RECORD_MISC_FORK_EXEC: u16 = 1 << 13;
/// Used on PERF_RECORD_SWITCH* events.
pub const PERF_RECORD_MISC_SWITCH_OUT: u16 = 1 << 13;
/// Indicates that the content of PERF_SAMPLE_IP points to
/// the actual instruction that triggered the event. See also
/// perf_event_attr::precise_ip.
/// Used on PERF_RECORD_SAMPLE of precise events.
pub const PERF_RECORD_MISC_EXACT_IP: u16 = 1 << 14;
/// Indicates that thread was preempted in TASK_RUNNING state.
/// Used on PERF_RECORD_SWITCH* events.
pub const PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: u16 = 1 << 14;
/// Indicates that mmap2 event carries build id data.
/// Used on PERF_RECORD_MMAP2 events.
pub const PERF_RECORD_MISC_MMAP_BUILD_ID: u16 = 1 << 14;
/// Used in header.misc of the HEADER_BUILD_ID event. If set, the length
/// of the buildid is specified in the event (no more than 20).
pub const PERF_RECORD_MISC_BUILD_ID_SIZE: u16 = 1 << 15;

// These PERF_CONTEXT addresses are inserted into callchain to mark the
// "context" of the call chain addresses that follow. The special frames
// can be differentiated from real addresses by the fact that they are
// >= PERF_CONTEXT_MAX.
//
// The values are the u64 (two's-complement) encodings of the kernel's
// ((__u64)-32), ((__u64)-128), etc., i.e. values just below u64::MAX.
/// The callchain frames following this context marker frame are "hypervisor" frames.
pub const PERF_CONTEXT_HV: u64 = -32i64 as u64;
/// The callchain frames following this context marker frame are "kernel" frames.
pub const PERF_CONTEXT_KERNEL: u64 = -128i64 as u64;
/// The callchain frames following this context marker frame are "user" frames.
pub const PERF_CONTEXT_USER: u64 = -512i64 as u64;
/// The callchain frames following this context marker frame are "guest" frames.
pub const PERF_CONTEXT_GUEST: u64 = -2048i64 as u64;
/// The callchain frames following this context marker frame are "guest kernel" frames.
pub const PERF_CONTEXT_GUEST_KERNEL: u64 = -2176i64 as u64;
/// The callchain frames following this context marker frame are "guest user" frames.
pub const PERF_CONTEXT_GUEST_USER: u64 = -2560i64 as u64;
/// Any callchain frames which are >= PERF_CONTEXT_MAX are not real addresses;
/// instead, they mark the context of the subsequent callchain frames.
pub const PERF_CONTEXT_MAX: u64 = -4095i64 as u64;