pub struct Builder<'a> { /* private fields */ }

Expand description
A builder for Counters.
There are dozens of parameters that influence a Counter’s behavior.
Builder lets you construct a Counter by specifying only those parameters
for which you don’t want the default value.
A freshly built Counter is disabled. To begin counting events, you must
call enable on the Counter or the Group to which it belongs.
For example, if you want a Counter for instructions retired by the current
process, those are Builder’s defaults, so you need only write:
let mut insns = Builder::new().build()?;

The kind method lets you specify what sort of event you want to
count. So if you’d rather count branch instructions:
let mut insns = Builder::new()
.kind(Hardware::BRANCH_INSTRUCTIONS)
.build()?;

The group method lets you gather individual counters into a Group
that can be enabled or disabled atomically:
let mut group = Group::new()?;
let cycles = Builder::new().group(&mut group).kind(Hardware::CPU_CYCLES).build()?;
let insns = Builder::new().group(&mut group).kind(Hardware::INSTRUCTIONS).build()?;

Other methods let you select:
- specific processes or cgroups to observe
- specific CPU cores to observe
Builder supports only a fraction of the many knobs and dials Linux offers,
but hopefully it will acquire methods to support more of them as time goes
on.
Internally, a Builder is just a wrapper around the kernel’s struct perf_event_attr type.
Implementations§
Source§impl<'a> Builder<'a>
impl<'a> Builder<'a>
Sourcepub fn new() -> Builder<'a>
pub fn new() -> Builder<'a>
Return a new Builder, with all parameters set to their defaults.
Examples found in repository?
3fn main() -> std::io::Result<()> {
4 let mut counter = Builder::new().build()?;
5
6 let vec = (0..=51).collect::<Vec<_>>();
7
8 counter.enable()?;
9 println!("{:?}", vec);
10 counter.disable()?;
11
12 println!("{} instructions retired", counter.read()?);
13
14 Ok(())
15}

More examples
7fn main() -> std::io::Result<()> {
8 let pid: pid_t = std::env::args()
9 .nth(1)
10 .expect("Usage: insns-for-pid PID")
11 .parse()
12 .expect("Usage: insns-for-pid PID");
13
14 let mut insns = Builder::new()
15 .observe_pid(pid)
16        .kind(Hardware::INSTRUCTIONS)
17 .build()?;
18
19 // Count instructions in PID for five seconds.
20 insns.enable()?;
21 sleep(Duration::from_secs(5));
22 insns.disable()?;
23
24 println!("instructions in last five seconds: {}", insns.read()?);
25
26 Ok(())
27}1fn main() -> std::io::Result<()> {
2 use perf_event::events::Hardware;
3 use perf_event::{Builder, Group};
4
5 let mut group = Group::new()?;
6 let cycles = Builder::new()
7 .group(&mut group)
8 .kind(Hardware::CPU_CYCLES)
9 .build()?;
10 let insns = Builder::new()
11 .group(&mut group)
12 .kind(Hardware::INSTRUCTIONS)
13 .build()?;
14
15 let vec = (0..=51).collect::<Vec<_>>();
16
17 group.enable()?;
18 println!("{:?}", vec);
19 group.disable()?;
20
21 let counts = group.read()?;
22 println!(
23 "cycles / instructions: {} / {} ({:.2} cpi)",
24 counts[&cycles],
25 counts[&insns],
26 (counts[&cycles] as f64 / counts[&insns] as f64)
27 );
28
29 Ok(())
30}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26
27 // Note that if you add more counters than you actually have hardware for,
28 // the kernel will time-slice them, which means you may get no coverage for
29 // short measurements. See the documentation.
30
31 let vec = (0..=51).collect::<Vec<_>>();
32
33 group.enable()?;
34 println!("{:?}", vec);
35 group.disable()?;
36
37 let counts = group.read()?;
38 println!(
39 "L1D cache misses/references: {} / {} ({:.0}%)",
40 counts[&miss_counter],
41 counts[&access_counter],
42 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
43 );
44
45 println!(
46 "branch prediction misses/total: {} / {} ({:.0}%)",
47 counts[&missed_branches],
48 counts[&branches],
49 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
50 );
51
52 // You can iterate over a `Counts` value:
53 for (id, value) in &counts {
54 println!("Counter id {} has value {}", id, value);
55 }
56
57 Ok(())
58}84fn measure(label: &str, task: impl FnOnce()) {
85 use perf_event::events::{Cache, CacheOp, CacheResult, WhichCache};
86 use perf_event::{Builder, Group};
87
88 let mut group = Group::new().expect("creating group is ok");
89 let read_counter = Builder::new()
90 .group(&mut group)
91 .kind(Cache {
92 which: WhichCache::L1D,
93 operation: CacheOp::READ,
94 result: CacheResult::ACCESS,
95 })
96 .build()
97 .expect("building read_counter is ok");
98 let read_miss_counter = Builder::new()
99 .group(&mut group)
100 .kind(Cache {
101 which: WhichCache::L1D,
102 operation: CacheOp::READ,
103 result: CacheResult::MISS,
104 })
105 .build()
106 .expect("building read_miss_counter is ok");
107 let prefetch_counter = Builder::new()
108 .group(&mut group)
109 .kind(Cache {
110 which: WhichCache::L1D,
111 operation: CacheOp::PREFETCH,
112 result: CacheResult::ACCESS,
113 })
114 .build()
115 .expect("building prefetch_counter is ok");
116
117 group.enable().expect("enabling group is ok");
118 task();
119 group.disable().expect("disabling group is ok");
120
121 let counts = group.read().expect("reading group is ok");
122 let reads = counts[&read_counter];
123 let read_misses = counts[&read_miss_counter];
124 let read_hits = reads - read_misses;
125 let prefetches = counts[&prefetch_counter];
126
127 println!(
128 "{label}: hits / reads: {read_hits:8} / {reads:8} {:6.2}%, \
129 prefetched {prefetches:8}",
130 (read_hits as f64 / reads as f64) * 100.0,
131 );
132
133 if counts.time_enabled() != counts.time_running() {
134 println!(
135 "time enabled: {} time running: {}",
136 counts.time_enabled(),
137 counts.time_running(),
138 );
139 }
140}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26 let insns = Builder::new()
27 .group(&mut group)
28 .kind(Hardware::INSTRUCTIONS)
29 .build()?;
30 let cycles = Builder::new()
31 .group(&mut group)
32 .kind(Hardware::CPU_CYCLES)
33 .build()?;
34
35 // Note that if you add more counters than you actually have hardware for,
36 // the kernel will time-slice them, which means you may get no coverage for
37 // short measurements. See the documentation.
38 //
39 // On my machine, this program won't collect any data unless I disable the
40 // NMI watchdog, as described in the documentation for `Group`. My machine
41 // has four counters, and this program tries to use all of them, but the NMI
42 // watchdog uses one up.
43
44 let mut vec = (0..=100000).collect::<Vec<_>>();
45
46 group.enable()?;
47 vec.sort();
48 println!("{:?}", &vec[0..10]);
49 group.disable()?;
50
51 let counts = group.read()?;
52
53 println!(
54 "enabled for {}ns, actually running for {}ns",
55 counts.time_enabled(),
56 counts.time_running()
57 );
58
59 if counts.time_running() == 0 {
60 println!("Group was never running; no results available.");
61 return Ok(());
62 }
63
64 if counts.time_running() < counts.time_enabled() {
65 println!("Counts cover only a portion of the execution.");
66 }
67
68 println!(
69 "L1D cache misses/references: {} / {} ({:.0}%)",
70 counts[&miss_counter],
71 counts[&access_counter],
72 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
73 );
74
75 println!(
76 "branch prediction misses/total: {} / {} ({:.0}%)",
77 counts[&missed_branches],
78 counts[&branches],
79 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
80 );
81
82 println!(
83 "{} instructions, {} cycles ({:.2} cpi)",
84 counts[&insns],
85 counts[&cycles],
86 counts[&cycles] as f64 / counts[&insns] as f64
87 );
88
89 // You can iterate over a `Counts` value:
90 for (id, value) in &counts {
91 println!("Counter id {} has value {}", id, value);
92 }
93
94 Ok(())
95}Sourcepub fn kind<K: Into<Event>>(self, kind: K) -> Builder<'a>
pub fn kind<K: Into<Event>>(self, kind: K) -> Builder<'a>
Count events of the given kind. This accepts an Event value,
or any type that can be converted to one, so you can pass Hardware,
Software and Cache values directly.
The default is to count retired instructions, or
Hardware::INSTRUCTIONS events.
For example, to count level 1 data cache references and misses, pass the
appropriate events::Cache values:
use perf_event::{Builder, Group};
use perf_event::events::{Cache, CacheOp, CacheResult, WhichCache};
const ACCESS: Cache = Cache {
which: WhichCache::L1D,
operation: CacheOp::READ,
result: CacheResult::ACCESS,
};
const MISS: Cache = Cache { result: CacheResult::MISS, ..ACCESS };
let mut group = Group::new()?;
let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;

Examples found in repository
7fn main() -> std::io::Result<()> {
8 let pid: pid_t = std::env::args()
9 .nth(1)
10 .expect("Usage: insns-for-pid PID")
11 .parse()
12 .expect("Usage: insns-for-pid PID");
13
14 let mut insns = Builder::new()
15 .observe_pid(pid)
16        .kind(Hardware::INSTRUCTIONS)
17 .build()?;
18
19 // Count instructions in PID for five seconds.
20 insns.enable()?;
21 sleep(Duration::from_secs(5));
22 insns.disable()?;
23
24 println!("instructions in last five seconds: {}", insns.read()?);
25
26 Ok(())
27}

More examples
1fn main() -> std::io::Result<()> {
2 use perf_event::events::Hardware;
3 use perf_event::{Builder, Group};
4
5 let mut group = Group::new()?;
6 let cycles = Builder::new()
7 .group(&mut group)
8 .kind(Hardware::CPU_CYCLES)
9 .build()?;
10 let insns = Builder::new()
11 .group(&mut group)
12 .kind(Hardware::INSTRUCTIONS)
13 .build()?;
14
15 let vec = (0..=51).collect::<Vec<_>>();
16
17 group.enable()?;
18 println!("{:?}", vec);
19 group.disable()?;
20
21 let counts = group.read()?;
22 println!(
23 "cycles / instructions: {} / {} ({:.2} cpi)",
24 counts[&cycles],
25 counts[&insns],
26 (counts[&cycles] as f64 / counts[&insns] as f64)
27 );
28
29 Ok(())
30}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26
27 // Note that if you add more counters than you actually have hardware for,
28 // the kernel will time-slice them, which means you may get no coverage for
29 // short measurements. See the documentation.
30
31 let vec = (0..=51).collect::<Vec<_>>();
32
33 group.enable()?;
34 println!("{:?}", vec);
35 group.disable()?;
36
37 let counts = group.read()?;
38 println!(
39 "L1D cache misses/references: {} / {} ({:.0}%)",
40 counts[&miss_counter],
41 counts[&access_counter],
42 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
43 );
44
45 println!(
46 "branch prediction misses/total: {} / {} ({:.0}%)",
47 counts[&missed_branches],
48 counts[&branches],
49 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
50 );
51
52 // You can iterate over a `Counts` value:
53 for (id, value) in &counts {
54 println!("Counter id {} has value {}", id, value);
55 }
56
57 Ok(())
58}84fn measure(label: &str, task: impl FnOnce()) {
85 use perf_event::events::{Cache, CacheOp, CacheResult, WhichCache};
86 use perf_event::{Builder, Group};
87
88 let mut group = Group::new().expect("creating group is ok");
89 let read_counter = Builder::new()
90 .group(&mut group)
91 .kind(Cache {
92 which: WhichCache::L1D,
93 operation: CacheOp::READ,
94 result: CacheResult::ACCESS,
95 })
96 .build()
97 .expect("building read_counter is ok");
98 let read_miss_counter = Builder::new()
99 .group(&mut group)
100 .kind(Cache {
101 which: WhichCache::L1D,
102 operation: CacheOp::READ,
103 result: CacheResult::MISS,
104 })
105 .build()
106 .expect("building read_miss_counter is ok");
107 let prefetch_counter = Builder::new()
108 .group(&mut group)
109 .kind(Cache {
110 which: WhichCache::L1D,
111 operation: CacheOp::PREFETCH,
112 result: CacheResult::ACCESS,
113 })
114 .build()
115 .expect("building prefetch_counter is ok");
116
117 group.enable().expect("enabling group is ok");
118 task();
119 group.disable().expect("disabling group is ok");
120
121 let counts = group.read().expect("reading group is ok");
122 let reads = counts[&read_counter];
123 let read_misses = counts[&read_miss_counter];
124 let read_hits = reads - read_misses;
125 let prefetches = counts[&prefetch_counter];
126
127 println!(
128 "{label}: hits / reads: {read_hits:8} / {reads:8} {:6.2}%, \
129 prefetched {prefetches:8}",
130 (read_hits as f64 / reads as f64) * 100.0,
131 );
132
133 if counts.time_enabled() != counts.time_running() {
134 println!(
135 "time enabled: {} time running: {}",
136 counts.time_enabled(),
137 counts.time_running(),
138 );
139 }
140}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26 let insns = Builder::new()
27 .group(&mut group)
28 .kind(Hardware::INSTRUCTIONS)
29 .build()?;
30 let cycles = Builder::new()
31 .group(&mut group)
32 .kind(Hardware::CPU_CYCLES)
33 .build()?;
34
35 // Note that if you add more counters than you actually have hardware for,
36 // the kernel will time-slice them, which means you may get no coverage for
37 // short measurements. See the documentation.
38 //
39 // On my machine, this program won't collect any data unless I disable the
40 // NMI watchdog, as described in the documentation for `Group`. My machine
41 // has four counters, and this program tries to use all of them, but the NMI
42 // watchdog uses one up.
43
44 let mut vec = (0..=100000).collect::<Vec<_>>();
45
46 group.enable()?;
47 vec.sort();
48 println!("{:?}", &vec[0..10]);
49 group.disable()?;
50
51 let counts = group.read()?;
52
53 println!(
54 "enabled for {}ns, actually running for {}ns",
55 counts.time_enabled(),
56 counts.time_running()
57 );
58
59 if counts.time_running() == 0 {
60 println!("Group was never running; no results available.");
61 return Ok(());
62 }
63
64 if counts.time_running() < counts.time_enabled() {
65 println!("Counts cover only a portion of the execution.");
66 }
67
68 println!(
69 "L1D cache misses/references: {} / {} ({:.0}%)",
70 counts[&miss_counter],
71 counts[&access_counter],
72 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
73 );
74
75 println!(
76 "branch prediction misses/total: {} / {} ({:.0}%)",
77 counts[&missed_branches],
78 counts[&branches],
79 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
80 );
81
82 println!(
83 "{} instructions, {} cycles ({:.2} cpi)",
84 counts[&insns],
85 counts[&cycles],
86 counts[&cycles] as f64 / counts[&insns] as f64
87 );
88
89 // You can iterate over a `Counts` value:
90 for (id, value) in &counts {
91 println!("Counter id {} has value {}", id, value);
92 }
93
94 Ok(())
95}

pub fn build(self) -> Result<Counter>
pub fn build(self) -> Result<Counter>
Construct a Counter according to the specifications made on this
Builder.
A freshly built Counter is disabled. To begin counting events, you
must call enable on the Counter or the Group to which it belongs.
If the Builder requests features that the running kernel does not
support, it returns Err(e) where e.kind() == ErrorKind::Other and
e.raw_os_error() == Some(libc::E2BIG).
Unfortunately, problems in counter configuration are detected at this
point, by the kernel, not earlier when the offending request is made on
the Builder. The kernel’s returned errors are not always helpful.
Examples found in repository?
3fn main() -> std::io::Result<()> {
4 let mut counter = Builder::new().build()?;
5
6 let vec = (0..=51).collect::<Vec<_>>();
7
8 counter.enable()?;
9 println!("{:?}", vec);
10 counter.disable()?;
11
12 println!("{} instructions retired", counter.read()?);
13
14 Ok(())
15}More examples
7fn main() -> std::io::Result<()> {
8 let pid: pid_t = std::env::args()
9 .nth(1)
10 .expect("Usage: insns-for-pid PID")
11 .parse()
12 .expect("Usage: insns-for-pid PID");
13
14 let mut insns = Builder::new()
15 .observe_pid(pid)
16        .kind(Hardware::INSTRUCTIONS)
17 .build()?;
18
19 // Count instructions in PID for five seconds.
20 insns.enable()?;
21 sleep(Duration::from_secs(5));
22 insns.disable()?;
23
24 println!("instructions in last five seconds: {}", insns.read()?);
25
26 Ok(())
27}1fn main() -> std::io::Result<()> {
2 use perf_event::events::Hardware;
3 use perf_event::{Builder, Group};
4
5 let mut group = Group::new()?;
6 let cycles = Builder::new()
7 .group(&mut group)
8 .kind(Hardware::CPU_CYCLES)
9 .build()?;
10 let insns = Builder::new()
11 .group(&mut group)
12 .kind(Hardware::INSTRUCTIONS)
13 .build()?;
14
15 let vec = (0..=51).collect::<Vec<_>>();
16
17 group.enable()?;
18 println!("{:?}", vec);
19 group.disable()?;
20
21 let counts = group.read()?;
22 println!(
23 "cycles / instructions: {} / {} ({:.2} cpi)",
24 counts[&cycles],
25 counts[&insns],
26 (counts[&cycles] as f64 / counts[&insns] as f64)
27 );
28
29 Ok(())
30}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26
27 // Note that if you add more counters than you actually have hardware for,
28 // the kernel will time-slice them, which means you may get no coverage for
29 // short measurements. See the documentation.
30
31 let vec = (0..=51).collect::<Vec<_>>();
32
33 group.enable()?;
34 println!("{:?}", vec);
35 group.disable()?;
36
37 let counts = group.read()?;
38 println!(
39 "L1D cache misses/references: {} / {} ({:.0}%)",
40 counts[&miss_counter],
41 counts[&access_counter],
42 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
43 );
44
45 println!(
46 "branch prediction misses/total: {} / {} ({:.0}%)",
47 counts[&missed_branches],
48 counts[&branches],
49 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
50 );
51
52 // You can iterate over a `Counts` value:
53 for (id, value) in &counts {
54 println!("Counter id {} has value {}", id, value);
55 }
56
57 Ok(())
58}84fn measure(label: &str, task: impl FnOnce()) {
85 use perf_event::events::{Cache, CacheOp, CacheResult, WhichCache};
86 use perf_event::{Builder, Group};
87
88 let mut group = Group::new().expect("creating group is ok");
89 let read_counter = Builder::new()
90 .group(&mut group)
91 .kind(Cache {
92 which: WhichCache::L1D,
93 operation: CacheOp::READ,
94 result: CacheResult::ACCESS,
95 })
96 .build()
97 .expect("building read_counter is ok");
98 let read_miss_counter = Builder::new()
99 .group(&mut group)
100 .kind(Cache {
101 which: WhichCache::L1D,
102 operation: CacheOp::READ,
103 result: CacheResult::MISS,
104 })
105 .build()
106 .expect("building read_miss_counter is ok");
107 let prefetch_counter = Builder::new()
108 .group(&mut group)
109 .kind(Cache {
110 which: WhichCache::L1D,
111 operation: CacheOp::PREFETCH,
112 result: CacheResult::ACCESS,
113 })
114 .build()
115 .expect("building prefetch_counter is ok");
116
117 group.enable().expect("enabling group is ok");
118 task();
119 group.disable().expect("disabling group is ok");
120
121 let counts = group.read().expect("reading group is ok");
122 let reads = counts[&read_counter];
123 let read_misses = counts[&read_miss_counter];
124 let read_hits = reads - read_misses;
125 let prefetches = counts[&prefetch_counter];
126
127 println!(
128 "{label}: hits / reads: {read_hits:8} / {reads:8} {:6.2}%, \
129 prefetched {prefetches:8}",
130 (read_hits as f64 / reads as f64) * 100.0,
131 );
132
133 if counts.time_enabled() != counts.time_running() {
134 println!(
135 "time enabled: {} time running: {}",
136 counts.time_enabled(),
137 counts.time_running(),
138 );
139 }
140}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26 let insns = Builder::new()
27 .group(&mut group)
28 .kind(Hardware::INSTRUCTIONS)
29 .build()?;
30 let cycles = Builder::new()
31 .group(&mut group)
32 .kind(Hardware::CPU_CYCLES)
33 .build()?;
34
35 // Note that if you add more counters than you actually have hardware for,
36 // the kernel will time-slice them, which means you may get no coverage for
37 // short measurements. See the documentation.
38 //
39 // On my machine, this program won't collect any data unless I disable the
40 // NMI watchdog, as described in the documentation for `Group`. My machine
41 // has four counters, and this program tries to use all of them, but the NMI
42 // watchdog uses one up.
43
44 let mut vec = (0..=100000).collect::<Vec<_>>();
45
46 group.enable()?;
47 vec.sort();
48 println!("{:?}", &vec[0..10]);
49 group.disable()?;
50
51 let counts = group.read()?;
52
53 println!(
54 "enabled for {}ns, actually running for {}ns",
55 counts.time_enabled(),
56 counts.time_running()
57 );
58
59 if counts.time_running() == 0 {
60 println!("Group was never running; no results available.");
61 return Ok(());
62 }
63
64 if counts.time_running() < counts.time_enabled() {
65 println!("Counts cover only a portion of the execution.");
66 }
67
68 println!(
69 "L1D cache misses/references: {} / {} ({:.0}%)",
70 counts[&miss_counter],
71 counts[&access_counter],
72 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
73 );
74
75 println!(
76 "branch prediction misses/total: {} / {} ({:.0}%)",
77 counts[&missed_branches],
78 counts[&branches],
79 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
80 );
81
82 println!(
83 "{} instructions, {} cycles ({:.2} cpi)",
84 counts[&insns],
85 counts[&cycles],
86 counts[&cycles] as f64 / counts[&insns] as f64
87 );
88
89 // You can iterate over a `Counts` value:
90 for (id, value) in &counts {
91 println!("Counter id {} has value {}", id, value);
92 }
93
94 Ok(())
95}

impl<'a> Builder<'a>
impl<'a> Builder<'a>
Sourcepub fn attrs(&self) -> &perf_event_attr
pub fn attrs(&self) -> &perf_event_attr
Directly access the perf_event_attr within this builder.
Sourcepub fn attrs_mut(&mut self) -> &mut perf_event_attr
pub fn attrs_mut(&mut self) -> &mut perf_event_attr
Directly access the perf_event_attr within this builder.
Sourcepub fn observe_self(self) -> Builder<'a>
pub fn observe_self(self) -> Builder<'a>
Observe the calling process. (This is the default.)
Sourcepub fn observe_pid(self, pid: pid_t) -> Builder<'a>
pub fn observe_pid(self, pid: pid_t) -> Builder<'a>
Observe the process with the given process id. This requires
CAP_SYS_PTRACE capabilities.
Examples found in repository?
7fn main() -> std::io::Result<()> {
8 let pid: pid_t = std::env::args()
9 .nth(1)
10 .expect("Usage: insns-for-pid PID")
11 .parse()
12 .expect("Usage: insns-for-pid PID");
13
14 let mut insns = Builder::new()
15 .observe_pid(pid)
16        .kind(Hardware::INSTRUCTIONS)
17 .build()?;
18
19 // Count instructions in PID for five seconds.
20 insns.enable()?;
21 sleep(Duration::from_secs(5));
22 insns.disable()?;
23
24 println!("instructions in last five seconds: {}", insns.read()?);
25
26 Ok(())
27}

pub fn any_pid(self) -> Builder<'a>
pub fn any_pid(self) -> Builder<'a>
Observe all processes.
Linux does not support observing all processes on all CPUs without
restriction, so combining any_pid with any_cpu will cause the
final build to return an error. This must be used together with
one_cpu, to select a specific CPU to observe.
This requires CAP_PERFMON or CAP_SYS_ADMIN
capabilities, or a /proc/sys/kernel/perf_event_paranoid value of less
than 1.
Sourcepub fn observe_cgroup(self, cgroup: &'a File) -> Builder<'a>
pub fn observe_cgroup(self, cgroup: &'a File) -> Builder<'a>
Observe code running in the given cgroup (container). The
cgroup argument should be a File referring to the cgroup’s directory
in the cgroupfs filesystem.
Sourcepub fn one_cpu(self, cpu: usize) -> Builder<'a>
pub fn one_cpu(self, cpu: usize) -> Builder<'a>
Observe only code running on the given CPU core.
Sourcepub fn any_cpu(self) -> Builder<'a>
pub fn any_cpu(self) -> Builder<'a>
Observe code running on any CPU core. (This is the default.)
Linux does not support observing all processes on all CPUs without
restriction, so combining any_cpu with any_pid will cause
build to return an error. This must be used with observe_self
(the default), observe_pid, or observe_cgroup.
Sourcepub fn group(self, group: &'a mut Group) -> Builder<'a>
pub fn group(self, group: &'a mut Group) -> Builder<'a>
Place the counter in the given Group. Groups allow a set of counters
to be enabled, disabled, or read as a single atomic operation, so that
the counts can be usefully compared.
Examples found in repository?
1fn main() -> std::io::Result<()> {
2 use perf_event::events::Hardware;
3 use perf_event::{Builder, Group};
4
5 let mut group = Group::new()?;
6 let cycles = Builder::new()
7 .group(&mut group)
8 .kind(Hardware::CPU_CYCLES)
9 .build()?;
10 let insns = Builder::new()
11 .group(&mut group)
12 .kind(Hardware::INSTRUCTIONS)
13 .build()?;
14
15 let vec = (0..=51).collect::<Vec<_>>();
16
17 group.enable()?;
18 println!("{:?}", vec);
19 group.disable()?;
20
21 let counts = group.read()?;
22 println!(
23 "cycles / instructions: {} / {} ({:.2} cpi)",
24 counts[&cycles],
25 counts[&insns],
26 (counts[&cycles] as f64 / counts[&insns] as f64)
27 );
28
29 Ok(())
30}

More examples
4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26
27 // Note that if you add more counters than you actually have hardware for,
28 // the kernel will time-slice them, which means you may get no coverage for
29 // short measurements. See the documentation.
30
31 let vec = (0..=51).collect::<Vec<_>>();
32
33 group.enable()?;
34 println!("{:?}", vec);
35 group.disable()?;
36
37 let counts = group.read()?;
38 println!(
39 "L1D cache misses/references: {} / {} ({:.0}%)",
40 counts[&miss_counter],
41 counts[&access_counter],
42 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
43 );
44
45 println!(
46 "branch prediction misses/total: {} / {} ({:.0}%)",
47 counts[&missed_branches],
48 counts[&branches],
49 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
50 );
51
52 // You can iterate over a `Counts` value:
53 for (id, value) in &counts {
54 println!("Counter id {} has value {}", id, value);
55 }
56
57 Ok(())
58}84fn measure(label: &str, task: impl FnOnce()) {
85 use perf_event::events::{Cache, CacheOp, CacheResult, WhichCache};
86 use perf_event::{Builder, Group};
87
88 let mut group = Group::new().expect("creating group is ok");
89 let read_counter = Builder::new()
90 .group(&mut group)
91 .kind(Cache {
92 which: WhichCache::L1D,
93 operation: CacheOp::READ,
94 result: CacheResult::ACCESS,
95 })
96 .build()
97 .expect("building read_counter is ok");
98 let read_miss_counter = Builder::new()
99 .group(&mut group)
100 .kind(Cache {
101 which: WhichCache::L1D,
102 operation: CacheOp::READ,
103 result: CacheResult::MISS,
104 })
105 .build()
106 .expect("building read_miss_counter is ok");
107 let prefetch_counter = Builder::new()
108 .group(&mut group)
109 .kind(Cache {
110 which: WhichCache::L1D,
111 operation: CacheOp::PREFETCH,
112 result: CacheResult::ACCESS,
113 })
114 .build()
115 .expect("building prefetch_counter is ok");
116
117 group.enable().expect("enabling group is ok");
118 task();
119 group.disable().expect("disabling group is ok");
120
121 let counts = group.read().expect("reading group is ok");
122 let reads = counts[&read_counter];
123 let read_misses = counts[&read_miss_counter];
124 let read_hits = reads - read_misses;
125 let prefetches = counts[&prefetch_counter];
126
127 println!(
128 "{label}: hits / reads: {read_hits:8} / {reads:8} {:6.2}%, \
129 prefetched {prefetches:8}",
130 (read_hits as f64 / reads as f64) * 100.0,
131 );
132
133 if counts.time_enabled() != counts.time_running() {
134 println!(
135 "time enabled: {} time running: {}",
136 counts.time_enabled(),
137 counts.time_running(),
138 );
139 }
140}4fn main() -> std::io::Result<()> {
5 const ACCESS: Cache = Cache {
6 which: WhichCache::L1D,
7 operation: CacheOp::READ,
8 result: CacheResult::ACCESS,
9 };
10 const MISS: Cache = Cache {
11 result: CacheResult::MISS,
12 ..ACCESS
13 };
14
15 let mut group = Group::new()?;
16 let access_counter = Builder::new().group(&mut group).kind(ACCESS).build()?;
17 let miss_counter = Builder::new().group(&mut group).kind(MISS).build()?;
18 let branches = Builder::new()
19 .group(&mut group)
20 .kind(Hardware::BRANCH_INSTRUCTIONS)
21 .build()?;
22 let missed_branches = Builder::new()
23 .group(&mut group)
24 .kind(Hardware::BRANCH_MISSES)
25 .build()?;
26 let insns = Builder::new()
27 .group(&mut group)
28 .kind(Hardware::INSTRUCTIONS)
29 .build()?;
30 let cycles = Builder::new()
31 .group(&mut group)
32 .kind(Hardware::CPU_CYCLES)
33 .build()?;
34
35 // Note that if you add more counters than you actually have hardware for,
36 // the kernel will time-slice them, which means you may get no coverage for
37 // short measurements. See the documentation.
38 //
39 // On my machine, this program won't collect any data unless I disable the
40 // NMI watchdog, as described in the documentation for `Group`. My machine
41 // has four counters, and this program tries to use all of them, but the NMI
42 // watchdog uses one up.
43
44 let mut vec = (0..=100000).collect::<Vec<_>>();
45
46 group.enable()?;
47 vec.sort();
48 println!("{:?}", &vec[0..10]);
49 group.disable()?;
50
51 let counts = group.read()?;
52
53 println!(
54 "enabled for {}ns, actually running for {}ns",
55 counts.time_enabled(),
56 counts.time_running()
57 );
58
59 if counts.time_running() == 0 {
60 println!("Group was never running; no results available.");
61 return Ok(());
62 }
63
64 if counts.time_running() < counts.time_enabled() {
65 println!("Counts cover only a portion of the execution.");
66 }
67
68 println!(
69 "L1D cache misses/references: {} / {} ({:.0}%)",
70 counts[&miss_counter],
71 counts[&access_counter],
72 (counts[&miss_counter] as f64 / counts[&access_counter] as f64) * 100.0
73 );
74
75 println!(
76 "branch prediction misses/total: {} / {} ({:.0}%)",
77 counts[&missed_branches],
78 counts[&branches],
79 (counts[&missed_branches] as f64 / counts[&branches] as f64) * 100.0
80 );
81
82 println!(
83 "{} instructions, {} cycles ({:.2} cpi)",
84 counts[&insns],
85 counts[&cycles],
86 counts[&cycles] as f64 / counts[&insns] as f64
87 );
88
89 // You can iterate over a `Counts` value:
90 for (id, value) in &counts {
91 println!("Counter id {} has value {}", id, value);
92 }
93
94 Ok(())
95}
Sourcepub fn read_format(&mut self, read_format: ReadFormat) -> &mut Self
pub fn read_format(&mut self, read_format: ReadFormat) -> &mut Self
Set the fields to include when reading from the counter.
Note that this method is not additive, unlike sample.
The implementation of this library will silently mask out certain flags
if they would be invalid. For example, we will not allow you to set
ReadFormat::GROUP when building a single counter.
Source§impl<'a> Builder<'a>
impl<'a> Builder<'a>
Sourcepub fn enabled(&mut self, enabled: bool) -> &mut Self
pub fn enabled(&mut self, enabled: bool) -> &mut Self
Whether this counter should start off enabled.
When this is set, the counter will immediately start being recorded as soon as it is created.
By default, this is false.
Sourcepub fn inherit(&mut self, inherit: bool) -> &mut Self
pub fn inherit(&mut self, inherit: bool) -> &mut Self
Set whether this counter is inherited by new threads.
When this flag is set, this counter observes activity in new threads created by any thread already being observed.
By default, the flag is unset: counters are not inherited, and observe only the threads specified when they are created.
This flag cannot be set if the counter belongs to a Group. Doing so
will result in an error when the counter is built. This is a kernel
limitation.
Sourcepub fn pinned(&mut self, pinned: bool) -> &mut Self
pub fn pinned(&mut self, pinned: bool) -> &mut Self
Set whether the counter is pinned to the PMU.
If this flag is set, the kernel will attempt to keep the counter
always on the CPU if at all possible. If it fails to do so, the counter
will enter an error state where reading it will always return EOF. For
this crate, that would result in Counter::read returning an error
with kind ErrorKind::UnexpectedEof.
This option only applies to hardware counters and group leaders. At this time this crate provides no way to configure group leaders so this option will only work when the resulting counter is not in a group.
This is false by default.
Sourcepub fn exclusive(&mut self, exclusive: bool) -> &mut Self
pub fn exclusive(&mut self, exclusive: bool) -> &mut Self
Controls whether the counter or group can be scheduled onto a CPU alongside other counters or groups.
This is false by default.
Sourcepub fn exclude_user(&mut self, exclude_user: bool) -> &mut Self
pub fn exclude_user(&mut self, exclude_user: bool) -> &mut Self
Whether we should exclude events that occur in user space.
This is false by default.
Sourcepub fn exclude_kernel(&mut self, exclude_kernel: bool) -> &mut Self
pub fn exclude_kernel(&mut self, exclude_kernel: bool) -> &mut Self
Whether we should exclude events that occur in kernel space.
Note that setting this to false may result in permission errors if
the current perf_event_paranoid value is greater than 1.
This is true by default.
Sourcepub fn include_kernel(&mut self) -> &mut Self
pub fn include_kernel(&mut self) -> &mut Self
Include kernel code.
See exclude_kernel.
Sourcepub fn exclude_hv(&mut self, exclude_hv: bool) -> &mut Self
pub fn exclude_hv(&mut self, exclude_hv: bool) -> &mut Self
Whether we should exclude events that happen in the hypervisor.
This is not supported on all architectures as it requires built-in support within the CPU itself.
Note that setting this to false may result in permission errors if
the current perf_event_paranoid value is greater than 1.
This is true by default.
Sourcepub fn include_hv(&mut self) -> &mut Self
pub fn include_hv(&mut self) -> &mut Self
Include hypervisor code.
See exclude_hv.
Sourcepub fn exclude_idle(&mut self, exclude_idle: bool) -> &mut Self
pub fn exclude_idle(&mut self, exclude_idle: bool) -> &mut Self
Whether to exclude events that occur when running the idle task.
Note that this only has an effect for software events.
Sourcepub fn mmap(&mut self, mmap: bool) -> &mut Self
pub fn mmap(&mut self, mmap: bool) -> &mut Self
Enable the generation of MMAP records for executable memory maps.
MMAP records are emitted when the process/thread that is being observed creates a new executable memory mapping.
Sourcepub fn comm(&mut self, comm: bool) -> &mut Self
pub fn comm(&mut self, comm: bool) -> &mut Self
Enable the tracking of process command name changes.
This can happen when a process calls execve(2), prctl(PR_SET_NAME),
or writes to /proc/self/comm.
If you also set the comm_exec flag, then the
kernel will indicate which of these process name changes were due to
calls to execve(2).
Sourcepub fn sample_period(&mut self, period: u64) -> &mut Self
pub fn sample_period(&mut self, period: u64) -> &mut Self
Set the period at which the kernel will generate sample events.
As an example, if the event is Hardware::INSTRUCTIONS and period
is 100_000 then every 100_000 instructions the kernel will generate an
event.
Note that the actual precision at which the sample corresponds to the
instant and location at which Nth event occurred is controlled by the
precise_ip option.
This setting is mutually exclusive with sample_frequency.
Sourcepub fn sample_frequency(&mut self, frequency: u64) -> &mut Self
pub fn sample_frequency(&mut self, frequency: u64) -> &mut Self
Set the frequency at which the kernel will generate sample events (in Hz).
Note that this is not guaranteed to be exact. The kernel will adjust the period to attempt to keep the desired frequency, but if the rate at which events occur varies drastically then samples may not occur at the specified frequency.
The amount to which samples correspond to the instant and location at
which an event occurred is controlled by the precise_ip option.
This setting is mutually exclusive with sample_period.
Sourcepub fn inherit_stat(&mut self, inherit_stat: bool) -> &mut Self
pub fn inherit_stat(&mut self, inherit_stat: bool) -> &mut Self
Save event counts on context switch for inherited tasks.
This option is only meaningful if inherit is also enabled.
Sourcepub fn enable_on_exec(&mut self, enable_on_exec: bool) -> &mut Self
pub fn enable_on_exec(&mut self, enable_on_exec: bool) -> &mut Self
Enable the counter automatically after a call to execve(2).
Sourcepub fn task(&mut self, task: bool) -> &mut Self
pub fn task(&mut self, task: bool) -> &mut Self
If set, then the kernel will generate fork and exit records.
Sourcepub fn wakeup_watermark(&mut self, watermark: usize) -> &mut Self
pub fn wakeup_watermark(&mut self, watermark: usize) -> &mut Self
Set how many bytes will be written before the kernel sends an overflow notification.
This controls how much data will be emitted before
Sampler::next_blocking will wake up once blocked.
This setting is mutually exclusive with wakeup_events.
Sourcepub fn wakeup_events(&mut self, events: usize) -> &mut Self
pub fn wakeup_events(&mut self, events: usize) -> &mut Self
Set how many samples will be written before the kernel sends an overflow notification.
This controls how much data will be emitted before
Sampler::next_blocking will wake up once blocked. Note that only
sample records (PERF_RECORD_SAMPLE) count towards the event count.
Some caveats apply, see the manpage for the full documentation.
This method is mutually exclusive with wakeup_watermark.
Sourcepub fn precise_ip(&mut self, skid: SampleSkid) -> &mut Self
pub fn precise_ip(&mut self, skid: SampleSkid) -> &mut Self
Control how much skid is permitted when recording events.
Skid is the number of instructions that occur between an event occurring and a sample being gathered by the kernel. Less skid is better but there are hardware limitations around how small the skid can be.
Also see SampleSkid.
Sourcepub fn mmap_data(&mut self, mmap_data: bool) -> &mut Self
pub fn mmap_data(&mut self, mmap_data: bool) -> &mut Self
Enable the generation of MMAP records for non-executable memory maps.
This is the data counterpart of mmap.
Sourcepub fn sample_id_all(&mut self, sample_id_all: bool) -> &mut Self
pub fn sample_id_all(&mut self, sample_id_all: bool) -> &mut Self
If enabled, then a subset of the sample fields will additionally be
included in most non-PERF_RECORD_SAMPLE samples.
See the manpage for the exact fields that are included and which records include the trailer.
Sourcepub fn exclude_host(&mut self, exclude_host: bool) -> &mut Self
pub fn exclude_host(&mut self, exclude_host: bool) -> &mut Self
Only collect measurements for events occurring inside a VM instance.
This is only meaningful when profiling from outside the VM instance.
See the manpage for more documentation.
Sourcepub fn exclude_guest(&mut self, exclude_guest: bool) -> &mut Self
pub fn exclude_guest(&mut self, exclude_guest: bool) -> &mut Self
Don’t collect measurements for events occurring inside a VM instance.
This is only meaningful when profiling from outside the VM instance.
See the manpage for more documentation.
Sourcepub fn exclude_callchain_kernel(&mut self, exclude_kernel: bool) -> &mut Self
pub fn exclude_callchain_kernel(&mut self, exclude_kernel: bool) -> &mut Self
Do not include stack frames in the kernel when gathering callchains as a part of recording a sample.
Sourcepub fn exclude_callchain_user(&mut self, exclude_user: bool) -> &mut Self
pub fn exclude_callchain_user(&mut self, exclude_user: bool) -> &mut Self
Do not include stack frames from userspace when gathering a callchain as a part of recording a sample.
Sourcepub fn mmap2(&mut self, mmap2: bool) -> &mut Self
pub fn mmap2(&mut self, mmap2: bool) -> &mut Self
Generate an extended executable mmap record.
This record has enough info to uniquely identify which instance of a
shared map it corresponds to. Note that you also need to set the mmap
option for this to work.
Sourcepub fn comm_exec(&mut self, comm_exec: bool) -> &mut Self
pub fn comm_exec(&mut self, comm_exec: bool) -> &mut Self
Check whether the kernel will annotate COMM records with the COMM_EXEC
bit when they occur due to an execve(2) call.
This option doesn’t actually change the behaviour of the kernel. Instead, it is useful for feature detection.
Sourcepub fn clockid(&mut self, clockid: impl Into<Option<Clock>>) -> &mut Self
pub fn clockid(&mut self, clockid: impl Into<Option<Clock>>) -> &mut Self
Select which linux clock to use for timestamps.
If clockid is None then the kernel will use an internal timer. This
timer may not be any of the options for clockid.
See Clock and the clock_gettime(2) manpage for
documentation on what the different clock values mean.
Sourcepub fn context_switch(&mut self, context_switch: bool) -> &mut Self
pub fn context_switch(&mut self, context_switch: bool) -> &mut Self
Generate SWITCH records when a context switch occurs.
Also enables the generation of SWITCH_CPU_WIDE records if profiling
in cpu-wide mode.
Sourcepub fn namespaces(&mut self, namespaces: bool) -> &mut Self
pub fn namespaces(&mut self, namespaces: bool) -> &mut Self
Generate NAMESPACES records when a task enters a new namespace.
Sourcepub fn ksymbol(&mut self, ksymbol: bool) -> &mut Self
pub fn ksymbol(&mut self, ksymbol: bool) -> &mut Self
Generate KSYMBOL records when kernel symbols are registered or
unregistered.
Sourcepub fn bpf_event(&mut self, bpf_event: bool) -> &mut Self
pub fn bpf_event(&mut self, bpf_event: bool) -> &mut Self
Generate BPF_EVENT records when eBPF programs are loaded or unloaded.
Sourcepub fn aux_output(&mut self, aux_output: bool) -> &mut Self
pub fn aux_output(&mut self, aux_output: bool) -> &mut Self
Output data for non-aux events to the aux buffer, if supported by the hardware.
Sourcepub fn cgroup(&mut self, cgroup: bool) -> &mut Self
pub fn cgroup(&mut self, cgroup: bool) -> &mut Self
Generate CGROUP records when a new cgroup is created.
Sourcepub fn text_poke(&mut self, text_poke: bool) -> &mut Self
pub fn text_poke(&mut self, text_poke: bool) -> &mut Self
Generate TEXT_POKE records when the kernel text (i.e. code) is
modified.
Sourcepub fn build_id(&mut self, build_id: bool) -> &mut Self
pub fn build_id(&mut self, build_id: bool) -> &mut Self
Whether to include the build id in MMAP2 events.
Sourcepub fn inherit_thread(&mut self, inherit_thread: bool) -> &mut Self
pub fn inherit_thread(&mut self, inherit_thread: bool) -> &mut Self
Only inherit the counter to new threads in the same process, not to other processes.
Sourcepub fn remove_on_exec(&mut self, remove_on_exec: bool) -> &mut Self
pub fn remove_on_exec(&mut self, remove_on_exec: bool) -> &mut Self
Disable this counter when it successfully calls execve(2).
Sourcepub fn sigtrap(&mut self, sigtrap: bool) -> &mut Self
pub fn sigtrap(&mut self, sigtrap: bool) -> &mut Self
Synchronously send SIGTRAP to the process that created the counter
when the sampled events overflow.
Sourcepub fn sig_data(&mut self, sig_data: u64) -> &mut Self
pub fn sig_data(&mut self, sig_data: u64) -> &mut Self
Copy data to the user’s signal handler (via si_perf in siginfo_t).
This can be used to figure out which event caused the signal to be sent.
It does nothing unless sigtrap is also set to true.
Sourcepub fn branch_sample_type(&mut self, flags: SampleBranchFlag) -> &mut Self
pub fn branch_sample_type(&mut self, flags: SampleBranchFlag) -> &mut Self
Specify which branches to include in the branch record.
This does nothing unless [SampleFlag::BRANCH_STACK] is specified in
the sample flags.
Sourcepub fn sample_regs_user(&mut self, regs: u64) -> &mut Self
pub fn sample_regs_user(&mut self, regs: u64) -> &mut Self
Specify which CPU registers to dump in a sample.
This does nothing unless [SampleFlag::REGS_USER] is part of the
specified sample flags.
The actual layout of the register mask is architecture specific.
You will generally want the PERF_REG_<arch> constants in
perf_event_open_sys. (e.g. PERF_REG_X86_SP).
Sourcepub fn sample_regs_intr(&mut self, regs: u64) -> &mut Self
pub fn sample_regs_intr(&mut self, regs: u64) -> &mut Self
Specify which CPU registers to dump in a sample.
This does nothing unless [SampleFlag::REGS_INTR] is part of the
specified sample flags.
The actual layout of the register mask is architecture specific.
You will generally want the PERF_REG_<arch> constants in
perf_event_open_sys. (e.g. PERF_REG_X86_SP).
Sourcepub fn sample_stack_user(&mut self, stack: u32) -> &mut Self
pub fn sample_stack_user(&mut self, stack: u32) -> &mut Self
Specify the maximum size of the user stack to dump.
This option does nothing unless [SampleFlag::STACK_USER] is set in the
sample flags.
Note that the size of the array allocated within the sample record will
always be exactly this size, even if the actual collected stack data is
much smaller. The allocated sample buffer (when constructing a
Sampler) will need to be large enough to accommodate the chosen
stack size or else samples will be lost.
Sourcepub fn sample_max_stack(&mut self, max_stack: u16) -> &mut Self
pub fn sample_max_stack(&mut self, max_stack: u16) -> &mut Self
Specify the maximum number of stack frames to include when unwinding the user stack.
This does nothing unless [SampleFlag::CALLCHAIN] is set in the sample
flags.
Note that the kernel has a user configurable limit specified at
/proc/sys/kernel/perf_event_max_stack. Setting sample_max_stack to
larger than that limit will result in an EOVERFLOW error when building
the counter.
Sourcepub fn aux_watermark(&mut self, watermark: u32) -> &mut Self
pub fn aux_watermark(&mut self, watermark: u32) -> &mut Self
Specify how much data is required before the kernel emits an AUX record.
Sourcepub fn aux_sample_size(&mut self, sample_size: u32) -> &mut Self
pub fn aux_sample_size(&mut self, sample_size: u32) -> &mut Self
Specify the desired size of AUX data.
This does nothing unless [SampleFlag::AUX] is set in the sample flags.
Note that the emitted aux data can be smaller than the requested size.