// perf_event/counter.rs

1use std::fs::File;
2use std::io::{self, Read};
3use std::os::fd::{AsRawFd, IntoRawFd, RawFd};
4
5use crate::sys;
6use crate::SliceAsBytesMut;
7use crate::{check_errno_syscall, CountAndTime};
8
/// A counter for one kind of kernel or hardware event.
///
/// A `Counter` represents a single performance monitoring counter. You select
/// what sort of event you'd like to count when the `Counter` is created, then
/// you can enable and disable the counter, call its [`read`] method to
/// retrieve the current count, and reset it to zero.
///
/// A `Counter`'s value is always a `u64`.
///
/// For example, this counts the number of instructions retired (completed)
/// during a call to `println!`.
///
///     use perf_event::Builder;
///
///     fn main() -> std::io::Result<()> {
///         let mut counter = Builder::new().build()?;
///
///         let vec = (0..=51).collect::<Vec<_>>();
///
///         counter.enable()?;
///         println!("{:?}", vec);
///         counter.disable()?;
///
///         println!("{} instructions retired", counter.read()?);
///
///         Ok(())
///     }
///
/// It is often useful to count several different quantities over the same
/// period of time. For example, if you want to measure the average number of
/// clock cycles used per instruction, you must count both clock cycles and
/// instructions retired, for the same range of execution. The [`Group`] type
/// lets you enable, disable, read, and reset any number of counters
/// simultaneously.
///
/// When a counter is dropped, its kernel resources are freed along with it.
///
/// Internally, a `Counter` is just a wrapper around an event file descriptor.
///
/// [`read`]: Counter::read
pub struct Counter {
    /// The file descriptor for this counter, returned by `perf_event_open`.
    ///
    /// When a `Counter` is dropped, this `File` is dropped, and the kernel
    /// removes the counter from any group it belongs to.
    file: File,

    /// The unique id assigned to this counter by the kernel.
    id: u64,
}
59
60impl Counter {
61    pub(crate) fn new(file: File, id: u64) -> Self {
62        Self { file, id }
63    }
64
65    /// Return this counter's kernel-assigned unique id.
66    ///
67    /// This can be useful when iterating over [`Counts`].
68    ///
69    /// [`Counts`]: struct.Counts.html
70    pub fn id(&self) -> u64 {
71        self.id
72    }
73
74    /// Allow this `Counter` to begin counting its designated event.
75    ///
76    /// This does not affect whatever value the `Counter` had previously; new
77    /// events add to the current count. To clear a `Counter`, use the
78    /// [`reset`] method.
79    ///
80    /// Note that `Group` also has an [`enable`] method, which enables all
81    /// its member `Counter`s as a single atomic operation.
82    ///
83    /// [`reset`]: #method.reset
84    /// [`enable`]: struct.Group.html#method.enable
85    pub fn enable(&mut self) -> io::Result<()> {
86        check_errno_syscall(|| unsafe { sys::ioctls::ENABLE(self.file.as_raw_fd(), 0) }).map(|_| ())
87    }
88
89    /// Make this `Counter` stop counting its designated event. Its count is
90    /// unaffected.
91    ///
92    /// Note that `Group` also has a [`disable`] method, which disables all
93    /// its member `Counter`s as a single atomic operation.
94    ///
95    /// [`disable`]: struct.Group.html#method.disable
96    pub fn disable(&mut self) -> io::Result<()> {
97        check_errno_syscall(|| unsafe { sys::ioctls::DISABLE(self.file.as_raw_fd(), 0) })
98            .map(|_| ())
99    }
100
101    /// Reset the value of this `Counter` to zero.
102    ///
103    /// Note that `Group` also has a [`reset`] method, which resets all
104    /// its member `Counter`s as a single atomic operation.
105    ///
106    /// [`reset`]: struct.Group.html#method.reset
107    pub fn reset(&mut self) -> io::Result<()> {
108        check_errno_syscall(|| unsafe { sys::ioctls::RESET(self.file.as_raw_fd(), 0) }).map(|_| ())
109    }
110
111    /// Return this `Counter`'s current value as a `u64`.
112    ///
113    /// Consider using the [`read_count_and_time`] method instead of this one. Some
114    /// counters are implemented in hardware, and the processor can support only
115    /// a certain number running at a time. If more counters are requested than
116    /// the hardware can support, the kernel timeshares them on the hardware.
117    /// This method gives you no indication whether this has happened;
118    /// `read_count_and_time` does.
119    ///
120    /// Note that `Group` also has a [`read`] method, which reads all
121    /// its member `Counter`s' values at once.
122    ///
123    /// [`read`]: Group::read
124    /// [`read_count_and_time`]: Counter::read_count_and_time
125    pub fn read(&mut self) -> io::Result<u64> {
126        Ok(self.read_count_and_time()?.count)
127    }
128
129    /// Return this `Counter`'s current value and timesharing data.
130    ///
131    /// Some counters are implemented in hardware, and the processor can run
132    /// only a fixed number of them at a time. If more counters are requested
133    /// than the hardware can support, the kernel timeshares them on the
134    /// hardware.
135    ///
136    /// This method returns a [`CountAndTime`] struct, whose `count` field holds
137    /// the counter's value, and whose `time_enabled` and `time_running` fields
138    /// indicate how long you had enabled the counter, and how long the counter
139    /// was actually scheduled on the processor. This lets you detect whether
140    /// the counter was timeshared, and adjust your use accordingly. Times
141    /// are reported in nanoseconds.
142    ///
143    ///     # use perf_event::Builder;
144    ///     # fn main() -> std::io::Result<()> {
145    ///     # let mut counter = Builder::new().build()?;
146    ///     let cat = counter.read_count_and_time()?;
147    ///     if cat.time_running == 0 {
148    ///         println!("No data collected.");
149    ///     } else if cat.time_running < cat.time_enabled {
150    ///         // Note: this way of scaling is accurate, but `u128` division
151    ///         // is usually implemented in software, which may be slow.
152    ///         println!("{} instructions (estimated)",
153    ///                  (cat.count as u128 *
154    ///                   cat.time_enabled as u128 / cat.time_running as u128) as u64);
155    ///     } else {
156    ///         println!("{} instructions", cat.count);
157    ///     }
158    ///     # Ok(()) }
159    ///
160    /// Note that `Group` also has a [`read`] method, which reads all
161    /// its member `Counter`s' values at once.
162    ///
163    /// [`read`]: Group::read
164    pub fn read_count_and_time(&mut self) -> io::Result<CountAndTime> {
165        let mut buf = [0_u64; 3];
166        self.file.read_exact(u64::slice_as_bytes_mut(&mut buf))?;
167
168        let cat = CountAndTime {
169            count: buf[0],
170            time_enabled: buf[1],
171            time_running: buf[2],
172        };
173
174        // Does the kernel ever return nonsense?
175        assert!(cat.time_running <= cat.time_enabled);
176
177        Ok(cat)
178    }
179}
180
181impl std::fmt::Debug for Counter {
182    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
183        write!(
184            fmt,
185            "Counter {{ fd: {}, id: {} }}",
186            self.file.as_raw_fd(),
187            self.id
188        )
189    }
190}
191
impl AsRawFd for Counter {
    /// Expose the underlying `perf_event_open` file descriptor without
    /// giving up ownership; the fd remains valid only while this `Counter`
    /// (and its `File`) is alive.
    fn as_raw_fd(&self) -> RawFd {
        self.file.as_raw_fd()
    }
}
197
impl IntoRawFd for Counter {
    /// Consume the `Counter` and hand ownership of its event file descriptor
    /// to the caller, who becomes responsible for closing it (the kernel
    /// resources are no longer freed on drop).
    fn into_raw_fd(self) -> RawFd {
        self.file.into_raw_fd()
    }
}