use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::SeqCst;
use errno::errno;
use libc::*;
use crate::Error;
use crate::ffi::*;
use self::Event::*;
/// A memory-mapped perf-event ring buffer: one `perf_event_mmap_page`
/// metadata page followed by `size` bytes of record data at `base`.
pub struct Map {
    // Pointer to the mapped metadata page (first page of the mapping).
    header: AtomicPtr<perf_event_mmap_page>,
    // Size in bytes of the data area (excludes the metadata page).
    size: usize,
    // Start of the ring's data area, one page past `header`.
    base: *mut u8,
    // Scratch buffer used by `Events::next` to reassemble records that
    // wrap around the end of the ring; caps wrapped records at 256 bytes.
    extra: [u8; 256],
}
impl Map {
pub fn new(fd: c_int, pages: usize) -> Result<Self, Error> {
unsafe {
let pagesize = pagesize();
let alloc = pagesize * (pages + 1);
let header = map(alloc, fd)? as *mut u8;
Ok(Map {
header: mem::transmute(header),
size: pagesize * pages,
base: header.offset(pagesize as isize),
extra: [0u8; 256],
})
}
}
pub fn events<T>(&mut self) -> Events<T> {
unsafe {
let header = &mut *self.header.load(SeqCst);
let head = header.data_head as isize;
let tail = header.data_tail as isize;
let size = self.size as isize;
Events {
header: &mut self.header,
head: head as *mut u8,
base: self.base,
next: self.base.offset(tail % size),
end: self.base.offset(head % size),
limit: self.base.offset(size),
extra: self.extra.as_mut_ptr(),
marker: PhantomData,
}
}
}
}
/// Cursor over the unread records of a `Map` ring buffer.
///
/// Borrows the map mutably for `'m`; dropping it commits the consumed
/// position back to the kernel via `data_tail`.
pub struct Events<'m, T> {
    // Metadata page pointer; written on drop to advance `data_tail`.
    header: &'m mut AtomicPtr<perf_event_mmap_page>,
    // The raw `data_head` counter captured at creation, stored as a
    // pointer-sized integer — NOT a dereferenceable pointer.
    head: *mut u8,
    // Start of the ring's data area.
    base: *mut u8,
    // Next record to yield.
    next: *mut u8,
    // Position corresponding to `data_head`; iteration stops here.
    end: *mut u8,
    // One past the last byte of the ring (`base + size`).
    limit: *mut u8,
    // Scratch area (`Map::extra`) for records that wrap past `limit`.
    extra: *mut u8,
    // Ties yielded `&T` references to the borrow of the map.
    marker: PhantomData<fn() -> &'m T>,
}
/// A single record pulled from the ring buffer.
#[derive(Debug)]
pub enum Event<'e, T> {
    /// A PERF_RECORD_SAMPLE payload, reinterpreted as the caller's `T`.
    Event(&'e T),
    /// A PERF_RECORD_LOST record — the kernel dropped events.
    Lost(&'e perf_record_lost),
    /// A PERF_RECORD_COMM record — a task changed its command name.
    Comm(&'e perf_record_comm),
    /// Any other record type, exposed via the raw sample view.
    Other(&'e perf_record_sample),
}
impl<'m, T> Events<'m, T> {
    /// Yields the next record in the ring, or `None` once the cursor has
    /// caught up with `data_head`.
    ///
    /// Records that wrap around the end of the ring are stitched back
    /// together in the `extra` scratch buffer before being returned.
    ///
    /// # Panics
    /// Panics if a wrapped record is larger than the 256-byte scratch
    /// buffer — reassembling it would overflow `Map::extra`.
    pub fn next(&mut self) -> Option<Event<T>> {
        unsafe {
            if self.next == self.end {
                return None;
            }
            let mut event = &*(self.next as *const perf_record_sample);
            let size = event.header.size as isize;
            let next = self.next.offset(size);
            let limit = self.limit;
            self.next = if next > limit {
                // The record wraps past the end of the ring: copy its two
                // halves into the scratch buffer and read it from there.
                //
                // Byte distance from the cursor to the end of the ring.
                // (Was `limit.offset(-(self.next as isize)) as isize`,
                // which manufactured a far-out-of-range pointer to compute
                // this same difference — UB per `offset`'s contract.)
                let len = limit as isize - self.next as isize;
                let (part0, len0) = (self.extra, len);
                let (part1, len1) = (self.extra.offset(len), size - len);
                // Guard the scratch buffer (Map::extra is 256 bytes); an
                // oversized record would otherwise overrun adjacent memory.
                assert!(size as usize <= 256, "wrapped record too large");
                ptr::copy_nonoverlapping(self.next, part0, len0 as usize);
                ptr::copy_nonoverlapping(self.base, part1, len1 as usize);
                event = &*(self.extra as *const perf_record_sample);
                // Resume after the wrapped tail at the start of the ring.
                self.base.offset(len1)
            } else if next == limit {
                // Record ends exactly at the ring boundary; wrap to base.
                self.base
            } else {
                next
            };
            // Dispatch on the kernel record type from the header.
            match event.header.type_ {
                PERF_RECORD_SAMPLE => Some(Event(event.event())),
                PERF_RECORD_LOST => Some(Lost(event.lost())),
                PERF_RECORD_COMM => Some(Comm(event.comm())),
                _ => Some(Other(event)),
            }
        }
    }
}
impl<'m, T> Drop for Events<'m, T> {
    /// Commits the consumed position: writing the captured head counter
    /// into `data_tail` tells the kernel the space up to it is free.
    fn drop(&mut self) {
        let page = self.header.load(SeqCst);
        // SAFETY: `page` points at the live metadata page for as long as
        // the borrowed `Map` exists. `head` holds the raw counter value
        // captured in `Map::events`, not a dereferenceable pointer.
        unsafe {
            (*page).data_tail = self.head as u64;
        }
    }
}
/// Creates a shared, read-write mapping of `size` bytes over `fd`.
///
/// # Safety
/// `fd` must be a file descriptor valid for `mmap` with these flags.
/// NOTE(review): no matching `munmap` is visible in this chunk — confirm
/// the mapping is released elsewhere.
unsafe fn map(size: usize, fd: c_int) -> Result<*mut c_void, Error> {
    let addr = mmap(
        ptr::null_mut(),
        size as size_t,
        PROT_READ | PROT_WRITE,
        MAP_SHARED,
        fd,
        0,
    );
    if addr == MAP_FAILED {
        // `?` converts the raw errno into this crate's Error type.
        Err(errno())?;
    }
    Ok(addr)
}
/// Returns the system page size in bytes.
unsafe fn pagesize() -> usize {
    let bytes = sysconf(_SC_PAGESIZE);
    bytes as usize
}
impl perf_record_sample {
    /// Reinterprets this record as a COMM record.
    ///
    /// # Safety
    /// Caller must ensure `header.type_` is `PERF_RECORD_COMM`.
    unsafe fn comm(&self) -> &perf_record_comm {
        let record = self as *const perf_record_sample as *const perf_record_comm;
        &*record
    }

    /// Reinterprets this record as a LOST record.
    ///
    /// # Safety
    /// Caller must ensure `header.type_` is `PERF_RECORD_LOST`.
    unsafe fn lost(&self) -> &perf_record_lost {
        let record = self as *const perf_record_sample as *const perf_record_lost;
        &*record
    }

    /// Reinterprets the sample payload as a `T`.
    ///
    /// # Safety
    /// Caller must ensure the payload really encodes a `T`; only the
    /// size is checked here, not alignment or validity of the bytes.
    unsafe fn event<T>(&self) -> &T {
        // A payload smaller than T would let the reference read past it.
        assert!(mem::size_of::<T>() <= self.size as usize);
        let payload = self.data.as_ptr() as *const T;
        &*payload
    }
}