use core::ffi;
use core::fmt;
use core::fmt::Write;
use core::ops;
use core::pin;
use core::ptr;
use core::str;
use core::sync::atomic;
use eventheader_types::EventHeader;
use tracepoint::EventDataDescriptor;
use crate::Level;
use crate::_internal;
#[allow(unused_imports)] #[cfg(feature = "macros")]
use crate::define_provider;
#[allow(unused_imports)] #[cfg(feature = "macros")]
use crate::write_event;
/// Holds the state needed to register and manage a set of eventheader
/// tracepoints that share a provider name and options.
///
/// Created via `provider_new` (normally through generated macro code).
/// Registration and unregistration are serialized by the `busy` flag.
pub struct Provider<'a> {
    // Provider name bytes (no NUL terminator); `name()` asserts valid UTF-8.
    name: &'a [u8],
    // Provider option bytes; `options()` asserts valid UTF-8.
    options: &'a [u8],
    // Start/end pointers bounding an array of per-event tracepoint pointers.
    // NOTE(review): presumably assembled by the provider macros (linker
    // sections) — confirm. Entries may be set to null by `register_impl`'s
    // dedup pass; iteration stops at the first null.
    events: ops::Range<*mut *const EventHeaderTracepoint<'a>>,
    // Guard flag: true while a register or unregister call is in progress.
    busy: atomic::AtomicBool,
}
impl<'a> Provider<'a> {
    /// Returns this provider's name.
    ///
    /// Panics if the stored name bytes are not valid UTF-8 (the construction
    /// path is expected to guarantee this).
    pub fn name(&self) -> &str {
        return str::from_utf8(self.name).unwrap();
    }

    /// Returns this provider's options string.
    ///
    /// Panics if the stored option bytes are not valid UTF-8.
    pub fn options(&self) -> &str {
        return str::from_utf8(self.options).unwrap();
    }

    /// Unregisters all of this provider's tracepoints.
    ///
    /// Returns 0 on success, or the first nonzero error code reported by an
    /// individual tracepoint's unregister. If another register/unregister is
    /// already in progress (`busy` set), this does nothing and returns 0.
    pub fn unregister(&self) -> u32 {
        let mut result = 0;
        // NOTE(review): Relaxed ordering here, while register_impl uses
        // Acquire/Release — confirm the weaker ordering is intentional.
        let was_busy = self.busy.swap(true, atomic::Ordering::Relaxed);
        if !was_busy {
            // Reconstruct the event-pointer array from the stored range.
            let events_slice = unsafe {
                &*ptr::slice_from_raw_parts(
                    self.events.start,
                    self.events.end.offset_from(self.events.start) as usize,
                )
            };
            for &event_ptr in events_slice {
                // register_impl null-fills the tail after deduplication,
                // so the first null marks the end of the live entries.
                if event_ptr.is_null() {
                    break;
                }
                let event = unsafe { &*event_ptr };
                let err = event.state.unregister();
                // Keep the first error; still unregister the rest.
                if result == 0 {
                    result = err;
                }
            }
            self.busy.swap(false, atomic::Ordering::Relaxed);
        }
        return result as u32;
    }

    /// Registers this provider's tracepoints.
    ///
    /// Returns 0 on success, or the first nonzero error code reported by an
    /// individual tracepoint's register. Panics if called concurrently with
    /// another register or unregister on the same provider.
    ///
    /// # Safety
    ///
    /// NOTE(review): the caller-facing safety contract is not visible in
    /// this chunk; presumably it concerns the lifetime of the registered
    /// tracepoint data (e.g. fork/unload rules) — confirm against the
    /// crate-level documentation.
    pub unsafe fn register(&self) -> u32 {
        return self.register_impl();
    }

    /// Implementation of `register`: dedupes the event list in place, then
    /// registers each remaining tracepoint under a generated name string.
    fn register_impl(&self) -> u32 {
        let mut result = 0;
        let was_busy = self.busy.swap(true, atomic::Ordering::Acquire);
        if was_busy {
            panic!("provider.register called simultaneously with another call to register or unregister.");
        }
        if self.events.start < self.events.end {
            let events_slice = unsafe {
                &mut *ptr::slice_from_raw_parts_mut(
                    self.events.start,
                    self.events.end.offset_from(self.events.start) as usize,
                )
            };
            // Sort descending so duplicate pointers become adjacent and any
            // null pointers (value 0) sink to the end.
            events_slice.sort_unstable_by(|a, b| b.cmp(a));
            let end_pos = events_slice.len();
            // In-place dedup: scan to the first adjacent duplicate, then
            // compact the remaining unique entries down over the duplicates.
            // After the loop, entries 0..=good_pos are the unique pointers.
            let mut good_pos = 0;
            while good_pos != end_pos - 1 {
                if events_slice[good_pos] == events_slice[good_pos + 1] {
                    let mut next_pos = good_pos + 2;
                    while next_pos != end_pos {
                        if events_slice[good_pos] != events_slice[next_pos] {
                            good_pos += 1;
                            events_slice[good_pos] = events_slice[next_pos];
                        }
                        next_pos += 1;
                    }
                    break;
                }
                good_pos += 1;
            }
            // Null-fill the tail so iteration (here and in unregister) can
            // stop at the first null.
            let mut next_pos = good_pos + 1;
            while next_pos != end_pos {
                events_slice[next_pos] = ptr::null();
                next_pos += 1;
            }
            let mut command_string = CommandString::new();
            for &mut event_ptr in events_slice {
                if event_ptr.is_null() {
                    break;
                }
                let event = unsafe { &*event_ptr };
                // Build the per-event registration string from the provider
                // name/options plus this event's level and keyword.
                let name_args = command_string.format(
                    self.name,
                    self.options,
                    event.header.level,
                    event.keyword,
                );
                // NOTE(review): Pin::new_unchecked assumes the tracepoint
                // state never moves after registration (the events array
                // points at 'a-lived data) — confirm.
                let err = unsafe { pin::Pin::new_unchecked(&event.state).register(name_args) };
                // Keep the first error; still register the rest.
                if result == 0 {
                    result = err;
                }
            }
        }
        self.busy.swap(false, atomic::Ordering::Release);
        return result as u32;
    }
}
// SAFETY: Provider holds raw pointers (which suppress the auto Sync impl),
// but mutation of the pointed-to event array only happens under the `busy`
// flag, which serializes register/unregister across threads.
// NOTE(review): full soundness argument is not visible in this chunk —
// confirm there is no unguarded access to the events array elsewhere.
unsafe impl Sync for Provider<'_> {}
impl Drop for Provider<'_> {
    /// Unregisters all of this provider's tracepoints when it goes out of
    /// scope. Any error code from unregistration is deliberately ignored —
    /// there is no way to report it from a destructor.
    fn drop(&mut self) {
        let _ = self.unregister();
    }
}
impl fmt::Debug for Provider<'_> {
    /// Renders as: `Provider { name: "NAME", options: "OPTIONS" }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = self.name();
        let options = self.options();
        write!(
            f,
            "Provider {{ name: \"{}\", options: \"{}\" }}",
            name, options,
        )
    }
}
/// Constructs a `Provider` over the given name, options, and event-pointer
/// range. Intended to be called from generated (macro) code.
///
/// # Safety
///
/// NOTE(review): `events_start..events_stop` must describe a valid, writable
/// array of `*const EventHeaderTracepoint` that outlives the provider —
/// confirm the exact contract against the crate's macro documentation.
pub const unsafe fn provider_new<'a>(
    name: &'a [u8],
    options: &'a [u8],
    events_start: *const usize,
    events_stop: *const usize,
) -> Provider<'a> {
    // The bounds arrive as *const usize; reinterpret them as pointers into
    // the array of tracepoint pointers.
    let start = events_start as *mut *const EventHeaderTracepoint;
    let end = events_stop as *mut *const EventHeaderTracepoint;
    Provider {
        name,
        options,
        events: ops::Range { start, end },
        busy: atomic::AtomicBool::new(false),
    }
}
/// Stores the information needed to manage and write one eventheader event:
/// the tracepoint registration state, the event's header template, its
/// keyword, and its pre-encoded metadata bytes.
pub struct EventHeaderTracepoint<'a> {
    // Registration/enablement state; shared with the write path.
    state: _internal::TracepointState,
    // Header template for this event; its `level` also feeds the generated
    // tracepoint name (see `CommandString::format`).
    header: EventHeader,
    // Keyword value, written in hex into the generated tracepoint name.
    keyword: u64,
    // Pre-encoded metadata bytes appended to every written event
    // (see `write_eventheader`, which places them in data[1]).
    metadata: &'a [u8],
}
impl<'a> EventHeaderTracepoint<'a> {
    /// Creates a tracepoint definition with the given header template,
    /// keyword, and pre-encoded metadata. The tracepoint starts in the
    /// unregistered state (state constructed with 0).
    pub const fn new(header: EventHeader, keyword: u64, metadata: &'a [u8]) -> Self {
        return Self {
            state: _internal::TracepointState::new(0),
            header,
            keyword,
            metadata,
        };
    }

    /// Returns true if this tracepoint is currently enabled.
    #[inline(always)]
    pub fn enabled(&self) -> bool {
        return self.state.enabled();
    }

    /// Writes an event to this tracepoint.
    ///
    /// `data[1]` must be an empty placeholder (checked in debug builds):
    /// this method fills it with this event's metadata before forwarding to
    /// `_internal::write_eventheader`, which also receives the header,
    /// optional activity/related IDs, and the metadata length.
    /// NOTE(review): data[0] appears to be reserved for use inside
    /// `_internal::write_eventheader` — confirm; its contract is not
    /// visible in this chunk. Returns the underlying write's result code.
    pub fn write_eventheader<'b>(
        &self,
        activity_id: Option<&[u8; 16]>,
        related_id: Option<&[u8; 16]>,
        data: &mut [EventDataDescriptor<'b>],
    ) -> i32
    where
        'a: 'b,
    {
        debug_assert!(data[1].is_empty());
        data[1] = EventDataDescriptor::<'a>::from_bytes(self.metadata);
        return _internal::write_eventheader(
            &self.state,
            &self.header,
            activity_id,
            related_id,
            self.metadata.len() as u16,
            data,
        );
    }
}
/// Fixed-capacity, stack-allocated byte buffer used to assemble a
/// tracepoint command string without heap allocation.
struct CommandStringBuffer {
    // Backing storage; EVENTHEADER_COMMAND_MAX bounds the longest command.
    buf: [u8; _internal::EVENTHEADER_COMMAND_MAX],
    // Number of bytes written so far (i.e. the next write position).
    pos: usize,
}
impl CommandStringBuffer {
    /// Appends `bytes` at the current position and advances it.
    /// Panics (slice bounds) if the fixed capacity would be exceeded.
    fn write(&mut self, bytes: &[u8]) {
        let end = self.pos + bytes.len();
        self.buf[self.pos..end].copy_from_slice(bytes);
        self.pos = end;
    }
}
impl Write for CommandStringBuffer {
    /// Appends UTF-8 text, letting `write!` target the buffer directly.
    /// Always succeeds from `fmt`'s point of view (overflow panics instead).
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write(s.as_bytes());
        Ok(())
    }
}
/// Reusable builder that formats a tracepoint command string
/// (name + "_L…K…" + options + " " + types); see `CommandString::format`.
pub struct CommandString(CommandStringBuffer);
#[allow(clippy::new_without_default)]
impl CommandString {
    /// Creates an empty command-string builder (zeroed buffer, position 0).
    pub const fn new() -> Self {
        Self(CommandStringBuffer {
            buf: [0; _internal::EVENTHEADER_COMMAND_MAX],
            pos: 0,
        })
    }

    /// Builds the NUL-terminated command string for one event:
    /// provider name, `_L<level-hex>K<keyword-hex>`, provider options, a
    /// space, then the `EVENTHEADER_COMMAND_TYPES` suffix.
    ///
    /// The builder's buffer is reused, so the returned `CStr` borrows from
    /// `self` and is invalidated by the next call to `format`.
    ///
    /// Panics if the assembled string overflows the fixed buffer.
    pub fn format(
        &mut self,
        provider_name: &[u8],
        provider_options: &[u8],
        level: Level,
        keyword: u64,
    ) -> &ffi::CStr {
        let buf = &mut self.0;
        buf.pos = 0;
        buf.write(provider_name);
        write!(buf, "_L{:x}K{:x}", level.as_int(), keyword).unwrap();
        buf.write(provider_options);
        write!(buf, " {}", _internal::EVENTHEADER_COMMAND_TYPES).unwrap();
        // CStr requires a trailing NUL; append it explicitly.
        buf.buf[buf.pos] = b'\0';
        buf.pos += 1;
        ffi::CStr::from_bytes_with_nul(&buf.buf[..buf.pos]).unwrap()
    }
}