use btoi::btoi;
use memchr::arch::all::is_equal;
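/// Kernel-style log levels, numerically identical to the classic syslog
/// priorities: 0 (`Emergent`, syslog's `emerg`) is the most severe,
/// 7 (`Debug`) the least.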
#[derive(Copy, Clone, Debug)]
pub enum LogLevel {
Emergent = 0,
Alert = 1,
Crit = 2,
Err = 3,
Warn = 4,
Notice = 5,
Info = 6,
Debug = 7,
}
impl LogLevel {
pub fn as_u8(self) -> u8 {
self as u8
}
pub fn as_bytes(self) -> &'static [u8] {
match self {
Self::Emergent => b"emerg",
Self::Alert => b"alert",
Self::Crit => b"crit",
Self::Err => b"error",
Self::Warn => b"warn",
Self::Notice => b"notice",
Self::Info => b"info",
Self::Debug => b"debug",
}
}
}
impl From<u8> for LogLevel {
fn from(level: u8) -> Self {
// The discriminants are declared explicitly above (0..=7), so the clamped
// value maps directly back to its variant.
match level.clamp(Self::Emergent.as_u8(), Self::Debug.as_u8()) {
0 => Self::Emergent,
1 => Self::Alert,
2 => Self::Crit,
3 => Self::Err,
4 => Self::Warn,
5 => Self::Notice,
6 => Self::Info,
_ => Self::Debug,
}
}
}
impl From<i64> for LogLevel {
#[expect(clippy::cast_possible_truncation)]
#[expect(clippy::cast_sign_loss)]
fn from(level: i64) -> Self {
(level.clamp(Self::Emergent.as_u8().into(), Self::Debug.as_u8().into()) as u8).into()
}
}
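/// Parses a loglevel from raw bytes: leading/trailing ASCII whitespace is
/// ignored, numeric input is clamped into `0..=7`, the symbolic names
/// (`emerg`, `alert`, `crit`, `error`, `warn`, `notice`, `info`, `debug`)
/// map to their variants, and anything else yields `default`.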
pub fn parse_loglevel(level: &[u8], default: LogLevel) -> LogLevel {
let level = level.trim_ascii();
if level.is_empty() {
default
} else if let Ok(level) = btoi::<i64>(level) {
level.into()
} else if is_equal(level, b"emerg") {
LogLevel::Emergent
} else if is_equal(level, b"alert") {
LogLevel::Alert
} else if is_equal(level, b"crit") {
LogLevel::Crit
} else if is_equal(level, b"error") {
LogLevel::Err
} else if is_equal(level, b"warn") {
LogLevel::Warn
} else if is_equal(level, b"notice") {
LogLevel::Notice
} else if is_equal(level, b"info") {
LogLevel::Info
} else if is_equal(level, b"debug") {
LogLevel::Debug
} else {
default
}
}
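// Action codes for the syslog(2)-style interface; the numbering matches the
// kernel's SYSLOG_ACTION_* constants (see syslog(2)).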
pub const SYSLOG_ACTION_CLOSE: libc::c_int = 0;
pub const SYSLOG_ACTION_OPEN: libc::c_int = 1;
pub const SYSLOG_ACTION_READ: libc::c_int = 2;
pub const SYSLOG_ACTION_READ_ALL: libc::c_int = 3;
pub const SYSLOG_ACTION_READ_CLEAR: libc::c_int = 4;
pub const SYSLOG_ACTION_CLEAR: libc::c_int = 5;
pub const SYSLOG_ACTION_CONSOLE_OFF: libc::c_int = 6;
pub const SYSLOG_ACTION_CONSOLE_ON: libc::c_int = 7;
pub const SYSLOG_ACTION_CONSOLE_LEVEL: libc::c_int = 8;
pub const SYSLOG_ACTION_SIZE_UNREAD: libc::c_int = 9;
pub const SYSLOG_ACTION_SIZE_BUFFER: libc::c_int = 10;
#[cfg(feature = "log")]
mod syslog_enabled {
use std::{
io::{BufWriter, Write},
mem::MaybeUninit,
os::fd::{AsRawFd, BorrowedFd},
sync::{
atomic::{AtomicBool, AtomicU8, Ordering},
OnceLock, RwLock,
},
};
use nix::{
errno::Errno,
time::{clock_gettime, ClockId},
};
use ringbuf::{
storage::{Array, Heap},
traits::*,
wrap::caching::Caching,
Arc, SharedRb,
};
use crate::{config::SYSLOG_STACK_SIZE, log::LockedWriter, syslog::*};
enum RbMode {
Heap {
shared: Arc<SharedRb<Heap<u8>>>,
prod: Caching<Arc<SharedRb<Heap<u8>>>, true, false>,
cons: Caching<Arc<SharedRb<Heap<u8>>>, false, true>,
},
Static {
shared: Arc<SharedRb<Array<u8, SYSLOG_STACK_SIZE>>>,
prod: Caching<Arc<SharedRb<Array<u8, SYSLOG_STACK_SIZE>>>, true, false>,
cons: Caching<Arc<SharedRb<Array<u8, SYSLOG_STACK_SIZE>>>, false, true>,
},
}
struct RingData {
mode: RbMode,
}
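/// An in-process substitute for the kernel log: formatted records are kept in
/// a ring buffer, optionally mirrored to a console fd, filtered by a console
/// loglevel, and discarded for good once the log is locked.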
pub struct Syslog {
ring_lock: RwLock<Option<RingData>>,
locked: AtomicBool,
fd: Option<BorrowedFd<'static>>,
level: AtomicU8,
}
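// SAFETY: `RingData` is not `Sync` because of the caching ring wrappers, but
// it is only ever reached through `ring_lock`; the remaining fields are
// atomics and a borrowed fd, which are safe to share across threads.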
unsafe impl Sync for Syslog {}
impl Syslog {
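/// Creates a syslog backed either by a fixed ring of `SYSLOG_STACK_SIZE`
/// bytes (`use_stack`, ignoring `capacity`) or by a heap ring of `capacity`
/// bytes. `fd`, if given, must remain open for the life of the process and
/// receives a copy of every message that passes the loglevel filter.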
pub fn new(
capacity: usize,
fd: Option<std::os::fd::RawFd>,
level: LogLevel,
use_stack: bool,
) -> Self {
let ring_data = if use_stack {
let shared = Arc::new(SharedRb::<Array<u8, SYSLOG_STACK_SIZE>>::default());
let prod = Caching::new(Arc::clone(&shared));
let cons = Caching::new(Arc::clone(&shared));
Some(RingData {
mode: RbMode::Static { shared, prod, cons },
})
} else {
let shared = Arc::new(SharedRb::<Heap<u8>>::new(capacity));
let prod = Caching::new(Arc::clone(&shared));
let cons = Caching::new(Arc::clone(&shared));
Some(RingData {
mode: RbMode::Heap { shared, prod, cons },
})
};
Syslog {
ring_lock: RwLock::new(ring_data),
locked: AtomicBool::new(false),
// SAFETY: `BorrowedFd::borrow_raw` with a `'static` lifetime requires the
// caller to keep this fd open for the rest of the process.
fd: fd.map(|fd| unsafe { BorrowedFd::borrow_raw(fd) }),
level: AtomicU8::new(level as u8),
}
}
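/// Logs `msg` (or `msg_pretty` when provided): if the level passes the
/// current loglevel filter, the message is mirrored to the console fd and,
/// unless the log has been locked, appended to the ring as a `printk`-style
/// record.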
pub fn write_log(&self, level: LogLevel, msg: &str, msg_pretty: Option<&str>) {
if level.as_u8() > self.loglevel() {
return;
}
if let Some(fd) = self.fd {
if fd.as_raw_fd() >= 0 {
let msg = msg_pretty.unwrap_or(msg);
if let Ok(mut writer) = LockedWriter::new(fd).map(BufWriter::new) {
let _ = writer.write_all(msg.as_bytes());
let _ = writer.write_all(b"\n");
}
}
}
if self.is_locked() {
return;
}
let ring_str = self.format_ring_message(level, msg_pretty.unwrap_or(msg));
{
let mut guard = self
.ring_lock
.write()
.unwrap_or_else(|err| err.into_inner());
// Re-check the lock flag while holding the write guard: `lock()` may have
// run after the early return above, and a locked syslog must not buffer.
if !self.locked.load(Ordering::SeqCst) {
if let Some(ring_data) = guard.as_mut() {
match &mut ring_data.mode {
RbMode::Heap { prod, .. } => {
let _ = prod.push_slice(ring_str.as_bytes());
}
RbMode::Static { prod, .. } => {
let _ = prod.push_slice(ring_str.as_bytes());
}
}
}
}
}
}
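/// Dispatches a `syslog(2)`-style action against the in-memory ring. The
/// returned pair is `(count, data)`: `count` is the number of bytes read (or
/// the queried size for the `SIZE_*` actions) and `data` carries the bytes
/// produced by the read actions. Fails with `EPERM` once the log is locked.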
#[expect(clippy::type_complexity)]
pub fn syslog(
&self,
action: libc::c_int,
len: usize,
) -> Result<(usize, Option<Vec<u8>>), Errno> {
if self.is_locked() {
return Err(Errno::EPERM);
}
match action {
SYSLOG_ACTION_CLOSE | SYSLOG_ACTION_OPEN => Ok((0, None)),
SYSLOG_ACTION_READ => {
if len == 0 {
return Ok((0, None));
}
let mut write_guard = self.ring_lock.try_write().or(Err(Errno::EINTR))?;
let ring_data = match write_guard.as_mut() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
Ok(self.read_and_consume(ring_data, len))
}
SYSLOG_ACTION_READ_ALL => {
if len == 0 {
return Ok((0, None));
}
let read_guard = self.ring_lock.try_read().or(Err(Errno::EINTR))?;
let ring_data = match read_guard.as_ref() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
Ok(self.read_all_no_consume(ring_data, len))
}
SYSLOG_ACTION_READ_CLEAR => {
if len == 0 {
return Ok((0, None));
}
let mut write_guard = self.ring_lock.try_write().or(Err(Errno::EINTR))?;
let ring_data = match write_guard.as_mut() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
let (count, data_vec) = self.read_all_no_consume(ring_data, len);
if count > 0 {
self.pop_count(ring_data, count);
}
Ok((count, data_vec))
}
SYSLOG_ACTION_CLEAR => {
let mut write_guard = self.ring_lock.try_write().or(Err(Errno::EINTR))?;
let ring_data = match write_guard.as_mut() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
self.skip_all(ring_data);
Ok((0, None))
}
SYSLOG_ACTION_CONSOLE_OFF => {
self.set_loglevel(LogLevel::Emergent.as_u8());
Ok((0, None))
}
SYSLOG_ACTION_CONSOLE_ON => {
self.set_loglevel(LogLevel::Warn.as_u8());
Ok((0, None))
}
SYSLOG_ACTION_CONSOLE_LEVEL => {
let level: u8 = len.try_into().or(Err(Errno::EINVAL))?;
self.set_loglevel(level);
Ok((0, None))
}
SYSLOG_ACTION_SIZE_UNREAD => {
let read_guard = self.ring_lock.try_read().or(Err(Errno::EINTR))?;
let ring_data = match read_guard.as_ref() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
let unread = self.ring_unread(ring_data);
Ok((unread, None))
}
SYSLOG_ACTION_SIZE_BUFFER => {
let read_guard = self.ring_lock.try_read().or(Err(Errno::EINTR))?;
let ring_data = match read_guard.as_ref() {
None => {
return Ok((0, None));
}
Some(ring_data) => ring_data,
};
let cap = self.ring_capacity(ring_data);
Ok((cap, None))
}
_ => Err(Errno::EINVAL),
}
}
pub fn loglevel(&self) -> u8 {
self.level.load(Ordering::SeqCst)
}
pub fn set_loglevel(&self, level: u8) {
let lv = level.clamp(LogLevel::Emergent.as_u8(), LogLevel::Debug.as_u8());
self.level.store(lv, Ordering::SeqCst);
}
pub fn is_locked(&self) -> bool {
self.locked.load(Ordering::SeqCst)
}
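/// Irreversibly disables the syslog: buffered records are dropped, new
/// messages are no longer buffered, and subsequent `syslog` actions fail
/// with `EPERM`. Returns `false` if the log was already locked.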
pub fn lock(&self) -> bool {
if self
.locked
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.is_ok()
{
let mut guard = self
.ring_lock
.write()
.unwrap_or_else(|err| err.into_inner());
// Dropping the ring data discards anything still buffered.
*guard = None;
true
} else {
false
}
}
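/// Renders a `printk`-style record, `<level>[ seconds.micros] message`,
/// timestamped with `CLOCK_BOOTTIME` (0.0 if the clock cannot be read).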
fn format_ring_message(&self, level: LogLevel, msg: &str) -> String {
#[expect(clippy::cast_precision_loss)]
let now = match clock_gettime(ClockId::CLOCK_BOOTTIME) {
Ok(ts) => ts.tv_sec() as f64 + (ts.tv_nsec() as f64 / 1_000_000_000.0),
Err(_) => 0.0,
};
format!("<{}>[{:12.6}] {}\n", level.as_u8(), now, msg)
}
fn read_and_consume(
&self,
ring_data: &mut RingData,
len: usize,
) -> (usize, Option<Vec<u8>>) {
let (count, out) = self.peek_and_copy(ring_data, len);
if count == 0 {
return (0, None);
}
self.pop_count(ring_data, count);
(count, Some(out))
}
fn read_all_no_consume(
&self,
ring_data: &RingData,
len: usize,
) -> (usize, Option<Vec<u8>>) {
let (count, out) = self.peek_and_copy(ring_data, len);
if count == 0 {
(0, None)
} else {
(count, Some(out))
}
}
fn skip_all(&self, ring_data: &mut RingData) {
match &mut ring_data.mode {
RbMode::Heap { cons, .. } => {
let to_skip = cons.occupied_len();
if to_skip > 0 {
let mut scratch = vec![MaybeUninit::<u8>::uninit(); to_skip];
let _ = cons.pop_slice_uninit(&mut scratch);
}
}
RbMode::Static { cons, .. } => {
let to_skip = cons.occupied_len();
if to_skip > 0 {
let mut scratch = vec![MaybeUninit::<u8>::uninit(); to_skip];
let _ = cons.pop_slice_uninit(&mut scratch);
}
}
}
}
fn pop_count(&self, ring_data: &mut RingData, count: usize) {
if count == 0 {
return;
}
match &mut ring_data.mode {
RbMode::Heap { cons, .. } => {
let mut scratch = vec![MaybeUninit::<u8>::uninit(); count];
let _ = cons.pop_slice_uninit(&mut scratch);
}
RbMode::Static { cons, .. } => {
let mut scratch = vec![MaybeUninit::<u8>::uninit(); count];
let _ = cons.pop_slice_uninit(&mut scratch);
}
}
}
fn peek_and_copy(&self, ring_data: &RingData, len: usize) -> (usize, Vec<u8>) {
match &ring_data.mode {
RbMode::Heap { cons, .. } => {
let rlen = cons.occupied_len().min(len);
if rlen == 0 {
return (0, Vec::new());
}
let mut tmp = vec![MaybeUninit::<u8>::uninit(); rlen];
let actual = cons.peek_slice_uninit(&mut tmp);
let mut out = Vec::with_capacity(actual);
for item in tmp.iter().take(actual) {
out.push(unsafe { item.assume_init() });
}
(actual, out)
}
RbMode::Static { cons, .. } => {
let rlen = cons.occupied_len().min(len);
if rlen == 0 {
return (0, Vec::new());
}
let mut tmp = vec![MaybeUninit::<u8>::uninit(); rlen];
let actual = cons.peek_slice_uninit(&mut tmp);
let mut out = Vec::with_capacity(actual);
for item in tmp.iter().take(actual) {
out.push(unsafe { item.assume_init() });
}
(actual, out)
}
}
}
fn ring_unread(&self, ring_data: &RingData) -> usize {
match &ring_data.mode {
RbMode::Heap { cons, .. } => cons.occupied_len(),
RbMode::Static { cons, .. } => cons.occupied_len(),
}
}
fn ring_capacity(&self, ring_data: &RingData) -> usize {
match &ring_data.mode {
RbMode::Heap { shared, .. } => shared.capacity().get(),
RbMode::Static { shared, .. } => shared.capacity().get(),
}
}
}
pub static SYSLOG_INSTANCE: OnceLock<Syslog> = OnceLock::new();
pub fn init_global_syslog(
capacity: usize,
fd: Option<std::os::fd::RawFd>,
level: LogLevel,
use_stack: bool,
) -> Result<(), Errno> {
if !use_stack && capacity == 0 {
return Err(Errno::EINVAL);
}
SYSLOG_INSTANCE
.set(Syslog::new(capacity, fd, level, use_stack))
.or(Err(Errno::EAGAIN))
}
pub fn global_syslog() -> Option<&'static Syslog> {
SYSLOG_INSTANCE.get()
}
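// A rough usage sketch (the actual wiring lives at the crate's call sites;
// the capacity below is illustrative only):
//
//     init_global_syslog(64 * 1024, None, LogLevel::Warn, false)?;
//     if let Some(sys) = global_syslog() {
//         sys.write_log(LogLevel::Info, "syslog is up", None);
//     }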
}
#[cfg(feature = "log")]
pub use syslog_enabled::*;
#[cfg(feature = "log")]
#[inline(always)]
pub fn current_loglevel() -> u8 {
global_syslog().map_or(LogLevel::Warn.as_u8(), |sys| sys.loglevel())
}
#[cfg(not(feature = "log"))]
#[inline(always)]
pub fn current_loglevel() -> u8 {
use std::{os::unix::ffi::OsStrExt, sync::LazyLock};
static LOGLEVEL: LazyLock<u8> = LazyLock::new(|| {
std::env::var_os(crate::config::ENV_LOG)
.map(|val| parse_loglevel(val.as_os_str().as_bytes(), LogLevel::Warn))
.unwrap_or(LogLevel::Warn)
.as_u8()
});
*LOGLEVEL
}
#[macro_export]
macro_rules! log_enabled {
($level:expr) => {
$crate::syslog::current_loglevel() >= $level.as_u8()
};
}
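// A small, self-contained sanity check of the parsing helpers above; a sketch
// of the expected behaviour rather than exhaustive coverage.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_loglevel_handles_numbers_names_and_fallback() {
// Numeric input is trimmed, parsed, and clamped into 0..=7.
assert_eq!(parse_loglevel(b" 3 ", LogLevel::Warn).as_u8(), LogLevel::Err.as_u8());
assert_eq!(parse_loglevel(b"99", LogLevel::Warn).as_u8(), LogLevel::Debug.as_u8());
assert_eq!(parse_loglevel(b"-5", LogLevel::Warn).as_u8(), LogLevel::Emergent.as_u8());
// Symbolic names map to their variants.
assert_eq!(parse_loglevel(b"debug", LogLevel::Warn).as_u8(), LogLevel::Debug.as_u8());
assert_eq!(parse_loglevel(b"emerg", LogLevel::Warn).as_u8(), LogLevel::Emergent.as_u8());
// Empty or unknown input falls back to the provided default.
assert_eq!(parse_loglevel(b"", LogLevel::Notice).as_u8(), LogLevel::Notice.as_u8());
assert_eq!(parse_loglevel(b"bogus", LogLevel::Info).as_u8(), LogLevel::Info.as_u8());
}
}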