#[cfg(any(feature = "cloudwatch", feature = "file", feature = "http"))]
use once_cell::sync::Lazy;
use std::env;
use std::error;
#[cfg(feature = "cloudwatch")]
use once_cell::sync::OnceCell;
#[cfg(feature = "cloudwatch")]
use std::collections::HashMap;
#[cfg(feature = "cloudwatch")]
use std::sync::Arc;
#[cfg(feature = "file")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(feature = "cloudwatch")]
use tokio::sync::RwLock;
#[cfg(feature = "cloudwatch")]
use aws_config::{Region, SdkConfig};
#[cfg(feature = "cloudwatch")]
use aws_sdk_cloudwatchlogs::config::{Credentials, SharedCredentialsProvider};
#[cfg(feature = "cloudwatch")]
use aws_sdk_cloudwatchlogs::operation::put_log_events::PutLogEventsError;
#[cfg(feature = "cloudwatch")]
use aws_sdk_cloudwatchlogs::{Client as CloudWatchLogsClient, types::InputLogEvent};
#[cfg(any(feature = "cloudwatch", feature = "file"))]
use chrono::Utc;
use colored::{ColoredString, Colorize};
use env_logger::Builder;
use log::Level;
#[cfg(feature = "cloudwatch")]
use tokio::time::{Duration, sleep};
/// Per-process cache of CloudWatch log-group names already verified to exist,
/// keyed by group name; avoids repeated DescribeLogGroups/CreateLogGroup calls.
#[cfg(feature = "cloudwatch")]
static GROUP_EXISTS_CACHE: Lazy<RwLock<HashMap<String, bool>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
/// Per-process cache of verified log streams, keyed by "<group>::<stream>".
#[cfg(feature = "cloudwatch")]
static STREAM_EXISTS_CACHE: Lazy<RwLock<HashMap<String, bool>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
/// Last sequence token returned by PutLogEvents for each "<group>::<stream>"
/// key; passed back on the next put for that stream.
#[cfg(feature = "cloudwatch")]
static NEXT_SEQUENCE_TOKENS: Lazy<RwLock<HashMap<String, Option<String>>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
/// Whether CloudWatch shipping is enabled: read once from the
/// LOG_TO_CLOUDWATCH env var ("true", case-insensitive) and cached.
#[cfg(feature = "cloudwatch")]
static LOG_TO_CLOUDWATCH: Lazy<bool> = Lazy::new(|| {
    env::var("LOG_TO_CLOUDWATCH")
        .map(|val| val.to_lowercase() == "true")
        .unwrap_or(false)
});
/// Returns true when the `cloudwatch` feature is compiled in and
/// LOG_TO_CLOUDWATCH was "true" at first read (value is cached).
#[cfg(feature = "cloudwatch")]
pub fn is_log_to_cloudwatch_enabled() -> bool {
    *LOG_TO_CLOUDWATCH
}
/// Fallback when built without the `cloudwatch` feature: always disabled.
#[cfg(not(feature = "cloudwatch"))]
pub fn is_log_to_cloudwatch_enabled() -> bool {
    false
}
/// Whether file logging is enabled: read once from LOG_TO_FILE ("true",
/// case-insensitive) and cached for the process lifetime.
#[cfg(feature = "file")]
static LOG_TO_FILE: Lazy<bool> = Lazy::new(|| {
    env::var("LOG_TO_FILE")
        .map(|val| val.to_lowercase() == "true")
        .unwrap_or(false)
});
/// Root directory for log files, from LOG_FILE_DIR (default "logs").
#[cfg(feature = "file")]
static LOG_FILE_DIR: Lazy<String> =
    Lazy::new(|| env::var("LOG_FILE_DIR").unwrap_or_else(|_| "logs".to_string()));
/// Returns true when the `file` feature is compiled in and LOG_TO_FILE was "true".
#[cfg(feature = "file")]
pub fn is_log_to_file_enabled() -> bool {
    *LOG_TO_FILE
}
/// Fallback when built without the `file` feature: always disabled.
#[cfg(not(feature = "file"))]
pub fn is_log_to_file_enabled() -> bool {
    false
}
/// Whether HTTP log shipping is enabled: read once from LOG_TO_HTTP ("true",
/// case-insensitive) and cached.
#[cfg(feature = "http")]
static LOG_TO_HTTP: Lazy<bool> = Lazy::new(|| {
    env::var("LOG_TO_HTTP")
        .map(|val| val.to_lowercase() == "true")
        .unwrap_or(false)
});
/// Returns true when the `http` feature is compiled in and LOG_TO_HTTP was "true".
#[cfg(feature = "http")]
pub fn is_log_to_http_enabled() -> bool {
    *LOG_TO_HTTP
}
/// Fallback when built without the `http` feature: always disabled.
#[cfg(not(feature = "http"))]
pub fn is_log_to_http_enabled() -> bool {
    false
}
/// Directory file-based logs are written under (cached LOG_FILE_DIR value).
#[cfg(feature = "file")]
pub fn log_file_dir() -> &'static str {
    LOG_FILE_DIR.as_str()
}
/// Returns whether log entries should include file/line location info,
/// controlled by the LOG_SHOW_LOCATION env var ("true", case-insensitive).
///
/// The value is now read once and cached, matching every other LOG_* flag in
/// this module; previously the environment was re-read on every formatted
/// log line, which was both inconsistent and wasted work on the hot path.
pub fn is_log_location_enabled() -> bool {
    // std's OnceLock is used (rather than the feature-gated `Lazy`) so this
    // compiles even when no backend feature is enabled.
    static LOG_SHOW_LOCATION: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    *LOG_SHOW_LOCATION.get_or_init(|| {
        env::var("LOG_SHOW_LOCATION")
            .map(|val| val.to_lowercase() == "true")
            .unwrap_or(false)
    })
}
/// Max events buffered per (group, stream) before a batch is flushed,
/// from LOG_BATCH_SIZE (default 10).
#[cfg(feature = "cloudwatch")]
static BATCH_SIZE: Lazy<usize> = Lazy::new(|| {
    env::var("LOG_BATCH_SIZE")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(10)
});
/// Max time a partial batch may wait before being flushed, in whole seconds,
/// from BATCH_TIMEOUT (default 5s).
// NOTE(review): the env var here is "BATCH_TIMEOUT", without the LOG_ prefix
// every other knob uses — confirm whether that is intentional before renaming.
#[cfg(feature = "cloudwatch")]
static BATCH_TIMEOUT: Lazy<std::time::Duration> = Lazy::new(|| {
    env::var("BATCH_TIMEOUT")
        .ok()
        .and_then(|s| s.parse::<u64>().ok())
        .map(std::time::Duration::from_secs)
        .unwrap_or(std::time::Duration::from_secs(5))
});
/// Age limit for log files, in days, from LOG_RETENTION_DAYS (default 30).
#[cfg(feature = "file")]
static LOG_RETENTION_DAYS: Lazy<u64> = Lazy::new(|| {
    env::var("LOG_RETENTION_DAYS")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(30)
});
/// Total size cap for the log directory, in MiB, from LOG_RETENTION_SIZE_MB
/// (default 512).
#[cfg(feature = "file")]
static LOG_RETENTION_SIZE_MB: Lazy<u64> = Lazy::new(|| {
    env::var("LOG_RETENTION_SIZE_MB")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(512)
});
/// How many MiB of the oldest files a single cleanup pass deletes once the
/// size cap is exceeded, from LOG_DELETE_BATCH_MB (default 100).
#[cfg(feature = "file")]
static LOG_DELETE_BATCH_MB: Lazy<u64> = Lazy::new(|| {
    env::var("LOG_DELETE_BATCH_MB")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(100)
});
/// Counts file writes; cleanup runs on every 100th write (see write_log_to_file).
#[cfg(feature = "file")]
static FILE_WRITE_COUNT: AtomicU64 = AtomicU64::new(0);
/// One log event queued for batched delivery, tagged with its destination
/// CloudWatch log group and stream.
#[cfg(feature = "cloudwatch")]
struct BatchLogItem {
    group: String,
    stream: String,
    event: InputLogEvent,
}
/// Channel into the background batching task; the task is spawned lazily on
/// first use of the sender.
// NOTE(review): Lazy init calls tokio::spawn, so the first CloudWatch log
// call must happen inside a running tokio runtime or this panics — confirm
// all call sites satisfy that.
#[cfg(feature = "cloudwatch")]
static LOG_BATCH_SENDER: Lazy<tokio::sync::mpsc::Sender<BatchLogItem>> = Lazy::new(|| {
    let (tx, rx) = tokio::sync::mpsc::channel::<BatchLogItem>(1000);
    tokio::spawn(async move {
        process_log_batches(rx).await;
    });
    tx
});
/// Process-wide CloudWatch Logs client, built once from CLOUDWATCH_AWS_REGION
/// (default "us-east-1"), CLOUDWATCH_AWS_ACCESS_KEY and CLOUDWATCH_AWS_SECRET_KEY.
// Missing credentials fall back to placeholder strings instead of panicking
// here; the failure surfaces later via verify_cloudwatch_credentials.
#[cfg(feature = "cloudwatch")]
static GLOBAL_CLIENT: Lazy<Arc<CloudWatchLogsClient>> = Lazy::new(|| {
    let region_str = env::var("CLOUDWATCH_AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
    let region = Region::new(region_str);
    let access_key =
        env::var("CLOUDWATCH_AWS_ACCESS_KEY").unwrap_or_else(|_| "MISSING_KEY".to_string());
    let secret_key =
        env::var("CLOUDWATCH_AWS_SECRET_KEY").unwrap_or_else(|_| "MISSING_SECRET".to_string());
    let credentials = Credentials::new(access_key, secret_key, None, None, "default");
    let creds_provider = SharedCredentialsProvider::new(credentials);
    let config = SdkConfig::builder()
        .region(region)
        .credentials_provider(creds_provider)
        .build();
    Arc::new(CloudWatchLogsClient::new(&config))
});
/// One-time result of the credential probe; set on the first CloudWatch log
/// call and consulted on every later call (see custom_cloudwatch_log).
#[cfg(feature = "cloudwatch")]
static CREDENTIAL_CHECK: OnceCell<Result<(), Error>> = OnceCell::new();
/// Probes CloudWatch by describing at most one log group with the given
/// prefix. An AccessDenied response maps to [`Error::InvalidCredentials`];
/// any other failure becomes [`Error::AwsConfig`] with the debug details.
#[cfg(feature = "cloudwatch")]
async fn verify_cloudwatch_credentials(
    client: &CloudWatchLogsClient,
    group: &str,
) -> Result<(), Error> {
    let probe = client
        .describe_log_groups()
        .log_group_name_prefix(group)
        .limit(1)
        .send()
        .await;
    // Any successful response proves the credentials work.
    let Err(err) = probe else {
        return Ok(());
    };
    let msg = format!("{err:?}");
    if msg.contains("AccessDenied") {
        log::error!("CloudWatch access denied — check IAM permissions for CloudWatch Logs");
        Err(Error::InvalidCredentials)
    } else {
        log::error!("Failed to verify CloudWatch credentials: {msg}");
        Err(Error::AwsConfig(msg))
    }
}
/// Errors raised by the logging backends in this module.
#[derive(Debug)]
pub enum Error {
    /// A required environment variable (named in the payload) was not set.
    EnvVarMissing(String),
    /// AWS SDK configuration or API failure, with diagnostic details.
    AwsConfig(String),
    /// CloudWatch rejected the configured credentials.
    InvalidCredentials,
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::EnvVarMissing(var) => write!(f, "Missing environment variable: {var}"),
            Self::AwsConfig(msg) => write!(f, "AWS configuration error: {msg}"),
            Self::InvalidCredentials => f.write_str("Invalid AWS credentials"),
        }
    }
}

impl error::Error for Error {}
/// Runs `op`, retrying with exponential backoff (100ms, 200ms, 400ms) when
/// the error message looks like AWS throttling. Makes at most four attempts;
/// the final attempt's result is returned as-is, and any non-throttling
/// error is returned immediately.
#[cfg(feature = "cloudwatch")]
async fn retry_on_throttling<F, Fut, T>(mut op: F) -> Result<T, Error>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, Error>>,
{
    let mut backoff = Duration::from_millis(100);
    let mut retries_left = 3;
    loop {
        match op().await {
            Ok(value) => return Ok(value),
            // Only throttling errors are retried, and only while budget remains.
            Err(Error::AwsConfig(msg)) if retries_left > 0 && msg.contains("Throttling") => {
                sleep(backoff).await;
                backoff *= 2;
                retries_left -= 1;
            }
            Err(other) => return Err(other),
        }
    }
}
pub enum LogStream {
ServerErrorResponses,
ClientErrorResponses,
RedirectionResponses,
SuccessfulResponses,
InformationalResponses,
UnknownOrUnassigned,
Custom(String),
}
impl LogStream {
pub fn as_str_pub(&self) -> &str {
self.as_str()
}
fn as_str(&self) -> &str {
match *self {
LogStream::ServerErrorResponses => "Server_Error_Responses",
LogStream::ClientErrorResponses => "Client_Error_Responses",
LogStream::RedirectionResponses => "Redirection_Responses",
LogStream::SuccessfulResponses => "Successful_Responses",
LogStream::InformationalResponses => "Informational_Responses",
LogStream::UnknownOrUnassigned => "Unknown_Or_Unassigned",
LogStream::Custom(ref s) => s,
}
}
pub fn from_string(stream_name: String) -> Self {
match stream_name.as_str() {
"Server_Error_Responses" => LogStream::ServerErrorResponses,
"Client_Error_Responses" => LogStream::ClientErrorResponses,
"Redirection_Responses" => LogStream::RedirectionResponses,
"Successful_Responses" => LogStream::SuccessfulResponses,
"Informational_Responses" => LogStream::InformationalResponses,
"Unknown_Or_Unassigned" => LogStream::UnknownOrUnassigned,
_ => LogStream::Custom(stream_name),
}
}
#[cfg(any(feature = "cloudwatch", feature = "file"))]
fn with_date(&self) -> String {
let current_date = Utc::now().format("%Y-%m-%d").to_string();
format!("{}-{}", current_date, self.as_str())
}
pub fn from_level(level: &Level) -> LogStream {
match level {
Level::Error => LogStream::ServerErrorResponses,
Level::Warn => LogStream::ClientErrorResponses,
Level::Info => LogStream::InformationalResponses,
Level::Debug => LogStream::ServerErrorResponses,
Level::Trace => LogStream::ServerErrorResponses,
}
}
}
/// Renders one log line as "<LEVEL> - <message>", appending
/// " (File: <file>, Line: <line>)" when LOG_SHOW_LOCATION is enabled.
pub fn format_log_entry(level: Level, message: &str, file: &str, line: u32) -> String {
    let mut entry = format!("{level} - {message}");
    if is_log_location_enabled() {
        entry.push_str(&format!(" (File: {file}, Line: {line})"));
    }
    entry
}
/// Serializes `fields` as a compact JSON object after inserting the given
/// text under the "message" key (overwriting any existing "message" entry).
pub fn build_structured_message(
    message: &str,
    mut fields: serde_json::Map<String, serde_json::Value>,
) -> String {
    let key = String::from("message");
    fields.insert(key, serde_json::Value::from(message));
    serde_json::Value::Object(fields).to_string()
}
pub fn colored_level(level: Level) -> ColoredString {
match level {
Level::Error => "ERROR".red().bold(),
Level::Warn => "WARN".yellow().bold(),
Level::Info => "INFO".green().bold(),
Level::Debug => "DEBUG".blue().bold(),
Level::Trace => "TRACE".magenta().bold(),
}
}
/// Formats one log entry and enqueues it for batched delivery to CloudWatch
/// Logs.
///
/// The destination group comes from LOG_GROUP (falling back to
/// AWS_LOG_GROUP); the stream name is `log_stream` prefixed with today's UTC
/// date. On the first call the credentials are probed once and the outcome
/// cached; later calls fail fast if that probe failed.
///
/// # Errors
/// - [`Error::EnvVarMissing`] when neither LOG_GROUP nor AWS_LOG_GROUP is set.
/// - [`Error::InvalidCredentials`] when the cached credential probe failed.
/// - [`Error::AwsConfig`] for stream setup, event build, or enqueue failures.
#[cfg(feature = "cloudwatch")]
pub async fn custom_cloudwatch_log(
    level: Level,
    message: &str,
    log_stream: LogStream,
    file: &str,
    line: u32,
) -> Result<(), Error> {
    let log_group_name = env::var("LOG_GROUP")
        .or_else(|_| env::var("AWS_LOG_GROUP"))
        // Name both accepted variables in the error, not just LOG_GROUP.
        .map_err(|_| Error::EnvVarMissing("LOG_GROUP or AWS_LOG_GROUP".to_string()))?;
    let log_stream_name = log_stream.with_date();
    let msg_str = format_log_entry(level, message, file, line);
    let client = GLOBAL_CLIENT.clone();
    // Probe credentials once per process; concurrent first calls may both
    // probe, but only the first result is stored.
    if CREDENTIAL_CHECK.get().is_none() {
        let result = verify_cloudwatch_credentials(&client, &log_group_name).await;
        let _ = CREDENTIAL_CHECK.set(result);
    }
    if let Some(Err(_)) = CREDENTIAL_CHECK.get() {
        return Err(Error::InvalidCredentials);
    }
    log::debug!("Logging to CloudWatch group '{log_group_name}', stream '{log_stream_name}'");
    if let Err(e) = ensure_log_stream_exists(&client, &log_group_name, &log_stream_name).await {
        log::error!(
            "Failed to ensure log stream '{log_stream_name}' in group '{log_group_name}': {e}"
        );
        return Err(e);
    }
    // Propagate a build failure instead of panicking (this previously used
    // `.expect(...)`, which would take down the caller's task).
    let log_event = InputLogEvent::builder()
        .message(msg_str)
        .timestamp(Utc::now().timestamp_millis())
        .build()
        .map_err(|e| Error::AwsConfig(format!("Failed to build log event: {e:?}")))?;
    let batch_item = BatchLogItem {
        group: log_group_name,
        stream: log_stream_name,
        event: log_event,
    };
    if let Err(e) = LOG_BATCH_SENDER.send(batch_item).await {
        log::error!("Failed to enqueue log batch: {e}");
        return Err(Error::AwsConfig(e.to_string()));
    }
    Ok(())
}
/// Appends one timestamped, formatted entry to
/// `<LOG_FILE_DIR>/<group>/<date-stream>.log`, creating directories as
/// needed. Every 100th write (including the very first) also triggers a
/// best-effort retention cleanup.
#[cfg(feature = "file")]
pub async fn write_log_to_file(
    level: Level,
    message: &str,
    log_stream: LogStream,
    file: &str,
    line: u32,
) -> Result<(), std::io::Error> {
    use std::path::PathBuf;
    use tokio::fs::{OpenOptions, create_dir_all};
    use tokio::io::AsyncWriteExt;
    let group = env::var("LOG_GROUP")
        .or_else(|_| env::var("AWS_LOG_GROUP"))
        .unwrap_or_else(|_| "default".to_string());
    let stream_name = log_stream.with_date();
    // <dir>/<group>/ must exist before the log file can be opened.
    let dir = PathBuf::from(log_file_dir()).join(&group);
    create_dir_all(&dir).await?;
    let log_path = dir.join(format!("{stream_name}.log"));
    let mut handle = OpenOptions::new()
        .create(true)
        .append(true)
        .open(log_path)
        .await?;
    let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S%.3f");
    let body = format_log_entry(level, message, file, line);
    handle
        .write_all(format!("[{timestamp}] {body}\n").as_bytes())
        .await?;
    handle.flush().await?;
    // fetch_add returns the pre-increment count, so this fires on writes
    // number 0, 100, 200, ... — i.e. cleanup also runs on the first write.
    if FILE_WRITE_COUNT.fetch_add(1, Ordering::Relaxed) % 100 == 0 {
        let _ = cleanup_logs().await;
    }
    Ok(())
}
/// Sends one batch of events to CloudWatch, retrying exactly once with a
/// freshly fetched sequence token if the first attempt fails with
/// InvalidSequenceTokenException. On success the returned next-token is
/// cached for the stream.
// NOTE(review): the downcast below expects a bare PutLogEventsError, but the
// SDK usually returns it wrapped in an SdkError — confirm the downcast can
// ever match; otherwise the token-refresh retry path is dead code.
#[cfg(feature = "cloudwatch")]
async fn put_log_events_batch_with_retry(
    client: &CloudWatchLogsClient,
    group: &str,
    stream: &str,
    events: Vec<InputLogEvent>,
) -> Result<(), Box<dyn error::Error + Send + Sync>> {
    let key = format!("{group}::{stream}");
    // Read the cached token under a short-lived read lock.
    let token_opt = {
        let map = NEXT_SEQUENCE_TOKENS.read().await;
        map.get(&key).cloned().unwrap_or(None)
    };
    let mut attempt_count = 1;
    // The clone keeps `events` available for the potential retry below.
    match put_log_events_once(client, group, stream, token_opt, events.clone()).await {
        Ok(new_tok) => {
            update_sequence_token(key, new_tok).await;
            log::debug!("Log batch sent successfully in {attempt_count} attempt(s)");
            Ok(())
        }
        Err(e) => {
            if let Some(PutLogEventsError::InvalidSequenceTokenException(_)) =
                e.downcast_ref::<PutLogEventsError>()
            {
                attempt_count += 1;
                log::debug!(
                    "Invalid sequence token detected for {group}/{stream}, fetching latest token"
                );
                // Ask AWS for the stream's current token and retry once.
                let fresh = fetch_latest_stream_token(client, group, stream).await;
                match put_log_events_once(client, group, stream, fresh.clone(), events).await {
                    Ok(new_tok2) => {
                        update_sequence_token(format!("{group}::{stream}"), new_tok2).await;
                        log::debug!("Log batch sent successfully in {attempt_count} attempt(s)");
                        Ok(())
                    }
                    Err(e2) => {
                        log::error!("Retry after InvalidSequenceTokenException failed: {e2}");
                        Err(e2)
                    }
                }
            } else {
                log::error!("Failed to send log batch: {e}");
                Err(e)
            }
        }
    }
}
/// Issues a single PutLogEvents request (attaching the sequence token when
/// one is supplied) and returns the next sequence token CloudWatch reports.
#[cfg(feature = "cloudwatch")]
async fn put_log_events_once(
    client: &CloudWatchLogsClient,
    group: &str,
    stream: &str,
    sequence_token: Option<String>,
    events: Vec<InputLogEvent>,
) -> Result<Option<String>, Box<dyn error::Error + Send + Sync>> {
    let mut request = client
        .put_log_events()
        .log_group_name(group)
        .log_stream_name(stream);
    if let Some(token) = sequence_token {
        request = request.sequence_token(token);
    }
    // The builder accumulates events one at a time.
    request = events
        .into_iter()
        .fold(request, |req, event| req.log_events(event));
    let response = request.send().await?;
    Ok(response.next_sequence_token().map(String::from))
}
/// Looks up the current upload sequence token for `stream` in `group`.
/// Returns None when the describe call fails, the stream is not found, or
/// the stream has no token yet.
#[cfg(feature = "cloudwatch")]
async fn fetch_latest_stream_token(
    client: &CloudWatchLogsClient,
    group: &str,
    stream: &str,
) -> Option<String> {
    let resp = client
        .describe_log_streams()
        .log_group_name(group)
        .log_stream_name_prefix(stream)
        .send()
        .await
        .ok()?;
    // Prefix search may return several streams; take the exact-name match.
    resp.log_streams()
        .iter()
        .find(|s| s.log_stream_name() == Some(stream))
        .and_then(|s| s.upload_sequence_token())
        .map(String::from)
}
/// Records the latest sequence token for a "<group>::<stream>" key.
#[cfg(feature = "cloudwatch")]
async fn update_sequence_token(key: String, new_tok: Option<String>) {
    NEXT_SEQUENCE_TOKENS.write().await.insert(key, new_tok);
}
/// Ensures `stream` exists inside `group` (creating the group first if
/// needed), consulting and updating an in-process cache so each group/stream
/// pair is only checked against AWS once.
#[cfg(feature = "cloudwatch")]
async fn ensure_log_stream_exists(
    client: &CloudWatchLogsClient,
    group: &str,
    stream: &str,
) -> Result<(), Error> {
    ensure_log_group_exists(client, group).await?;
    let key = format!("{group}::{stream}");
    // Fast path: already verified earlier in this process.
    {
        let read_map = STREAM_EXISTS_CACHE.read().await;
        if let Some(already_exists) = read_map.get(&key)
            && *already_exists
        {
            log::debug!("Log stream '{stream}' already exists in group '{group}' (cache)");
            return Ok(());
        }
    }
    // Ask AWS whether the stream exists (prefix search, then exact match).
    let resp = retry_on_throttling(|| async {
        client
            .describe_log_streams()
            .log_group_name(group)
            .log_stream_name_prefix(stream)
            .send()
            .await
            .map_err(|e| Error::AwsConfig(format!("{e:?}")))
    })
    .await?;
    let found = resp
        .log_streams()
        .iter()
        .any(|s| s.log_stream_name().map(|n| n == stream).unwrap_or(false));
    if !found {
        log::debug!("Creating log stream '{stream}' in group '{group}'");
        if let Err(e) = retry_on_throttling(|| async {
            client
                .create_log_stream()
                .log_group_name(group)
                .log_stream_name(stream)
                .send()
                .await
                .map_err(|e| Error::AwsConfig(format!("{e:?}")))
        })
        .await
        {
            let msg = format!("{e:?}");
            // A concurrent creator beat us to it — still counts as success.
            if msg.contains("ResourceAlreadyExists") {
                log::debug!("Log stream '{stream}' already exists according to AWS");
            } else {
                return Err(Error::AwsConfig(msg));
            }
        }
    }
    // Remember the verified stream for subsequent calls.
    {
        let mut write_map = STREAM_EXISTS_CACHE.write().await;
        write_map.insert(key, true);
    }
    log::debug!("Log stream '{stream}' verified in group '{group}'");
    Ok(())
}
/// Ensures the CloudWatch log group exists, consulting an in-process cache so
/// each group is only described/created against AWS once per process.
#[cfg(feature = "cloudwatch")]
async fn ensure_log_group_exists(client: &CloudWatchLogsClient, group: &str) -> Result<(), Error> {
    // Fast path: verified earlier in this process.
    {
        let read_map = GROUP_EXISTS_CACHE.read().await;
        if let Some(already) = read_map.get(group)
            && *already
        {
            log::debug!("Log group '{group}' already exists (cache)");
            return Ok(());
        }
    }
    // Prefix search; exact-name match is checked below.
    let resp = retry_on_throttling(|| async {
        client
            .describe_log_groups()
            .log_group_name_prefix(group)
            .send()
            .await
            .map_err(|e| Error::AwsConfig(format!("{e:?}")))
    })
    .await?;
    let found = resp
        .log_groups()
        .iter()
        .any(|g| g.log_group_name().map(|n| n == group).unwrap_or(false));
    if !found {
        log::debug!("Creating log group '{group}'");
        if let Err(e) = retry_on_throttling(|| async {
            client
                .create_log_group()
                .log_group_name(group)
                .send()
                .await
                .map_err(|e| Error::AwsConfig(format!("{e:?}")))
        })
        .await
        {
            let msg = format!("{e:?}");
            // A concurrent creator won the race — treat as success.
            if msg.contains("ResourceAlreadyExists") {
                log::debug!("Log group '{group}' already exists according to AWS");
            } else {
                return Err(Error::AwsConfig(msg));
            }
        }
    }
    // Cache the verified group for later calls.
    {
        let mut write_map = GROUP_EXISTS_CACHE.write().await;
        write_map.insert(group.to_string(), true);
    }
    log::debug!("Log group '{group}' verified");
    Ok(())
}
/// Background task: accumulates incoming events per (group, stream) and
/// flushes a batch either when it reaches BATCH_SIZE or when the
/// BATCH_TIMEOUT interval ticks. Runs until the channel closes.
// NOTE(review): each flush is tokio::spawn'ed, so two batches for the same
// stream can race and arrive out of order — confirm that is acceptable for
// the sequence-token handling downstream.
#[cfg(feature = "cloudwatch")]
async fn process_log_batches(mut rx: tokio::sync::mpsc::Receiver<BatchLogItem>) {
    use std::collections::HashMap;
    use tokio::time;
    // Pending events, keyed by (log group, log stream).
    let mut batches: HashMap<(String, String), Vec<InputLogEvent>> = HashMap::new();
    let mut interval = time::interval(*BATCH_TIMEOUT);
    loop {
        tokio::select! {
            maybe_item = rx.recv() => {
                if let Some(item) = maybe_item {
                    let key = (item.group, item.stream);
                    batches.entry(key.clone()).or_default().push(item.event);
                    // Size-triggered flush for this one stream.
                    if let Some(events) = batches.get(&key)
                        && events.len() >= *BATCH_SIZE
                    {
                        let events_to_send = batches.remove(&key).unwrap();
                        let client = GLOBAL_CLIENT.clone();
                        tokio::spawn(async move {
                            let _ = put_log_events_batch_with_retry(&client, &key.0, &key.1, events_to_send).await;
                        });
                    }
                } else {
                    // All senders dropped: stop the task.
                    break;
                }
            },
            _ = interval.tick() => {
                // Time-triggered flush of everything pending.
                for (key, events) in batches.drain() {
                    if !events.is_empty() {
                        let client = GLOBAL_CLIENT.clone();
                        tokio::spawn(async move {
                            let _ = put_log_events_batch_with_retry(&client, &key.0, &key.1, events).await;
                        });
                    }
                }
            },
        }
    }
}
/// Best-effort retention pass over the log directory tree:
/// 1. deletes files older than LOG_RETENTION_DAYS, then
/// 2. if the remaining total exceeds LOG_RETENTION_SIZE_MB, deletes the
///    oldest files until roughly LOG_DELETE_BATCH_MB has been freed.
// NOTE(review): step 2 removes at most one LOG_DELETE_BATCH_MB batch per
// call, so a single pass may leave the directory above the size cap —
// confirm that gradual convergence across passes is the intended behavior.
#[cfg(feature = "file")]
pub async fn cleanup_logs() -> Result<(), std::io::Error> {
    use std::path::PathBuf;
    use tokio::fs;
    use tokio::fs::read_dir;
    let root = PathBuf::from(log_file_dir());
    // Nothing to do if the log root does not exist yet.
    if fs::metadata(&root).await.is_err() {
        return Ok(());
    }
    // Walk the tree iteratively, collecting (path, mtime, size) per file.
    let mut to_visit = vec![root];
    let mut files: Vec<(PathBuf, std::time::SystemTime, u64)> = Vec::new();
    while let Some(dir) = to_visit.pop() {
        let mut entries = match read_dir(&dir).await {
            Ok(e) => e,
            Err(_) => continue,
        };
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            let ty = entry.file_type().await?;
            if ty.is_dir() {
                to_visit.push(path);
            } else if ty.is_file()
                && let Ok(meta) = entry.metadata().await
            {
                // Files with unreadable mtimes sort as oldest (UNIX_EPOCH).
                let modified = meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH);
                files.push((path, modified, meta.len()));
            }
        }
    }
    // Pass 1: age-based deletion.
    let retention_duration = std::time::Duration::from_secs(*LOG_RETENTION_DAYS * 24 * 60 * 60);
    let now = std::time::SystemTime::now();
    let mut kept: Vec<(PathBuf, std::time::SystemTime, u64)> = Vec::new();
    for (p, m, s) in files {
        if now.duration_since(m).unwrap_or_default() > retention_duration {
            let _ = fs::remove_file(p).await;
        } else {
            kept.push((p, m, s));
        }
    }
    // Pass 2: size-based deletion, oldest files first.
    kept.sort_by_key(|(_, m, _)| *m);
    let mut total_size: u64 = kept.iter().map(|(_, _, s)| *s).sum();
    let max_bytes = *LOG_RETENTION_SIZE_MB * 1024 * 1024;
    let delete_bytes = *LOG_DELETE_BATCH_MB * 1024 * 1024;
    if total_size > max_bytes {
        let mut removed: u64 = 0;
        for (p, _m, s) in kept {
            if removed >= delete_bytes {
                break;
            }
            if fs::remove_file(&p).await.is_ok() {
                removed += s;
                total_size = total_size.saturating_sub(s);
            }
        }
    }
    Ok(())
}
/// Initializes env_logger from the default environment (RUST_LOG) and
/// installs a panic hook that prints "PANIC => ..." with the panic's
/// location and message to stderr.
///
/// The hook no longer calls `location().unwrap()`: `location()` can return
/// `None`, and panicking inside a panic hook aborts the process, so both the
/// location and the payload are handled defensively.
pub fn initialize_logs() {
    Builder::from_default_env().init();
    std::panic::set_hook(Box::new(|panic_info| {
        // Panic payloads are almost always &str or String; anything else is
        // reported as a non-string message.
        let payload = panic_info.payload();
        let message: Option<&str> = if let Some(s) = payload.downcast_ref::<&str>() {
            Some(s)
        } else if let Some(s) = payload.downcast_ref::<String>() {
            Some(s.as_str())
        } else {
            None
        };
        let panic_message = match (panic_info.location(), message) {
            (Some(loc), Some(msg)) => format!(
                "Panic in file '{}' at line {}: {}",
                loc.file(),
                loc.line(),
                msg
            ),
            (Some(loc), None) => format!(
                "Panic occurred in file '{}' at line {}. The panic message is not a string.",
                loc.file(),
                loc.line()
            ),
            (None, Some(msg)) => format!("Panic at unknown location: {msg}"),
            (None, None) => {
                "Panic occurred at an unknown location. The panic message is not a string."
                    .to_string()
            }
        };
        eprintln!("PANIC => {panic_message}");
    }));
}
/// No-op fallback when built without the `cloudwatch` feature; always Ok.
#[cfg(not(feature = "cloudwatch"))]
pub async fn custom_cloudwatch_log(
    _level: Level,
    _message: &str,
    _log_stream: LogStream,
    _file: &str,
    _line: u32,
) -> Result<(), Error> {
    Ok(())
}
/// No-op fallback when built without the `file` feature; always Ok.
#[cfg(not(feature = "file"))]
pub async fn write_log_to_file(
    _level: Level,
    _message: &str,
    _log_stream: LogStream,
    _file: &str,
    _line: u32,
) -> Result<(), std::io::Error> {
    Ok(())
}
/// No-op fallback when built without the `file` feature; always Ok.
#[cfg(not(feature = "file"))]
pub async fn cleanup_logs() -> Result<(), std::io::Error> {
    Ok(())
}
/// Forwards one entry to the HTTP log backend's queue.
#[cfg(feature = "http")]
pub async fn queue_http_log(
    level: Level,
    message: &str,
    log_stream: LogStream,
    file: &str,
    line: u32,
) {
    crate::http_backend::queue_http_log(level, message, log_stream, file, line).await;
}
/// No-op fallback when built without the `http` feature.
#[cfg(not(feature = "http"))]
pub async fn queue_http_log(
    _level: Level,
    _message: &str,
    _log_stream: LogStream,
    _file: &str,
    _line: u32,
) {
}