1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
use chrono::Local;
use std::path::PathBuf;
use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc::{self, Sender};
use crate::size_parser::parse_size;
/// Logger that can write to both console and file with built-in log rotation
pub struct Logger {
// Channel into the background writer task spawned by `Logger::new`; each
// string sent is one log message, printed to stdout and appended to the
// log file when one is configured.
log_sender: Sender<String>,
}
impl Logger {
    /// Creates a new logger with optional log file path, rotation size, and maximum number of rotated files.
    ///
    /// `log_size_str` is a human-readable size (e.g. "10MB", "100B") parsed by
    /// `parse_size`; on parse failure a 10MB default is used. When `log_path` is
    /// `None`, messages go to the console only. `max_log_files` limits how many
    /// rotated files are kept (0 = unlimited). A size of 0 disables rotation.
    pub async fn new(log_path: Option<PathBuf>, log_size_str: String, max_log_files: u32) -> Self {
        // Parse the human-readable size format, falling back to 10MB on error.
        let max_size_bytes = match parse_size(&log_size_str) {
            Ok(size) => size,
            Err(e) => {
                eprintln!("Error parsing log rotation size '{log_size_str}': {e:?}");
                eprintln!("Using default size of 10MB");
                10 * 1024 * 1024 // Default to 10MB if parsing fails
            }
        };
        // Channel feeding the background writer task; bounded so a stalled
        // writer applies backpressure instead of buffering without limit.
        let (log_sender, mut log_receiver) = mpsc::channel::<String>(100);
        let task_log_path = log_path.clone();
        let max_files = max_log_files; // u32 is Copy; captured by the task below
        // Open the log file up front so startup errors surface immediately.
        let log_file = if let Some(path) = log_path {
            match tokio::fs::OpenOptions::new()
                .create(true)
                .append(true)
                .open(&path)
                .await
            {
                Ok(file) => Some(file),
                Err(e) => {
                    eprintln!("Error opening log file: {e}");
                    None
                }
            }
        } else {
            None
        };
        // Background task: receives messages, echoes each to stdout, appends to
        // the file, and rotates the file when it would exceed max_size_bytes.
        tokio::spawn(async move {
            let mut file = log_file;
            // Start size tracking from the existing file length so rotation
            // keeps working across process restarts.
            let mut current_size: u64 = match file {
                Some(ref f) => f.metadata().await.map(|m| m.len()).unwrap_or(0),
                None => 0,
            };
            while let Some(message) = log_receiver.recv().await {
                // Always print to console.
                println!("{message}");
                // File logging only when a file is configured.
                if file.is_none() {
                    continue;
                }
                let message_with_newline = format!("{message}\n");
                let message_bytes = message_with_newline.as_bytes();
                // Rotate before writing if this message would push the file
                // past the size limit (max_size_bytes == 0 disables rotation).
                if max_size_bytes > 0
                    && current_size + message_bytes.len() as u64 > max_size_bytes
                {
                    if let Some(path) = task_log_path.as_ref() {
                        // Close the current handle before renaming (some
                        // platforms refuse to rename an open file).
                        drop(file);
                        // Timestamped name for the rotated file,
                        // e.g. "app.log.20250101_120000".
                        let timestamp = Local::now().format("%Y%m%d_%H%M%S").to_string();
                        let rotated_path = path.with_file_name(format!(
                            "{}.{}",
                            path.file_name().unwrap().to_string_lossy(),
                            timestamp
                        ));
                        // Rename the current log file to the rotated filename.
                        if let Err(e) = tokio::fs::rename(path, &rotated_path).await {
                            eprintln!("Error rotating log file: {e}");
                        } else {
                            println!("Rotated log file to: {}", rotated_path.display());
                            // Prune old rotated files if a limit is configured.
                            if max_files > 0 {
                                if let Err(e) = cleanup_old_log_files(path, max_files).await {
                                    eprintln!("Error cleaning up old log files: {e}");
                                }
                            }
                        }
                        // Reopen the log path. If the rename above failed, this
                        // reopens the old oversized file, so take the size from
                        // the file itself instead of assuming 0 — otherwise the
                        // size accounting drifts and the file can grow well
                        // past the configured limit.
                        match tokio::fs::OpenOptions::new()
                            .create(true)
                            .append(true)
                            .open(path)
                            .await
                        {
                            Ok(new_file) => {
                                current_size =
                                    new_file.metadata().await.map(|m| m.len()).unwrap_or(0);
                                file = Some(new_file);
                            }
                            Err(e) => {
                                eprintln!("Error creating new log file after rotation: {e}");
                                file = None;
                            }
                        }
                    }
                }
                // Write to the log file (either existing or newly rotated).
                if let Some(ref mut f) = file {
                    if let Err(e) = f.write_all(message_bytes).await {
                        eprintln!("Error writing to log file: {e}");
                    } else {
                        current_size += message_bytes.len() as u64;
                    }
                    // Flush so messages are visible promptly; errors are non-fatal.
                    let _ = f.flush().await;
                }
            }
        });
        Self { log_sender }
    }
    /// Logs a message to both console and file (if configured).
    pub async fn log(&self, message: &str) {
        // Forward to the writer task; if the channel is closed, fall back to
        // direct console output so the message is not lost.
        if let Err(e) = self.log_sender.send(message.to_string()).await {
            eprintln!("Failed to send log message: {e}");
            // Fallback to direct console output
            println!("{message}");
        }
    }
}
/// Function to clean up old log files when max limit is reached
/// Removes the oldest rotated log files so that at most `max_files` remain.
///
/// Rotated files are identified as entries in the log file's directory whose
/// names start with the base log filename but are not the base file itself
/// (e.g. `app.log.20250101_120000` for `app.log`). When more than `max_files`
/// such files exist, the oldest ones by modification time are deleted.
///
/// # Errors
/// Returns an error if `log_path` has no filename component, the directory
/// cannot be read, or a file cannot be removed.
pub async fn cleanup_old_log_files(
    log_path: &std::path::Path,
    max_files: u32,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Base filename used to recognize rotated siblings.
    let base_name = log_path
        .file_name()
        .ok_or("Invalid log path")?
        .to_string_lossy()
        .to_string();
    // Directory containing the log file (current dir if the path is bare).
    let dir_path = log_path.parent().unwrap_or(std::path::Path::new("."));
    // Collect (mtime, path) pairs for every rotated file. Capturing the
    // modification time once here — via tokio's async metadata — avoids both
    // blocking std::fs calls inside an async fn and re-stat'ing every file
    // O(n log n) times inside the sort comparator below.
    let mut entries: Vec<(std::time::SystemTime, std::path::PathBuf)> = Vec::new();
    let mut dir = tokio::fs::read_dir(dir_path).await?;
    while let Some(entry) = dir.next_entry().await? {
        let path = entry.path();
        if let Some(filename) = path.file_name() {
            let filename_str = filename.to_string_lossy();
            // Check if this is a rotated log file for our base log file.
            if filename_str.starts_with(&base_name) && filename_str != base_name {
                // Files whose mtime cannot be read sort as oldest.
                let modified = match entry.metadata().await {
                    Ok(meta) => meta
                        .modified()
                        .unwrap_or(std::time::SystemTime::UNIX_EPOCH),
                    Err(_) => std::time::SystemTime::UNIX_EPOCH,
                };
                entries.push((modified, path));
            }
        }
    }
    // If we have more files than the max allowed, delete the oldest ones.
    if entries.len() > max_files as usize {
        // Sort by modification time, oldest first.
        entries.sort_by_key(|(modified, _)| *modified);
        let files_to_delete = entries.len() - max_files as usize;
        for (_, path) in entries.iter().take(files_to_delete) {
            println!("Deleting old log file: {}", path.display());
            tokio::fs::remove_file(path).await?;
        }
    }
    Ok(())
}
// Integration-style tests for the logger. These rely on real filesystem I/O
// plus fixed sleeps to let the background writer task catch up, so they are
// inherently timing-sensitive and may be flaky on heavily loaded machines.
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::io::Write;
use std::time::Duration;
use tempfile::tempdir;
use tokio::time::sleep;
// Constructing a console-only logger should succeed and expose a channel
// with at least the configured capacity of 100.
#[tokio::test]
async fn test_logger_creation() {
let logger = Logger::new(None, "10MB".to_string(), 0).await;
assert!(logger.log_sender.capacity() >= 100);
}
#[tokio::test]
async fn test_log_to_console_only() {
let logger = Logger::new(None, "10MB".to_string(), 0).await;
// This only tests that the operation doesn't panic
logger.log("Test log message").await;
// We can't easily verify console output in a test
}
// Verifies that a logged message actually lands in the configured file.
#[tokio::test]
async fn test_log_to_file() {
// Create a temporary directory that will be deleted at the end of the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let log_path = temp_dir.path().join("test.log");
// Create a logger with a small max size to trigger rotation quickly
let logger = Logger::new(Some(log_path.clone()), "100B".to_string(), 3).await;
// Allow some time for the log file to be created
sleep(Duration::from_millis(100)).await;
// Log a message that should go to the file
logger.log("Test message to file").await;
// Allow some time for the write to complete
sleep(Duration::from_millis(100)).await;
// Verify the message was written to the file
let content = fs::read_to_string(&log_path).expect("Failed to read log file");
assert!(content.contains("Test message to file"));
}
// Pre-fills the log file near the 100-byte threshold, then logs one more
// message and asserts a timestamped rotated file appears.
#[tokio::test]
async fn test_log_rotation() {
// Create a temporary directory
let temp_dir = tempdir().expect("Failed to create temp directory");
let log_path = temp_dir.path().join("rotation_test.log");
// Create file with initial content to have a specific size
{
let mut file = fs::File::create(&log_path).expect("Failed to create log file");
// Write 90 bytes to the file
file.write_all(&[b'x'; 90])
.expect("Failed to write to log file");
}
// Create a logger with a 100 byte rotation threshold
let logger = Logger::new(Some(log_path.clone()), "100B".to_string(), 3).await;
// Allow some time for the logger to initialize
sleep(Duration::from_millis(100)).await;
// Log a message that should trigger rotation (> 10 bytes)
logger.log("This should trigger rotation").await;
// Allow some time for rotation to occur
sleep(Duration::from_millis(500)).await;
// Check if rotation created a new file
let mut rotated_found = false;
for entry in fs::read_dir(temp_dir.path()).expect("Failed to read directory") {
let entry = entry.expect("Failed to read directory entry");
let filename = entry.file_name().to_string_lossy().to_string();
// Rotated files are named "<base>.<timestamp>", so match on the prefix.
if filename.starts_with("rotation_test.log.") {
rotated_found = true;
break;
}
}
assert!(rotated_found, "Log rotation did not create a rotated file");
}
// Exercises cleanup_old_log_files directly, then verifies the logger's own
// rotation path also keeps the rotated-file count at the configured limit.
#[tokio::test]
async fn test_max_log_files() {
// Create a temporary directory
let temp_dir = tempdir().expect("Failed to create temp directory");
let log_path = temp_dir.path().join("max_test.log");
// Create initial log file
{
let mut file = fs::File::create(&log_path).expect("Failed to create log file");
file.write_all(b"initial content")
.expect("Failed to write to log file");
}
// Create 5 rotated log files
for i in 1..=5 {
let rotated_path = temp_dir
.path()
.join(format!("max_test.log.2025051{i}_120000"));
fs::write(&rotated_path, format!("rotated content {i}"))
.expect("Failed to create rotated file");
// Set modification time to ensure proper ordering
// Note: This is OS-specific and might not work in all environments
#[cfg(unix)]
{
// Stagger mtimes one hour apart so "oldest" is well-defined for cleanup.
let now = std::time::SystemTime::now();
let seconds =
now.duration_since(std::time::UNIX_EPOCH).unwrap().as_secs() - (5 - i) * 3600;
#[allow(clippy::cast_possible_wrap)]
let times = filetime::FileTime::from_unix_time(seconds as i64, 0);
filetime::set_file_mtime(&rotated_path, times).expect("Failed to set file time");
}
}
// Manually run cleanup with max 3 rotated files
cleanup_old_log_files(&log_path, 3)
.await
.expect("Failed to clean up log files");
// Count rotated files after manual cleanup
let rotated_count = count_rotated_files(temp_dir.path(), "max_test.log").await;
assert_eq!(
rotated_count, 3,
"Incorrect number of files after manual cleanup"
);
// Create a new logger with max 3 rotated files
let logger = Logger::new(Some(log_path.clone()), "100B".to_string(), 3).await;
// Log enough data to trigger a rotation
for i in 0..30 {
logger
.log(&format!("Log message {i} to trigger rotation"))
.await;
// Small delay to ensure logs are processed
tokio::time::sleep(Duration::from_millis(10)).await;
}
// Allow time for cleanup to finish
tokio::time::sleep(Duration::from_millis(500)).await;
// Count rotated files again after rotation
let final_count = count_rotated_files(temp_dir.path(), "max_test.log").await;
// Should still be 3 rotated files (max_files setting)
assert_eq!(
final_count, 3,
"Incorrect number of rotated files after logger rotation"
);
}
// Helper function to count rotated log files
async fn count_rotated_files(dir_path: &std::path::Path, base_name: &str) -> usize {
let mut count = 0;
let mut dir = tokio::fs::read_dir(dir_path)
.await
.expect("Failed to read directory");
while let Some(entry) = dir
.next_entry()
.await
.expect("Failed to get directory entry")
{
let path = entry.path();
if let Some(file_name) = path.file_name() {
let name = file_name.to_string_lossy();
// Count only rotated log files (with a timestamp), not the base log file
if name.starts_with(base_name) && name != base_name {
count += 1;
}
}
}
count
}
}