// scirs2_core/observability/audit/storage.rs

//! Log file storage for the audit subsystem: writing, size-based rotation,
//! retention cleanup, archival, and optional hash chaining for tamper evidence.

use crate::error::CoreError;
use crate::observability::audit::types::{AuditConfig, AuditEvent};
use chrono::{DateTime, Utc};
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::Path;

#[cfg(feature = "crypto")]
use sha2::{Digest, Sha256};

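/// Manages audit log files on disk: appends events to the current file,
/// rotates on size, maintains an optional in-memory hash chain, and applies
/// the configured retention policy.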
pub struct LogFileManager {
    pub config: AuditConfig,
    pub current_file: Option<File>,
    pub current_file_size: u64,
    pub file_counter: u64,
    pub last_event_hash: Option<String>,
    pub hash_chain: Vec<String>,
}

impl LogFileManager {
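    /// Creates a manager for the given configuration, ensuring the log
    /// directory exists on disk. No log file is opened until the first write
    /// forces a rotation.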
    pub fn new(config: AuditConfig) -> Result<Self, CoreError> {
        // Make sure the log directory exists before any file is opened.
        std::fs::create_dir_all(&config.log_directory).map_err(|e| {
            CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                "Failed to create log directory: {e}"
            )))
        })?;

        Ok(Self {
            config,
            current_file: None,
            current_file_size: 0,
            file_counter: 0,
            last_event_hash: None,
            hash_chain: Vec::new(),
        })
    }

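    /// Appends one event to the current log file, rotating first when the
    /// write would push the file past `max_file_size`. With hash chaining
    /// enabled, the event is mutated to record its own hash and its
    /// predecessor's before serialization.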
    pub fn write_event(&mut self, event: &mut AuditEvent) -> Result<(), CoreError> {
        // Extend the tamper-evidence chain before serializing, so the stored
        // record carries both its own hash and the previous event's.
        if self.config.enable_hash_chain {
            event.previous_hash = self.last_event_hash.clone();
            let event_hash = self.calculate_event_hash(event)?;
            event.event_hash = Some(event_hash.clone());
            self.last_event_hash = Some(event_hash.clone());
            self.hash_chain.push(event_hash);
        }

        let serialized = if self.config.enable_json_format {
            self.serialize_json(event)?
        } else {
            self.serialize_text(event)
        };

        let data = format!("{serialized}\n");
        let data_size = data.len() as u64;

        // Rotate before a write that would exceed the size limit.
        if self.current_file.is_none()
            || self.current_file_size + data_size > self.config.max_file_size
        {
            self.rotate_log_file()?;
        }

        if let Some(ref mut file) = self.current_file {
            file.write_all(data.as_bytes()).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to write to log file: {e}"
                )))
            })?;

            self.current_file_size += data_size;
        }

        Ok(())
    }

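    /// Flushes and closes the current file, opens a fresh
    /// `audit_<timestamp>_<counter>.log`, and prunes files beyond `max_files`.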
    pub fn rotate_log_file(&mut self) -> Result<(), CoreError> {
        // Flush and drop the current file handle, if any.
        if let Some(mut file) = self.current_file.take() {
            file.flush().map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to flush log file: {e}"
                )))
            })?;
        }

        // Name the new file after the current UTC time plus a counter, so
        // rotations within the same second stay distinct.
        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
        let filename = format!("audit_{timestamp}_{:06}.log", self.file_counter);
        let filepath = self.config.log_directory.join(filename);

        let file = OpenOptions::new()
            .create(true)
            .append(true)
            .open(&filepath)
            .map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to create log file: {e}"
                )))
            })?;

        self.current_file = Some(file);
        self.current_file_size = 0;
        self.file_counter += 1;

        self.cleanup_old_files()?;

        Ok(())
    }

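    /// Removes the oldest `audit_*.log` files (by modification time) until at
    /// most `config.max_files` remain in the log directory.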
    pub fn cleanup_old_files(&self) -> Result<(), CoreError> {
        let mut log_files = Vec::new();

        // Collect every audit log file together with its modification time.
        if let Ok(entries) = std::fs::read_dir(&self.config.log_directory) {
            for entry in entries.flatten() {
                if let Some(filename) = entry.file_name().to_str() {
                    if filename.starts_with("audit_") && filename.ends_with(".log") {
                        if let Ok(metadata) = entry.metadata() {
                            log_files.push((
                                entry.path(),
                                metadata
                                    .modified()
                                    .unwrap_or(std::time::SystemTime::UNIX_EPOCH),
                            ));
                        }
                    }
                }
            }
        }

        // Oldest first, so the files at the front are the ones removed.
        log_files.sort_by_key(|(_, time)| *time);

        if log_files.len() > self.config.max_files {
            let files_to_remove = log_files.len() - self.config.max_files;
            for (path, _) in log_files.iter().take(files_to_remove) {
                if let Err(e) = std::fs::remove_file(path) {
                    eprintln!("Failed to remove old log file {path:?}: {e}");
                }
            }
        }

        Ok(())
    }

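    /// Serializes an event to a single JSON line. Requires the
    /// `serialization` feature.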
    #[cfg(feature = "serialization")]
    pub fn serialize_json(&self, event: &AuditEvent) -> Result<String, CoreError> {
        serde_json::to_string(event).map_err(|e| {
            CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                "Failed to serialize event to JSON: {e}"
            )))
        })
    }

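    /// Fallback used when the `serialization` feature is disabled: always
    /// returns an error, since JSON output is unavailable.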
    #[cfg(not(feature = "serialization"))]
    pub fn serialize_json(&self, _event: &AuditEvent) -> Result<String, CoreError> {
        Err(CoreError::ComputationError(
            crate::error::ErrorContext::new(
                "JSON serialization requires serde feature".to_string(),
            ),
        ))
    }

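    /// Formats an event as a single human-readable text line, substituting
    /// `-` for missing user and resource identifiers.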
    #[must_use]
    pub fn serialize_text(&self, event: &AuditEvent) -> String {
        format!(
            "[{}] {} {} {} user={} resource={} outcome={} description=\"{}\"",
            event.timestamp.format("%Y-%m-%d %H:%M:%S UTC"),
            event.category.as_str(),
            event.severity.as_str(),
            event.action,
            event.userid.as_deref().unwrap_or("-"),
            event.resourceid.as_deref().unwrap_or("-"),
            event.outcome.as_str(),
            event.description
        )
    }

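    /// Flushes any buffered writes in the current log file to disk.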
    pub fn flush(&mut self) -> Result<(), CoreError> {
        if let Some(ref mut file) = self.current_file {
            file.flush().map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to flush log file: {e}"
                )))
            })?;
        }
        Ok(())
    }

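    /// Computes a SHA-256 digest (64 hex characters) over the event's
    /// identifying fields, chained to the previous event's hash when present.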
    #[cfg(feature = "crypto")]
    pub fn calculate_event_hash(&self, event: &AuditEvent) -> Result<String, CoreError> {
        let mut hasher = Sha256::new();

        // Hash the stable identifying fields in a fixed order.
        hasher.update(event.event_id.to_string());
        hasher.update(event.timestamp.to_rfc3339());
        hasher.update(event.category.as_str());
        hasher.update(&event.action);

        if let Some(ref userid) = event.userid {
            hasher.update(userid);
        }

        if let Some(ref resourceid) = event.resourceid {
            hasher.update(resourceid);
        }

        hasher.update(&event.description);
        hasher.update(event.outcome.as_str());

        // Chain to the previous event so retroactive edits are detectable.
        if let Some(ref prev_hash) = event.previous_hash {
            hasher.update(prev_hash);
        }

        let result = hasher.finalize();
        Ok(format!("{:x}", result))
    }

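    /// Non-cryptographic fallback used when the `crypto` feature is disabled:
    /// hashes a subset of fields with `DefaultHasher` and returns a
    /// zero-padded 16-hex-character digest. This detects accidental
    /// corruption only; it provides no tamper evidence.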
    #[cfg(not(feature = "crypto"))]
    pub fn calculate_event_hash(&self, event: &AuditEvent) -> Result<String, CoreError> {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        event.event_id.hash(&mut hasher);
        event.timestamp.timestamp().hash(&mut hasher);
        event.category.as_str().hash(&mut hasher);
        event.action.hash(&mut hasher);

        // Zero-pad so every digest has the same width (u64 = 16 hex chars),
        // matching the length check in `verify_hash_chain`.
        Ok(format!("{:016x}", hasher.finish()))
    }

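    /// Checks the shape of the in-memory hash chain: every entry must be a
    /// well-formed hex digest of the expected width, and every link must have
    /// a non-empty predecessor. It does not re-hash the underlying events.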
    pub fn verify_hash_chain(&self) -> Result<bool, CoreError> {
        if !self.config.enable_hash_chain {
            return Ok(true);
        }

        if self.hash_chain.is_empty() {
            return Ok(true);
        }

        // SHA-256 digests are 64 hex characters; the non-crypto fallback
        // (`DefaultHasher`) produces 16.
        #[cfg(feature = "crypto")]
        const EXPECTED_HASH_LEN: usize = 64;
        #[cfg(not(feature = "crypto"))]
        const EXPECTED_HASH_LEN: usize = 16;

        for (i, hash) in self.hash_chain.iter().enumerate() {
            if hash.len() != EXPECTED_HASH_LEN {
                return Ok(false);
            }

            if !hash.chars().all(|c| c.is_ascii_hexdigit()) {
                return Ok(false);
            }

            // Every non-first entry must follow a non-empty predecessor.
            if i > 0 {
                let prev_hash = &self.hash_chain[i - 1];
                if prev_hash.is_empty() {
                    return Ok(false);
                }
            }
        }

        Ok(true)
    }

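    /// Copies `audit_*.log` files older than `active_retention_days` into the
    /// archive directory (compressing them when the `compression` feature is
    /// enabled), then removes the originals.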
    #[allow(dead_code)]
    pub fn archive_old_files(&self) -> Result<(), CoreError> {
        if !self.config.retention_policy.enable_auto_archive {
            return Ok(());
        }

        let cutoff_date = Utc::now()
            - chrono::Duration::days(self.config.retention_policy.active_retention_days as i64);

        // Default to `<log_directory>/archive` when no archive path is set.
        let archive_path = self
            .config
            .retention_policy
            .archive_path
            .as_ref()
            .cloned()
            .unwrap_or_else(|| self.config.log_directory.join("archive"));

        std::fs::create_dir_all(&archive_path).map_err(|e| {
            CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                "Failed to create archive directory: {e}"
            )))
        })?;

        if let Ok(entries) = std::fs::read_dir(&self.config.log_directory) {
            for entry in entries.flatten() {
                if let Some(filename) = entry.file_name().to_str() {
                    if filename.starts_with("audit_") && filename.ends_with(".log") {
                        if let Ok(metadata) = entry.metadata() {
                            if let Ok(modified_time) = metadata.modified() {
                                let modified_datetime: DateTime<Utc> = modified_time.into();

                                if modified_datetime < cutoff_date {
                                    // Copy first; remove the original only
                                    // once the copy has succeeded.
                                    let source_path = entry.path();
                                    let archive_filename = format!("archived_{filename}");
                                    let dest_path = archive_path.join(archive_filename);

                                    if let Err(e) = std::fs::copy(&source_path, &dest_path) {
                                        eprintln!("Failed to archive file {source_path:?}: {e}");
                                        continue;
                                    }

                                    #[cfg(feature = "compression")]
                                    {
                                        if let Err(e) = self.compress_archived_file(&dest_path) {
                                            eprintln!(
                                                "Failed to compress archived file {:?}: {}",
                                                dest_path, e
                                            );
                                        }
                                    }

                                    if let Err(e) = std::fs::remove_file(&source_path) {
                                        eprintln!(
                                            "Failed to remove original file {source_path:?}: {e}"
                                        );
                                    } else {
                                        println!(
                                            "Archived log file: {source_path:?} -> {dest_path:?}"
                                        );
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }

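    /// Deletes archived files older than `archive_retention_days`; then, if
    /// free space on the log filesystem is below `min_free_space`, removes the
    /// oldest log files (active and archived) until enough space is freed.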
    #[allow(dead_code)]
    pub fn cleanup_expired_files(&self) -> Result<(), CoreError> {
        if !self.config.retention_policy.enable_auto_delete {
            return Ok(());
        }

        let archive_cutoff = Utc::now()
            - chrono::Duration::days(self.config.retention_policy.archive_retention_days as i64);

        let archive_path = self
            .config
            .retention_policy
            .archive_path
            .as_ref()
            .cloned()
            .unwrap_or_else(|| self.config.log_directory.join("archive"));

        // Pass 1: delete archived files past their retention window.
        if archive_path.exists() {
            if let Ok(entries) = std::fs::read_dir(&archive_path) {
                for entry in entries.flatten() {
                    if let Some(filename) = entry.file_name().to_str() {
                        if filename.starts_with("archived_audit_") {
                            if let Ok(metadata) = entry.metadata() {
                                if let Ok(modified_time) = metadata.modified() {
                                    let modified_datetime: DateTime<Utc> = modified_time.into();

                                    if modified_datetime < archive_cutoff {
                                        let file_path = entry.path();
                                        if let Err(e) = std::fs::remove_file(&file_path) {
                                            eprintln!(
                                                "Failed to delete expired archive file {file_path:?}: {e}"
                                            );
                                        } else {
                                            println!("Deleted expired archive file: {file_path:?}");
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // Pass 2: if the disk is still too full, remove the oldest log files
        // until at least `min_free_space` bytes are available.
        let min_free_space = self.config.retention_policy.min_free_space;
        if let Ok(available_space) = self.get_available_disk_space(&self.config.log_directory) {
            if available_space < min_free_space {
                let mut log_files = Vec::new();

                for dir in [&self.config.log_directory, &archive_path] {
                    if dir.exists() {
                        if let Ok(entries) = std::fs::read_dir(dir) {
                            for entry in entries.flatten() {
                                if let Some(filename) = entry.file_name().to_str() {
                                    if filename.contains("audit_") && filename.ends_with(".log") {
                                        if let Ok(metadata) = entry.metadata() {
                                            if let Ok(modified_time) = metadata.modified() {
                                                log_files.push((entry.path(), modified_time));
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                // Oldest first.
                log_files.sort_by_key(|(_, time)| *time);

                for (file_path, _) in log_files {
                    if let Err(e) = std::fs::remove_file(&file_path) {
                        eprintln!("Failed to remove file for disk space: {file_path:?}: {e}");
                    } else {
                        println!("Removed file to free disk space: {file_path:?}");

                        // Stop as soon as enough space has been reclaimed.
                        if let Ok(new_available) =
                            self.get_available_disk_space(&self.config.log_directory)
                        {
                            if new_available >= min_free_space {
                                break;
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }

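    /// Gzip-compresses an archived file in place, producing `<name>.log.gz`
    /// and removing the uncompressed original on success. Requires the
    /// `flate2` feature in addition to `compression`.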
    #[cfg(feature = "compression")]
    pub fn compress_archived_file(&self, file_path: &Path) -> Result<(), CoreError> {
        // Without a gzip backend there is nothing to do; fail before touching
        // the filesystem so no empty `.gz` file is left behind.
        #[cfg(not(feature = "flate2"))]
        {
            let _ = file_path;
            Err(CoreError::ComputationError(
                crate::error::ErrorContext::new("Compression requires flate2 feature".to_string()),
            ))
        }

        #[cfg(feature = "flate2")]
        {
            use flate2::write::GzEncoder;
            use flate2::Compression;
            use std::fs::File;
            use std::io::{copy, BufReader, BufWriter};

            let input_file = File::open(file_path).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to open file for compression: {e}"
                )))
            })?;

            let compressed_path = file_path.with_extension("log.gz");
            let output_file = File::create(&compressed_path).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to create compressed file: {e}"
                )))
            })?;

            let mut reader = BufReader::new(input_file);
            let writer = BufWriter::new(output_file);

            let mut encoder = GzEncoder::new(writer, Compression::default());
            copy(&mut reader, &mut encoder).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to compress file: {e}"
                )))
            })?;

            encoder.finish().map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to finalize compression: {e}"
                )))
            })?;

            // Remove the original only after the compressed copy is complete.
            std::fs::remove_file(file_path).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to remove original file after compression: {e}"
                )))
            })?;

            println!("Compressed archive file: {file_path:?} -> {compressed_path:?}");
            Ok(())
        }
    }

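    /// Returns the available space in bytes on the filesystem containing
    /// `path`, via `statvfs` when the `libc` feature is enabled.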
    pub fn get_available_disk_space(&self, path: &Path) -> Result<u64, CoreError> {
        #[cfg(feature = "libc")]
        {
            use std::ffi::CString;
            use std::mem;

            let path_cstr = CString::new(path.to_string_lossy().as_bytes()).map_err(|e| {
                CoreError::ComputationError(crate::error::ErrorContext::new(format!(
                    "Failed to convert path to CString: {e}"
                )))
            })?;

            let mut stat: libc::statvfs = unsafe { mem::zeroed() };
            let result = unsafe { libc::statvfs(path_cstr.as_ptr(), &mut stat) };

            if result == 0 {
                // Blocks available to unprivileged users times fragment size.
                Ok(stat.f_bavail as u64 * stat.f_frsize as u64)
            } else {
                Err(CoreError::ComputationError(
                    crate::error::ErrorContext::new(
                        "Failed to get filesystem statistics".to_string(),
                    ),
                ))
            }
        }

        #[cfg(not(feature = "libc"))]
        {
            // No platform API available: report a fixed 10 GiB so callers
            // still receive a usable value.
            let _ = path;
            Ok(1024 * 1024 * 1024 * 10)
        }
    }
}
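
// A minimal usage sketch for rotation, not part of the original file. It
// assumes `AuditConfig` implements `Default` and that its `log_directory`
// field can be overwritten directly; both are assumptions for illustration.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rotation_opens_a_fresh_file_and_resets_size() {
        // Hypothetical setup: `AuditConfig::default()` is assumed here.
        let mut config = AuditConfig::default();
        config.log_directory = std::env::temp_dir().join("scirs2_audit_storage_test");

        let mut manager =
            LogFileManager::new(config).expect("log directory should be creatable");
        manager.rotate_log_file().expect("rotation should succeed");

        // A rotation leaves an open, empty file and bumps the counter.
        assert_eq!(manager.current_file_size, 0);
        assert_eq!(manager.file_counter, 1);
        assert!(manager.current_file.is_some());
    }
}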